Commit 6d1a6163, authored Apr 28, 2024 by 陈正乐
Fix bugs in the model QA service (修改模型QA服务bug)
parent 8ddfdec2
Showing 2 changed files with 11 additions and 19 deletions (+11 −19):
src/server/qa.py (+11 −3)
test/gradio_test.py (+0 −16)
src/server/qa.py @ 6d1a6163
# -*- coding: utf-8 -*-
import sys
import time
from datetime import datetime
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from typing import Awaitable
...
@@ -41,7 +43,7 @@ class QA:
         self.cur_question = ""

     # 一次性直接给出所有的答案 (return the complete answer in one call)
-    def chat(self, *args):
+    async def chat(self, *args):
         self.cur_question = self.prompt.format(**{k: v for k, v in zip(self.prompt_kwargs, args)})
         self.cur_answer = ""
         if not args:
...
@@ -50,7 +52,7 @@ class QA:
         return self.cur_answer

     # 异步输出,逐渐输出答案 (asynchronously stream the answer token by token)
-    async def async_chat_stc(self, *args):
+    async def async_chat(self, *args):
         self.cur_question = self.prompt.format(**{k: v for k, v in zip(self.prompt_kwargs, args)})
         callback = AsyncIteratorCallbackHandler()

         async def wrap_done(fn: Awaitable, event: asyncio.Event):
...
@@ -69,7 +71,11 @@ class QA:
         async for token in callback.aiter():
             self.cur_answer = self.cur_answer + token
             yield f"{self.cur_answer}"
+        print(datetime.now())
         await task
+        print('----------------', self.cur_question)
+        print('================', self.cur_answer)
+        print(datetime.now())

     def get_history(self):
...
@@ -89,5 +95,7 @@ if __name__ == "__main__":
     base_llm = ChatERNIESerLLM(chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
     my_chat = QA(PROMPT1, base_llm, {"temperature": 0.9}, ['context', 'question'], _db=c_db, _chat_id='2')
-    print(my_chat.chat("当别人想你说你好的时候,你也应该说你好", "你好"))
+    print(my_chat.async_chat("当别人想你说你好的时候,你也应该说你好", "你好"))
     my_chat.updata_history()
     time.sleep(20)
     print(my_chat.cur_answer)
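A note on the updated __main__ block: async_chat is an async generator, so print(my_chat.async_chat(...)) only prints the generator object; nothing runs until the generator is iterated. The same applies to chat, which this commit also made async. A minimal consumption sketch, assuming my_chat as constructed above and no event loop already running (this driver is not part of the commit):

import asyncio

async def consume():
    # Each yield from async_chat carries the answer accumulated so far,
    # so only the final value needs to be printed.
    answer = ""
    async for partial in my_chat.async_chat("当别人想你说你好的时候,你也应该说你好", "你好"):
        answer = partial
    print(answer)

asyncio.run(consume())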
test/gradio_test.py (deleted, 100644 → 0) @ 8ddfdec2
import gradio as gr

with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">辅助生成知识库</h1>""")
    # with gr.Row():
    #     input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10)
    with gr.Row():
        input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9)
        model_selector = gr.Dropdown(choices=["ernie", "chatglm3"], label="请选择一个模型", scale=1, min_width=50, value="chatglm3")
    with gr.Row():
        num_selector = gr.Slider(minimum=0, maximum=10, value=5, label="请选择问题数量", step=1)
    with gr.Row():
        qaBtn = gr.Button("QA问答对生成")

demo.queue().launch(share=False, inbrowser=True, server_name="192.168.100.76", server_port=8888)
\ No newline at end of file
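The deleted scaffold declared its widgets but never bound a callback to qaBtn, so the button did nothing. For illustration only, wiring a handler under the same gradio Blocks API would look roughly like the sketch below; gen_qa_pairs and output_box are hypothetical stand-ins, since the real QA-pair generation logic is not part of this commit:

import gradio as gr

def gen_qa_pairs(doc: str, model: str, num: int) -> str:
    # Hypothetical placeholder for the actual QA-pair generator.
    return f"[{model}] would generate {num} QA pairs from a {len(doc)}-char document"

with gr.Blocks() as demo:
    input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10)
    model_selector = gr.Dropdown(choices=["ernie", "chatglm3"], label="请选择一个模型", value="chatglm3")
    num_selector = gr.Slider(minimum=0, maximum=10, value=5, label="请选择问题数量", step=1)
    output_box = gr.Textbox(label="生成结果")  # hypothetical output widget
    qaBtn = gr.Button("QA问答对生成")
    qaBtn.click(gen_qa_pairs, inputs=[input_text, model_selector, num_selector], outputs=output_box)

demo.queue().launch(share=False, inbrowser=True)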