Commit 6d1a6163 by 陈正乐

修改模型QA服务bug

parent 8ddfdec2
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import sys import sys
import time
from datetime import datetime
from langchain.chains import LLMChain from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate from langchain.prompts import PromptTemplate
from typing import Awaitable from typing import Awaitable
...@@ -41,7 +43,7 @@ class QA: ...@@ -41,7 +43,7 @@ class QA:
self.cur_question = "" self.cur_question = ""
# 一次性直接给出所有的答案 # 一次性直接给出所有的答案
def chat(self, *args): async def chat(self, *args):
self.cur_question = self.prompt.format(**{k: v for k, v in zip(self.prompt_kwargs, args)}) self.cur_question = self.prompt.format(**{k: v for k, v in zip(self.prompt_kwargs, args)})
self.cur_answer = "" self.cur_answer = ""
if not args: if not args:
...@@ -50,7 +52,7 @@ class QA: ...@@ -50,7 +52,7 @@ class QA:
return self.cur_answer return self.cur_answer
# 异步输出,逐渐输出答案 # 异步输出,逐渐输出答案
async def async_chat_stc(self, *args): async def async_chat(self, *args):
self.cur_question = self.prompt.format(**{k: v for k, v in zip(self.prompt_kwargs, args)}) self.cur_question = self.prompt.format(**{k: v for k, v in zip(self.prompt_kwargs, args)})
callback = AsyncIteratorCallbackHandler() callback = AsyncIteratorCallbackHandler()
async def wrap_done(fn: Awaitable, event: asyncio.Event): async def wrap_done(fn: Awaitable, event: asyncio.Event):
...@@ -69,7 +71,11 @@ class QA: ...@@ -69,7 +71,11 @@ class QA:
async for token in callback.aiter(): async for token in callback.aiter():
self.cur_answer = self.cur_answer + token self.cur_answer = self.cur_answer + token
yield f"{self.cur_answer}" yield f"{self.cur_answer}"
print(datetime.now())
await task await task
print('----------------',self.cur_question)
print('================',self.cur_answer)
print(datetime.now())
def get_history(self): def get_history(self):
...@@ -89,5 +95,7 @@ if __name__ == "__main__": ...@@ -89,5 +95,7 @@ if __name__ == "__main__":
base_llm = ChatERNIESerLLM( base_llm = ChatERNIESerLLM(
chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu")) chat_completion=ChatCompletion(ak="pT7sV1smp4AeDl0LjyZuHBV9", sk="b3N0ibo1IKTLZlSs7weZc8jdR0oHjyMu"))
my_chat = QA(PROMPT1, base_llm, {"temperature": 0.9}, ['context', 'question'], _db=c_db, _chat_id='2') my_chat = QA(PROMPT1, base_llm, {"temperature": 0.9}, ['context', 'question'], _db=c_db, _chat_id='2')
print(my_chat.chat("当别人想你说你好的时候,你也应该说你好", "你好")) print(my_chat.async_chat("当别人想你说你好的时候,你也应该说你好", "你好"))
my_chat.updata_history() my_chat.updata_history()
time.sleep(20)
print(my_chat.cur_answer)
# Gradio UI for generating QA pairs from a pasted document ("辅助生成知识库" =
# "knowledge-base generation assistant"). Builds the layout, then launches a
# queued server bound to a fixed LAN address.
# NOTE(review): importing mid-file deviates from top-of-file import convention.
import gradio as gr

with gr.Blocks() as demo:
    # Page title banner (kept as raw HTML so it can be centered).
    gr.HTML("""<h1 align="center">辅助生成知识库</h1>""")
    # with gr.Row():
    #     input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10)
    with gr.Row():
        # Document input box (placeholder: "enter the document to process");
        # scale=9 makes it take most of the row next to the model dropdown.
        input_text = gr.Textbox(show_label=True, placeholder="输入需要处理的文档...", lines=10, scale=9)
        # Backend model picker ("please select a model"); defaults to chatglm3.
        model_selector = gr.Dropdown(choices=["ernie", "chatglm3"], label="请选择一个模型", scale=1, min_width=50,
                                     value="chatglm3")
    with gr.Row():
        # Number of QA pairs to generate ("please select the number of questions").
        num_selector = gr.Slider(minimum=0, maximum=10, value=5, label="请选择问题数量", step=1)
    with gr.Row():
        # Trigger button ("generate QA pairs").
        # NOTE(review): no .click() handler is wired to this button in the visible
        # code — presumably it should invoke the QA-generation callback; confirm
        # whether the handler was lost in this diff or is defined elsewhere.
        qaBtn = gr.Button("QA问答对生成")

# queue() enables request queuing for concurrent users; server is pinned to a
# hard-coded LAN IP/port — TODO confirm this address is still correct per deployment.
demo.queue().launch(share=False, inbrowser=True,server_name="192.168.100.76",server_port=8888)
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.