Commit 5132365d by 文靖昊

Switch the web service to multiple agents

parent b886ea6b
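This commit splits the web service's single structured-chat agent into two agents run in sequence: a new "administrative" agent that owns the AdministrativeDivision tool and runs first to rewrite the incoming question, and the existing RAG/chart agent, which now answers the rewritten question. A minimal sketch of the resulting flow, assuming the executors wired up in the diff below (the example question is hypothetical, and the exact behavior of the pre-agent depends on PROMPT_AGENT_EXTEND_SYS):

# Two-stage pipeline introduced by this commit (sketch only)
history = []  # prior (question, answer) turns, as passed by the endpoints
pre = administrative_agent_executor.invoke({"input": "What is the population of Chengdu?", "histories": history})
new_question = pre["output"]  # presumably the question with administrative-division references resolved
res = agent_executor.invoke({"input": new_question, "histories": history})
print(res["output"])  # final answer from the RAG/chart agent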
@@ -47,7 +47,7 @@ from src.config.consts import (
prompt_enhancement_history_template,
prompt1
)
-from src.config.prompts import PROMPT_AGENT_SYS_VARS,PROMPT_AGENT_SYS,PROMPT_AGENT_CHAT_HUMAN,PROMPT_AGENT_CHAT_HUMAN_VARS
+from src.config.prompts import PROMPT_AGENT_SYS_VARS,PROMPT_AGENT_SYS,PROMPT_AGENT_CHAT_HUMAN,PROMPT_AGENT_CHAT_HUMAN_VARS,PROMPT_AGENT_EXTEND_SYS
app = FastAPI()
@@ -85,9 +85,7 @@ base_llm = ChatOpenAI(
ext = QAExt(base_llm)
llm_chain = LLMChain(llm=base_llm, prompt=PromptTemplate(input_variables=["history","context", "question"], template=prompt1), llm_kwargs= {"temperature": 0})
tool_rag = RAGQuery(vecstore_faiss,ext,PromptTemplate(input_variables=["history","context", "question"], template=prompt_enhancement_history_template),_db=TxtDoc(k_db),_llm_chain=llm_chain)
-tools = [AdministrativeDivision(),RAGQuery(vecstore_faiss,ext,PromptTemplate(input_variables=["history","context", "question"], template=prompt_enhancement_history_template),_db=TxtDoc(k_db),_llm_chain=llm_chain)]
+tools = [RAGQuery(vecstore_faiss,ext,PromptTemplate(input_variables=["history","context", "question"], template=prompt_enhancement_history_template),_db=TxtDoc(k_db),_llm_chain=llm_chain)]
# input_variables=['agent_scratchpad', 'input', 'tool_names', 'tools','chart_tool']
input_variables=[]
input_variables.extend(PROMPT_AGENT_CHAT_HUMAN_VARS)
@@ -113,6 +111,23 @@ prompt = ChatPromptTemplate(
agent = create_chart_agent(base_llm, tools, prompt, chart_tool="chart")
agent_executor = AgentExecutor(agent=agent, tools=tools,verbose=True,handle_parsing_errors=True,return_intermediate_steps=True)
+administrative_messages=[
+# SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['tool_names', 'tools'], template='Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n\n{tools}\n\nUse a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}\n\nBegin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation')),
+SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=PROMPT_AGENT_SYS_VARS, template=PROMPT_AGENT_EXTEND_SYS)),
+MessagesPlaceholder(variable_name='chat_history', optional=True),
+# HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['agent_scratchpad', 'input'], template='{input}\n\n{agent_scratchpad}\n (reminder to respond in a JSON blob no matter what)'))
+HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=PROMPT_AGENT_CHAT_HUMAN_VARS, template=PROMPT_AGENT_CHAT_HUMAN))
+]
+administrative_prompt = ChatPromptTemplate(
+input_variables=input_variables,
+input_types=input_types,
+# metadata=metadata,
+messages=administrative_messages
+)
+AdministrativeTools =[AdministrativeDivision()]
+administrative_agent = create_chart_agent(base_llm, AdministrativeTools, administrative_prompt, chart_tool="chart")
+administrative_agent_executor = AgentExecutor(agent=administrative_agent, tools=AdministrativeTools,verbose=True,handle_parsing_errors=True,return_intermediate_steps=True)
# my_chat = QA(PromptTemplate(input_variables=["history","context", "question"], template=prompt_enhancement_history_template), base_llm,
# {"temperature": 0.9}, ['history','context', 'question'], _db=c_db, _faiss_db=vecstore_faiss,rerank=True)
@@ -219,8 +234,10 @@ def question(chat_request: ChatRequest, token: str = Header(None)):
# prompt = ""
# for h in history:
# prompt += "问:{}\n答:{}\n\n".format(h[0], h[1])
+res_a = administrative_agent_executor.invoke({"input": question, "histories": history})
+new_question = res_a['output']
-res = agent_executor.invoke({"input": question, "histories": history})
+res = agent_executor.invoke({"input": new_question, "histories": history})
answer = res["output"]
# answer, docs = my_chat.chat_with_history_with_ext(question,ext=matches,history=prompt, with_similarity=True)
@@ -280,7 +297,10 @@ def re_generate(chat_request: ReGenerateRequest, token: str = Header(None)):
# prompt += "问:{}\n答:{}\n\n".format(h[0], h[1])
# answer, docs = my_chat.chat_with_history_with_ext(question,ext=matches, history=prompt, with_similarity=True)
# docs_json = []
-res = agent_executor.invoke({"input": question, "histories": history})
+res_a = administrative_agent_executor.invoke({"input": question, "histories": history})
+new_question = res_a['output']
+res = agent_executor.invoke({"input": new_question, "histories": history})
answer = res["output"]
docs_json = []
for step in res["intermediate_steps"]:
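Both question() and re_generate() now repeat the same two-step invoke, so the shared flow could read as one helper; a sketch assuming the executors defined above (ask_multi_agent is a name invented here, not part of the diff):

def ask_multi_agent(question: str, history) -> dict:
    # Run the administrative pre-agent, then answer with the RAG/chart agent.
    pre = administrative_agent_executor.invoke({"input": question, "histories": history})
    return agent_executor.invoke({"input": pre["output"], "histories": history})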