Commit 8820b307 by tinywell

chore: Update agent tool descriptions and fix missing variable in chart tool

parent f51470c9
......@@ -17,12 +17,12 @@ from src.config.consts import (
class IssuanceArgs(BaseModel):
    """Argument schema for the `rag_query` tool.

    Describes the three inputs the agent must supply when querying the
    district/county-level hydrology-meteorology-geology knowledge base.
    """

    # The user's current question.
    question: str = Field(description="对话问题")
    # Serialized history of the prior conversation turns.
    history: str = Field(description="历史对话记录")
    # Administrative-division (district/county) names that scope the lookup.
    # The original had this field declared twice; the duplicate (a redundant
    # reassignment) is removed here.
    location: list = Field(description="行政区名称")
class RAGQuery(BaseTool):
name = "rag_query"
description = """这是一个区(县)级的水文气象地质知识库,当咨询区(县)的水文气象地质等相关信息的时候,你可以提供数据和资料。注意,这个查询只能获取单个区(县)的水文气象地质等相关信息,当需要查询省市的详细信息时,需要获取改省市下的具体行政规划,并获一一取具体的区(县)的水文气象地质等相关信息"""
description = """这是一个区(县)级的水文气象地质知识库,当咨询区(县)的水文气象地质等相关信息的时候,你可以提供数据和资料。注意,这个查询只能获取单个区(县)的水文气象地质等相关信息,当需要查询省市的详细信息时,需要获取改省市下的具体行政规划,并一一获取具体的区(县)的水文气象地质等相关信息。这个知识库中信息并不全面,有可能缺失。这个工具生成的结果要求保存到 action_cache 中。键值为 'rag_query',值为工具输出。形式为 {"rag_query":$RESULT}"""
args_schema: Type[BaseModel] = IssuanceArgs
rerank: Any # 替换 Any 为适当的类型
rerank_model: Any # 替换 Any 为适当的类型
......@@ -52,9 +52,9 @@ class RAGQuery(BaseTool):
# for l in split_str:
# split_list.append(l)
answer = self.db.find_like_doc(location)
print(len(answer))
print(len(answer) if answer else 0)
split_docs = []
for a in answer:
for a in answer if answer else []:
d = Document(page_content=a[0], metadata=json.loads(a[1]))
split_docs.append(d)
print(len(split_docs))
......@@ -67,12 +67,14 @@ class RAGQuery(BaseTool):
print(matches)
similarity = self.get_similarity_with_ext_origin(matches)
# cur_similarity = similarity.get_rerank(self.rerank_model)
cur_similarity = similarity.get_rerank_with_doc(self.rerank_model,split_docs)
print(cur_similarity)
# geo_result = "以下是详细的水文气象地质资料:"+cur_similarity+"\n 以下是原问题"+question
# cur_question = self.prompt.format(history=history, context=cur_similarity, question=question)
cur_answer = self.llm_chain.run(context=cur_similarity, question=question)
print(cur_answer)
return cur_answer
# cur_similarity = similarity.get_rerank_with_doc(self.rerank_model,split_docs)
docs = similarity.get_rerank_docs()
# print(cur_similarity)
# # geo_result = "以下是详细的水文气象地质资料:"+cur_similarity+"\n 以下是原问题"+question
# # cur_question = self.prompt.format(history=history, context=cur_similarity, question=question)
# cur_answer = self.llm_chain.run(context=cur_similarity, question=question)
# print(cur_answer)
# return cur_answer
return {"参考资料": docs, "原问题": question}
......@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field
from langchain_core.tools import BaseTool
class ChartArgs(BaseModel):
name: str = Field(..., description="图表名称,用于显示在图表上方")
title: str = Field(..., description="图表名称,用于显示在图表上方的字符串")
chart_type: str = Field(..., description="图表类型,如 line, bar, scatter, pie 等")
x: list = Field(..., description="x 轴数据,列表形式")
y: list = Field(..., description="y 轴数据,列表形式")
......@@ -12,7 +12,7 @@ class ChartArgs(BaseModel):
class Chart(BaseTool):
name = "chart"
description = "组装生成图表的中间数据"
description = "组装生成图表的中间数据。此工具生成的数据需要保存到 action_cache 中。键值为 'chart_data'。"
args_schema: Type[BaseModel] = ChartArgs
def _run(
......@@ -37,7 +37,7 @@ def chart_image(chart_data):
Args:
chart_data: dict 图表数据
{
"name": str, 图表名称
"title": str, 图表名称
"chart_type": str, 图表类型,如 line, bar, scatter, pie 等
"x": list, x 轴数据,列表形式
"y": list, y 轴数据,列表形式
......
......@@ -44,7 +44,7 @@ PROMPT_AGENT_CHART_SYS = """请尽量帮助人类并准确回答问题。您可
{tools}
使用 JSON 对象指定工具,提供一个 action 键(工具名称)和一个 action_input 键(工具输入), 以及 action_cache 键(必要时存储工具中间结果) 。
使用 JSON 对象指定工具,提供一个 action 键(工具名称)和一个 action_input 键(工具输入), 以及 action_cache 键(有些工具要求缓存其返回的中间结果) 。
有效的 "action" 值: "Final Answer" 或 {tool_names}
......@@ -53,7 +53,6 @@ PROMPT_AGENT_CHART_SYS = """请尽量帮助人类并准确回答问题。您可
{{
"action": $TOOL_NAME,
"action_input": $INPUT,
"action_cache": $CACHE
}}
```
......@@ -73,17 +72,29 @@ Action:
{{
"action": "Final Answer",
"action_input": "最终回复给人类",
"action_cache": "图表工具结果缓存"
"action_cache": "所有要求保存中间结果的工具操作结果汇总"
}}
```
开始!始终以有效的单个操作的 JSON 对象回复。如有必要,请使用工具,如果你知道答案,请直接回复。如果用户有生成图表的需求,请使用图表生成工具 {chart_tool},并将结果记录为 $CACHE 中。
你的回复格式为 Action:```$JSON_BLOB```然后 Observation
开始!始终以有效的单个操作的 JSON 对象回复。如有必要,请使用工具,并在最后一步按照工具要求将工具操作结果汇总到最后一个 Action 中的 action_cache。如果你知道答案,请直接回复。
你的回复格式为 Action:```$JSON_BLOB```然后 Observation,并在必要时将 Observation 结果更新到下一个 action_cache 中。
"""
PROMPT_AGENT_CHART_SYS_VARS = [ "tool_names", "tools"]
PROMPT_AGENT_HUMAN = """{input}\n\n{agent_scratchpad}\n (请注意,无论如何都要以 JSON 对象回复)"""
PROMPT_AGENT_CHAT_HUMAN = """历史纪录:
'''
{histories}
'''
问题:{input}
{agent_scratchpad}
(请注意,无论如何都要以 JSON 对象回复)"""
PROMPT_AGENT_CHAT_HUMAN_VARS = ["agent_scratchpad", "input", "histories"]
##################################################################################################################
# 结合历史对话信息,对用户提问进行扩展,生成不同角度的多个提问。用于 RAG 场景中
......
......@@ -81,7 +81,7 @@ def create_chart_agent(
) -> Runnable:
"""Create an agent aimed at supporting chart tools with multiple inputs.
"""
missing_vars = {"tools", "tool_names", "agent_scratchpad","chart_tool"}.difference(
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
......
import sys
sys.path.append('../')
from typing import List,Union
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor,create_structured_chat_agent
from langchain_core.prompts.chat import ChatPromptTemplate,HumanMessagePromptTemplate,SystemMessagePromptTemplate,MessagesPlaceholder
from langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain
import langchain_core
from src.pgdb.knowledge.similarity import VectorStore_FAISS
from src.server.get_similarity import QAExt
from src.server.agent import create_chart_agent
from src.pgdb.knowledge.k_db import PostgresDB
from src.pgdb.knowledge.txt_doc_table import TxtDoc
from langchain.chains import LLMChain
from src.agent.tool_divisions import AdministrativeDivision
from src.agent.rag_agent import RAGQuery
from src.config.consts import (
......@@ -24,6 +30,9 @@ from src.config.consts import (
prompt_enhancement_history_template,
prompt1
)
from src.config.prompts import PROMPT_AGENT_CHART_SYS_VARS,PROMPT_AGENT_CHART_SYS,PROMPT_AGENT_CHAT_HUMAN,PROMPT_AGENT_CHAT_HUMAN_VARS
base_llm = ChatOpenAI(
openai_api_key='xxxxxxxxxxxxx',
openai_api_base='http://192.168.10.14:8000/v1',
......@@ -48,25 +57,31 @@ k_db.connect()
# LLM chain used by the RAG tool; temperature pinned to 0 for deterministic answers.
llm_chain = LLMChain(llm=base_llm, prompt=PromptTemplate(input_variables=["history","context", "question"], template=prompt1), llm_kwargs= {"temperature": 0})
# NOTE(review): `tool_rag` is never added to `tools` below — a second,
# identical RAGQuery instance is constructed instead. Presumably one of the
# two was meant to be reused; confirm and deduplicate.
tool_rag = RAGQuery(vecstore_faiss,ext,PromptTemplate(input_variables=["history","context", "question"], template=prompt_enhancement_history_template),_db=TxtDoc(k_db),_llm_chain=llm_chain)
# Tools exposed to the agent: administrative-division lookup + RAG knowledge-base query.
tools = [AdministrativeDivision(),RAGQuery(vecstore_faiss,ext,PromptTemplate(input_variables=["history","context", "question"], template=prompt_enhancement_history_template),_db=TxtDoc(k_db),_llm_chain=llm_chain)]
# Build the prompt's expected input variables from the shared template
# constants so this list stays in sync with PROMPT_AGENT_CHAT_HUMAN and
# PROMPT_AGENT_CHART_SYS. (The old hard-coded list was a dead store that was
# immediately overwritten and has been dropped.)
input_variables = list(PROMPT_AGENT_CHAT_HUMAN_VARS) + list(PROMPT_AGENT_CHART_SYS_VARS)
# Accepted message types for the optional `chat_history` placeholder below.
input_types={'chat_history': List[Union[langchain_core.messages.ai.AIMessage, langchain_core.messages.human.HumanMessage, langchain_core.messages.chat.ChatMessage, langchain_core.messages.system.SystemMessage, langchain_core.messages.function.FunctionMessage, langchain_core.messages.tool.ToolMessage]]}
# Provenance of the upstream hub prompt this template was derived from;
# currently informational only (not passed to ChatPromptTemplate).
metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'structured-chat-agent', 'lc_hub_commit_hash': 'ea510f70a5872eb0f41a4e3b7bb004d5711dc127adee08329c664c6c8be5f13c'}
# Assemble the chat prompt: the custom chart-agent system prompt, an optional
# chat-history placeholder, and the history-aware human prompt.
#
# Diff residue removed here: the old hard-coded English structured-chat system
# template and the old `{input}\n\n{agent_scratchpad}` human template were
# left in the list uncommented (the latter without a trailing comma, which was
# a SyntaxError), and `metadata=metadata,` was passed alongside its
# commented-out replacement. Only the intended new versions are kept.
messages = [
    SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=PROMPT_AGENT_CHART_SYS_VARS, template=PROMPT_AGENT_CHART_SYS)),
    MessagesPlaceholder(variable_name='chat_history', optional=True),
    HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=PROMPT_AGENT_CHAT_HUMAN_VARS, template=PROMPT_AGENT_CHAT_HUMAN)),
]

prompt = ChatPromptTemplate(
    input_variables=input_variables,
    input_types=input_types,
    # metadata=metadata,  # hub metadata no longer applies to the customized prompt
    messages=messages,
)
# Build the chart-aware agent. The previous create_structured_chat_agent call
# (left in by the diff, its result immediately overwritten) is removed.
# agent = create_structured_chat_agent(llm=base_llm, tools=tools, prompt=prompt)
agent = create_chart_agent(base_llm, tools, prompt, chart_tool="chart")

# handle_parsing_errors=True lets the executor feed malformed JSON replies
# back to the model instead of raising.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)

history = []
h1 = []
......@@ -81,7 +96,8 @@ prompt = ""
# Flatten prior Q/A turns into a plain-text transcript for debugging output.
# NOTE(review): this reuses the name `prompt` — the surrounding file resets it
# with `prompt = ""` just before this loop (visible in the diff context);
# confirm that reset is present, otherwise this clobbers the ChatPromptTemplate.
for h in history:
    prompt += "问:{}\n答:{}\n".format(h[0], h[1])
print(prompt)

# Invoke once with structured inputs; history is passed via the `histories`
# prompt variable rather than concatenated into `input`. (The old
# concatenated-invoke line left in by the diff would have run the agent twice
# and has been removed.)
res = agent_executor.invoke({"input": "攸县、长沙县、化隆县和大通县谁的年平均降雨量大", "histories": history})
print("====== result: ======")
print(res)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment