Commit d8665aff by 文靖昊

Implement the related agents

parent 8d484240
from langchain_core.tools import tool, BaseTool
from pydantic import BaseModel, Field
from typing import Type, Any, List, Union
import re
from src.server.get_similarity import GetSimilarityWithExt
from langchain_openai import ChatOpenAI
from langchain.agents import initialize_agent, Tool, AgentType, AgentExecutor, create_structured_chat_agent, create_react_agent, create_openai_functions_agent
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder
from langchain_core.prompts import PromptTemplate
import langchain_core
from src.pgdb.knowledge.similarity import VectorStore_FAISS
from src.server.get_similarity import QAExt
from src.server.rerank import BgeRerank
from src.config.consts import (
    RERANK_MODEL_PATH,
    CHAT_DB_USER,
    CHAT_DB_HOST,
    CHAT_DB_PORT,
    CHAT_DB_DBNAME,
    CHAT_DB_PASSWORD,
    EMBEEDING_MODEL_PATH,
    FAISS_STORE_PATH,
    INDEX_NAME,
    VEC_DB_HOST,
    VEC_DB_PASSWORD,
    VEC_DB_PORT,
    VEC_DB_USER,
    VEC_DB_DBNAME,
    SIMILARITY_SHOW_NUMBER,
    prompt_enhancement_history_template
)


class IssuanceArgs(BaseModel):
    question: str = Field(description="对话问题")
    history: list = Field(description="历史对话记录")


class RAGQuery(BaseTool):
    name: str = "rag_query"
    description: str = (
        "Query the geological information of corresponding provinces, cities, and counties. "
        "Users can query geological information related to specific provinces, cities, and counties."
    )
    args_schema: Type[BaseModel] = IssuanceArgs
    rerank: Any        # TODO: replace Any with the concrete query-extension type
    rerank_model: Any  # TODO: replace Any with the concrete reranker type
    faiss_db: Any      # TODO: replace Any with the concrete vector-store type
    prompt: Any        # a PromptTemplate; formatted in _run

    def __init__(self, _faiss_db, _rerank, _prompt):
        super().__init__()
        self.rerank = _rerank
        self.rerank_model = BgeRerank(RERANK_MODEL_PATH)
        self.faiss_db = _faiss_db
        self.prompt = _prompt

    def get_similarity_with_ext_origin(self, _ext):
        return GetSimilarityWithExt(_question=_ext, _faiss_db=self.faiss_db)

    def _run(self, question: str, history: list) -> str:
        # Expand the question with the chat history; keep at most three rewritten queries.
        result = self.rerank.extend_query(question, history)
        matches = re.findall(r'"([^"]+)"', result.content)
        if len(matches) > 3:
            matches = matches[:3]
        print(matches)
        # Rebuild the history as a question/answer transcript for the final prompt.
        prompt = ""
        for h in history:
            prompt += "问:{}\n答:{}\n\n".format(h[0], h[1])
        # Retrieve candidates for the rewritten queries, rerank them, and fill the
        # enhanced prompt that the agent will answer from.
        similarity = self.get_similarity_with_ext_origin(matches)
        cur_similarity = similarity.get_rerank(self.rerank_model)
        cur_question = self.prompt.format(history=prompt, context=cur_similarity, question=question)
        return cur_question


base_llm = ChatOpenAI(
    openai_api_key='xxxxxxxxxxxxx',
    openai_api_base='http://192.168.10.14:8000/v1',
    model_name='Qwen2-7B',
    verbose=True,
    temperature=0
)

vecstore_faiss = VectorStore_FAISS(
    embedding_model_name=EMBEEDING_MODEL_PATH,
    store_path=FAISS_STORE_PATH,
    index_name=INDEX_NAME,
    info={"port": VEC_DB_PORT, "host": VEC_DB_HOST, "dbname": VEC_DB_DBNAME, "username": VEC_DB_USER,
          "password": VEC_DB_PASSWORD},
    show_number=SIMILARITY_SHOW_NUMBER,
    reset=False)

ext = QAExt(base_llm)

tools = [
    RAGQuery(
        vecstore_faiss,
        ext,
        PromptTemplate(
            input_variables=["history", "context", "question"],
            template=prompt_enhancement_history_template
        )
    )
]
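
# Optional sketch: the RAG tool can be called directly to inspect the enriched prompt
# it returns to the agent (empty history, sample query reused from below):
# print(tools[0]._run("西宁市大通县水文情况", history=[]))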

input_variables=['agent_scratchpad', 'input', 'tool_names', 'tools']
input_types={'chat_history': List[Union[
    langchain_core.messages.ai.AIMessage,
    langchain_core.messages.human.HumanMessage,
    langchain_core.messages.chat.ChatMessage,
    langchain_core.messages.system.SystemMessage,
    langchain_core.messages.function.FunctionMessage,
    langchain_core.messages.tool.ToolMessage
]]}
metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'structured-chat-agent',
          'lc_hub_commit_hash': 'ea510f70a5872eb0f41a4e3b7bb004d5711dc127adee08329c664c6c8be5f13c'}
messages=[
SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['tool_names', 'tools'], template='Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n\n{tools}\n\nUse a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}\n\nBegin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation')),
MessagesPlaceholder(variable_name='chat_history', optional=True),
HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['agent_scratchpad', 'input'], template='{input}\n\n{agent_scratchpad}\n (reminder to respond in a JSON blob no matter what)'))
]
prompt = ChatPromptTemplate(
    input_variables=input_variables,
    input_types=input_types,
    metadata=metadata,
    messages=messages
)
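
# Note: this hand-built prompt mirrors hwchase17/structured-chat-agent from the LangChain
# hub (see the metadata above); with hub access it could be pulled directly instead:
# from langchain import hub
# prompt = hub.pull("hwchase17/structured-chat-agent")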
agent = create_structured_chat_agent(llm=base_llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
res = agent_executor.invoke({"input": "西宁市大通县水文情况"})
print("====== result: ======")
print(res)

from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_openai import ChatOpenAI
from langchain_community.utilities import SQLDatabase
from langchain.agents import initialize_agent, Tool, AgentType, AgentExecutor, create_structured_chat_agent, create_react_agent, create_openai_functions_agent
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, MessagesPlaceholder
from langchain_core.prompts import PromptTemplate
import langchain_core
from typing import Type, Any, List, Union

base_llm = ChatOpenAI(
    openai_api_key='xxxxxxxxxxxxx',
    openai_api_base='http://192.168.10.14:8000/v1',
    model_name='Qwen2-7B',
    verbose=True,
    temperature=0
)
db = SQLDatabase.from_uri("postgresql://postgres:111111@192.168.10.189:5433/lae")
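# Optional sketch (hypothetical table name): with a large schema, the table-info prompt
# can be trimmed when building the SQLDatabase, e.g.
# db = SQLDatabase.from_uri(
#     "postgresql://postgres:111111@192.168.10.189:5433/lae",
#     include_tables=["geology_info"],     # hypothetical table
#     sample_rows_in_table_info=2,
# )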
toolkit = SQLDatabaseToolkit(llm=base_llm, db=db)
tools = toolkit.get_tools()
print(tools)
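
# Sketch: print each SQL tool by name (list tables, fetch schema, check queries, run
# queries); the exact tool names depend on the installed langchain_community version.
for t in tools:
    print(t.name, ":", t.description)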

input_variables=['agent_scratchpad', 'input', 'tool_names', 'tools']
input_types={'chat_history': List[Union[
    langchain_core.messages.ai.AIMessage,
    langchain_core.messages.human.HumanMessage,
    langchain_core.messages.chat.ChatMessage,
    langchain_core.messages.system.SystemMessage,
    langchain_core.messages.function.FunctionMessage,
    langchain_core.messages.tool.ToolMessage
]]}
metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'structured-chat-agent',
          'lc_hub_commit_hash': 'ea510f70a5872eb0f41a4e3b7bb004d5711dc127adee08329c664c6c8be5f13c'}
messages=[
SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['tool_names', 'tools'], template='Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n\n{tools}\n\nUse a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{\n "action": "Final Answer",\n "action_input": "Final response to human"\n}}\n\nBegin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation')),
MessagesPlaceholder(variable_name='chat_history', optional=True),
HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['agent_scratchpad', 'input'], template='{input}\n\n{agent_scratchpad}\n (reminder to respond in a JSON blob no matter what)'))
]
prompt = ChatPromptTemplate(
    input_variables=input_variables,
    input_types=input_types,
    metadata=metadata,
    messages=messages
)
print(prompt)
agent = create_structured_chat_agent(llm=base_llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
res = agent_executor.invoke({"input": "武汉有什么好玩的景点"})
print("====== result: ======")
print(res)