import random
from typing import List, Type, Union

from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain_core.messages import AIMessage, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

class IssuanceArgs(BaseModel):
    location: str = Field(description="Name of the administrative district (county)")
    type: List[str] = Field(description="Question types, e.g. hydrology, meteorology, geology")


class GeoQuery(BaseTool):
    name: str = "geo_query"
    description: str = (
        "A district (county) level knowledge base for hydrology, meteorology, and geology. "
        "Use it when asked for hydrological, meteorological, or geological information about a "
        "district (county). Note: each query can only return information for a single district (county)."
    )
    args_schema: Type[BaseModel] = IssuanceArgs

    def _run(self, location: str, type: List[str]) -> str:
        # Log the arguments the agent passed in.
        print(location)
        print(type)
        # Mock lookup: a random precipitation value plus a fixed hazard statement.
        n = random.randint(0, 100)
        print(n)
        mock_str = "Average precipitation is " + str(n) + " mm; no unusual geological hazards occurred."
        return mock_str


base_llm = ChatOpenAI(
    openai_api_key='xxxxxxxxxxxxx',
    openai_api_base='http://192.168.10.14:8000/v1',
    model_name='Qwen2-7B',
    verbose=True,
    temperature=0
)

tools = [GeoQuery()]
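
# Optional sanity check of the tool outside the agent loop (a sketch, assuming a recent
# langchain_core where BaseTool.invoke accepts a dict of tool arguments):
# print(tools[0].invoke({"location": "Xining", "type": ["hydrology"]}))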

# Local reconstruction of the hwchase17/structured-chat-agent prompt from LangChain Hub.
input_variables = ['agent_scratchpad', 'input', 'tool_names', 'tools']
input_types = {'chat_history': List[Union[AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage]]}
metadata = {'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'structured-chat-agent', 'lc_hub_commit_hash': 'ea510f70a5872eb0f41a4e3b7bb004d5711dc127adee08329c664c6c8be5f13c'}
messages = [
  SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=['tool_names', 'tools'], template='Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n\n{tools}\n\nUse a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{\n  "action": $TOOL_NAME,\n  "action_input": $INPUT\n}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{\n  "action": "Final Answer",\n  "action_input": "Final response to human"\n}}\n\nBegin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation')),
  MessagesPlaceholder(variable_name='chat_history', optional=True),
  HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['agent_scratchpad', 'input'], template='{input}\n\n{agent_scratchpad}\n (reminder to respond in a JSON blob no matter what)'))
]

prompt = ChatPromptTemplate(
    input_variables=input_variables,
    input_types=input_types,
    metadata=metadata,
    messages=messages
)
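
# Alternative (a sketch, assuming the optional `langchainhub` package is installed):
# the same prompt can be pulled from LangChain Hub instead of being rebuilt by hand.
# from langchain import hub
# prompt = hub.pull("hwchase17/structured-chat-agent")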


agent = create_structured_chat_agent(llm=base_llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
res = agent_executor.invoke({"input": "What is the average precipitation in Xining?"})
# res = agent_executor.invoke({"input": "What is the precipitation in Hualong County, Haidong?"})

print("====== result: ======")
print(res)
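
# The executor returns a dict; the agent's final answer is under the "output" key.
print(res["output"])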