"""Module implements an agent that uses OpenAI's function-enabled API."""

from typing import Any, List, Optional, Sequence, Tuple, Type, Union

from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
    BaseMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from pydantic import model_validator
from typing_extensions import Self

from langchain.agents import BaseSingleActionAgent
from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,
)
from langchain.agents.output_parsers.openai_functions import (
    OpenAIFunctionsAgentOutputParser,
)


@deprecated("0.1.0", alternative="create_openai_functions_agent", removal="1.0")
class OpenAIFunctionsAgent(BaseSingleActionAgent):
    """An Agent driven by OpenAI's function-powered API.

    Args:
        llm: This should be an instance of ChatOpenAI, specifically a model
            that supports using `functions`.
        tools: The tools this agent has access to.
        prompt: The prompt for this agent, should support agent_scratchpad as one
            of the variables. For an easy way to construct this prompt, use
            `OpenAIFunctionsAgent.create_prompt(...)`.
        output_parser: The output parser for this agent. Should be an instance of
            OpenAIFunctionsAgentOutputParser.
            Defaults to OpenAIFunctionsAgentOutputParser.
    """

    llm: BaseLanguageModel
    tools: Sequence[BaseTool]
    prompt: BasePromptTemplate
    output_parser: Type[OpenAIFunctionsAgentOutputParser] = (
        OpenAIFunctionsAgentOutputParser
    )

    def get_allowed_tools(self) -> List[str]:
        """Get allowed tools."""
        return [t.name for t in self.tools]

    @model_validator(mode="after")
    def validate_prompt(self) -> Self:
        """Validate the prompt.

        Returns:
            The validated agent instance.

        Raises:
            ValueError: If `agent_scratchpad` is not in the prompt.
        """
        prompt = self.prompt
        if "agent_scratchpad" not in prompt.input_variables:
            raise ValueError(
                "`agent_scratchpad` should be one of the variables in the prompt, "
                f"got {prompt.input_variables}"
            )
        return self

    @property
    def input_keys(self) -> List[str]:
        """Get input keys. Input refers to user input here."""
        return ["input"]

    @property
    def functions(self) -> List[dict]:
        """Get the OpenAI function descriptions for the agent's tools."""
        return [dict(convert_to_openai_function(t)) for t in self.tools]

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        with_functions: bool = True,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to use. Defaults to None.
            with_functions: Whether to use functions. Defaults to True.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
            If the agent is finished, returns an AgentFinish.
            If the agent is not finished, returns an AgentAction.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        selected_inputs = {
            k: kwargs[k]
            for k in self.prompt.input_variables
            if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        if with_functions:
            predicted_message = self.llm.predict_messages(
                messages,
                functions=self.functions,
                callbacks=callbacks,
            )
        else:
            predicted_message = self.llm.predict_messages(
                messages,
                callbacks=callbacks,
            )
        agent_decision = self.output_parser._parse_ai_message(predicted_message)
        return agent_decision

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Async given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to use. Defaults to None.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
            If the agent is finished, returns an AgentFinish.
            If the agent is not finished, returns an AgentAction.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        selected_inputs = {
            k: kwargs[k]
            for k in self.prompt.input_variables
            if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        predicted_message = await self.llm.apredict_messages(
            messages, functions=self.functions, callbacks=callbacks
        )
        agent_decision = self.output_parser._parse_ai_message(predicted_message)
        return agent_decision

    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: List[Tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> AgentFinish:
        """Return response when the agent has been stopped due to max iterations.

        Args:
            early_stopping_method: The early stopping method to use.
            intermediate_steps: Intermediate steps.
            **kwargs: User inputs.

        Returns:
            AgentFinish.

        Raises:
            ValueError: If `early_stopping_method` is not `force` or `generate`.
            ValueError: If `generate` is used and the final pass does not produce
                an AgentFinish.
        """
        if early_stopping_method == "force":
            # `force` just returns a constant string.
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."}, ""
            )
        elif early_stopping_method == "generate":
            # `generate` does one final forward pass without functions so the
            # model is forced to produce a plain-text answer.
            agent_decision = self.plan(
                intermediate_steps, with_functions=False, **kwargs
            )
            if isinstance(agent_decision, AgentFinish):
                return agent_decision
            else:
                raise ValueError(
                    f"got AgentAction with no functions provided: {agent_decision}"
                )
        else:
            raise ValueError(
                "early_stopping_method should be one of `force` or `generate`, "
                f"got {early_stopping_method}"
            )

    @classmethod
    def create_prompt(
        cls,
        system_message: Optional[SystemMessage] = SystemMessage(
            content="You are a helpful AI assistant."
        ),
        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
    ) -> ChatPromptTemplate:
        """Create a prompt for this agent.

        Args:
            system_message: Message to use as the system message that will be the
                first in the prompt.
            extra_prompt_messages: Prompt messages that will be placed between the
                system message and the new human input.

        Returns:
            A prompt template to pass into this agent.
        """
        _prompts = extra_prompt_messages or []
        messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
        if system_message:
            messages = [system_message]
        else:
            messages = []
        messages.extend(
            [
                *_prompts,
                HumanMessagePromptTemplate.from_template("{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        return ChatPromptTemplate(messages=messages)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
        system_message: Optional[SystemMessage] = SystemMessage(
            content="You are a helpful AI assistant."
        ),
        **kwargs: Any,
    ) -> BaseSingleActionAgent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: The LLM to use as the agent.
            tools: The tools to use.
            callback_manager: The callback manager to use. Defaults to None.
            extra_prompt_messages: Extra prompt messages to use. Defaults to None.
            system_message: The system message to use.
                Defaults to a default system message.
            kwargs: Additional parameters to pass to the agent.
        """
        prompt = cls.create_prompt(
            extra_prompt_messages=extra_prompt_messages,
            system_message=system_message,
        )
        return cls(
            llm=llm,
            prompt=prompt,
            tools=tools,
            callback_manager=callback_manager,
            **kwargs,
        )


def create_openai_functions_agent(
    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
) -> Runnable:
    """Create an agent that uses OpenAI function calling.

    Args:
        llm: LLM to use as the agent. Should work with OpenAI function calling,
            so either be an OpenAI model that supports that or a wrapper of
            a different model that adds in equivalent support.
        tools: Tools this agent has access to.
        prompt: The prompt to use. See Prompt section below for more.

    Returns:
        A Runnable sequence representing an agent. It takes as input all the same
            input variables as the prompt passed in does. It returns as output
            either an AgentAction or AgentFinish.

    Raises:
        ValueError: If `agent_scratchpad` is not in the prompt.

    Example:

        Creating an agent with no memory

        .. code-block:: python

            from langchain_community.chat_models import ChatOpenAI
            from langchain.agents import AgentExecutor, create_openai_functions_agent
            from langchain import hub

            prompt = hub.pull("hwchase17/openai-functions-agent")
            model = ChatOpenAI()
            tools = ...

            agent = create_openai_functions_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)

            agent_executor.invoke({"input": "hi"})

            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )

    Prompt:

        The agent prompt must have an `agent_scratchpad` key that is a
            ``MessagesPlaceholder``. Intermediate agent actions and tool output
            messages will be passed in here.

        Here's an example:

        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are a helpful assistant"),
                    MessagesPlaceholder("chat_history", optional=True),
                    ("human", "{input}"),
                    MessagesPlaceholder("agent_scratchpad"),
                ]
            )
    """
    if "agent_scratchpad" not in (
        prompt.input_variables + list(prompt.partial_variables)
    ):
        raise ValueError(
            "Prompt must have input variable `agent_scratchpad`, but wasn't found. "
            f"Found {prompt.input_variables} instead."
        )
    llm_with_tools = llm.bind(
        functions=[convert_to_openai_function(t) for t in tools]
    )
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_to_openai_function_messages(
                x["intermediate_steps"]
            )
        )
        | prompt
        | llm_with_tools
        | OpenAIFunctionsAgentOutputParser()
    )
    return agent
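

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module).
# It shows the recommended `create_openai_functions_agent` path next to the
# legacy, deprecated `OpenAIFunctionsAgent` class. Assumptions: an OpenAI API
# key is configured in the environment, `langchain_community` is installed,
# and `get_word_length` is a made-up demo tool.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from langchain_community.chat_models import ChatOpenAI
    from langchain_core.tools import tool

    from langchain.agents import AgentExecutor

    @tool
    def get_word_length(word: str) -> int:
        """Return the number of characters in a word."""
        return len(word)

    demo_tools = [get_word_length]
    demo_llm = ChatOpenAI(temperature=0)

    # Recommended path: build a runnable agent that uses OpenAI function calling.
    demo_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant"),
            ("human", "{input}"),
            MessagesPlaceholder("agent_scratchpad"),
        ]
    )
    agent = create_openai_functions_agent(demo_llm, demo_tools, demo_prompt)
    executor = AgentExecutor(agent=agent, tools=demo_tools)
    print(executor.invoke({"input": "How many letters are in 'langchain'?"}))

    # Legacy path (deprecated since 0.1.0): construct the agent class directly.
    legacy_agent = OpenAIFunctionsAgent.from_llm_and_tools(demo_llm, demo_tools)
    legacy_executor = AgentExecutor(agent=legacy_agent, tools=demo_tools)
    print(legacy_executor.invoke({"input": "How many letters are in 'agent'?"}))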