"""Use a single chain to route an input to one of multiple llm chains."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate

from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE


@deprecated(
    since="0.2.12",
    removal="1.0",
    message=(
        "Please see migration guide here for recommended implementation: "
        "https://python.langchain.com/docs/versions/migrating_chains/"
        "multi_prompt_chain/"
    ),
)
class MultiPromptChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst prompts.

    This class is deprecated. See below for a replacement, which offers several
    benefits, including streaming and batch support.

    Below is an example implementation:

        .. code-block:: python

            from operator import itemgetter
            from typing import Literal

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnableConfig
            from langchain_openai import ChatOpenAI
            from langgraph.graph import END, START, StateGraph
            from typing_extensions import TypedDict

            llm = ChatOpenAI(model="gpt-4o-mini")

            # Define the prompts we will route to
            prompt_1 = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an expert on animals."),
                    ("human", "{input}"),
                ]
            )
            prompt_2 = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an expert on vegetables."),
                    ("human", "{input}"),
                ]
            )

            # Construct the chains we will route to. These format the input query
            # into the respective prompt, run it through a chat model, and cast
            # the result to a string.
            chain_1 = prompt_1 | llm | StrOutputParser()
            chain_2 = prompt_2 | llm | StrOutputParser()


            # Next: define the chain that selects which branch to route to.
            # Here we will take advantage of tool-calling features to force
            # the output to select one of two desired branches.
            route_system = "Route the user's query to either the animal or vegetable expert."
            route_prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", route_system),
                    ("human", "{input}"),
                ]
            )


            # Define schema for output:
            class RouteQuery(TypedDict):
                """Route query to destination expert."""

                destination: Literal["animal", "vegetable"]


            route_chain = route_prompt | llm.with_structured_output(RouteQuery)
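
            # Illustrative: the router returns the structured selection, e.g.
            #   await route_chain.ainvoke("what color are carrots")
            #   # -> {"destination": "vegetable"}  (assuming the model routes correctly)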


            # For LangGraph, we will define the state of the graph to hold the query,
            # destination, and final answer.
            class State(TypedDict):
                query: str
                destination: RouteQuery
                answer: str


            # We define functions for each node, including routing the query:
            async def route_query(state: State, config: RunnableConfig):
                destination = await route_chain.ainvoke(state["query"], config)
                return {"destination": destination}


            # And one node for each prompt
            async def prompt_1(state: State, config: RunnableConfig):
                return {"answer": await chain_1.ainvoke(state["query"], config)}


            async def prompt_2(state: State, config: RunnableConfig):
                return {"answer": await chain_2.ainvoke(state["query"], config)}


            # We then define logic that selects the prompt based on the classification
            def select_node(state: State) -> Literal["prompt_1", "prompt_2"]:
                if state["destination"] == "animal":
                    return "prompt_1"
                else:
                    return "prompt_2"


            # Finally, assemble the multi-prompt chain. This is a sequence of two steps:
            # 1) Select "animal" or "vegetable" via the route_chain, and collect the answer
            # alongside the input query.
            # 2) Route the input query to chain_1 or chain_2, based on the
            # selection.
            graph = StateGraph(State)
            graph.add_node("route_query", route_query)
            graph.add_node("prompt_1", prompt_1)
            graph.add_node("prompt_2", prompt_2)

            graph.add_edge(START, "route_query")
            graph.add_conditional_edges("route_query", select_node)
            graph.add_edge("prompt_1", END)
            graph.add_edge("prompt_2", END)
            app = graph.compile()

            result = await app.ainvoke({"query": "what color are carrots"})
            print(result["destination"])
            print(result["answer"])
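
        Because the compiled graph is a standard LangGraph runnable, the
        streaming and batch support mentioned above come through its
        ``Runnable`` interface. A minimal sketch, assuming the ``app``
        compiled above:

        .. code-block:: python

            # Stream the graph state after each step:
            async for step in app.astream({"query": "what color are carrots"}):
                print(step)

            # Or run several queries concurrently:
            results = await app.abatch(
                [
                    {"query": "what color are carrots"},
                    {"query": "what color are polar bears"},
                ]
            )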
    """  # noqa: E501

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_infos: List[Dict[str, str]],
        default_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> MultiPromptChain:
        """Convenience constructor for instantiating from destination prompts."""
        destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {}
        for p_info in prompt_infos:
            name = p_info["name"]
            prompt_template = p_info["prompt_template"]
            prompt = PromptTemplate(
                template=prompt_template, input_variables=["input"]
            )
            chain = LLMChain(llm=llm, prompt=prompt)
            destination_chains[name] = chain
        _default_chain = default_chain or ConversationChain(llm=llm, output_key="text")
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )