"""Combine documents by doing a first pass and then refining on more documents."""

from __future__ import annotations

from typing import Any, Dict, List, Tuple

from langchain_core._api import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.prompts import BasePromptTemplate, format_document
from langchain_core.prompts.prompt import PromptTemplate
from pydantic import ConfigDict, Field, model_validator

from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain


def _get_default_document_prompt() -> PromptTemplate:
    return PromptTemplate(input_variables=["page_content"], template="{page_content}")


@deprecated(
    since="0.3.1",
    removal="1.0",
    message=(
        "This class is deprecated. Please see the migration guide here for a "
        "recommended replacement: "
        "https://python.langchain.com/docs/versions/migrating_chains/refine_docs_chain/"
    ),
)
class RefineDocumentsChain(BaseCombineDocumentsChain):
    """Combine documents by doing a first pass and then refining on more documents.

    This algorithm first calls `initial_llm_chain` on the first document, passing
    that first document in with the variable name `document_variable_name`, and
    produces a new variable with the variable name `initial_response_name`.

    Then, it loops over every remaining document. This is called the "refine" step.
    It calls `refine_llm_chain`,
    passing in that document with the variable name `document_variable_name`
    as well as the previous response with the variable name `initial_response_name`.

    Example:
        .. code-block:: python

            from langchain.chains import RefineDocumentsChain, LLMChain
            from langchain_core.prompts import PromptTemplate
            from langchain_community.llms import OpenAI

            # This controls how each document will be formatted. Specifically,
            # it will be passed to `format_document` - see that function for more
            # details.
            document_prompt = PromptTemplate(
                input_variables=["page_content"],
                template="{page_content}"
            )
            document_variable_name = "context"
            llm = OpenAI()
            # The prompt here should take as an input variable the
            # `document_variable_name`
            prompt = PromptTemplate.from_template(
                "Summarize this content: {context}"
            )
            initial_llm_chain = LLMChain(llm=llm, prompt=prompt)
            initial_response_name = "prev_response"
            # The prompt here should take as an input variable the
            # `document_variable_name` as well as `initial_response_name`
            prompt_refine = PromptTemplate.from_template(
                "Here's your first summary: {prev_response}. "
                "Now add to it based on the following context: {context}"
            )
            refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine)
            chain = RefineDocumentsChain(
                initial_llm_chain=initial_llm_chain,
                refine_llm_chain=refine_llm_chain,
                document_prompt=document_prompt,
                document_variable_name=document_variable_name,
                initial_response_name=initial_response_name,
            )
    """

    initial_llm_chain: LLMChain
    refine_llm_chain: LLMChain
    document_variable_name: str
    initial_response_name: str
    document_prompt: BasePromptTemplate = Field(
        default_factory=_get_default_document_prompt
    )
    return_intermediate_steps: bool = False

    @property
    def output_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        _output_keys = super().output_keys
        if self.return_intermediate_steps:
            _output_keys = _output_keys + ["intermediate_steps"]
        return _output_keys

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def get_return_intermediate_steps(cls, values: Dict) -> Any:
        """For backwards compatibility."""
        if "return_refine_steps" in values:
            values["return_intermediate_steps"] = values["return_refine_steps"]
            del values["return_refine_steps"]
        return values

    @model_validator(mode="before")
    @classmethod
    def get_default_document_variable_name(cls, values: Dict) -> Any:
        """Get default document variable name, if not provided."""
        if "initial_llm_chain" not in values:
            raise ValueError("initial_llm_chain must be provided")
        llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
        if "document_variable_name" not in values:
            if len(llm_chain_variables) == 1:
                values["document_variable_name"] = llm_chain_variables[0]
            else:
                raise ValueError(
                    "document_variable_name must be provided if there are "
                    "multiple llm_chain input_variables"
                )
        elif values["document_variable_name"] not in llm_chain_variables:
            raise ValueError(
                f"document_variable_name {values['document_variable_name']} was "
                f"not found in llm_chain input_variables: {llm_chain_variables}"
            )
        return values

    def combine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine by mapping first chain over all, then stuffing into final chain.

        Args:
            docs: List of documents to combine
            callbacks: Callbacks to be passed through
            **kwargs: additional parameters to be passed to LLM calls (like other
                input variables besides the documents)

        Returns:
            The first element returned is the single string output. The second
            element returned is a dictionary of other keys to return.
        """
        inputs = self._construct_initial_inputs(docs, **kwargs)
        res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs)
        refine_steps = [res]
        for doc in docs[1:]:
            base_inputs = self._construct_refine_inputs(doc, res)
            inputs = {**base_inputs, **kwargs}
            res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs)
            refine_steps.append(res)
        return self._construct_result(refine_steps, res)

    async def acombine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Async combine by mapping a first chain over all, then stuffing
        into a final chain.

        Args:
            docs: List of documents to combine
            callbacks: Callbacks to be passed through
            **kwargs: additional parameters to be passed to LLM calls (like other
                input variables besides the documents)

        Returns:
            The first element returned is the single string output. The second
            element returned is a dictionary of other keys to return.
        """
        inputs = self._construct_initial_inputs(docs, **kwargs)
        res = await self.initial_llm_chain.apredict(callbacks=callbacks, **inputs)
        refine_steps = [res]
        for doc in docs[1:]:
            base_inputs = self._construct_refine_inputs(doc, res)
            inputs = {**base_inputs, **kwargs}
            res = await self.refine_llm_chain.apredict(callbacks=callbacks, **inputs)
            refine_steps.append(res)
        return self._construct_result(refine_steps, res)

    def _construct_result(self, refine_steps: List[str], res: str) -> Tuple[str, dict]:
        if self.return_intermediate_steps:
            extra_return_dict = {"intermediate_steps": refine_steps}
        else:
            extra_return_dict = {}
        return res, extra_return_dict

    def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]:
        return {
            self.document_variable_name: format_document(doc, self.document_prompt),
            self.initial_response_name: res,
        }

    def _construct_initial_inputs(
        self, docs: List[Document], **kwargs: Any
    ) -> Dict[str, Any]:
        base_info = {"page_content": docs[0].page_content}
        base_info.update(docs[0].metadata)
        document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
        base_inputs: dict = {
            self.document_variable_name: self.document_prompt.format(**document_info)
        }
        return {**base_inputs, **kwargs}

    @property
    def _chain_type(self) -> str:
        return "refine_documents_chain"
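

# Illustrative usage sketch (not part of the original module): a minimal,
# self-contained driver for the chain defined above. It mirrors the prompts from
# the class docstring but swaps in a deterministic `FakeListLLM` so no API key is
# needed; the document texts and canned responses are assumptions made only for
# this example.
if __name__ == "__main__":
    from langchain_community.llms.fake import FakeListLLM

    llm = FakeListLLM(responses=["Initial summary.", "Refined summary."])
    prompt = PromptTemplate.from_template("Summarize this content: {context}")
    prompt_refine = PromptTemplate.from_template(
        "Here's your first summary: {prev_response}. "
        "Now add to it based on the following context: {context}"
    )
    chain = RefineDocumentsChain(
        initial_llm_chain=LLMChain(llm=llm, prompt=prompt),
        refine_llm_chain=LLMChain(llm=llm, prompt=prompt_refine),
        document_variable_name="context",
        initial_response_name="prev_response",
        return_intermediate_steps=True,
    )
    docs = [
        Document(page_content="Jupiter is the largest planet in the solar system."),
        Document(page_content="Saturn is known for its prominent ring system."),
    ]
    # `combine_docs` runs the initial pass on docs[0], then one refine step per
    # remaining document, returning (final_text, {"intermediate_steps": [...]}).
    output_text, extras = chain.combine_docs(docs)
    print(output_text)
    print(extras["intermediate_steps"])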