"""Chain for applying self-critique using the SmartGPT workflow."""

from typing import Any, Dict, List, Optional, Tuple, Type

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.input import get_colored_text
from langchain.schema import LLMResult, PromptValue
from langchain_core.callbacks.manager import CallbackManagerForChainRun
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import (
    AIMessagePromptTemplate,
    BaseMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from pydantic import ConfigDict, model_validator


class SmartLLMChain(Chain):
    """Chain for applying self-critique using the SmartGPT workflow.

    See details at https://youtu.be/wVzuvf9D9BU

    A SmartLLMChain is an LLMChain that, instead of simply passing the prompt
    to the LLM, performs these 3 steps:
    1. Ideate: Pass the user prompt to an ideation LLM n_ideas times;
       each result is an "idea".
    2. Critique: Pass the ideas to a critique LLM which looks for flaws in the
       ideas & picks the best one.
    3. Resolve: Pass the critique to a resolver LLM which improves upon the
       best idea & outputs only the (improved version of) the best output.

    In total, a SmartLLMChain pass will use n_ideas + 2 LLM calls.

    Note that SmartLLMChain will only improve results (compared to a basic
    LLMChain) when the underlying models have the capability for reflection,
    which smaller models often don't.

    Finally, a SmartLLMChain assumes that each underlying LLM outputs exactly
    one result.
    """

    class SmartLLMChainHistory:
        question: str = ""
        ideas: List[str] = []
        critique: str = ""

        @property
        def n_ideas(self) -> int:
            return len(self.ideas)

        def ideation_prompt_inputs(self) -> Dict[str, Any]:
            return {"question": self.question}

        def critique_prompt_inputs(self) -> Dict[str, Any]:
            return {
                "question": self.question,
                **{f"idea_{i + 1}": idea for i, idea in enumerate(self.ideas)},
            }

        def resolve_prompt_inputs(self) -> Dict[str, Any]:
            return {
                "question": self.question,
                **{f"idea_{i + 1}": idea for i, idea in enumerate(self.ideas)},
                "critique": self.critique,
            }

    prompt: BasePromptTemplate
    """Prompt object to use."""
    output_key: str = "resolution"
    ideation_llm: Optional[BaseLanguageModel] = None
    """LLM to use in ideation step. If None given, 'llm' will be used."""
    critique_llm: Optional[BaseLanguageModel] = None
    """LLM to use in critique step. If None given, 'llm' will be used."""
    resolver_llm: Optional[BaseLanguageModel] = None
    """LLM to use in resolve step. If None given, 'llm' will be used."""
    llm: Optional[BaseLanguageModel] = None
    """LLM to use for each step, if no specific llm for that step is given."""
    n_ideas: int = 3
    """Number of ideas to generate in the ideation step."""
    return_intermediate_steps: bool = False
    """Whether to return ideas and critique, in addition to the resolution."""
    history: SmartLLMChainHistory = SmartLLMChainHistory()

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Ensure we have an LLM for each step."""
        llm = values.get("llm")
        ideation_llm = values.get("ideation_llm")
        critique_llm = values.get("critique_llm")
        resolver_llm = values.get("resolver_llm")
        # Shared tail of the three "missing LLM" error messages.
        _how_to_fix = (
            " Pass llm, if you want to use the same llm for all steps, or pass"
            " ideation_llm, critique_llm and resolver_llm if you want to use"
            " different llms for each step."
        )
        if not llm and not ideation_llm:
            raise ValueError(
                "Either ideation_llm or llm needs to be given." + _how_to_fix
            )
        if not llm and not critique_llm:
            raise ValueError(
                "Either critique_llm or llm needs to be given." + _how_to_fix
            )
        if not llm and not resolver_llm:
            raise ValueError(
                "Either resolver_llm or llm needs to be given." + _how_to_fix
            )
        if llm and ideation_llm and critique_llm and resolver_llm:
            raise ValueError(
                "LLMs are given for each step (ideation_llm, critique_llm,"
                " resolver_llm), but backup LLM (llm) is also given, which"
                " would not be used."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Defines the input keys."""
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Defines the output keys."""
        if self.return_intermediate_steps:
            return ["ideas", "critique", self.output_key]
        return [self.output_key]

    def prep_prompts(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[PromptValue, Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in inputs:
            stop = inputs["stop"]
        selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
        prompt = self.prompt.format_prompt(**selected_inputs)
        _colored_text = get_colored_text(prompt.to_string(), "green")
        _text = "Prompt after formatting:\n" + _colored_text
        if run_manager:
            run_manager.on_text(_text, end="\n", verbose=self.verbose)
        if "stop" in inputs and inputs["stop"] != stop:
            raise ValueError(
                "If `stop` is present in any inputs, should be present in all."
            )
        return prompt, stop

    def _call(
        self,
        input_list: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        prompt, stop = self.prep_prompts(input_list, run_manager=run_manager)
        self.history.question = prompt.to_string()
        ideas = self._ideate(stop, run_manager)
        self.history.ideas = ideas
        critique = self._critique(stop, run_manager)
        self.history.critique = critique
        resolution = self._resolve(stop, run_manager)
        if self.return_intermediate_steps:
            return {"ideas": ideas, "critique": critique, self.output_key: resolution}
        return {self.output_key: resolution}

    def _get_text_from_llm_result(self, result: LLMResult, step: str) -> str:
        """Between steps, only the LLM result text is passed, not the LLMResult
        object. This function extracts the text from an LLMResult."""
        if len(result.generations) != 1:
            raise ValueError(
                f"In SmartLLM the LLM result in step {step} is not "
                "exactly 1 element. This should never happen"
            )
        if len(result.generations[0]) != 1:
            raise ValueError(
                f"In SmartLLM the LLM in step {step} returned more than "
                "1 output. SmartLLM only works with LLMs returning "
                "exactly 1 output."
            )
        return result.generations[0][0].text

    def get_prompt_strings(
        self, stage: str
    ) -> List[Tuple[Type[BaseMessagePromptTemplate], str]]:
        role_strings: List[Tuple[Type[BaseMessagePromptTemplate], str]] = [
            (
                HumanMessagePromptTemplate,
                "Question: {question}\nAnswer: Let's work this out in a step by"
                " step way to be sure we have the right answer:",
            )
        ]
        if stage == "ideation":
            return role_strings
        role_strings.extend(
            [
                (AIMessagePromptTemplate, f"Idea {i + 1}: {{idea_{i + 1}}}")
                for i in range(self.n_ideas)
            ]
        )
        role_strings.append(
            (
                HumanMessagePromptTemplate,
                "You are a researcher tasked with investigating the"
                f" {self.n_ideas} response options provided. List the flaws and"
                " faulty logic of each answer option. Let's work this out in a"
                " step by step way to be sure we have all the errors:",
            )
        )
        if stage == "critique":
            return role_strings
        role_strings.extend(
            [
                (AIMessagePromptTemplate, "Critique: {critique}"),
                (
                    HumanMessagePromptTemplate,
                    "You are a resolver tasked with 1) finding which of the"
                    f" {self.n_ideas} answer options the researcher thought was"
                    " best, 2) improving that answer and 3) printing the answer"
                    " in full. Don't output anything for step 1 or 2, only the"
                    " full answer in 3. Let's work this out in a step by step"
                    " way to be sure we have the right answer:",
                ),
            ]
        )
        if stage == "resolve":
            return role_strings
        raise ValueError(
            "stage should be either 'ideation', 'critique' or 'resolve',"
            f" but it is '{stage}'. This should never happen."
        )

    def ideation_prompt(self) -> ChatPromptTemplate:
        return ChatPromptTemplate.from_strings(self.get_prompt_strings("ideation"))

    def critique_prompt(self) -> ChatPromptTemplate:
        return ChatPromptTemplate.from_strings(self.get_prompt_strings("critique"))

    def resolve_prompt(self) -> ChatPromptTemplate:
        return ChatPromptTemplate.from_strings(self.get_prompt_strings("resolve"))

    def _ideate(
        self,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> List[str]:
        """Generate n_ideas ideas as response to user prompt."""
        llm = self.ideation_llm if self.ideation_llm else self.llm
        prompt = self.ideation_prompt().format_prompt(
            **self.history.ideation_prompt_inputs()
        )
        callbacks = run_manager.get_child() if run_manager else None
        if llm:
            ideas = [
                self._get_text_from_llm_result(
                    llm.generate_prompt([prompt], stop, callbacks), step="ideate"
                )
                for _ in range(self.n_ideas)
            ]
            for i, idea in enumerate(ideas):
                _colored_text = get_colored_text(idea, "blue")
                _text = f"Idea {i + 1}:\n" + _colored_text
                if run_manager:
                    run_manager.on_text(_text, end="\n", verbose=self.verbose)
            return ideas
        else:
            raise ValueError("llm is none, which should never happen")

    def _critique(
        self,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> str:
        """Critique each of the ideas from ideation stage & select best one."""
        llm = self.critique_llm if self.critique_llm else self.llm
        prompt = self.critique_prompt().format_prompt(
            **self.history.critique_prompt_inputs()
        )
        callbacks = run_manager.handlers if run_manager else None
        if llm:
            critique = self._get_text_from_llm_result(
                llm.generate_prompt([prompt], stop, callbacks), step="critique"
            )
            _colored_text = get_colored_text(critique, "yellow")
            _text = "Critique:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            return critique
        else:
            raise ValueError("llm is none, which should never happen")

    def _resolve(
        self,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> str:
        """Improve upon the best idea as chosen in critique step & return it."""
        llm = self.resolver_llm if self.resolver_llm else self.llm
        prompt = self.resolve_prompt().format_prompt(
            **self.history.resolve_prompt_inputs()
        )
        callbacks = run_manager.handlers if run_manager else None
        if llm:
            resolution = self._get_text_from_llm_result(
                llm.generate_prompt([prompt], stop, callbacks), step="resolve"
            )
            _colored_text = get_colored_text(resolution, "green")
            _text = "Resolution:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            return resolution
        else:
            raise ValueError("llm is none, which should never happen")