"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import StreamingStdOutCallbackHandler

DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]


class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
    """Callback handler for streaming in agents.
    Only works with agents using LLMs that support streaming.

    Only the final output of the agent will be streamed.
    """

    def append_to_last_tokens(self, token: str) -> None:
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    def check_if_answer_reached(self) -> bool:
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        else:
            return self.last_tokens == self.answer_prefix_tokens

    def __init__(
        self,
        *,
        answer_prefix_tokens: Optional[List[str]] = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False,
    ) -> None:
        """Instantiate FinalStreamingStdOutCallbackHandler.

        Args:
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is ["Final", "Answer", ":"]
            strip_tokens: Ignore white spaces and new lines when comparing
                answer_prefix_tokens to last tokens? (to determine if answer has been
                reached)
            stream_prefix: Should answer prefix itself also be streamed?
        """
        super().__init__()
        if answer_prefix_tokens is None:
            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
        else:
            self.answer_prefix_tokens = answer_prefix_tokens
        if strip_tokens:
            self.answer_prefix_tokens_stripped = [
                token.strip() for token in self.answer_prefix_tokens
            ]
        else:
            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
        self.last_tokens = [""] * len(self.answer_prefix_tokens)
        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
        self.strip_tokens = strip_tokens
        self.stream_prefix = stream_prefix
        self.answer_reached = False

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""
        self.answer_reached = False

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        # Remember the last n tokens, where n = len(answer_prefix_tokens)
        self.append_to_last_tokens(token)

        # Check if the last n tokens match the answer prefix; if so, flag that
        # the final answer has started (optionally echoing the prefix itself)
        if self.check_if_answer_reached():
            self.answer_reached = True
            if self.stream_prefix:
                for t in self.last_tokens:
                    sys.stdout.write(t)
                sys.stdout.flush()
            return

        # Once the prefix has been seen, stream every subsequent token
        if self.answer_reached:
            sys.stdout.write(token)
            sys.stdout.flush()
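

# Minimal usage sketch: wiring the handler into a streaming LLM driving an
# agent, so only the text after "Final Answer:" reaches stdout. The
# `langchain_openai.OpenAI` import, `load_tools`, and `initialize_agent` are
# assumptions about the installed langchain version (and an OPENAI_API_KEY
# must be set); substitute whatever LLM/agent constructors your setup provides.
if __name__ == "__main__":
    from langchain.agents import AgentType, initialize_agent, load_tools
    from langchain_openai import OpenAI  # assumed available in this environment

    llm = OpenAI(
        streaming=True,  # the handler only fires when the LLM streams tokens
        callbacks=[FinalStreamingStdOutCallbackHandler()],
        temperature=0,
    )
    tools = load_tools(["llm-math"], llm=llm)
    agent = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
    )
    agent.run("What is 2 raised to the 0.235 power?")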