from typing import Tuple

from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate


class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
    """Output parser that checks if the output is finished."""

    finished_value: str = "FINISHED"

    def parse(self, text: str) -> Tuple[str, bool]:
        # Strip the FINISHED marker and report whether it was present.
        cleaned = text.strip()
        finished = self.finished_value in cleaned
        return cleaned.replace(self.finished_value, ""), finished


PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.

>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}"""

PROMPT = PromptTemplate(
    template=PROMPT_TEMPLATE, input_variables=["user_input", "context", "response"]
)

QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:

>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}

The question to which the answer is the term/entity/phrase "{uncertain_span}" is:"""

QUESTION_GENERATOR_PROMPT = PromptTemplate(
    template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
    input_variables=["user_input", "current_response", "uncertain_span"],
)
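
# Illustrative usage sketch (not part of the original module): shows how the
# prompt and parser above fit together. It assumes only the names defined in
# this file plus PromptTemplate.format from langchain_core; the example
# strings are hypothetical.
if __name__ == "__main__":
    # Fill the response prompt with retrieved context, the user's question,
    # and the partial response generated so far.
    formatted = PROMPT.format(
        context="Paris is the capital of France.",
        user_input="What is the capital of France?",
        response="",
    )
    print(formatted)

    # Parse a hypothetical model completion: the FINISHED marker is removed
    # from the text and reported separately as a boolean flag.
    text, finished = FinishedOutputParser().parse("The capital is Paris. FINISHED")
    print(finished)  # True
    print(text)      # "The capital is Paris. " (marker stripped)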
