import logging
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict

logger = logging.getLogger(__name__)


class OpaquePrompts(LLM):
    """LLM that uses OpaquePrompts to sanitize prompts.

    Wraps another LLM and sanitizes prompts before passing them to the LLM,
    then de-sanitizes the response.

    To use, you should have the ``opaqueprompts`` python package installed,
    and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
    your API key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms import OpaquePrompts
            from langchain_community.chat_models import ChatOpenAI

            op_llm = OpaquePrompts(base_llm=ChatOpenAI())
    """

    base_llm: BaseLanguageModel

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validates that the OpaquePrompts API key and the Python package exist."""
        try:
            import opaqueprompts as op
        except ImportError:
            raise ImportError(
                "Could not import the `opaqueprompts` Python package, "
                "please install it with `pip install opaqueprompts`."
            )
        if op.__package__ is None:
            raise ValueError(
                "Could not properly import `opaqueprompts`, "
                "opaqueprompts.__package__ is None."
            )

        api_key = get_from_dict_or_env(
            values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
        )
        if not api_key:
            raise ValueError(
                "Could not find OPAQUEPROMPTS_API_KEY in the environment. "
                "Please set it to your OpaquePrompts API key. "
                "You can get it by creating an account on the OpaquePrompts "
                "website: https://opaqueprompts.opaque.co/ ."
            )
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call base LLM with sanitization before and de-sanitization after.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = op_llm.invoke("Tell me a joke.")
        """
        import opaqueprompts as op

        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()

        # sanitize the prompt by replacing sensitive information with placeholders
        sanitize_response = op.sanitize([prompt])
        sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]

        # pass the sanitized prompt to the wrapped base LLM
        llm_response = self.base_llm.bind(stop=stop, **kwargs).invoke(
            sanitized_prompt_value_str
        )
        if isinstance(llm_response, AIMessage):
            llm_response = llm_response.content

        # de-sanitize the response by restoring the original sensitive values
        # recorded in the secure context from the sanitize step
        desanitize_response = op.desanitize(
            llm_response,
            secure_context=sanitize_response.secure_context,
        )
        return desanitize_response.desanitized_text

    @property
    def _llm_type(self) -> str:
        """Return type of LLM.

        This is an override of the base class method.
        """
        return "opaqueprompts"

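
if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): it assumes
    # the `opaqueprompts` package is installed, OPAQUEPROMPTS_API_KEY is set,
    # and an OpenAI key is available for the ChatOpenAI base model.
    from langchain_community.chat_models import ChatOpenAI

    op_llm = OpaquePrompts(base_llm=ChatOpenAI())
    # PII such as the name and email below is replaced with placeholders
    # before the base LLM ever sees the prompt, then restored in the reply.
    print(op_llm.invoke("Draft a reply to John Doe <john.doe@example.com>."))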