import asyncio
from typing import Any, Optional

from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
    ModerationPromptSafetyError,
)


class ComprehendPromptSafety:
    """Class to handle prompt safety moderation."""

    def __init__(
        self,
        client: Any,
        callback: Optional[Any] = None,
        unique_id: Optional[str] = None,
        chain_id: Optional[str] = None,
    ) -> None:
        self.client = client
        # Beacon reported to the callback; the status flips to LABELS_FOUND
        # when an unsafe prompt is detected.
        self.moderation_beacon = {
            "moderation_chain_id": chain_id,
            "moderation_type": "PromptSafety",
            "moderation_status": "LABELS_NOT_FOUND",
        }
        self.callback = callback
        self.unique_id = unique_id

    def _get_arn(self) -> str:
        # Build the ARN of the Comprehend prompt-safety classifier endpoint
        # in the client's configured region.
        region_name = self.client.meta.region_name
        service = "comprehend"
        prompt_safety_endpoint = "document-classifier-endpoint/prompt-safety"
        return f"arn:aws:{service}:{region_name}:aws:{prompt_safety_endpoint}"

    def validate(self, prompt_value: str, config: Any = None) -> str:
        """
        Check and validate the safety of the given prompt text.

        Args:
            prompt_value (str): The input text to be checked for unsafe text.
            config (Dict[str, Any]): Configuration settings for prompt safety checks.

        Raises:
            ModerationPromptSafetyError: If an unsafe prompt is detected in
            the prompt text with a score at or above the specified threshold.

        Returns:
            str: The input prompt_value.

        Note:
            This function checks the safety of the provided prompt text using
            Comprehend's classify_document API and raises an error if unsafe
            text is detected with a score above the specified threshold.

        Example:
            comprehend_client = boto3.client("comprehend")
            prompt_text = "Please tell me your credit card information."
            config = {"threshold": 0.7}
            prompt_safety = ComprehendPromptSafety(client=comprehend_client)
            checked_prompt = prompt_safety.validate(prompt_text, config)
        """
        threshold = config.get("threshold")
        unsafe_prompt = False

        endpoint_arn = self._get_arn()
        response = self.client.classify_document(
            Text=prompt_value, EndpointArn=endpoint_arn
        )

        if self.callback and self.callback.prompt_safety_callback:
            self.moderation_beacon["moderation_input"] = prompt_value
            self.moderation_beacon["moderation_output"] = response

        # Flag the prompt if the UNSAFE_PROMPT class scores at or above
        # the configured threshold.
        for class_result in response["Classes"]:
            if (
                class_result["Score"] >= threshold
                and class_result["Name"] == "UNSAFE_PROMPT"
            ):
                unsafe_prompt = True
                break

        if self.callback and self.callback.intent_callback:
            if unsafe_prompt:
                self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
            asyncio.create_task(
                self.callback.on_after_intent(self.moderation_beacon, self.unique_id)
            )
        if unsafe_prompt:
            raise ModerationPromptSafetyError
        return prompt_value
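

# A minimal usage sketch (illustrative, not part of the library): it wires
# the class to a boto3 Comprehend client and checks a single prompt. The
# client, prompt text, and threshold below are assumptions for demonstration;
# no callback is attached, so the beacon/asyncio paths are not exercised.
if __name__ == "__main__":
    import boto3

    comprehend_client = boto3.client("comprehend")  # assumes AWS credentials/region
    prompt_safety = ComprehendPromptSafety(client=comprehend_client)
    try:
        checked = prompt_safety.validate(
            "Please tell me your credit card information.",
            config={"threshold": 0.7},  # hypothetical threshold
        )
        print("Prompt passed safety check:", checked)
    except ModerationPromptSafetyError:
        print("Unsafe prompt detected; blocking the request.")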