"""Loading datasets and evaluators."""

from typing import Any, Dict, List, Optional, Sequence, Type, Union

from langchain_core.language_models import BaseLanguageModel

from langchain.chains.base import Chain
from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
from langchain.evaluation.comparison import PairwiseStringEvalChain
from langchain.evaluation.comparison.eval_chain import LabeledPairwiseStringEvalChain
from langchain.evaluation.criteria.eval_chain import (
    CriteriaEvalChain,
    LabeledCriteriaEvalChain,
)
from langchain.evaluation.embedding_distance.base import (
    EmbeddingDistanceEvalChain,
    PairwiseEmbeddingDistanceEvalChain,
)
from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator
from langchain.evaluation.parsing.base import (
    JsonEqualityEvaluator,
    JsonValidityEvaluator,
)
from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator
from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator
from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain
from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator
from langchain.evaluation.schema import EvaluatorType, LLMEvalChain, StringEvaluator
from langchain.evaluation.scoring.eval_chain import (
    LabeledScoreStringEvalChain,
    ScoreStringEvalChain,
)
from langchain.evaluation.string_distance.base import (
    PairwiseStringDistanceEvalChain,
    StringDistanceEvalChain,
)


def load_dataset(uri: str) -> List[Dict]:
    """Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_.

    Args:
        uri: The URI of the dataset to load.

    Returns:
        A list of dictionaries, each representing a row in the dataset.

    **Prerequisites**

    .. code-block:: shell

        pip install datasets

    Examples
    --------
    .. code-block:: python

        from langchain.evaluation import load_dataset
        ds = load_dataset("llm-math")
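
    Each returned row is a plain ``dict``; the exact keys depend on the
    dataset, so a quick way to inspect them is (illustrative sketch):

    .. code-block:: python

        row = ds[0]
        print(row.keys())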
    r   )load_datasetzXload_dataset requires the `datasets` package. Please install with `pip install datasets`zLangChainDatasets/c                 S   s   g | ]}|qS  r%   ).0dr%   r%   j/var/www/html/cobodadashboardai.evdpl.com/venv/lib/python3.9/site-packages/langchain/evaluation/loading.py
<listcomp>F       z load_dataset.<locals>.<listcomp>train)Zdatasetsr$   ImportError)r"   r$   Zdatasetr%   r%   r(   r$   '   s    
r$   _EVALUATOR_MAPN)llm)	evaluatorr.   kwargsr#   c                K   s   | t vr$td|  dtt   t |  }t|trzfzddlm} W n> ty   zddl	m} W n ty   tdY n0 Y n0 |p|dddd}W n6 t
y } ztd	| d
|W Y d}~n
d}~0 0 |jf d|i|S |f i |S dS )a<  Load the requested evaluation chain specified by a string.

    Parameters
    ----------
    evaluator : EvaluatorType
        The type of evaluator to load.
    llm : BaseLanguageModel, optional
        The language model to use for evaluation, by default None
    **kwargs : Any
        Additional keyword arguments to pass to the evaluator.

    Returns
    -------
    Union[Chain, StringEvaluator]
        The loaded evaluation chain or string evaluator.

    Examples
    --------
    >>> from langchain.evaluation import load_evaluator, EvaluatorType
    >>> evaluator = load_evaluator(EvaluatorType.QA)
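
    An evaluation LLM can also be supplied explicitly instead of relying on the
    default (illustrative sketch, assuming ``langchain_openai`` is installed):

    >>> from langchain_openai import ChatOpenAI
    >>> evaluator = load_evaluator(
    ...     EvaluatorType.CRITERIA,
    ...     llm=ChatOpenAI(model="gpt-4", temperature=0),
    ...     criteria="helpfulness",
    ... )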
    zUnknown evaluator type: z
Valid types are: r   )
ChatOpenAIzCould not import langchain_openai or fallback onto langchain_community. Please install langchain_openai or specify a language model explicitly. It's recommended to install langchain_openai AND specify a language model explicitly.zgpt-4*   )modelseedZtemperaturezEvaluation with the z requires a language model to function. Failed to create the default 'gpt-4' model. Please manually provide an evaluation LLM or check your openai credentials.Nr.   )r-   
ValueErrorlistkeys
issubclassr   Zlangchain_openair1   r,   Z&langchain_community.chat_models.openai	ExceptionZfrom_llm)r/   r.   r0   Zevaluator_clsr1   er%   r%   r(   load_evaluatorc   s:    



r;   )r.   config)
evaluatorsr.   r<   r0   r#   c                K   sH   g }| D ]:}|r| |i ni }|t|fd|ii || q|S )ae  Load evaluators specified by a list of evaluator types.

    Parameters
    ----------
    evaluators : Sequence[EvaluatorType]
        The list of evaluator types to load.
    llm : BaseLanguageModel, optional
        The language model to use for evaluation. If none is provided, a default
        ChatOpenAI gpt-4 model will be used.
    config : dict, optional
        A dictionary mapping evaluator types to additional keyword arguments,
        by default None
    **kwargs : Any
        Additional keyword arguments to pass to all evaluators.

    Returns
    -------
    List[Union[Chain, StringEvaluator]]
        The loaded evaluators.

    Examples
    --------
    >>> from langchain.evaluation import load_evaluators, EvaluatorType
    >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA]
    >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness")
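
    Per-evaluator keyword arguments can also be passed via ``config`` (a sketch;
    the criterion name here is only an example):

    >>> loaded_with_config = load_evaluators(
    ...     evaluators,
    ...     config={EvaluatorType.CRITERIA: {"criteria": "conciseness"}},
    ... )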
    r.   )getappendr;   )r=   r.   r<   r0   Zloadedr/   _kwargsr%   r%   r(   load_evaluators   s
    !$rA   )M__doc__typingr   r   r   r   r   r   r   Zlangchain_core.language_modelsr	   Zlangchain.chains.baser
   Z1langchain.evaluation.agents.trajectory_eval_chainr   Zlangchain.evaluation.comparisonr   Z*langchain.evaluation.comparison.eval_chainr   Z(langchain.evaluation.criteria.eval_chainr   r   Z,langchain.evaluation.embedding_distance.baser   r   Z%langchain.evaluation.exact_match.baser   Z!langchain.evaluation.parsing.baser   r   Z*langchain.evaluation.parsing.json_distancer   Z(langchain.evaluation.parsing.json_schemar   Zlangchain.evaluation.qar   r   r   Z%langchain.evaluation.regex_match.baser   Zlangchain.evaluation.schemar   r   r   Z'langchain.evaluation.scoring.eval_chainr   r   Z)langchain.evaluation.string_distance.baser    r!   strr$   ZQAZCOT_QAZ
CONTEXT_QAZPAIRWISE_STRINGZSCORE_STRINGZLABELED_PAIRWISE_STRINGZLABELED_SCORE_STRINGZAGENT_TRAJECTORYZCRITERIAZLABELED_CRITERIAZSTRING_DISTANCEZPAIRWISE_STRING_DISTANCEZEMBEDDING_DISTANCEZPAIRWISE_EMBEDDING_DISTANCEZJSON_VALIDITYZJSON_EQUALITYZJSON_EDIT_DISTANCEZJSON_SCHEMA_VALIDATIONZREGEX_MATCHZEXACT_MATCHr-   __annotations__r;   dictrA   r%   r%   r%   r(   <module>   st   $%
F