"""V2 Evaluation Interface."""

from __future__ import annotations

import asyncio
import concurrent.futures as cf
import io
import logging
import pathlib
import uuid
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncIterable,
    AsyncIterator,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    TypeVar,
    Union,
    cast,
)

import langsmith
from langsmith import run_helpers as rh
from langsmith import run_trees as rt
from langsmith import schemas
from langsmith import utils as ls_utils
from langsmith._internal import _aiter as aitertools
from langsmith._internal._beta_decorator import _warn_once
from langsmith.evaluation._runner import (
    AEVALUATOR_T,
    DATA_T,
    EVALUATOR_T,
    ExperimentResultRow,
    _evaluators_include_attachments,
    _ExperimentManagerMixin,
    _extract_feedback_keys,
    _ForwardResults,
    _include_attachments,
    _is_langchain_runnable,
    _load_examples_map,
    _load_experiment,
    _load_tqdm,
    _load_traces,
    _resolve_data,
    _resolve_evaluators,
    _resolve_experiment,
    _to_pandas,
    _wrap_summary_evaluators,
)
from langsmith.evaluation.evaluator import (
    SUMMARY_EVALUATOR_T,
    EvaluationResult,
    EvaluationResults,
    RunEvaluator,
)

if TYPE_CHECKING:
    import pandas as pd
    from langchain_core.runnables import Runnable

    DataFrame = pd.DataFrame
else:
    DataFrame = Any

logger = logging.getLogger(__name__)

ATARGET_T = Union[
    Callable[[dict], Awaitable[dict]], Callable[[dict, dict], Awaitable[dict]]
]


async def aevaluate(
    target: Union[
        ATARGET_T, AsyncIterable[dict], Runnable, str, uuid.UUID, schemas.TracerSession
    ],
    /,
    data: Union[
        DATA_T, AsyncIterable[schemas.Example], Iterable[schemas.Example], None
    ] = None,
    evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = 0,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
    upload_results: bool = True,
    **kwargs: Any,
) -> AsyncExperimentResults:
    """Evaluate an async target system on a given dataset.

    Args:
        target (AsyncCallable[[dict], dict] | AsyncIterable[dict] | Runnable | EXPERIMENT_T | Tuple[EXPERIMENT_T, EXPERIMENT_T]):
            The target system or experiment(s) to evaluate. Can be an async function
            that takes a dict and returns a dict, a langchain Runnable, an
            existing experiment ID, or a two-tuple of experiment IDs.
        data (Union[DATA_T, AsyncIterable[schemas.Example]]): The dataset to evaluate on. Can be a dataset name, a list of
            examples, an async generator of examples, or an async iterable of examples.
        evaluators (Optional[Sequence[EVALUATOR_T]]): A list of evaluators to run
            on each example. Defaults to None.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): A list of summary
            evaluators to run on the entire dataset. Defaults to None.
        metadata (Optional[dict]): Metadata to attach to the experiment.
            Defaults to None.
        experiment_prefix (Optional[str]): A prefix to provide for your experiment name.
            Defaults to None.
        description (Optional[str]): A description of the experiment.
        max_concurrency (int | None): The maximum number of concurrent
            evaluations to run. If None then no limit is set. If 0 then no concurrency.
            Defaults to 0.
        num_repetitions (int): The number of times to run the evaluation.
            Each item in the dataset will be run and evaluated this many times.
            Defaults to 1.
        client (Optional[langsmith.Client]): The LangSmith client to use.
            Defaults to None.
        blocking (bool): Whether to block until the evaluation is complete.
            Defaults to True.
        experiment (Optional[schemas.TracerSession]): An existing experiment to
            extend. If provided, experiment_prefix is ignored. For advanced
            usage only.
        load_nested: Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs. Should only be specified
            when evaluating an existing experiment.

    Returns:
        AsyncExperimentResults: An async iterator over the experiment results.

    Environment:
        - LANGSMITH_TEST_CACHE: If set, API calls will be cached to disk to save time and
            cost during testing. Recommended to commit the cache files to your repository
            for faster CI/CD runs.
            Requires the 'langsmith[vcr]' package to be installed.
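
            For example, a sketch of enabling the cache in a test session (the
            directory path here is illustrative):

            >>> import os
            >>> os.environ["LANGSMITH_TEST_CACHE"] = "tests/cassettes"  # doctest: +SKIP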

    Examples:
        >>> from typing import Sequence
        >>> from langsmith import Client, aevaluate
        >>> from langsmith.schemas import Example, Run
        >>> client = Client()
        >>> dataset = client.clone_public_dataset(
        ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
        ... )
        >>> dataset_name = "Evaluate Examples"

        Basic usage:

        >>> def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}

        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}

        >>> import asyncio
        >>> async def apredict(inputs: dict) -> dict:
        ...     # This can be any async function or just an API call to your app.
        ...     await asyncio.sleep(0.1)
        ...     return {"output": "Yes"}
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Experiment",
        ...         description="Evaluate the accuracy of the model asynchronously.",
        ...         metadata={
        ...             "my-prompt-version": "abcd-1234",
        ...         },
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Evaluating over only a subset of the examples using an async generator:

        >>> async def example_generator():
        ...     examples = client.list_examples(dataset_name=dataset_name, limit=5)
        ...     for example in examples:
        ...         yield example
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=example_generator(),
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Subset Experiment",
        ...         description="Evaluate a subset of examples asynchronously.",
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Streaming each prediction to debug more easily and eagerly:

        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Streaming Experiment",
        ...         description="Streaming predictions for debugging.",
        ...         blocking=False,
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        >>> async def aenumerate(iterable):
        ...     async for elem in iterable:
        ...         print(elem)
        >>> asyncio.run(aenumerate(results))

        Running without concurrency:

        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Experiment Without Concurrency",
        ...         description="This was run without concurrency.",
        ...         max_concurrency=0,
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Using async evaluators:

        >>> async def helpfulness(run: Run, example: Example):
        ...     # Row-level evaluator for helpfulness.
        ...     await asyncio.sleep(5)  # Replace with your LLM API call
        ...     return {"score": run.outputs["output"] == "Yes"}

        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[helpfulness],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Helpful Experiment",
        ...         description="Applying async evaluators example.",
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
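
        A sketch of consuming the streamed rows (each row is a dict with
        ``run``, ``example``, and ``evaluation_results`` keys; the printed
        fields assume the row-level results defined above):

        >>> async def consume(results):  # doctest: +SKIP
        ...     async for row in results:
        ...         for res in row["evaluation_results"]["results"]:
        ...             print(res.key, res.score)
        >>> asyncio.run(consume(results))  # doctest: +SKIP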


    .. versionchanged:: 0.2.0

        'max_concurrency' default updated from None (no limit on concurrency)
        to 0 (no concurrency at all).
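
        To keep the old unbounded behavior, pass ``max_concurrency=None``
        explicitly:

        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         max_concurrency=None,  # no limit on concurrency
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...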
    """
    if isinstance(target, (str, uuid.UUID, schemas.TracerSession)):
        invalid_args = {
            "num_repetitions": num_repetitions > 1,
            "experiment": bool(experiment),
            "upload_results": not upload_results,
            "experiment_prefix": bool(experiment_prefix),
            "data": bool(data),
        }
        if any(invalid_args.values()):
            msg = (
                "Received invalid arguments. "
                f"{tuple(k for k, v in invalid_args.items() if v)}"
                " should not be specified when target is an existing experiment."
            )
            raise ValueError(msg)
        target_id = target.id if isinstance(target, schemas.TracerSession) else target
        logger.debug(f"Running evaluation over existing experiment {target_id}...")
        return await aevaluate_existing(
            target,
            evaluators=evaluators,
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            max_concurrency=max_concurrency,
            client=client,
            blocking=blocking,
            **kwargs,
        )
    elif isinstance(target, (list, tuple)):
        msg = (
            "Running a comparison of two existing experiments asynchronously is not "
            "currently supported. Please use the `evaluate()` method instead and "
            "make sure that your evaluators are defined as synchronous functions."
        )
        raise ValueError(msg)
    elif kwargs:
        msg = (
            f"Received unsupported arguments {kwargs}. These arguments are not "
            "supported when creating a new experiment."
        )
        raise ValueError(msg)
    elif not data:
        msg = "Must specify 'data' when running evaluations over a target function."
        raise ValueError(msg)
    elif experiment and experiment_prefix:
        msg = (
            "Expected at most one of 'experiment' or 'experiment_prefix', but both "
            f"were provided. Got: experiment={experiment}, "
            f"experiment_prefix={experiment_prefix}"
        )
        raise ValueError(msg)
    else:
        if not upload_results:
            _warn_once("'upload_results' parameter is in beta.")
        logger.debug(f"Running evaluation over target system {target}...")
        return await _aevaluate(
            target,
            data=data,
            evaluators=evaluators,
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            experiment_prefix=experiment_prefix,
            description=description,
            max_concurrency=max_concurrency,
            num_repetitions=num_repetitions,
            client=client,
            blocking=blocking,
            experiment=experiment,
            upload_results=upload_results,
        )


async def aevaluate_existing(
    experiment: Union[str, uuid.UUID, schemas.TracerSession],
    evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    max_concurrency: Optional[int] = 0,
    client: Optional[langsmith.Client] = None,
    load_nested: bool = False,
    blocking: bool = True,
) -> AsyncExperimentResults:
    """Evaluate existing experiment runs asynchronously.

    Args:
        experiment (Union[str, uuid.UUID, schemas.TracerSession]): The identifier
            of the experiment to evaluate.
        evaluators (Optional[Sequence[EVALUATOR_T]]): Optional sequence of evaluators to use for individual run evaluation.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): Optional sequence of evaluators
            to apply over the entire dataset.
        metadata (Optional[dict]): Optional metadata to include in the evaluation results.
        max_concurrency (int | None): The maximum number of concurrent
            evaluations to run. If None then no limit is set. If 0 then no concurrency.
            Defaults to 0.
        client (Optional[langsmith.Client]): Optional Langsmith client to use for evaluation.
        load_nested: Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs.
        blocking (bool): Whether to block until evaluation is complete.

    Returns:
        AsyncExperimentResults: An async iterator over the experiment results.

    Examples:
        Define your evaluators:

        >>> from typing import Sequence
        >>> from langsmith.schemas import Example, Run
        >>> def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}

        Load the experiment and run the evaluation.

        >>> from langsmith import aevaluate, aevaluate_existing
        >>> dataset_name = "Evaluate Examples"
        >>> import asyncio
        >>> async def apredict(inputs: dict) -> dict:
        ...     # This can be any async function or just an API call to your app.
        ...     await asyncio.sleep(0.1)
        ...     return {"output": "Yes"}
        >>> # First run inference on the dataset
        ... results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Then evaluate the results:

        >>> experiment_name = "My Experiment:64e6e91"  # Or manually specify
        >>> results = asyncio.run(
        ...     aevaluate_existing(
        ...         experiment_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    """
    client = client or rt.get_cached_client()
    project = (
        experiment
        if isinstance(experiment, schemas.TracerSession)
        else await aitertools.aio_to_thread(_load_experiment, experiment, client)
    )
    runs = await aitertools.aio_to_thread(
        _load_traces, experiment, client, load_nested=load_nested
    )
    data_map = await aitertools.aio_to_thread(_load_examples_map, client, project)
    data = [data_map[run.reference_example_id] for run in runs]
    return await _aevaluate(
        runs,
        data=data,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        metadata=metadata,
        max_concurrency=max_concurrency,
        client=client,
        blocking=blocking,
        experiment=project,
    )


async def _aevaluate(
    target: Union[ATARGET_T, AsyncIterable[dict], Iterable[schemas.Run], Runnable],
    /,
    data: Union[DATA_T, AsyncIterable[schemas.Example]],
    evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = 0,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
    upload_results: bool = True,
) -> AsyncExperimentResults:
    is_async_target = (
        asyncio.iscoroutinefunction(target)
        or (hasattr(target, "__aiter__") and asyncio.iscoroutine(target.__aiter__()))
        or _is_langchain_runnable(target)
    )
    client = client or rt.get_cached_client()
    runs = None if is_async_target else cast(Iterable[schemas.Run], target)
    experiment_, runs = await aitertools.aio_to_thread(
        _resolve_experiment, experiment, runs, client
    )
    manager = await _AsyncExperimentManager(
        data,
        client=client,
        metadata=metadata,
        experiment=experiment_ or experiment_prefix,
        description=description,
        num_repetitions=num_repetitions,
        runs=runs,
        include_attachments=_include_attachments(target)
        or _evaluators_include_attachments(evaluators) > 0,
        reuse_attachments=num_repetitions > 1
        and (
            _include_attachments(target) + _evaluators_include_attachments(evaluators)
        )
        > 1,
        upload_results=upload_results,
    ).astart()
    cache_dir = ls_utils.get_cache_dir(None)
    if cache_dir is not None:
        dsid = await manager.get_dataset_id()
        cache_path = pathlib.Path(cache_dir) / f"{dsid}.yaml"
    else:
        cache_path = None
    with ls_utils.with_optional_cache(cache_path, ignore_hosts=[client.api_url]):
        if is_async_target:
            if evaluators:
                manager = await manager.awith_predictions_and_evaluators(
                    cast(ATARGET_T, target),
                    evaluators,
                    max_concurrency=max_concurrency,
                )
            else:
                manager = await manager.awith_predictions(
                    cast(ATARGET_T, target), max_concurrency=max_concurrency
                )
            if summary_evaluators:
                manager = await manager.awith_summary_evaluators(summary_evaluators)
        else:
            if evaluators:
                manager = await manager.awith_evaluators(
                    evaluators, max_concurrency=max_concurrency
                )
            if summary_evaluators:
                manager = await manager.awith_summary_evaluators(summary_evaluators)
        results = AsyncExperimentResults(manager)
        if blocking:
            await results.wait()
        return results


class _AsyncExperimentManager(_ExperimentManagerMixin):
    """Manage the execution of experiments asynchronously.

    Supports lazily running predictions and evaluations in parallel to facilitate
    result streaming and early debugging.

    Args:
        data (DATA_T): The data used for the experiment. Can be a dataset name or ID OR
            a generator of examples.
        runs (Optional[Iterable[schemas.Run]]): The runs associated with the experiment
            predictions.
        experiment (Optional[schemas.TracerSession]): The tracer session
            associated with the experiment.
        experiment_prefix (Optional[str]): The prefix for the experiment name.
        description (Optional[str]): The description for the experiment.
        metadata (Optional[dict]): Additional metadata for the experiment.
        client (Optional[langsmith.Client]): The Langsmith client used for
            the experiment.
        evaluation_results (Optional[Iterable[EvaluationResults]]): The evaluation
            results for the experiment.
        summary_results (Optional[Iterable[EvaluationResults]]): The aggregate results
            for the experiment.
        num_repetitions (Optional[int], default=1): The number of repetitions for
            the experiment.
        include_attachments (Optional[bool], default=False): Whether to include
            attachments. This is used when pulling the examples for the experiment.
        reuse_attachments (Optional[bool], default=False): Whether to reuse attachments
            from examples. This is True if we need to reuse attachments across multiple
            target/evaluator functions.
        upload_results (Optional[bool], default=True): Whether to upload results
            to Langsmith.
        attachment_raw_data_dict (Optional[dict]): A dictionary to store raw data
            for attachments. Only used if we reuse attachments across multiple
            target/evaluator functions.
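
    Example:
        A minimal sketch of driving the manager directly. Normally you would not
        construct this class yourself; :func:`aevaluate` wires it up for you
        (``my_dataset`` and ``apredict`` below are placeholders):

        >>> import asyncio
        >>> async def _run():  # doctest: +SKIP
        ...     manager = await _AsyncExperimentManager("my_dataset").astart()
        ...     manager = await manager.awith_predictions(apredict)
        ...     async for row in manager.aget_results():
        ...         print(row["run"].id)
        >>> asyncio.run(_run())  # doctest: +SKIP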
    """

    def __init__(
        self,
        data: Union[DATA_T, AsyncIterable[schemas.Example]],
        /,
        experiment: Optional[Union[schemas.TracerSession, str]] = None,
        metadata: Optional[dict] = None,
        runs: Optional[Union[Iterable[schemas.Run], AsyncIterable[schemas.Run]]] = None,
        client: Optional[langsmith.Client] = None,
        evaluation_results: Optional[AsyncIterable[EvaluationResults]] = None,
        summary_results: Optional[AsyncIterable[EvaluationResults]] = None,
        description: Optional[str] = None,
        num_repetitions: int = 1,
        include_attachments: bool = False,
        reuse_attachments: bool = False,
        upload_results: bool = True,
        attachment_raw_data_dict: Optional[dict] = None,
    ):
        super().__init__(
            experiment=experiment,
            metadata=metadata,
            client=client,
            description=description,
        )
        self._data = data
        self._examples: Optional[AsyncIterable[schemas.Example]] = None
        self._runs = (
            aitertools.ensure_async_iterator(runs) if runs is not None else None
        )
        self._evaluation_results = evaluation_results
        self._summary_results = summary_results
        self._num_repetitions = num_repetitions
        self._include_attachments = include_attachments
        self._reuse_attachments = reuse_attachments
        self._upload_results = upload_results
        self._attachment_raw_data_dict = attachment_raw_data_dict

    def _reset_example_attachments(self, example: schemas.Example) -> schemas.Example:
        """Reset attachment readers for an example.

        This is only in the case that an attachment is going to be used by more
        than 1 callable (target + evaluators). In that case we keep a single copy
        of the attachment data in self._attachment_raw_data_dict, and create
        readers from that data. This makes it so that we don't have to keep
        copies of the same data in memory, instead we can just create readers
        from the same data.
        """
        if not hasattr(example, "attachments") or not example.attachments:
            return example
        new_attachments: dict = {}
        for name, attachment in example.attachments.items():
            key = str(example.id) + name
            if (
                self._attachment_raw_data_dict is not None
                and key in self._attachment_raw_data_dict
            ):
                new_attachments[name] = {
                    "presigned_url": attachment["presigned_url"],
                    "reader": io.BytesIO(self._attachment_raw_data_dict[key]),
                    "mime_type": attachment["mime_type"],
                }
            else:
                new_attachments[name] = attachment
        return schemas.Example(
            id=example.id,
            created_at=example.created_at,
            dataset_id=example.dataset_id,
            inputs=example.inputs,
            outputs=example.outputs,
            metadata=example.metadata,
            modified_at=example.modified_at,
            runs=example.runs,
            source_run_id=example.source_run_id,
            attachments=new_attachments,
            _host_url=example._host_url,
            _tenant_id=example._tenant_id,
        )

    def _get_example_with_readers(self, example: schemas.Example) -> schemas.Example:
        # Same rebuild as _reset_example_attachments: hand each callable its own
        # fresh readers over the shared raw attachment data.
        return self._reset_example_attachments(example)

    async def aget_examples(self) -> AsyncIterator[schemas.Example]:
        if self._examples is None:
            self._examples = _aresolve_data(
                self._data,
                client=self.client,
                include_attachments=self._include_attachments,
            )
            if self._reuse_attachments and self._attachment_raw_data_dict is None:
                examples_copy, self._examples = aitertools.atee(self._examples)
                self._attachment_raw_data_dict = {
                    str(e.id) + name: value["reader"].read()
                    async for e in examples_copy
                    for name, value in (e.attachments or {}).items()
                }
            if self._num_repetitions > 1:
                examples_list = [example async for example in self._examples]
                self._examples = async_chain_from_iterable(
                    async_iter_from_list(
                        [self._reset_example_attachments(e) for e in examples_list]
                    )
                    for _ in range(self._num_repetitions)
                )
        self._examples, examples_iter = aitertools.atee(
            aitertools.ensure_async_iterator(self._examples), 2, lock=asyncio.Lock()
        )
        return examples_iter

    async def get_dataset_id(self) -> str:
        if self._experiment is None or not getattr(
            self._experiment, "reference_dataset_id", None
        ):
            example = await aitertools.py_anext(await self.aget_examples())
            if example is None:
                raise ValueError("No examples found in the dataset.")
            return str(example.dataset_id)
        return str(self._experiment.reference_dataset_id)

    async def aget_runs(self) -> AsyncIterator[schemas.Run]:
        if self._runs is None:
            raise ValueError("Runs not loaded yet.")
        self._runs, runs = aitertools.atee(
            aitertools.ensure_async_iterator(self._runs), 2, lock=asyncio.Lock()
        )
        async for run in runs:
            yield run

    async def aget_evaluation_results(self) -> AsyncIterator[EvaluationResults]:
        if self._evaluation_results is None:
            async for _ in await self.aget_examples():
                yield {"results": []}
        else:
            self._evaluation_results, results = aitertools.atee(
                aitertools.ensure_async_iterator(self._evaluation_results),
                2,
                lock=asyncio.Lock(),
            )
            async for result in results:
                yield result

    async def astart(self) -> _AsyncExperimentManager:
        try:
            first_example = await aitertools.py_anext(await self.aget_examples())
        except StopAsyncIteration:
            raise ValueError(
                "No examples found in the dataset. Please ensure the data "
                "provided to aevaluate is not empty."
            )
        if not first_example:
            raise ValueError(
                "No examples found in the dataset. Please ensure the data "
                "provided to aevaluate is not empty."
            )
        project = self._get_project(first_example) if self._upload_results else None
        self._print_experiment_start(project, first_example)
        self._metadata["num_repetitions"] = self._num_repetitions
        return self.__class__(
            await self.aget_examples(),
            experiment=project,
            metadata=self._metadata,
            client=self.client,
            runs=self._runs,
            evaluation_results=self._evaluation_results,
            include_attachments=self._include_attachments,
            reuse_attachments=self._reuse_attachments,
            upload_results=self._upload_results,
            attachment_raw_data_dict=self._attachment_raw_data_dict,
        )

    async def awith_predictions_and_evaluators(
        self,
        target: ATARGET_T,
        evaluators: Sequence[Union[EVALUATOR_T, AEVALUATOR_T]],
        /,
        max_concurrency: Optional[int] = None,
    ) -> _AsyncExperimentManager:
        """Run predictions and evaluations in a single pipeline.

        This allows evaluators to process results as soon as they're available from
        the target function, rather than waiting for all predictions to complete first.
        """
        evaluators = _resolve_evaluators(evaluators)
        if not hasattr(self, "_evaluator_executor"):
            self._evaluator_executor = cf.ThreadPoolExecutor(max_workers=4)

        async def process_examples():
            """Create a single task per example.

            That task is to run the target function and all the evaluators
            sequentially.
            """
            async for pred in self._apredict(
                target,
                max_concurrency=max_concurrency,
                include_attachments=_include_attachments(target),
            ):
                example, run = pred["example"], pred["run"]
                yield self._arun_evaluators(
                    evaluators,
                    ExperimentResultRow(
                        run=run,
                        example=example,
                        evaluation_results={"results": []},
                    ),
                    executor=self._evaluator_executor,
                )

        experiment_results = aitertools.aiter_with_concurrency(
            max_concurrency, process_examples(), _eager_consumption_timeout=0.001
        )
        r1, r2, r3 = aitertools.atee(experiment_results, 3, lock=asyncio.Lock())
        return _AsyncExperimentManager(
            (result["example"] async for result in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(result["run"] async for result in r2),
            evaluation_results=(result["evaluation_results"] async for result in r3),
            summary_results=self._summary_results,
            include_attachments=self._include_attachments,
            upload_results=self._upload_results,
        )

    async def awith_predictions(
        self,
        target: ATARGET_T,
        /,
        max_concurrency: Optional[int] = None,
    ) -> _AsyncExperimentManager:
        _experiment_results = self._apredict(
            target,
            max_concurrency=max_concurrency,
            include_attachments=_include_attachments(target),
        )
        r1, r2 = aitertools.atee(_experiment_results, 2, lock=asyncio.Lock())
        return _AsyncExperimentManager(
            (pred["example"] async for pred in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(pred["run"] async for pred in r2),
            include_attachments=self._include_attachments,
            upload_results=self._upload_results,
        )

    async def awith_evaluators(
        self,
        evaluators: Sequence[Union[EVALUATOR_T, AEVALUATOR_T]],
        *,
        max_concurrency: Optional[int] = None,
    ) -> _AsyncExperimentManager:
        evaluators = _resolve_evaluators(evaluators)
        experiment_results = self._ascore(evaluators, max_concurrency=max_concurrency)
        r1, r2, r3 = aitertools.atee(experiment_results, 3, lock=asyncio.Lock())
        return _AsyncExperimentManager(
            (result["example"] async for result in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(result["run"] async for result in r2),
            evaluation_results=(result["evaluation_results"] async for result in r3),
            summary_results=self._summary_results,
            include_attachments=self._include_attachments,
            upload_results=self._upload_results,
        )

    async def awith_summary_evaluators(
        self, summary_evaluators: Sequence[SUMMARY_EVALUATOR_T]
    ) -> _AsyncExperimentManager:
        wrapped_evaluators = _wrap_summary_evaluators(summary_evaluators)
        aggregate_feedback_gen = self._aapply_summary_evaluators(wrapped_evaluators)
        return _AsyncExperimentManager(
            await self.aget_examples(),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=self.aget_runs(),
            evaluation_results=self._evaluation_results,
            summary_results=aggregate_feedback_gen,
            include_attachments=self._include_attachments,
            upload_results=self._upload_results,
        )

    async def aget_results(self) -> AsyncIterator[ExperimentResultRow]:
        async for run, example, evaluation_results in aitertools.async_zip(
            self.aget_runs(),
            await self.aget_examples(),
            self.aget_evaluation_results(),
        ):
            yield ExperimentResultRow(
                run=run, example=example, evaluation_results=evaluation_results
            )

    async def aget_summary_scores(self) -> Dict[str, List[dict]]:
        if self._summary_results is None:
            return {"results": []}
        return {
            "results": [
                res
                async for results in self._summary_results
                for res in results["results"]
            ]
        }

    async def _apredict(
        self,
        target: ATARGET_T,
        /,
        max_concurrency: Optional[int] = None,
        include_attachments: bool = False,
    ) -> AsyncIterator[_ForwardResults]:
        fn = _ensure_async_traceable(target)

        async def predict_all():
            async for example in await self.aget_examples():
                # Yield unawaited coroutines so aiter_with_concurrency can
                # schedule them with bounded parallelism.
                yield _aforward(
                    fn,
                    example,
                    self.experiment_name,
                    self._metadata,
                    self.client,
                    include_attachments,
                )

        async for result in aitertools.aiter_with_concurrency(
            max_concurrency, predict_all(), _eager_consumption_timeout=0.001
        ):
            yield result
        await self._aend()

    async def _ascore(
        self,
        evaluators: Sequence[RunEvaluator],
        max_concurrency: Optional[int] = None,
    ) -> AsyncIterator[ExperimentResultRow]:
        with cf.ThreadPoolExecutor(max_workers=4) as executor:

            async def score_all():
                async for current_results in self.aget_results():
                    yield self._arun_evaluators(
                        evaluators, current_results, executor=executor
                    )

            async for result in aitertools.aiter_with_concurrency(
                max_concurrency, score_all(), _eager_consumption_timeout=0.001
            ):
                yield result

    async def _arun_evaluators(
        self,
        evaluators: Sequence[RunEvaluator],
        current_results: ExperimentResultRow,
        executor: cf.ThreadPoolExecutor,
    ) -> ExperimentResultRow:
        current_context = rh.get_tracing_context()
        metadata = {
            **(current_context["metadata"] or {}),
            "experiment": self.experiment_name,
        }
        with rh.tracing_context(
            **{
                **current_context,
                "project_name": "evaluators",
                "metadata": metadata,
                "enabled": "local" if not self._upload_results else True,
                "client": self.client,
            }
        ):
            run = current_results["run"]
            example = current_results["example"]
            eval_results = current_results["evaluation_results"]

            async def _run_single_evaluator(evaluator: RunEvaluator):
                try:
                    evaluator_response = await evaluator.aevaluate_run(
                        run=run, example=self._get_example_with_readers(example)
                    )
                    selected_results = self.client._select_eval_results(
                        evaluator_response
                    )
                    if self._upload_results:
                        self.client._log_evaluation_feedback(
                            evaluator_response, run=run, _executor=executor
                        )
                    return selected_results
                except Exception as e:
                    try:
                        feedback_keys = _extract_feedback_keys(evaluator)
                        error_response = EvaluationResults(
                            results=[
                                EvaluationResult(
                                    key=key,
                                    source_run_id=run.id,
                                    comment=repr(e),
                                    extra={"error": True},
                                )
                                for key in feedback_keys
                            ]
                        )
                        selected_results = self.client._select_eval_results(
                            error_response
                        )
                        if self._upload_results:
                            self.client._log_evaluation_feedback(
                                error_response, run=run, _executor=executor
                            )
                        return selected_results
                    except Exception as e2:
                        logger.debug(f"Error parsing feedback keys: {e2}")
                    logger.error(
                        f"Error running evaluator {repr(evaluator)} on "
                        f"run {run.id}: {repr(e)}",
                        exc_info=True,
                    )

            all_results = []
            for evaluator in evaluators:
                all_results.append(await _run_single_evaluator(evaluator))
            for result in all_results:
                if result is not None:
                    eval_results["results"].extend(result)
            return ExperimentResultRow(
                run=run, example=example, evaluation_results=eval_results
            )

    async def _aapply_summary_evaluators(
        self, summary_evaluators: Sequence[SUMMARY_EVALUATOR_T]
    ) -> AsyncIterator[EvaluationResults]:
        runs, examples = [], []
        async_examples = aitertools.ensure_async_iterator(await self.aget_examples())
        async for run, example in aitertools.async_zip(
            self.aget_runs(), async_examples
        ):
            runs.append(run)
            examples.append(example)
        aggregate_feedback = []
        project_id = self._get_experiment().id if self._upload_results else None
        current_context = rh.get_tracing_context()
        metadata = {
            **(current_context["metadata"] or {}),
            "experiment": self.experiment_name,
            "experiment_id": project_id,
        }
        with rh.tracing_context(
            **{
                **current_context,
                "project_name": "evaluators",
                "metadata": metadata,
                "enabled": "local" if not self._upload_results else True,
                "client": self.client,
            }
        ):
            for evaluator in summary_evaluators:
                try:
                    summary_eval_result = evaluator(runs, examples)
                    flattened_results = self.client._select_eval_results(
                        summary_eval_result, fn_name=evaluator.__name__
                    )
                    aggregate_feedback.extend(flattened_results)
                    if self._upload_results:
                        for result in flattened_results:
                            feedback = result.dict(exclude={"target_run_id"})
                            evaluator_info = feedback.pop("evaluator_info", None)
                            await aitertools.aio_to_thread(
                                self.client.create_feedback,
                                **feedback,
                                run_id=None,
                                project_id=project_id,
                                source_info=evaluator_info,
                            )
                except Exception as e:
                    logger.error(
                        f"Error running summary evaluator {repr(evaluator)}: {e}",
                        exc_info=True,
                    )
        yield {"results": aggregate_feedback}

    async def _get_dataset_version(self) -> Optional[str]:
        modified_at = []
        async for example in await self.aget_examples():
            if example.modified_at:
                modified_at.append(example.modified_at)
        max_modified_at = max(modified_at) if modified_at else None
        return max_modified_at.isoformat() if max_modified_at else None

    async def _get_dataset_splits(self) -> Optional[list[str]]:
        splits = set()
        async for example in await self.aget_examples():
            if (
                example.metadata
                and example.metadata.get("dataset_split")
                and isinstance(example.metadata["dataset_split"], list)
            ):
                for split in example.metadata["dataset_split"]:
                    if isinstance(split, str):
                        splits.add(split)
            else:
                splits.add("base")
        return list(splits)

    async def _aend(self) -> None:
        if not self._upload_results:
            return
        experiment = self._experiment
        if experiment is None:
            raise ValueError("Experiment not started yet.")
        project_metadata = self._get_experiment_metadata()
        project_metadata["dataset_version"] = await self._get_dataset_version()
        project_metadata["dataset_splits"] = await self._get_dataset_splits()
        self.client.update_project(
            experiment.id,
            metadata={**experiment.metadata, **project_metadata},
        )


class AsyncExperimentResults:
    def __init__(self, experiment_manager: _AsyncExperimentManager):
        self._manager = experiment_manager
        self._results: List[ExperimentResultRow] = []
        self._summary_results: Optional[dict] = None
        self._lock = asyncio.Lock()
        self._task = asyncio.create_task(self._process_data(self._manager))
        self._processed_count = 0

    @property
    def experiment_name(self) -> str:
        return self._manager.experiment_name

    def __aiter__(self) -> AsyncIterator[ExperimentResultRow]:
        return self

    async def __anext__(self) -> ExperimentResultRow:
        while True:
            async with self._lock:
                if self._processed_count < len(self._results):
                    result = self._results[self._processed_count]
                    self._processed_count += 1
                    return result
                if self._task.done():
                    raise StopAsyncIteration
            # Poll until the background task appends more rows or finishes.
            await asyncio.sleep(0.05)

    async def _process_data(self, manager: _AsyncExperimentManager) -> None:
        tqdm = _load_tqdm()
        async for item in tqdm(manager.aget_results()):
            async with self._lock:
                self._results.append(item)
        summary_scores = await manager.aget_summary_scores()
        async with self._lock:
            self._summary_results = summary_scores

    def to_pandas(
        self, start: Optional[int] = 0, end: Optional[int] = None
    ) -> DataFrame:
        return _to_pandas(self._results, start=start, end=end)

    def _repr_html_(self) -> str:
        import importlib.util

        if self._results and importlib.util.find_spec("pandas"):
            df = self.to_pandas(0, 5)
            return df._repr_html_()
        return self.__repr__()

    def __len__(self) -> int:
        return len(self._results)

    def __repr__(self) -> str:
        return f"<AsyncExperimentResults {self.experiment_name}>"

    async def wait(self) -> None:
        await self._task


async def _aforward(
    fn: rh.SupportsLangsmithExtra[[dict], Awaitable],
    example: schemas.Example,
    experiment_name: str,
    metadata: dict,
    client: langsmith.Client,
    include_attachments: bool = False,
) -> _ForwardResults:
    run: Optional[rt.RunTree] = None

    def _get_run(r: rt.RunTree) -> None:
        nonlocal run
        run = r

    with rh.tracing_context(enabled=True):
        try:
            args = (
                (example.inputs, example.attachments)
                if include_attachments
                else (example.inputs,)
            )
            await fn(
                *args,
                langsmith_extra=rh.LangSmithExtra(
                    reference_example_id=example.id,
                    on_end=_get_run,
                    project_name=experiment_name,
                    metadata={
                        **metadata,
                        "example_version": (
                            example.modified_at.isoformat()
                            if example.modified_at
                            else example.created_at.isoformat()
                        ),
                    },
                    client=client,
                ),
            )
        except Exception as e:
            logger.error(
                f"Error running target function: {e}", exc_info=True, stacklevel=1
            )
        return _ForwardResults(run=cast(schemas.Run, run), example=example)


def _ensure_async_traceable(
    target: ATARGET_T,
) -> rh.SupportsLangsmithExtra[[dict], Awaitable]:
    if not asyncio.iscoroutinefunction(target) and not _is_langchain_runnable(target):
        if callable(target):
            raise ValueError(
                "Target must be an async function. For sync functions, use evaluate."
                " Example usage:\n\n"
                "async def predict(inputs: dict) -> dict:\n"
                "    # do work, like chain.invoke(inputs)\n"
                "    return {...}\n"
                "await aevaluate(predict, ...)"
            )
        else:
            raise ValueError(
                "Target must be a callable async function. "
                "Received a non-callable object. Example usage:\n\n"
                "async def predict(inputs: dict) -> dict:\n"
                "    # do work, like chain.invoke(inputs)\n"
                "    return {...}\n"
                "await aevaluate(predict, ...)"
            )
    if rh.is_traceable_function(target):
        return target  # type: ignore[return-value]
    if _is_langchain_runnable(target):
        target = target.ainvoke  # type: ignore[union-attr]
    return rh.traceable(name="AsyncTarget")(target)


def _aresolve_data(
    data: Union[DATA_T, AsyncIterable[schemas.Example]],
    *,
    client: langsmith.Client,
    include_attachments: bool = False,
) -> AsyncIterator[schemas.Example]:
    """Return the examples for the given dataset."""
    if isinstance(data, AsyncIterable):
        return aitertools.ensure_async_iterator(data)
    return aitertools.ensure_async_iterator(
        _resolve_data(data, client=client, include_attachments=include_attachments)
    )


T = TypeVar("T")


async def async_chain_from_iterable(
    iterable: Iterable[AsyncIterable[T]],
) -> AsyncIterator[T]:
    """Chain multiple async iterables."""
    for sub_iterable in iterable:
        async for item in sub_iterable:
            yield item


async def async_iter_from_list(
    examples: List[schemas.Example],
) -> AsyncIterable[schemas.Example]:
    """Convert a list of examples to an async iterable."""
    for example in examples:
        yield example