from __future__ import annotations

import logging
import sys
from typing import (
    AbstractSet,
    Any,
    AsyncIterator,
    Collection,
    Dict,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Set,
    Tuple,
    Union,
)

import openai
import tiktoken
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
from pydantic import ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self

logger = logging.getLogger(__name__)


def _update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def _stream_response_to_generation_chunk(
    stream_response: Dict[str, Any],
) -> GenerationChunk:
    """Convert a stream response to a generation chunk."""
    if not stream_response["choices"]:
        return GenerationChunk(text="")
    return GenerationChunk(
        text=stream_response["choices"][0]["text"],
        generation_info=dict(
            finish_reason=stream_response["choices"][0].get("finish_reason", None),
            logprobs=stream_response["choices"][0].get("logprobs", None),
        ),
    )


class BaseOpenAI(BaseLLM):
    """Base OpenAI large language model class."""

    # Low-level OpenAI clients; populated by validate_environment.
    client: Any = Field(default=None, exclude=True)
    async_client: Any = Field(default=None, exclude=True)
    model_name: str = Field(default="gpt-3.5-turbo-instruct", alias="model")
    temperature: float = 0.7
    max_tokens: int = 256
    top_p: float = 1
    frequency_penalty: float = 0
    presence_penalty: float = 0
    n: int = 1
    best_of: int = 1
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    openai_api_key: Optional[SecretStr] = Field(
        alias="api_key",
        default_factory=secret_from_env("OPENAI_API_KEY", default=None),
    )
    openai_api_base: Optional[str] = Field(
        alias="base_url", default_factory=from_env("OPENAI_API_BASE", default=None)
    )
    openai_organization: Optional[str] = Field(
        alias="organization",
        default_factory=from_env(
            ["OPENAI_ORG_ID", "OPENAI_ORGANIZATION"], default=None
        ),
    )
    openai_proxy: Optional[str] = Field(
        default_factory=from_env("OPENAI_PROXY", default=None)
    )
    batch_size: int = 20
    request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
        default=None, alias="timeout"
    )
    logit_bias: Optional[Dict[str, float]] = None
    max_retries: int = 2
    seed: Optional[int] = None
    logprobs: Optional[int] = None
    streaming: bool = False
    allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
    disallowed_special: Union[Literal["all"], Collection[str]] = "all"
    tiktoken_model_name: Optional[str] = None
    default_headers: Union[Mapping[str, str], None] = None
    default_query: Union[Mapping[str, object], None] = None
    http_client: Union[Any, None] = None
    http_async_client: Union[Any, None] = None
    extra_body: Optional[Mapping[str, Any]] = None

    model_config = ConfigDict(populate_by_name=True)

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        values = _build_model_kwargs(values, all_required_field_names)
        return values

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that api key and python package exists in environment."""
        if self.n < 1:
            raise ValueError("n must be at least 1.")
        if self.streaming and self.n > 1:
            raise ValueError("Cannot stream results when n > 1.")
        if self.streaming and self.best_of > 1:
            raise ValueError("Cannot stream results when best_of > 1.")
        client_params = {
            "api_key": (
                self.openai_api_key.get_secret_value() if self.openai_api_key else None
            ),
            "organization": self.openai_organization,
            "base_url": self.openai_api_base,
            "timeout": self.request_timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        if not self.client:
            sync_specific = {"http_client": self.http_client}
            self.client = openai.OpenAI(**client_params, **sync_specific).completions
        if not self.async_client:
            async_specific = {"http_client": self.http_async_client}
            self.async_client = openai.AsyncOpenAI(
                **client_params, **async_specific
            ).completions
        return self

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        normal_params: Dict[str, Any] = {
            "temperature": self.temperature,
            "top_p": self.top_p,
            "frequency_penalty": self.frequency_penalty,
            "presence_penalty": self.presence_penalty,
            "n": self.n,
            "seed": self.seed,
            "logprobs": self.logprobs,
        }
        if self.logit_bias is not None:
            normal_params["logit_bias"] = self.logit_bias
        if self.max_tokens is not None:
            normal_params["max_tokens"] = self.max_tokens
        if self.extra_body is not None:
            normal_params["extra_body"] = self.extra_body
        # Only send best_of when it differs from the default of 1.
        if self.best_of > 1:
            normal_params["best_of"] = self.best_of
        return {**normal_params, **self.model_kwargs}

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        params = {**self._invocation_params, **kwargs, "stream": True}
        self.get_sub_prompts(params, [prompt], stop)  # side effect: validates params
        for stream_resp in self.client.create(prompt=prompt, **params):
            if not isinstance(stream_resp, dict):
                stream_resp = stream_resp.model_dump()
            chunk = _stream_response_to_generation_chunk(stream_resp)
            if run_manager:
                run_manager.on_llm_new_token(
                    chunk.text,
                    chunk=chunk,
                    verbose=self.verbose,
                    logprobs=(
                        chunk.generation_info["logprobs"]
                        if chunk.generation_info
                        else None
                    ),
                )
            yield chunk

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        params = {**self._invocation_params, **kwargs, "stream": True}
        self.get_sub_prompts(params, [prompt], stop)  # side effect: validates params
        async for stream_resp in await self.async_client.create(
            prompt=prompt, **params
        ):
            if not isinstance(stream_resp, dict):
                stream_resp = stream_resp.model_dump()
            chunk = _stream_response_to_generation_chunk(stream_resp)
            if run_manager:
                await run_manager.on_llm_new_token(
                    chunk.text,
                    chunk=chunk,
                    verbose=self.verbose,
                    logprobs=(
                        chunk.generation_info["logprobs"]
                        if chunk.generation_info
                        else None
                    ),
                )
            yield chunk

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to OpenAI's endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.

        Example:
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
           Zprompt_tokensZcompletion_tokensZtotal_tokensNrF   ,Cannot stream results with multiple prompts.r   r/   r0   r.   r/   r0   rv   errorr,   system_fingerprintr   )r~   r   r_   lenrp   r   appendr.   r3   r5   r:   r   r   r4   r   extendr*   create_llm_resultrs   r   rw   rx   ry   r   sub_promptsr,   r#   Z_keysr   Z_promptsZ
generationr|   r"   r(   r(   r)   	_generate  sN    


zBaseOpenAI._generatec                   s@  | j }i ||}| |||}g }i }h d}	d}
|D ]}| jrt|dkrVtdd}| j|d ||fi |2 z"3 dH W }|du r|}qt||7 }qt6 |dusJ ||j|jr|j	dnd|jr|j	dndd q8| j
jf d	|i|I dH }t|ts| }||d
  t|	|| q8| j|||||
dS )z:Call out to OpenAI's endpoint async with k unique prompts.r   NrF   r   r   r/   r0   r   rv   r,   r   )r~   r   r_   r   rp   r   r   r.   r3   r5   r;   r   r   r4   r   r   r*   r   r   r(   r(   r)   
_ageneratel  sN    

zBaseOpenAI._ageneratezList[List[str]])r   r   rw   r$   c                   sh   |dur||d< |d dkrBt  dkr0td d |d<  fdd	tdt  jD }|S )
z!Get the sub prompts for llm call.Nrw   rE   rF   z7max_tokens set to -1 not supported for multiple inputs.r   c                   s   g | ]} ||j   qS r(   )rW   ).0ir   rs   r(   r)   
<listcomp>  s   z.BaseOpenAI.get_sub_prompts.<locals>.<listcomp>)r   rp   max_tokens_for_promptrangerW   )rs   r   r   rw   r   r(   r   r)   r     s    zBaseOpenAI.get_sub_promptsr   zDict[str, int])r,   r   r   r#   r   r$   c                C  st   g }| d| j}t|D ]4\}}	||| |d |  }
|dd |
D  q|| jd}|rh||d< t||dS )z2Create the LLMResult from the choices and prompts.rJ   rF   c              	   S  s0   g | ](}t |d  t|d|dddqS )r.   r/   r0   r1   r2   )r   r4   r5   )r   choicer(   r(   r)   r     s   z0BaseOpenAI.create_llm_result.<locals>.<listcomp>)r#   r@   r   )generations
llm_output)r5   rJ   	enumerater   r@   r   )rs   r,   r   r   r#   r   r   rJ   r   _Zsub_choicesr   r(   r(   r)   r     s    
zBaseOpenAI.create_llm_resultc                 C  s   | j S )z,Get the parameters used to invoke the model.)ru   rs   r(   r(   r)   r~     s    zBaseOpenAI._invocation_paramszMapping[str, Any]c                 C  s   i d| j i| jS )zGet the identifying parameters.r@   )r@   ru   r   r(   r(   r)   _identifying_params  s    zBaseOpenAI._identifying_paramsc                 C  s   dS )zReturn type of llm.rq   r(   r   r(   r(   r)   	_llm_type  s    zBaseOpenAI._llm_typez	List[int])r.   r$   c                   sz   | j dur|  |S tjd dk r.t |S | jp8| j}zt|}W n t	yd   t
d}Y n0 |j|| j| jdS )z-Get the token IDs using the tiktoken package.NrF      Zcl100k_base)r`   rb   )Zcustom_get_token_idssysversion_infosuperget_num_tokensrc   r@   tiktokenZencoding_for_modelKeyErrorZget_encodingencoder`   rb   )rs   r.   r@   enc	__class__r(   r)   get_token_ids  s    

zBaseOpenAI.get_token_ids)	modelnamer$   c                 C  s   ddddddddddddddddddddddddd	d	d
d
d}d| v rT|  dd } || d}|du rtd|  dd|  |S )ao  Calculate the maximum number of tokens possible to generate for a model.

        Args:
            modelname: The modelname we want to know the context size for.

        Returns:
            The maximum context size

        Example:
            .. code-block:: python

                max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
        """
        model_token_mapping = {
            "gpt-4o-mini": 128_000,
            "gpt-4o": 128_000,
            "gpt-4o-2024-05-13": 128_000,
            "gpt-4": 8192,
            "gpt-4-0314": 8192,
            "gpt-4-0613": 8192,
            "gpt-4-32k": 32768,
            "gpt-4-32k-0314": 32768,
            "gpt-4-32k-0613": 32768,
            "gpt-3.5-turbo": 4096,
            "gpt-3.5-turbo-0301": 4096,
            "gpt-3.5-turbo-0613": 4096,
            "gpt-3.5-turbo-16k": 16385,
            "gpt-3.5-turbo-16k-0613": 16385,
            "gpt-3.5-turbo-instruct": 4096,
            "text-ada-001": 2049,
            "ada": 2049,
            "text-babbage-001": 2040,
            "babbage": 2049,
            "text-curie-001": 2049,
            "curie": 2049,
            "davinci": 2049,
            "text-davinci-003": 4097,
            "text-davinci-002": 4097,
            "code-davinci-002": 8001,
            "code-davinci-001": 8001,
            "code-cushman-002": 2048,
            "code-cushman-001": 2048,
        }

        # Handle fine-tuned model names, e.g. "ft-<base>:<suffix>".
        if "ft-" in modelname:
            modelname = modelname.split(":")[0]

        context_size = model_token_mapping.get(modelname, None)

        if context_size is None:
            raise ValueError(
                f"Unknown model: {modelname}. Please provide a valid OpenAI model name. "
                "Known models are: " + ", ".join(model_token_mapping.keys())
            )

        return context_size

    @property
    def max_context_size(self) -> int:
        """Get max context size for this model."""
        return self.modelname_to_contextsize(self.model_name)

    def max_tokens_for_prompt(self, prompt: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a prompt.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The maximum number of tokens to generate for a prompt.

        Example:
            .. code-block:: python

                max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
        """
        num_tokens = self.get_num_tokens(prompt)
        return self.max_context_size - num_tokens


class OpenAI(BaseOpenAI):
    """OpenAI completion model integration.

    Setup:
        Install ``langchain-openai`` and set environment variable ``OPENAI_API_KEY``.

        .. code-block:: bash

            pip install -U langchain-openai
            export OPENAI_API_KEY="your-api-key"

    Key init args — completion params:
        model: str
            Name of OpenAI model to use.
        temperature: float
            Sampling temperature.
        max_tokens: Optional[int]
            Max number of tokens to generate.
        logprobs: Optional[bool]
            Whether to return logprobs.
        stream_options: Dict
            Configure streaming outputs, like whether to return token usage when
            streaming (``{"include_usage": True}``).

    Key init args — client params:
        timeout: Union[float, Tuple[float, float], Any, None]
            Timeout for requests.
        max_retries: int
            Max number of retries.
        api_key: Optional[str]
            OpenAI API key. If not passed in will be read from env var OPENAI_API_KEY.
        base_url: Optional[str]
            Base URL for API requests. Only specify if using a proxy or service
            emulator.
        organization: Optional[str]
            OpenAI organization ID. If not passed in will be read from env
            var OPENAI_ORG_ID.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_openai import OpenAI

            llm = OpenAI(
                model="gpt-3.5-turbo-instruct",
                temperature=0,
                max_retries=2,
                # api_key="...",
                # base_url="...",
                # organization="...",
                # other params...
            )

    Invoke:
        .. code-block:: python

            input_text = "The meaning of life is "
            llm.invoke(input_text)

        .. code-block:: none

            "a philosophical question that has been debated by thinkers and scholars for centuries."

    Stream:
        .. code-block:: python

            for chunk in llm.stream(input_text):
                print(chunk, end="|")

        .. code-block:: none

            a| philosophical| question| that| has| been| debated| by| thinkers| and| scholars| for| centuries|.

        .. code-block:: python

            "".join(llm.stream(input_text))

        .. code-block:: none

            "a philosophical question that has been debated by thinkers and scholars for centuries."

    Async:
        .. code-block:: python

            await llm.ainvoke(input_text)

            # stream:
            # async for chunk in (await llm.astream(input_text)):
            #    print(chunk)

            # batch:
            # await llm.abatch([input_text])

        .. code-block:: none

            "a philosophical question that has been debated by thinkers and scholars for centuries."

    """

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "llms", "openai"]

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return True

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"openai_api_key": "OPENAI_API_KEY"}

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        attributes: Dict[str, Any] = {}
        if self.openai_api_base:
            attributes["openai_api_base"] = self.openai_api_base
        if self.openai_organization:
            attributes["openai_organization"] = self.openai_organization
        if self.openai_proxy:
            attributes["openai_proxy"] = self.openai_proxy
        return attributes
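

# Minimal usage sketch mirroring the examples in the class docstring above.
# This demo block is illustrative only: it assumes the OPENAI_API_KEY
# environment variable is set and the account can reach the completions API.
if __name__ == "__main__":
    llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0, max_retries=2)

    # Single-shot completion.
    print(llm.invoke("Tell me a joke."))

    # Streaming completion, printed token by token.
    for chunk in llm.stream("The meaning of life is "):
        print(chunk, end="|")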