"""OpenAI chat wrapper."""

from __future__ import annotations

import base64
import json
import logging
import os
import sys
import warnings
from io import BytesIO
from math import ceil
from operator import itemgetter
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Type,
    TypedDict,
    TypeVar,
    Union,
    cast,
)
from urllib.parse import urlparse

import openai
import tiktoken
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    LangSmithParams,
    agenerate_from_stream,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    BaseMessageChunk,
    ChatMessage,
    ChatMessageChunk,
    FunctionMessage,
    FunctionMessageChunk,
    HumanMessage,
    HumanMessageChunk,
    InvalidToolCall,
    SystemMessage,
    SystemMessageChunk,
    ToolCall,
    ToolMessage,
    ToolMessageChunk,
)
from langchain_core.messages.ai import (
    InputTokenDetails,
    OutputTokenDetails,
    UsageMetadata,
)
from langchain_core.messages.tool import tool_call_chunk
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.output_parsers.openai_tools import (
    JsonOutputKeyToolsParser,
    PydanticToolsParser,
    make_invalid_tool_call,
    parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import (
    Runnable,
    RunnableMap,
    RunnablePassthrough,
    chain,
)
from langchain_core.runnables.config import run_in_executor
from langchain_core.tools import BaseTool
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.function_calling import (
    convert_to_openai_function,
    convert_to_openai_tool,
)
from langchain_core.utils.pydantic import (
    PydanticBaseModel,
    TypeBaseModel,
    is_basemodel_subclass,
)
from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from pydantic.v1 import BaseModel as BaseModelV1
from typing_extensions import Self

logger = logging.getLogger(__name__)


def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
    """Convert a dictionary to a LangChain message.

    Args:
        _dict: The dictionary.

    Returns:
        The LangChain message.
    """
    role = _dict.get("role")
    name = _dict.get("name")
    id_ = _dict.get("id")
    if role == "user":
        return HumanMessage(content=_dict.get("content", ""), name=name, id=id_)
    elif role == "assistant":
        # Fix for azure: OpenAI returns None for tool invocations instead of "".
        content = _dict.get("content", "") or ""
        additional_kwargs: Dict = {}
        if function_call := _dict.get("function_call"):
            additional_kwargs["function_call"] = dict(function_call)
        tool_calls = []
        invalid_tool_calls = []
        if raw_tool_calls := _dict.get("tool_calls"):
            additional_kwargs["tool_calls"] = raw_tool_calls
            for raw_tool_call in raw_tool_calls:
                try:
                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
                except Exception as e:
                    invalid_tool_calls.append(
                        make_invalid_tool_call(raw_tool_call, str(e))
                    )
        if audio := _dict.get("audio"):
            additional_kwargs["audio"] = audio
        return AIMessage(
            content=content,
            additional_kwargs=additional_kwargs,
            name=name,
            id=id_,
            tool_calls=tool_calls,
            invalid_tool_calls=invalid_tool_calls,
        )
    elif role in ("system", "developer"):
        additional_kwargs = {"__openai_role__": role} if role == "developer" else {}
        return SystemMessage(
            content=_dict.get("content", ""),
            name=name,
            id=id_,
            additional_kwargs=additional_kwargs,
        )
    elif role == "function":
        return FunctionMessage(
            content=_dict.get("content", ""), name=cast(str, _dict.get("name")), id=id_
        )
    elif role == "tool":
        additional_kwargs = {}
        if "name" in _dict:
            additional_kwargs["name"] = _dict["name"]
        return ToolMessage(
            content=_dict.get("content", ""),
            tool_call_id=cast(str, _dict.get("tool_call_id")),
            additional_kwargs=additional_kwargs,
            name=name,
            id=id_,
        )
    else:
        return ChatMessage(content=_dict.get("content", ""), role=cast(str, role), id=id_)
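# Minimal usage sketch for the converter above (illustrative only; not part of
# the public API). The input dict shape follows OpenAI's chat completions
# response format:
def _example_convert_dict_to_message() -> None:
    msg = _convert_dict_to_message(
        {
            "role": "assistant",
            "content": None,
            "id": "chatcmpl-123",
            "tool_calls": [
                {
                    "id": "call_abc",
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "arguments": '{"city": "Paris"}',
                    },
                }
            ],
        }
    )
    # The raw tool call is parsed into a structured ToolCall with JSON args.
    assert isinstance(msg, AIMessage)
    assert msg.tool_calls[0]["args"] == {"city": "Paris"}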
def _format_message_content(content: Any) -> Any:
    """Format message content."""
    if content and isinstance(content, list):
        # Remove unexpected block types (e.g. Anthropic-style "tool_use" blocks).
        formatted_content = []
        for block in content:
            if (
                isinstance(block, dict)
                and "type" in block
                and block["type"] == "tool_use"
            ):
                continue
            else:
                formatted_content.append(block)
    else:
        formatted_content = content

    return formatted_content


def _convert_message_to_dict(message: BaseMessage) -> dict:
    """Convert a LangChain message to a dictionary.

    Args:
        message: The LangChain message.

    Returns:
        The dictionary.
    """
    message_dict: Dict[str, Any] = {
        "content": _format_message_content(message.content)
    }
    if (name := message.name or message.additional_kwargs.get("name")) is not None:
        message_dict["name"] = name

    # populate role and additional message data
    if isinstance(message, ChatMessage):
        message_dict["role"] = message.role
    elif isinstance(message, HumanMessage):
        message_dict["role"] = "user"
    elif isinstance(message, AIMessage):
        message_dict["role"] = "assistant"
        if "function_call" in message.additional_kwargs:
            message_dict["function_call"] = message.additional_kwargs["function_call"]
        if message.tool_calls or message.invalid_tool_calls:
            message_dict["tool_calls"] = [
                _lc_tool_call_to_openai_tool_call(tc) for tc in message.tool_calls
            ] + [
                _lc_invalid_tool_call_to_openai_tool_call(tc)
                for tc in message.invalid_tool_calls
            ]
        elif "tool_calls" in message.additional_kwargs:
            message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
            tool_call_supported_props = {"id", "type", "function"}
            message_dict["tool_calls"] = [
                {k: v for k, v in tool_call.items() if k in tool_call_supported_props}
                for tool_call in message_dict["tool_calls"]
            ]
        # If tool calls present, content null value should be None not empty string.
        if "function_call" in message_dict or "tool_calls" in message_dict:
            message_dict["content"] = message_dict["content"] or None

        if "audio" in message.additional_kwargs:
            # openai doesn't support passing the data back - only the id
            # https://platform.openai.com/docs/guides/audio/multi-turn-conversations
            raw_audio = message.additional_kwargs["audio"]
            audio = (
                {"id": message.additional_kwargs["audio"]["id"]}
                if "id" in raw_audio
                else raw_audio
            )
            message_dict["audio"] = audio
    elif isinstance(message, SystemMessage):
        message_dict["role"] = message.additional_kwargs.get(
            "__openai_role__", "system"
        )
    elif isinstance(message, FunctionMessage):
        message_dict["role"] = "function"
    elif isinstance(message, ToolMessage):
        message_dict["role"] = "tool"
        message_dict["tool_call_id"] = message.tool_call_id

        supported_props = {"content", "role", "tool_call_id"}
        message_dict = {k: v for k, v in message_dict.items() if k in supported_props}
    else:
        raise TypeError(f"Got unknown type {message}")
    return message_dict


def _convert_delta_to_message_chunk(
    _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
    id_ = _dict.get("id")
    role = cast(str, _dict.get("role"))
    content = cast(str, _dict.get("content") or "")
    additional_kwargs: Dict = {}
    if _dict.get("function_call"):
        function_call = dict(_dict["function_call"])
        if "name" in function_call and function_call["name"] is None:
            function_call["name"] = ""
        additional_kwargs["function_call"] = function_call
    tool_call_chunks = []
    if raw_tool_calls := _dict.get("tool_calls"):
        additional_kwargs["tool_calls"] = raw_tool_calls
        try:
            tool_call_chunks = [
                tool_call_chunk(
                    name=rtc["function"].get("name"),
                    args=rtc["function"].get("arguments"),
                    id=rtc.get("id"),
                    index=rtc["index"],
                )
                for rtc in raw_tool_calls
            ]
        except KeyError:
            pass

    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content, id=id_)
    elif role == "assistant" or default_class == AIMessageChunk:
        return AIMessageChunk(
            content=content,
            additional_kwargs=additional_kwargs,
            id=id_,
            tool_call_chunks=tool_call_chunks,
        )
    elif role in ("system", "developer") or default_class == SystemMessageChunk:
        if role == "developer":
            additional_kwargs = {"__openai_role__": "developer"}
        else:
            additional_kwargs = {}
        return SystemMessageChunk(
            content=content, id=id_, additional_kwargs=additional_kwargs
        )
    elif role == "function" or default_class == FunctionMessageChunk:
        return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
    elif role == "tool" or default_class == ToolMessageChunk:
        return ToolMessageChunk(
            content=content, tool_call_id=_dict["tool_call_id"], id=id_
        )
    elif role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role, id=id_)
    else:
        return default_class(content=content, id=id_)  # type: ignore[call-arg]


def _update_token_usage(
    overall_token_usage: Union[int, dict], new_usage: Union[int, dict]
) -> Union[int, dict]:
    # Token usage entries are either ints or nested dicts (e.g.
    # ``completion_tokens_details``), so merge recursively.
    if isinstance(new_usage, int):
        if not isinstance(overall_token_usage, int):
            raise ValueError(
                f"Got different types for token usage: "
                f"{type(new_usage)} and {type(overall_token_usage)}"
            )
        return new_usage + overall_token_usage
    elif isinstance(new_usage, dict):
        if not isinstance(overall_token_usage, dict):
            raise ValueError(
                f"Got different types for token usage: "
                f"{type(new_usage)} and {type(overall_token_usage)}"
            )
        return {
            k: _update_token_usage(overall_token_usage.get(k, 0), v)
            for k, v in new_usage.items()
        }
    else:
        warnings.warn(f"Unexpected type for token usage: {type(new_usage)}")
        return new_usage


def _handle_openai_bad_request(e: openai.BadRequestError) -> None:
    if (
        "'response_format' of type 'json_schema' is not supported with this model"
    ) in e.message:
        message = (
            "This model does not support OpenAI's structured output feature, which "
            "is the default method for `with_structured_output` as of "
            "langchain-openai==0.3. To use `with_structured_output` with this model, "
            'specify `method="function_calling"`.'
        )
        warnings.warn(message)
        raise e
    elif "Invalid schema for response_format" in e.message:
        message = (
            "Invalid schema for OpenAI's structured output feature, which is the "
            "default method for `with_structured_output` as of langchain-openai==0.3. "
            'Specify `method="function_calling"` instead or update your schema. '
            "See supported schemas: "
            "https://platform.openai.com/docs/guides/structured-outputs#supported-schemas"
        )
        warnings.warn(message)
        raise e
    else:
        raise


class _FunctionCall(TypedDict):
    name: str


_BM = TypeVar("_BM", bound=BaseModel)
_DictOrPydanticClass = Union[Dict[str, Any], Type[_BM], Type]
_DictOrPydantic = Union[Dict, _BM]


class _AllReturnType(TypedDict):
    raw: BaseMessage
    parsed: Optional[_DictOrPydantic]
    parsing_error: Optional[BaseException]


class BaseChatOpenAI(BaseChatModel):
    client: Any = Field(default=None, exclude=True)  #: :meta private:
    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
    root_client: Any = Field(default=None, exclude=True)  #: :meta private:
    root_async_client: Any = Field(default=None, exclude=True)  #: :meta private:
    model_name: str = Field(default="gpt-3.5-turbo", alias="model")
    """Model name to use."""
    temperature: Optional[float] = None
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for the `create` call that are not
    explicitly specified."""
    openai_api_key: Optional[SecretStr] = Field(
        alias="api_key",
        default_factory=secret_from_env("OPENAI_API_KEY", default=None),
    )
    openai_api_base: Optional[str] = Field(default=None, alias="base_url")
    """Base URL path for API requests, leave blank if not using a proxy or service
    emulator."""
    openai_organization: Optional[str] = Field(default=None, alias="organization")
    """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided."""
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = Field(
        default_factory=from_env("OPENAI_PROXY", default=None)
    )
    request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
        default=None, alias="timeout"
    )
    """Timeout for requests to the OpenAI completion API."""
    max_retries: Optional[int] = None
    """Maximum number of retries to make when generating."""
    presence_penalty: Optional[float] = None
    """Penalizes repeated tokens."""
    frequency_penalty: Optional[float] = None
    """Penalizes repeated tokens according to frequency."""
    seed: Optional[int] = None
    """Seed for generation."""
    logprobs: Optional[bool] = None
    """Whether to return logprobs."""
    top_logprobs: Optional[int] = None
    """Number of most likely tokens to return at each token position, each with
    an associated log probability. ``logprobs`` must be set to true if this
    parameter is used."""
    logit_bias: Optional[Dict[int, int]] = None
    """Modify the likelihood of specified tokens appearing in the completion."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: Optional[int] = None
    """Number of chat completions to generate for each prompt."""
    top_p: Optional[float] = None
    """Total probability mass of tokens to consider at each step."""
    max_tokens: Optional[int] = Field(default=None)
    """Maximum number of tokens to generate."""
    reasoning_effort: Optional[str] = None
    """Constrains effort on reasoning for reasoning models."""
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class."""
    default_headers: Union[Mapping[str, str], None] = None
    default_query: Union[Mapping[str, object], None] = None
    # Configure a custom httpx client. See the
    # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
    http_client: Union[Any, None] = Field(default=None, exclude=True)
    """Optional ``httpx.Client``. Only used for sync invocations."""
    http_async_client: Union[Any, None] = Field(default=None, exclude=True)
    """Optional ``httpx.AsyncClient``. Only used for async invocations."""
    stop: Optional[Union[List[str], str]] = Field(default=None, alias="stop_sequences")
    """Default stop sequences."""
    extra_body: Optional[Mapping[str, Any]] = None
    """Optional additional JSON properties to include in the request parameters."""
    include_response_headers: bool = False
    """Whether to include response headers in the output message's
    ``response_metadata``."""
    disabled_params: Optional[Dict[str, Any]] = Field(default=None)
    """Parameters of the OpenAI client or chat.completions endpoint that should be
    disabled for the given model."""

    model_config = ConfigDict(populate_by_name=True)
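    # Illustrative ``disabled_params`` usage (a sketch, not prescribed by this
    # module): ``{"parallel_tool_calls": None}`` tells helpers such as
    # ``with_structured_output`` (via ``_filter_disabled_params`` below) to omit
    # ``parallel_tool_calls`` from the request, which some OpenAI-compatible
    # endpoints reject. Mapping a parameter to a list of values disables only
    # those specific values.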
zBaseChatOpenAI.build_extrac                 C  s6   | dp| dpd}|dr2d|vr2d|d< |S )z-Currently o1 models only allow temperature=1.r   r   rX   o1r      )rh   
startswithr   r   r   rp   rp   rq   validate_temperature  s    z#BaseChatOpenAI.validate_temperaturec                 C  s:   | dp| dpd}|dkr6| ddu r6d|d< |S )zDisable streaming if n > 1.r   r   rX   r   Zdisable_streamingNT)rh   r   rp   rp   rq   validate_disable_streaming  s    z)BaseChatOpenAI.validate_disable_streamingafterrP   rR   c           	   
   C  s  | j dur| j dk rtdn"| j dur@| j dkr@| jr@td| jpXtdpXtd| _| jpjtd| _| jr~| j nd| j| j| j	| j
| jd}| jdur| j|d	< | jr| js| jr| j}| j}| j}td
|d|d|| js~| jrR| jsRzddl}W n0 ty@ } ztd|W Y d}~n
d}~0 0 |j| jd| _d| ji}tjf i ||| _| jjj| _| js| jr| jszddl}W n0 ty } ztd|W Y d}~n
d}~0 0 |j| jd| _d| ji}tjf i ||| _| jjj| _| S )z?Validate that api key and python package exists in environment.Nr   zn must be at least 1.zn must be 1 when streaming.ZOPENAI_ORG_IDZOPENAI_ORGANIZATIONZOPENAI_API_BASE)r   r   r   r   r   r   r   zwCannot specify 'openai_proxy' if one of 'http_client'/'http_async_client' is already specified. Received:
openai_proxy=z
http_client=z
http_async_client=r   zRCould not import httpx python package. Please install it with `pip install httpx`.)proxyr   )r   r   r   r   osgetenvr   r   Zget_secret_valuer   r   r   r   r   r   r   r   httpxImportErrorZClientopenaiZOpenAIr   chatcompletionsr   ZAsyncClientZAsyncOpenAIr   )	selfZclient_paramsr   r   r   r   ro   Zsync_specificZasync_specificrp   rp   rq   validate_environment  s    





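    # Sketch of the validators above: ``ChatOpenAI(model="o1")`` gets
    # ``temperature=1`` injected (o1 models only accept the default) and has
    # ``disable_streaming`` turned on, while any unrecognized keyword argument
    # passed to the constructor is folded into ``model_kwargs`` by
    # ``build_extra``.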
    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        exclude_if_none = {
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "seed": self.seed,
            "top_p": self.top_p,
            "logprobs": self.logprobs,
            "top_logprobs": self.top_logprobs,
            "logit_bias": self.logit_bias,
            "stop": self.stop or None,  # also exclude empty list for this
            "max_tokens": self.max_tokens,
            "extra_body": self.extra_body,
            "n": self.n,
            "temperature": self.temperature,
            "reasoning_effort": self.reasoning_effort,
        }

        params = {
            "model": self.model_name,
            "stream": self.streaming,
            **{k: v for k, v in exclude_if_none.items() if v is not None},
            **self.model_kwargs,
        }

        return params

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        overall_token_usage: dict = {}
        system_fingerprint = None
        for output in llm_outputs:
            if output is None:
                # Happens in streaming
                continue
            token_usage = output["token_usage"]
            if token_usage is not None:
                for k, v in token_usage.items():
                    if v is None:
                        continue
                    if k in overall_token_usage:
                        overall_token_usage[k] = _update_token_usage(
                            overall_token_usage[k], v
                        )
                    else:
                        overall_token_usage[k] = v
            if system_fingerprint is None:
                system_fingerprint = output.get("system_fingerprint")
        combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
        if system_fingerprint:
            combined["system_fingerprint"] = system_fingerprint
        return combined

    def _convert_chunk_to_generation_chunk(
        self,
        chunk: dict,
        default_chunk_class: Type,
        base_generation_info: Optional[Dict],
    ) -> Optional[ChatGenerationChunk]:
        if chunk.get("type") == "content.delta":  # from beta.chat.completions.stream
            return None
        token_usage = chunk.get("usage")
        choices = (
            chunk.get("choices", [])
            # from beta.chat.completions.stream
            or chunk.get("chunk", {}).get("choices", [])
        )

        usage_metadata: Optional[UsageMetadata] = (
            _create_usage_metadata(token_usage) if token_usage else None
        )
        if len(choices) == 0:
            # logprobs is implicitly None
            generation_chunk = ChatGenerationChunk(
                message=default_chunk_class(content="", usage_metadata=usage_metadata)
            )
            return generation_chunk

        choice = choices[0]
        if choice["delta"] is None:
            return None

        message_chunk = _convert_delta_to_message_chunk(
            choice["delta"], default_chunk_class
        )
        generation_info = {**base_generation_info} if base_generation_info else {}

        if finish_reason := choice.get("finish_reason"):
            generation_info["finish_reason"] = finish_reason
            if model_name := chunk.get("model"):
                generation_info["model_name"] = model_name
            if system_fingerprint := chunk.get("system_fingerprint"):
                generation_info["system_fingerprint"] = system_fingerprint

        if logprobs := choice.get("logprobs"):
            generation_info["logprobs"] = logprobs

        if usage_metadata and isinstance(message_chunk, AIMessageChunk):
            message_chunk.usage_metadata = usage_metadata

        generation_chunk = ChatGenerationChunk(
            message=message_chunk, generation_info=generation_info or None
        )
        return generation_chunk
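    # The chunk dicts handled above mirror OpenAI's streaming payloads. An
    # illustrative (assumed) shape:
    #
    #     {"id": "chatcmpl-123", "model": "gpt-4o",
    #      "system_fingerprint": "fp_...",
    #      "choices": [{"index": 0, "delta": {"content": "Hello"},
    #                   "finish_reason": None}]}
    #
    # When ``stream_options={"include_usage": True}`` is set, a final chunk
    # with an empty "choices" list carries only the "usage" totals.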
    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        kwargs["stream"] = True
        payload = self._get_request_payload(messages, stop=stop, **kwargs)
        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
        base_generation_info = {}

        if "response_format" in payload:
            if self.include_response_headers:
                warnings.warn(
                    "Cannot currently include response headers when response_format "
                    "is specified."
                )
            payload.pop("stream")
            response_stream = self.root_client.beta.chat.completions.stream(**payload)
            context_manager = response_stream
        else:
            if self.include_response_headers:
                raw_response = self.client.with_raw_response.create(**payload)
                response = raw_response.parse()
                base_generation_info = {"headers": dict(raw_response.headers)}
            else:
                response = self.client.create(**payload)
            context_manager = response
        try:
            with context_manager as response:
                is_first_chunk = True
                for chunk in response:
                    if not isinstance(chunk, dict):
                        chunk = chunk.model_dump()
                    generation_chunk = self._convert_chunk_to_generation_chunk(
                        chunk,
                        default_chunk_class,
                        base_generation_info if is_first_chunk else {},
                    )
                    if generation_chunk is None:
                        continue
                    default_chunk_class = generation_chunk.message.__class__
                    logprobs = (generation_chunk.generation_info or {}).get("logprobs")
                    if run_manager:
                        run_manager.on_llm_new_token(
                            generation_chunk.text,
                            chunk=generation_chunk,
                            logprobs=logprobs,
                        )
                    is_first_chunk = False
                    yield generation_chunk
        except openai.BadRequestError as e:
            _handle_openai_bad_request(e)
        if hasattr(response, "get_final_completion") and "response_format" in payload:
            final_completion = response.get_final_completion()
            generation_chunk = self._get_generation_chunk_from_completion(
                final_completion
            )
            if run_manager:
                run_manager.on_llm_new_token(
                    generation_chunk.text, chunk=generation_chunk
                )
            yield generation_chunk

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)
        payload = self._get_request_payload(messages, stop=stop, **kwargs)
        generation_info = None
        if "response_format" in payload:
            if self.include_response_headers:
                warnings.warn(
                    "Cannot currently include response headers when response_format "
                    "is specified."
                )
            payload.pop("stream")
            try:
                response = self.root_client.beta.chat.completions.parse(**payload)
            except openai.BadRequestError as e:
                _handle_openai_bad_request(e)
        elif self.include_response_headers:
            raw_response = self.client.with_raw_response.create(**payload)
            response = raw_response.parse()
            generation_info = {"headers": dict(raw_response.headers)}
        else:
            response = self.client.create(**payload)
        return self._create_chat_result(response, generation_info)

    def _get_request_payload(
        self,
        input_: LanguageModelInput,
        *,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> dict:
        messages = self._convert_input(input_).to_messages()
        if stop is not None:
            kwargs["stop"] = stop
        return {
            "messages": [_convert_message_to_dict(m) for m in messages],
            **self._default_params,
            **kwargs,
        }

    def _create_chat_result(
        self,
        response: Union[dict, openai.BaseModel],
        generation_info: Optional[Dict] = None,
    ) -> ChatResult:
        generations = []

        response_dict = (
            response if isinstance(response, dict) else response.model_dump()
        )
        # Sometimes the AzureOpenAI endpoint returns an error payload with a
        # top-level "error" key.
        if response_dict.get("error"):
            raise ValueError(response_dict.get("error"))

        token_usage = response_dict.get("usage")
        for res in response_dict["choices"]:
            message = _convert_dict_to_message(res["message"])
            if token_usage and isinstance(message, AIMessage):
                message.usage_metadata = _create_usage_metadata(token_usage)
            generation_info = generation_info or {}
            generation_info["finish_reason"] = (
                res.get("finish_reason")
                if res.get("finish_reason") is not None
                else generation_info.get("finish_reason")
            )
            if "logprobs" in res:
                generation_info["logprobs"] = res["logprobs"]
            gen = ChatGeneration(message=message, generation_info=generation_info)
            generations.append(gen)
        llm_output = {
            "token_usage": token_usage,
            "model_name": response_dict.get("model", self.model_name),
            "system_fingerprint": response_dict.get("system_fingerprint", ""),
        }

        if isinstance(response, openai.BaseModel) and getattr(
            response, "choices", None
        ):
            message = response.choices[0].message  # type: ignore[attr-defined]
            if hasattr(message, "parsed"):
                generations[0].message.additional_kwargs["parsed"] = message.parsed
            if hasattr(message, "refusal"):
                generations[0].message.additional_kwargs["refusal"] = message.refusal

        return ChatResult(generations=generations, llm_output=llm_output)

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        kwargs["stream"] = True
        payload = self._get_request_payload(messages, stop=stop, **kwargs)
        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
        base_generation_info = {}

        if "response_format" in payload:
            if self.include_response_headers:
                warnings.warn(
                    "Cannot currently include response headers when response_format "
                    "is specified."
                )
            payload.pop("stream")
            response_stream = self.root_async_client.beta.chat.completions.stream(
                **payload
            )
            context_manager = response_stream
        else:
            if self.include_response_headers:
                raw_response = await self.async_client.with_raw_response.create(
                    **payload
                )
                response = raw_response.parse()
                base_generation_info = {"headers": dict(raw_response.headers)}
            else:
                response = await self.async_client.create(**payload)
            context_manager = response
        try:
            async with context_manager as response:
                is_first_chunk = True
                async for chunk in response:
                    if not isinstance(chunk, dict):
                        chunk = chunk.model_dump()
                    generation_chunk = self._convert_chunk_to_generation_chunk(
                        chunk,
                        default_chunk_class,
                        base_generation_info if is_first_chunk else {},
                    )
                    if generation_chunk is None:
                        continue
                    default_chunk_class = generation_chunk.message.__class__
                    logprobs = (generation_chunk.generation_info or {}).get("logprobs")
                    if run_manager:
                        await run_manager.on_llm_new_token(
                            generation_chunk.text,
                            chunk=generation_chunk,
                            logprobs=logprobs,
                        )
                    is_first_chunk = False
                    yield generation_chunk
        except openai.BadRequestError as e:
            _handle_openai_bad_request(e)
        if hasattr(response, "get_final_completion") and "response_format" in payload:
            final_completion = await response.get_final_completion()
            generation_chunk = self._get_generation_chunk_from_completion(
                final_completion
            )
            if run_manager:
                await run_manager.on_llm_new_token(
                    generation_chunk.text, chunk=generation_chunk
                )
            yield generation_chunk

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)
        payload = self._get_request_payload(messages, stop=stop, **kwargs)
        generation_info = None
        if "response_format" in payload:
            if self.include_response_headers:
                warnings.warn(
                    "Cannot currently include response headers when response_format "
                    "is specified."
                )
            payload.pop("stream")
            try:
                response = await self.root_async_client.beta.chat.completions.parse(
                    **payload
                )
            except openai.BadRequestError as e:
                _handle_openai_bad_request(e)
        elif self.include_response_headers:
            raw_response = await self.async_client.with_raw_response.create(**payload)
            response = raw_response.parse()
            generation_info = {"headers": dict(raw_response.headers)}
        else:
            response = await self.async_client.create(**payload)
        return await run_in_executor(
            None, self._create_chat_result, response, generation_info
        )

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {"model_name": self.model_name, **self._default_params}

    def _get_invocation_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""
        return {
            "model": self.model_name,
            **super()._get_invocation_params(stop=stop),
            **self._default_params,
            **kwargs,
        }

    def _get_ls_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)
        ls_params = LangSmithParams(
            ls_provider="openai",
            ls_model_name=self.model_name,
            ls_model_type="chat",
            ls_temperature=params.get("temperature", self.temperature),
        )
        if ls_max_tokens := params.get("max_tokens", self.max_tokens) or params.get(
            "max_completion_tokens", self.max_tokens
        ):
            ls_params["ls_max_tokens"] = ls_max_tokens
        if ls_stop := stop or params.get("stop", None):
            ls_params["ls_stop"] = ls_stop
        return ls_params

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "openai-chat"

    def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
        if self.tiktoken_model_name is not None:
            model = self.tiktoken_model_name
        else:
            model = self.model_name
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        return model, encoding

    def get_token_ids(self, text: str) -> List[int]:
        """Get the tokens present in the text with tiktoken package."""
        if self.custom_get_token_ids is not None:
            return self.custom_get_token_ids(text)
        # tiktoken is NOT supported for Python 3.7 or below.
        if sys.version_info[1] <= 7:
            return super().get_token_ids(text)
        _, encoding_model = self._get_encoding_model()
        return encoding_model.encode(text)
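    # Sketch: with no custom tokenizer configured, ``get_token_ids`` is
    # equivalent to ``tiktoken.encoding_for_model(self.model_name).encode(text)``,
    # falling back to the "cl100k_base" encoding for unknown model names.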
zBaseChatOpenAI.get_token_idszCOptional[Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]]]r   )r  toolsrR   c              	     s  |durt d tjd dkr,t |S |  \}}|drLd}d}n.|ds`|d	rjd
}d}ntd| dd}dd |D }|D ]b}	||7 }|		 D ]J\}
}|
dkr|d
7 }qt
|tr|D ]}t
|ts|d dkrt
|tr|d n|}|t||7 }q|d dkrl|d ddkrF|d7 }n$t|d d }|s^q|t| 7 }q|d dkr|t||d d 7 }|t||d d 7 }qtd| qn |sqn|t|t|7 }|
dkr||7 }qq|d
7 }|S )a  Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.

        **Requirements**: You must have the ``pillow`` installed if you want to count
        image tokens if you are specifying the image as a base64 string, and you must
        have both ``pillow`` and ``httpx`` installed if you are specifying the image
        as a URL. If these aren't installed image inputs will be ignored in token
        counting.

        OpenAI reference: https://github.com/openai/openai-cookbook/blob/
        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb

        Args:
            messages: The message inputs to tokenize.
            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
                to be converted to tool schemas.
        NzECounting tokens in tool schemas is not yet supported. Ignoring tools.r   rG  zgpt-3.5-turbo-0301   r   gpt-4   zFget_num_tokens_from_messages() is not presently implemented for model z. See https://platform.openai.com/docs/guides/text-generation/managing-tokens for information on how messages are converted to tokens.r   c                 S  s   g | ]}t |qS rp   r-  r.  rp   rp   rq   r|   4  r   z?BaseChatOpenAI.get_num_tokens_from_messages.<locals>.<listcomp>rf   rs   r  Z	image_urldetaillowU   urlrc   r   rT   z!Unrecognized content block type

)r   r   rH  rI  r<  get_num_tokens_from_messagesrF  r   NotImplementedErrorr   rt   ru   rl   ri   r  rK  rh   _url_to_size_count_image_tokensr   )r   r  rM  r   rE  Ztokens_per_messageZtokens_per_nameZ
num_tokensZmessages_dictrx   keyvaluevalr  Z
image_sizer>  rp   rq   rV    sj    


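    # Usage sketch for the token counter above (illustrative; assumes a recent
    # tiktoken with an encoding registered for the chosen model):
    #
    #     llm = ChatOpenAI(model="gpt-4o")
    #     llm.get_num_tokens_from_messages([HumanMessage(content="Hello!")])
    #
    # The result includes the per-message overhead and the reply primer, so it
    # is larger than len(encoding.encode("Hello!")) alone.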
    @deprecated(
        since="0.2.1",
        alternative="langchain_openai.chat_models.base.ChatOpenAI.bind_tools",
        removal="1.0.0",
    )
    def bind_functions(
        self,
        functions: Sequence[
            Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]
        ],
        function_call: Optional[
            Union[_FunctionCall, str, Literal["auto", "none"]]
        ] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind functions (and other objects) to this chat model.

        Assumes model is compatible with OpenAI function-calling API.

        NOTE: Using bind_tools is recommended instead, as the `functions` and
            `function_call` request parameters are officially marked as deprecated by
            OpenAI.

        Args:
            functions: A list of function definitions to bind to this chat model.
                Can be a dictionary, pydantic model, or callable. Pydantic
                models and callables will be automatically converted to
                their schema dictionary representation.
            function_call: Which function to require the model to call.
                Must be the name of the single provided function or
                "auto" to automatically determine which function to call
                (if any).
            **kwargs: Any additional parameters to pass to the
                :class:`~langchain.runnable.Runnable` constructor.
        """
        formatted_functions = [convert_to_openai_function(fn) for fn in functions]
        if function_call is not None:
            function_call = (
                {"name": function_call}
                if isinstance(function_call, str)
                and function_call not in ("auto", "none")
                else function_call
            )
            if isinstance(function_call, dict) and len(formatted_functions) != 1:
                raise ValueError(
                    "When specifying `function_call`, you must provide exactly one "
                    "function."
                )
            if (
                isinstance(function_call, dict)
                and formatted_functions[0]["name"] != function_call["name"]
            ):
                raise ValueError(
                    f"Function call {function_call} was specified, but the only "
                    f"provided function was {formatted_functions[0]['name']}."
                )
            kwargs = {**kwargs, "function_call": function_call}
        return super().bind(functions=formatted_functions, **kwargs)

    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
        *,
        tool_choice: Optional[
            Union[dict, str, Literal["auto", "none", "required", "any"], bool]
        ] = None,
        strict: Optional[bool] = None,
        parallel_tool_calls: Optional[bool] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Assumes model is compatible with OpenAI tool-calling API.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Supports any tool definition handled by
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
            tool_choice: Which tool to require the model to call. Options are:

                - str of the form ``"<<tool_name>>"``: calls <<tool_name>> tool.
                - ``"auto"``: automatically selects a tool (including no tool).
                - ``"none"``: does not call a tool.
                - ``"any"`` or ``"required"`` or ``True``: force at least one tool to be called.
                - dict of the form ``{"type": "function", "function": {"name": <<tool_name>>}}``: calls <<tool_name>> tool.
                - ``False`` or ``None``: no effect, default OpenAI behavior.
            strict: If True, model output is guaranteed to exactly match the JSON Schema
                provided in the tool definition. If True, the input schema will be
                validated according to
                https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
                If False, input schema will not be validated and model output will not
                be validated.
                If None, ``strict`` argument will not be passed to the model.
            parallel_tool_calls: Set to ``False`` to disable parallel tool use.
                Defaults to ``None`` (no specification, which allows parallel tool use).
            kwargs: Any additional parameters are passed directly to
                :meth:`~langchain_openai.chat_models.base.ChatOpenAI.bind`.

        .. versionchanged:: 0.1.21

            Support for ``strict`` argument added.

        """
        if parallel_tool_calls is not None:
            kwargs["parallel_tool_calls"] = parallel_tool_calls
        formatted_tools = [
            convert_to_openai_tool(tool, strict=strict) for tool in tools
        ]
        if tool_choice:
            if isinstance(tool_choice, str):
                # tool_choice is a tool/function name
                if tool_choice not in ("auto", "none", "any", "required"):
                    tool_choice = {
                        "type": "function",
                        "function": {"name": tool_choice},
                    }
                # 'any' is not natively supported by OpenAI API.
                # We support 'any' since other models use this instead of 'required'.
                elif tool_choice == "any":
                    tool_choice = "required"
            elif isinstance(tool_choice, bool):
                tool_choice = "required"
            elif isinstance(tool_choice, dict):
                tool_names = [
                    formatted_tool["function"]["name"]
                    for formatted_tool in formatted_tools
                ]
                if not any(
                    tool_name == tool_choice["function"]["name"]
                    for tool_name in tool_names
                ):
                    raise ValueError(
                        f"Tool choice {tool_choice} was specified, but the only "
                        f"provided tools were {tool_names}."
                    )
            else:
                raise ValueError(
                    f"Unrecognized tool_choice type. Expected str, bool or dict. "
                    f"Received: {tool_choice}"
                )
            kwargs["tool_choice"] = tool_choice
        return super().bind(tools=formatted_tools, **kwargs)

    def with_structured_output(
        self,
        schema: Optional[_DictOrPydanticClass] = None,
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "function_calling",
        include_raw: bool = False,
        strict: Optional[bool] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, _DictOrPydantic]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:

                - an OpenAI function/tool schema,
                - a JSON Schema,
                - a TypedDict class (support added in 0.1.20),
                - or a Pydantic class.

                If ``schema`` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            method: The method for steering model generation, one of:

                - "function_calling":
                    Uses OpenAI's tool-calling (formerly called function calling)
                    API: https://platform.openai.com/docs/guides/function-calling
                - "json_schema":
                    Uses OpenAI's Structured Output API: https://platform.openai.com/docs/guides/structured-outputs
                    Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later
                    models.
                - "json_mode":
                    Uses OpenAI's JSON mode. Note that if using JSON mode then you
                    must include instructions for formatting the output into the
                    desired schema into the model call:
                    https://platform.openai.com/docs/guides/structured-outputs/json-mode

                Learn more about the differences between the methods and which models
                support which methods here:

                - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode
                - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".
            strict:

                - True:
                    Model output is guaranteed to exactly match the schema.
                    The input schema will also be validated according to
                    https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
                - False:
                    Input schema will not be validated and model output will not be
                    validated.
                - None:
                    ``strict`` argument will not be passed to the model.

            kwargs: Additional keyword args aren't supported.

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

            | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

            | If ``include_raw`` is True, then Runnable outputs a dict with keys:

            - "raw": BaseMessage
            - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
            - "parsing_error": Optional[BaseException]

        .. versionchanged:: 0.1.20

            Added support for TypedDict class ``schema``.

        .. versionchanged:: 0.1.21

            Support for ``strict`` argument added.
            Support for ``method`` = "json_schema" added.
        """
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        if strict is not None and method == "json_mode":
            raise ValueError(
                "Argument `strict` is not supported with `method`='json_mode'"
            )
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "json_schema":
            # Check for Pydantic BaseModel V1
            if is_pydantic_schema and issubclass(
                schema, BaseModelV1  # type: ignore[arg-type]
            ):
                warnings.warn(
                    "Received a Pydantic BaseModel V1 schema. This is not supported by "
                    'method="json_schema". Please use method="function_calling" or '
                    "specify schema via JSON Schema or Pydantic V2 BaseModel. "
                    'Overriding to method="function_calling".'
                )
                method = "function_calling"
            # Check for incompatible model
            if self.model_name and (
                self.model_name.startswith("gpt-3")
                or self.model_name.startswith("gpt-4-")
                or self.model_name == "gpt-4"
            ):
                warnings.warn(
                    f"Cannot use method='json_schema' with model {self.model_name} "
                    "since it doesn't support OpenAI's Structured Output API. You can "
                    "see supported models here: "
                    "https://platform.openai.com/docs/guides/structured-outputs#supported-models. "
                    "To fix this warning, set `method='function_calling'. "
                    "Overriding to method='function_calling'."
                )
                method = "function_calling"

        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is not 'json_mode'. "
                    "Received None."
                )
            tool_name = convert_to_openai_tool(schema)["function"]["name"]
            bind_kwargs = self._filter_disabled_params(
                tool_choice=tool_name,
                parallel_tool_calls=False,
                strict=strict,
                structured_output_format={
                    "kwargs": {"method": method},
                    "schema": schema,
                },
            )

            llm = self.bind_tools([schema], **bind_kwargs)
            if is_pydantic_schema:
                output_parser: Runnable = PydanticToolsParser(
                    tools=[schema],  # type: ignore[list-item]
                    first_tool_only=True,
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_mode":
            llm = self.bind(
                response_format={"type": "json_object"},
                structured_output_format={
                    "kwargs": {"method": method},
                    "schema": schema,
                },
            )
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)  # type: ignore[arg-type]
                if is_pydantic_schema
                else JsonOutputParser()
            )
        elif method == "json_schema":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is not 'json_mode'. "
                    "Received None."
                )
            response_format = _convert_to_openai_response_format(schema, strict=strict)
            llm = self.bind(
                response_format=response_format,
                structured_output_format={
                    "kwargs": {"method": method},
                    "schema": convert_to_openai_tool(schema),
                },
            )
            if is_pydantic_schema:
                output_parser = _oai_structured_outputs_parser.with_types(
                    output_type=cast(type, schema)
                )
            else:
                output_parser = JsonOutputParser()
        else:
            raise ValueError(
                f"Unrecognized method argument. Expected one of 'function_calling' or "
                f"'json_mode'. Received: '{method}'"
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser

    def _filter_disabled_params(self, **kwargs: Any) -> Dict[str, Any]:
        if not self.disabled_params:
            return kwargs
        filtered = {}
        for k, v in kwargs.items():
            # Skip the param if it is fully disabled (None) or if this specific
            # value is disabled.
            if k in self.disabled_params and (
                self.disabled_params[k] is None or v in self.disabled_params[k]
            ):
                continue
            else:
                filtered[k] = v
        return filtered

    def _get_generation_chunk_from_completion(
        self, completion: openai.BaseModel
    ) -> ChatGenerationChunk:
        """Get chunk from completion (e.g., from final completion of a stream)."""
        chat_result = self._create_chat_result(completion)
        chat_message = chat_result.generations[0].message
        if isinstance(chat_message, AIMessage):
            usage_metadata = chat_message.usage_metadata
            additional_kwargs = (
                {"tool_calls": chat_message.additional_kwargs["tool_calls"]}
                if "tool_calls" in chat_message.additional_kwargs
                else {}
            )
        else:
            usage_metadata = None
            additional_kwargs = {}
        message = AIMessageChunk(
            content="",
            additional_kwargs=additional_kwargs,
            usage_metadata=usage_metadata,
        )
        return ChatGenerationChunk(
            message=message, generation_info=chat_result.llm_output
        )


class ChatOpenAI(BaseChatOpenAI):
    """OpenAI chat model integration.

    .. dropdown:: Setup
        :open:

        Install ``langchain-openai`` and set environment variable ``OPENAI_API_KEY``.

        .. code-block:: bash

            pip install -U langchain-openai
            export OPENAI_API_KEY="your-api-key"

    .. dropdown:: Key init args — completion params

        model: str
            Name of OpenAI model to use.
        temperature: float
            Sampling temperature.
        max_tokens: Optional[int]
            Max number of tokens to generate.
        logprobs: Optional[bool]
            Whether to return logprobs.
        stream_options: Dict
            Configure streaming outputs, like whether to return token usage when
            streaming (``{"include_usage": True}``).

        See full list of supported init args and their descriptions in the params section.

    .. dropdown:: Key init args — client params

        timeout: Union[float, Tuple[float, float], Any, None]
            Timeout for requests.
        max_retries: Optional[int]
            Max number of retries.
        api_key: Optional[str]
            OpenAI API key. If not passed in will be read from env var OPENAI_API_KEY.
        base_url: Optional[str]
            Base URL for API requests. Only specify if using a proxy or service
            emulator.
        organization: Optional[str]
            OpenAI organization ID. If not passed in will be read from env
            var OPENAI_ORG_ID.

        See full list of supported init args and their descriptions in the params section.

    .. dropdown:: Instantiate

        .. code-block:: python

            from langchain_openai import ChatOpenAI

            llm = ChatOpenAI(
                model="gpt-4o",
                temperature=0,
                max_tokens=None,
                timeout=None,
                max_retries=2,
                # api_key="...",
                # base_url="...",
                # organization="...",
                # other params...
            )

        **NOTE**: Any param which is not explicitly supported will be passed directly to the
        ``openai.OpenAI.chat.completions.create(...)`` API every time the model is
        invoked. For example:

        .. code-block:: python

            from langchain_openai import ChatOpenAI
            import openai

            ChatOpenAI(..., frequency_penalty=0.2).invoke(...)

            # results in underlying API call of:

            openai.OpenAI(..).chat.completions.create(..., frequency_penalty=0.2)

            # which is also equivalent to:

            ChatOpenAI(...).invoke(..., frequency_penalty=0.2)

    .. dropdown:: Invoke

        .. code-block:: python

            messages = [
                (
                    "system",
                    "You are a helpful translator. Translate the user sentence to French.",
                ),
                ("human", "I love programming."),
            ]
            llm.invoke(messages)

        .. code-block:: pycon

            AIMessage(
                content="J'adore la programmation.",
                response_metadata={
                    "token_usage": {
                        "completion_tokens": 5,
                        "prompt_tokens": 31,
                        "total_tokens": 36,
                    },
                    "model_name": "gpt-4o",
                    "system_fingerprint": "fp_43dfabdef1",
                    "finish_reason": "stop",
                    "logprobs": None,
                },
                id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
                usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
            )

    .. dropdown:: Stream

        .. code-block:: python

            for chunk in llm.stream(messages):
                print(chunk)

        .. code-block:: python

            AIMessageChunk(content="", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
            AIMessageChunk(content="J", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
            AIMessageChunk(content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
            AIMessageChunk(content=" la", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
            AIMessageChunk(
                content=" programmation", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
            )
            AIMessageChunk(content=".", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
            AIMessageChunk(
                content="",
                response_metadata={"finish_reason": "stop"},
                id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0",
            )

        .. code-block:: python

            stream = llm.stream(messages)
            full = next(stream)
            for chunk in stream:
                full += chunk
            full

        .. code-block:: python

            AIMessageChunk(
                content="J'adore la programmation.",
                response_metadata={"finish_reason": "stop"},
                id="run-bf917526-7f58-4683-84f7-36a6b671d140",
            )

    .. dropdown:: Async

        .. code-block:: python

            await llm.ainvoke(messages)

            # stream:
            # async for chunk in llm.astream(messages)

            # batch:
            # await llm.abatch([messages])

        .. code-block:: python

            AIMessage(
                content="J'adore la programmation.",
                response_metadata={
                    "token_usage": {
                        "completion_tokens": 5,
                        "prompt_tokens": 31,
                        "total_tokens": 36,
                    },
                    "model_name": "gpt-4o",
                    "system_fingerprint": "fp_43dfabdef1",
                    "finish_reason": "stop",
                    "logprobs": None,
                },
                id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
                usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
            )

    .. dropdown:: Tool calling

        .. code-block:: python

            from pydantic import BaseModel, Field


            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(
                    ..., description="The city and state, e.g. San Francisco, CA"
                )


            class GetPopulation(BaseModel):
                '''Get the current population in a given location'''

                location: str = Field(
                    ..., description="The city and state, e.g. San Francisco, CA"
                )


            llm_with_tools = llm.bind_tools(
                [GetWeather, GetPopulation]
                # strict = True  # enforce tool args schema is respected
            )
            ai_msg = llm_with_tools.invoke(
                "Which city is hotter today and which is bigger: LA or NY?"
            )
            ai_msg.tool_calls

        .. code-block:: python

            [
                {
                    "name": "GetWeather",
                    "args": {"location": "Los Angeles, CA"},
                    "id": "call_6XswGD5Pqk8Tt5atYr7tfenU",
                },
                {
                    "name": "GetWeather",
                    "args": {"location": "New York, NY"},
                    "id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi",
                },
                {
                    "name": "GetPopulation",
                    "args": {"location": "Los Angeles, CA"},
                    "id": "call_49CFW8zqC9W7mh7hbMLSIrXw",
                },
                {
                    "name": "GetPopulation",
                    "args": {"location": "New York, NY"},
                    "id": "call_6ghfKxV264jEfe1mRIkS3PE7",
                },
            ]

        Note that ``openai >= 1.32`` supports a ``parallel_tool_calls`` parameter
        that defaults to ``True``. This parameter can be set to ``False`` to
        disable parallel tool calls:

        .. code-block:: python

            ai_msg = llm_with_tools.invoke(
                "What is the weather in LA and NY?", parallel_tool_calls=False
            )
            ai_msg.tool_calls

        .. code-block:: python

            [
                {
                    "name": "GetWeather",
                    "args": {"location": "Los Angeles, CA"},
                    "id": "call_4OoY0ZR99iEvC7fevsH8Uhtz",
                }
            ]

        Like other runtime parameters, ``parallel_tool_calls`` can be bound to a model
        using ``llm.bind(parallel_tool_calls=False)`` or during instantiation by
        setting ``model_kwargs``.

        See ``ChatOpenAI.bind_tools()`` method for more.

    .. dropdown:: Structured output

        .. code-block:: python

            from typing import Optional

            from pydantic import BaseModel, Field


            class Joke(BaseModel):
                '''Joke to tell user.'''

                setup: str = Field(description="The setup of the joke")
                punchline: str = Field(description="The punchline to the joke")
                rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


            structured_llm = llm.with_structured_output(Joke)
            structured_llm.invoke("Tell me a joke about cats")

        .. code-block:: python

            Joke(
                setup="Why was the cat sitting on the computer?",
                punchline="To keep an eye on the mouse!",
                rating=None,
            )

        See ``ChatOpenAI.with_structured_output()`` for more.

    .. dropdown:: JSON mode

        .. code-block:: python

            json_llm = llm.bind(response_format={"type": "json_object"})
            ai_msg = json_llm.invoke(
                "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
            )
            ai_msg.content

        .. code-block:: python

            '\n{\n  "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\n}'

    .. dropdown:: Image input

        .. code-block:: python

            import base64
            import httpx
            from langchain_core.messages import HumanMessage

            image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
            image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
            message = HumanMessage(
                content=[
                    {"type": "text", "text": "describe the weather in this image"},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                    },
                ]
            )
            ai_msg = llm.invoke([message])
            ai_msg.content

        .. code-block:: python

            "The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions."

    .. dropdown:: Token usage

        .. code-block:: python

            ai_msg = llm.invoke(messages)
            ai_msg.usage_metadata

        .. code-block:: python

            {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}

        When streaming, set the ``stream_usage`` kwarg:

        .. code-block:: python

            stream = llm.stream(messages, stream_usage=True)
            full = next(stream)
            for chunk in stream:
                full += chunk
            full.usage_metadata

        .. code-block:: python

            {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}

        Alternatively, setting ``stream_usage`` when instantiating the model can be
        useful when incorporating ``ChatOpenAI`` into LCEL chains, or when using
        methods like ``.with_structured_output``, which generate chains under the
        hood.

        .. code-block:: python

            llm = ChatOpenAI(model="gpt-4o", stream_usage=True)
            structured_llm = llm.with_structured_output(...)

    .. dropdown:: Logprobs

        .. code-block:: python

            logprobs_llm = llm.bind(logprobs=True)
            ai_msg = logprobs_llm.invoke(messages)
            ai_msg.response_metadata["logprobs"]

        .. code-block:: python

            {
                "content": [
                    {
                        "token": "J",
                        "bytes": [74],
                        "logprob": -4.9617593e-06,
                        "top_logprobs": [],
                    },
                    {
                        "token": "'adore",
                        "bytes": [39, 97, 100, 111, 114, 101],
                        "logprob": -0.25202933,
                        "top_logprobs": [],
                    },
                    {
                        "token": " la",
                        "bytes": [32, 108, 97],
                        "logprob": -0.20141791,
                        "top_logprobs": [],
                    },
                    {
                        "token": " programmation",
                        "bytes": [
                            32,
                            112,
                            114,
                            111,
                            103,
                            114,
                            97,
                            109,
                            109,
                            97,
                            116,
                            105,
                            111,
                            110,
                        ],
                        "logprob": -1.9361265e-07,
                        "top_logprobs": [],
                    },
                    {
                        "token": ".",
                        "bytes": [46],
                        "logprob": -1.2233183e-05,
                        "top_logprobs": [],
                    },
                ]
            }

    .. dropdown:: Response metadata

        .. code-block:: python

            ai_msg = llm.invoke(messages)
            ai_msg.response_metadata

        .. code-block:: python

            {
                "token_usage": {
                    "completion_tokens": 5,
                    "prompt_tokens": 28,
                    "total_tokens": 33,
                },
                "model_name": "gpt-4o",
                "system_fingerprint": "fp_319be4768e",
                "finish_reason": "stop",
                "logprobs": None,
            }

    """

    stream_usage: bool = False
    """Whether to include usage metadata in streaming output. If True, an
    additional message chunk will be generated during the stream including
    usage metadata."""

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"openai_api_key": "OPENAI_API_KEY"}

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "openai"]

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        attributes: Dict[str, Any] = {}
        if self.openai_organization:
            attributes["openai_organization"] = self.openai_organization
        if self.openai_api_base:
            attributes["openai_api_base"] = self.openai_api_base
        if self.openai_proxy:
            attributes["openai_proxy"] = self.openai_proxy
        return attributes

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return True

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        params = super()._default_params
        if "max_tokens" in params:
            params["max_completion_tokens"] = params.pop("max_tokens")
        return params

    def _get_request_payload(
        self,
        input_: LanguageModelInput,
        *,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> dict:
        payload = super()._get_request_payload(input_, stop=stop, **kwargs)
        # max_tokens was deprecated in favor of max_completion_tokens
        # in September 2024.
        if "max_tokens" in payload:
            payload["max_completion_tokens"] = payload.pop("max_tokens")
        return payload

    def _should_stream_usage(
        self, stream_usage: Optional[bool] = None, **kwargs: Any
    ) -> bool:
        """Determine whether to include usage metadata in streaming output.

        For backwards compatibility, we check for `stream_options` passed
        explicitly to kwargs or in the model_kwargs and override self.stream_usage.
        """
        stream_usage_sources = [  # order of preference
            stream_usage,
            kwargs.get("stream_options", {}).get("include_usage"),
            self.model_kwargs.get("stream_options", {}).get("include_usage"),
            self.stream_usage,
        ]
        for source in stream_usage_sources:
            if isinstance(source, bool):
                return source
        return self.stream_usage

    def _stream(
        self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
    ) -> Iterator[ChatGenerationChunk]:
        """Set default stream_options."""
        stream_usage = self._should_stream_usage(stream_usage, **kwargs)
        if stream_usage:
            kwargs["stream_options"] = {"include_usage": stream_usage}
        return super()._stream(*args, **kwargs)

    async def _astream(
        self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Set default stream_options."""
        stream_usage = self._should_stream_usage(stream_usage, **kwargs)
        if stream_usage:
            kwargs["stream_options"] = {"include_usage": stream_usage}
        async for chunk in super()._astream(*args, **kwargs):
            yield chunk

    def with_structured_output(
        self,
        schema: Optional[_DictOrPydanticClass] = None,
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "json_schema",
        include_raw: bool = False,
        strict: Optional[bool] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, _DictOrPydantic]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:

                - a JSON Schema,
                - a TypedDict class,
                - or a Pydantic class,
                - an OpenAI function/tool schema.

                If ``schema`` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            method: The method for steering model generation, one of:

                - "json_schema":
                    Uses OpenAI's Structured Output API:
                    https://platform.openai.com/docs/guides/structured-outputs
                    Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later
                    models.
                - "function_calling":
                    Uses OpenAI's tool-calling (formerly called function calling)
                    API: https://platform.openai.com/docs/guides/function-calling
                - "json_mode":
                    Uses OpenAI's JSON mode. Note that if using JSON mode then you
                    must include instructions for formatting the output into the
                    desired schema into the model call:
                    https://platform.openai.com/docs/guides/structured-outputs/json-mode

                Learn more about the differences between the methods and which models
                support which methods here:

                - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode
                - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".
            strict:

                - True:
                    Model output is guaranteed to exactly match the schema.
                    The input schema will also be validated according to
                    https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
                - False:
                    Input schema will not be validated and model output will not be
                    validated.
                - None:
                    ``strict`` argument will not be passed to the model.

                If schema is specified via TypedDict or JSON schema, ``strict`` is not
                enabled by default. Pass ``strict=True`` to enable it (see the
                TypedDict example with ``strict=True`` in the dropdowns below).

                Note: ``strict`` can only be non-null if ``method`` is
                ``"json_schema"`` or ``"function_calling"``.

            kwargs: Additional keyword arguments are not supported.

        Returns:
            A Runnable that takes the same inputs as a :class:`langchain_core.language_models.chat_models.BaseChatModel`.

            | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

            | If ``include_raw`` is True, then Runnable outputs a dict with keys:

            - "raw": BaseMessage
            - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
            - "parsing_error": Optional[BaseException]

        .. versionchanged:: 0.1.20

            Added support for TypedDict class ``schema``.

        .. versionchanged:: 0.1.21

            Support for ``strict`` argument added.
            Support for ``method="json_schema"`` added.

        .. versionchanged:: 0.3.0

            ``method`` default changed from "function_calling" to "json_schema".

        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False, strict=True

            Note, OpenAI has a number of restrictions on what types of schemas can be
            provided if ``strict`` = True. In particular, a Pydantic schema cannot
            specify any Field metadata (like min/max constraints) and fields cannot
            have default values.

            See all constraints here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

            .. code-block:: python

                from typing import Optional

                from langchain_openai import ChatOpenAI
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Optional[str] = Field(
                        default=..., description="A justification for the answer."
                    )


                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False, strict=False

            .. code-block:: python

                from typing import Optional

                from langchain_openai import ChatOpenAI
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Optional[str] = Field(
                        default=..., description="A justification for the answer."
                    )


                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, method="function_calling"
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True

            .. code-block:: python

                from langchain_openai import ChatOpenAI
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
                #     'parsing_error': None
                # }

        .. dropdown:: Example: schema=TypedDict class, method="json_schema", include_raw=False, strict=False

            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated
                # from typing_extensions, not from typing.
                from typing import Optional

                from typing_extensions import Annotated, TypedDict

                from langchain_openai import ChatOpenAI


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }
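
        .. dropdown:: Example: schema=TypedDict class, method="json_schema", include_raw=False, strict=True

            A minimal sketch of opting in to strict schema validation for a
            non-Pydantic schema; as noted above, ``strict`` is not enabled by
            default for TypedDict or JSON schema inputs.

            .. code-block:: python

                from typing import Optional

                from typing_extensions import Annotated, TypedDict

                from langchain_openai import ChatOpenAI


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, strict=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }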

        .. dropdown:: Example: schema=OpenAI function schema, method="json_schema", include_raw=False

            .. code-block:: python

                from langchain_openai import ChatOpenAI

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                        'required': ['answer']
                    }
                }

                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        .. dropdown:: Example: schema=Pydantic class, method="json_mode", include_raw=True

            .. code-block:: python

                from langchain_openai import ChatOpenAI
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatOpenAI(model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification,
                    method="json_mode",
                    include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n    "answer": "They are both the same weight.",\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
                #     'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
                #     'parsing_error': None
                # }

        .. dropdown:: Example: schema=None, method="json_mode", include_raw=True

            .. code-block:: python

                structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n    "answer": "They are both the same weight.",\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
                #     'parsed': {
                #         'answer': 'They are both the same weight.',
                #         'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
                #     },
                #     'parsing_error': None
                # }
        """  # noqa: E501
        return super().with_structured_output(
            schema, method=method, include_raw=include_raw, strict=strict, **kwargs
        )


def _is_pydantic_class(obj: Any) -> bool:
    return isinstance(obj, type) and is_basemodel_subclass(obj)


def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
    return {
        "type": "function",
        "id": tool_call["id"],
        "function": {
            "name": tool_call["name"],
            "arguments": json.dumps(tool_call["args"]),
        },
    }


def _lc_invalid_tool_call_to_openai_tool_call(
    invalid_tool_call: InvalidToolCall,
) -> dict:
    return {
        "type": "function",
        "id": invalid_tool_call["id"],
        "function": {
            "name": invalid_tool_call["name"],
            "arguments": invalid_tool_call["args"],
        },
    }


def _url_to_size(image_source: str) -> Optional[Tuple[int, int]]:
    try:
        from PIL import Image  # type: ignore[import]
    except ImportError:
        logger.info(
            "Unable to count image tokens. To count image tokens please install "
            "`pip install -U pillow httpx`."
        )
        return None
    if _is_url(image_source):
        try:
            import httpx
        except ImportError:
            logger.info(
                "Unable to count image tokens. To count image tokens please install "
                "`pip install -U httpx`."
            )
            return None
        response = httpx.get(image_source)
        response.raise_for_status()
        width, height = Image.open(BytesIO(response.content)).size
        return width, height
    elif _is_b64(image_source):
        _, encoded = image_source.split(",", 1)
        data = base64.b64decode(encoded)
        width, height = Image.open(BytesIO(data)).size
        return width, height
    else:
        return None
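
# Example of _lc_tool_call_to_openai_tool_call above (illustrative values, not
# from a real trace): a LangChain ToolCall of the form
#     {"id": "call_123", "name": "add", "args": {"x": 1, "y": 2}}
# is converted to the OpenAI wire format
#     {"type": "function", "id": "call_123",
#      "function": {"name": "add", "arguments": '{"x": 1, "y": 2}'}}
# with the arguments dict JSON-encoded; _lc_invalid_tool_call_to_openai_tool_call
# passes the raw (unparseable) argument string through unchanged.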


def _count_image_tokens(width: int, height: int) -> int:
    # Reference: https://platform.openai.com/docs/guides/vision/calculating-costs
    width, height = _resize(width, height)
    h = ceil(height / 512)
    w = ceil(width / 512)
    return (170 * h * w) + 85


def _is_url(s: str) -> bool:
    try:
        result = urlparse(s)
        return all([result.scheme, result.netloc])
    except Exception as e:
        logger.debug(f"Unable to parse URL: {e}")
        return False


def _is_b64(s: str) -> bool:
    return s.startswith("data:image")


def _resize(width: int, height: int) -> Tuple[int, int]:
    # larger side must be <= 2048
    if width > 2048 or height > 2048:
        if width > height:
            height = (height * 2048) // width
            width = 2048
        else:
            width = (width * 2048) // height
            height = 2048
    # smaller side must be <= 768
    if width > 768 and height > 768:
        if width < height:
            height = (height * 768) // width
            width = 768
        else:
            width = (width * 768) // height
            height = 768
    return width, height
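
# Worked example of the image-token arithmetic above (illustrative; the
# constants follow OpenAI's published vision pricing formula): a 2048x4096
# image is first resized to 1024x2048 (long side capped at 2048), then to
# 768x1536 (short side capped at 768). That tiles into
# ceil(768 / 512) * ceil(1536 / 512) = 2 * 3 = 6 blocks of 512px, so
# _count_image_tokens(2048, 4096) == 170 * 6 + 85 == 1105.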


def _convert_to_openai_response_format(
    schema: Union[Dict[str, Any], Type], *, strict: Optional[bool] = None
) -> Union[Dict, TypeBaseModel]:
    # Pydantic classes are passed through and handled natively by the OpenAI SDK.
    if isinstance(schema, type) and is_basemodel_subclass(schema):
        return schema

    if (
        isinstance(schema, dict)
        and "json_schema" in schema
        and schema.get("type") == "json_schema"
    ):
        response_format = schema
    elif isinstance(schema, dict) and "name" in schema and "schema" in schema:
        response_format = {"type": "json_schema", "json_schema": schema}
    else:
        if strict is None:
            if isinstance(schema, dict) and isinstance(schema.get("strict"), bool):
                strict = schema["strict"]
            else:
                strict = False
        function = convert_to_openai_function(schema, strict=strict)
        function["schema"] = function.pop("parameters")
        response_format = {"type": "json_schema", "json_schema": function}

    if strict is not None and strict is not response_format["json_schema"].get(
        "strict"
    ):
        msg = (
            f"Output schema already has 'strict' value set to "
            f"{schema['json_schema']['strict']} but 'strict' also passed in to "
            f"with_structured_output as {strict}. Please make sure that "
            f"'strict' is only specified in one place."
        )
        raise ValueError(msg)
    return response_format
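
# Sketch of the translation performed by _convert_to_openai_response_format
# above (illustrative schema): a dict such as
#     {"name": "Answer", "schema": {"type": "object", "properties": {...}}}
# already matches OpenAI's json_schema payload shape and is wrapped as
#     {"type": "json_schema", "json_schema": {...}}
# while any other schema is first run through convert_to_openai_function and
# its "parameters" key is renamed to "schema", as the chat completions
# response_format parameter expects.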


def _oai_structured_outputs_parser(ai_msg: AIMessage) -> PydanticBaseModel:
    if ai_msg.additional_kwargs.get("parsed"):
        return ai_msg.additional_kwargs["parsed"]
    elif ai_msg.additional_kwargs.get("refusal"):
        raise OpenAIRefusalError(ai_msg.additional_kwargs["refusal"])
    else:
        raise ValueError(
            "Structured Output response does not have a 'parsed' field nor a "
            f"'refusal' field. Received message:\n\n{ai_msg}"
        )


class OpenAIRefusalError(Exception):
    """Error raised when OpenAI Structured Outputs API returns a refusal.

    When using OpenAI's Structured Outputs API with user-generated input, the model
    may occasionally refuse to fulfill the request for safety reasons.

    See here for more on refusals:
    https://platform.openai.com/docs/guides/structured-outputs/refusals
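
    Example (a minimal sketch; assumes ``structured_llm`` was produced by
    ``with_structured_output`` and ``user_input`` is arbitrary user text):

    .. code-block:: python

        try:
            structured_llm.invoke(user_input)
        except OpenAIRefusalError:
            ...  # surface the refusal to the caller instead of retrying blindly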

    .. versionadded:: 0.1.21
    """


def _create_usage_metadata(oai_token_usage: dict) -> UsageMetadata:
    input_tokens = oai_token_usage.get("prompt_tokens", 0)
    output_tokens = oai_token_usage.get("completion_tokens", 0)
    total_tokens = oai_token_usage.get("total_tokens", input_tokens + output_tokens)
    input_token_details: dict = {
        "audio": (oai_token_usage.get("prompt_tokens_details") or {}).get(
            "audio_tokens"
        ),
        "cache_read": (oai_token_usage.get("prompt_tokens_details") or {}).get(
            "cached_tokens"
        ),
    }
    output_token_details: dict = {
        "audio": (oai_token_usage.get("completion_tokens_details") or {}).get(
            "audio_tokens"
        ),
        "reasoning": (oai_token_usage.get("completion_tokens_details") or {}).get(
            "reasoning_tokens"
        ),
    }
    # Drop detail keys the API did not report so the TypedDicts stay sparse.
    return UsageMetadata(
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        input_token_details=InputTokenDetails(
            **{k: v for k, v in input_token_details.items() if v is not None}
        ),
        output_token_details=OutputTokenDetails(
            **{k: v for k, v in output_token_details.items() if v is not None}
        ),
    )