"""Azure OpenAI chat wrapper."""

from __future__ import annotations

import logging
import os
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    List,
    Optional,
    Type,
    TypedDict,
    TypeVar,
    Union,
)

import openai
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult
from langchain_core.runnables import Runnable
from langchain_core.utils import from_env, secret_from_env
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field, SecretStr, model_validator
from typing_extensions import Literal, Self

from langchain_openai.chat_models.base import BaseChatOpenAI

logger = logging.getLogger(__name__)

_BM = TypeVar("_BM", bound=BaseModel)
_DictOrPydanticClass = Union[Dict[str, Any], Type[_BM]]
_DictOrPydantic = Union[Dict, _BM]


class _AllReturnType(TypedDict):
    raw: BaseMessage
    parsed: Optional[_DictOrPydantic]
    parsing_error: Optional[BaseException]


def _is_pydantic_class(obj: Any) -> bool:
    return isinstance(obj, type) and is_basemodel_subclass(obj)


class AzureChatOpenAI(BaseChatOpenAI):
    """Azure OpenAI chat model integration.

    Setup:
        Head to https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python
        to create your Azure OpenAI deployment.

        Then install ``langchain-openai`` and set environment variables
        ``AZURE_OPENAI_API_KEY`` and ``AZURE_OPENAI_ENDPOINT``:

        .. code-block:: bash

            pip install -U langchain-openai

            export AZURE_OPENAI_API_KEY="your-api-key"
            export AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/"
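
        As a sketch, the same credentials can also be set from Python before
        constructing the model (equivalent to the exports above):

        .. code-block:: python

            import os

            os.environ["AZURE_OPENAI_API_KEY"] = "your-api-key"
            os.environ["AZURE_OPENAI_ENDPOINT"] = "https://your-endpoint.openai.azure.com/"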

    Key init args — completion params:
        azure_deployment: str
            Name of Azure OpenAI deployment to use.
        temperature: float
            Sampling temperature.
        max_tokens: Optional[int]
            Max number of tokens to generate.
        logprobs: Optional[bool]
            Whether to return logprobs.

    Key init args — client params:
        api_version: str
            Azure OpenAI API version to use. See more on the different versions here:
            https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
        timeout: Union[float, Tuple[float, float], Any, None]
            Timeout for requests.
        max_retries: Optional[int]
            Max number of retries.
        organization: Optional[str]
            OpenAI organization ID. If not passed in, will be read from env
            var OPENAI_ORG_ID.
        model: Optional[str]
            The name of the underlying OpenAI model. Used for tracing and token
            counting. Does not affect completion. E.g. "gpt-4", "gpt-35-turbo", etc.
        model_version: Optional[str]
            The version of the underlying OpenAI model. Used for tracing and token
            counting. Does not affect completion. E.g., "0125", "0125-preview", etc.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_openai import AzureChatOpenAI

            llm = AzureChatOpenAI(
                azure_deployment="your-deployment",
                api_version="2024-05-01-preview",
                temperature=0,
                max_tokens=None,
                timeout=None,
                max_retries=2,
                # organization="...",
                # model="gpt-35-turbo",
                # model_version="0125",
                # other params...
            )

    **NOTE**: Any param which is not explicitly supported will be passed directly to the
    ``openai.AzureOpenAI.chat.completions.create(...)`` API every time the model is
    invoked. For example:
        .. code-block:: python

            from langchain_openai import AzureChatOpenAI
            import openai

            AzureChatOpenAI(..., logprobs=True).invoke(...)

            # results in underlying API call of:

            openai.AzureOpenAI(...).chat.completions.create(..., logprobs=True)

            # which is also equivalent to:

            AzureChatOpenAI(...).invoke(..., logprobs=True)

    Invoke:
        .. code-block:: python

            messages = [
                (
                    "system",
                    "You are a helpful translator. Translate the user sentence to French.",
                ),
                ("human", "I love programming."),
            ]
            llm.invoke(messages)

        .. code-block:: python

            AIMessage(
                content="J'adore programmer.",
                usage_metadata={"input_tokens": 28, "output_tokens": 6, "total_tokens": 34},
                response_metadata={
                    "token_usage": {
                        "completion_tokens": 6,
                        "prompt_tokens": 28,
                        "total_tokens": 34,
                    },
                    "model_name": "gpt-4",
                    "system_fingerprint": "fp_7ec89fabc6",
                    "prompt_filter_results": [
                        {
                            "prompt_index": 0,
                            "content_filter_results": {
                                "hate": {"filtered": False, "severity": "safe"},
                                "self_harm": {"filtered": False, "severity": "safe"},
                                "sexual": {"filtered": False, "severity": "safe"},
                                "violence": {"filtered": False, "severity": "safe"},
                            },
                        }
                    ],
                    "finish_reason": "stop",
                    "logprobs": None,
                    "content_filter_results": {
                        "hate": {"filtered": False, "severity": "safe"},
                        "self_harm": {"filtered": False, "severity": "safe"},
                        "sexual": {"filtered": False, "severity": "safe"},
                        "violence": {"filtered": False, "severity": "safe"},
                    },
                },
                id="run-6d7a5282-0de0-4f27-9cc0-82a9db9a3ce9-0",
            )

    Stream:
        .. code-block:: python

            for chunk in llm.stream(messages):
                print(chunk)

        .. code-block:: python

            AIMessageChunk(content="", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content="J", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content="'", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content="ad", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content="ore", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content=" la", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(content=".", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
            AIMessageChunk(
                content="",
                response_metadata={
                    "finish_reason": "stop",
                    "model_name": "gpt-4",
                    "system_fingerprint": "fp_811936bd4f",
                },
                id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f",
            )

        .. code-block:: python

            stream = llm.stream(messages)
            full = next(stream)
            for chunk in stream:
                full += chunk
            full

        .. code-block:: python

            AIMessageChunk(
                content="J'adore la programmation.",
                response_metadata={
                    "finish_reason": "stop",
                    "model_name": "gpt-4",
                    "system_fingerprint": "fp_811936bd4f",
                },
                id="run-ba60e41c-9258-44b8-8f3a-2f10599643b3",
            )

    Async:
        .. code-block:: python

            await llm.ainvoke(messages)

            # stream:
            # async for chunk in llm.astream(messages)

            # batch:
            # await llm.abatch([messages])
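
        A minimal end-to-end async sketch, assuming the ``llm`` and ``messages``
        objects defined above:

        .. code-block:: python

            import asyncio

            async def main() -> None:
                print(await llm.ainvoke(messages))
                # astream returns an async iterator; it is not awaited itself.
                async for chunk in llm.astream(messages):
                    print(chunk.content, end="")

            asyncio.run(main())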

    Tool calling:
        .. code-block:: python

            from pydantic import BaseModel, Field


            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(
                    ..., description="The city and state, e.g. San Francisco, CA"
                )


            class GetPopulation(BaseModel):
                '''Get the current population in a given location'''

                location: str = Field(
                    ..., description="The city and state, e.g. San Francisco, CA"
                )


            llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
            ai_msg = llm_with_tools.invoke(
                "Which city is hotter today and which is bigger: LA or NY?"
            )
            ai_msg.tool_calls

        .. code-block:: python

            [
                {
                    "name": "GetWeather",
                    "args": {"location": "Los Angeles, CA"},
                    "id": "call_6XswGD5Pqk8Tt5atYr7tfenU",
                },
                {
                    "name": "GetWeather",
                    "args": {"location": "New York, NY"},
                    "id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi",
                },
                {
                    "name": "GetPopulation",
                    "args": {"location": "Los Angeles, CA"},
                    "id": "call_49CFW8zqC9W7mh7hbMLSIrXw",
                },
                {
                    "name": "GetPopulation",
                    "args": {"location": "New York, NY"},
                    "id": "call_6ghfKxV264jEfe1mRIkS3PE7",
                },
            ]

    Structured output:
        .. code-block:: python

            from typing import Optional

            from pydantic import BaseModel, Field


            class Joke(BaseModel):
                '''Joke to tell user.'''

                setup: str = Field(description="The setup of the joke")
                punchline: str = Field(description="The punchline to the joke")
                rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


            structured_llm = llm.with_structured_output(Joke)
            structured_llm.invoke("Tell me a joke about cats")

        .. code-block:: python

            Joke(
                setup="Why was the cat sitting on the computer?",
                punchline="To keep an eye on the mouse!",
                rating=None,
            )

        See ``AzureChatOpenAI.with_structured_output()`` for more.

    JSON mode:
        .. code-block:: python

            json_llm = llm.bind(response_format={"type": "json_object"})
            ai_msg = json_llm.invoke(
                "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
            )
            ai_msg.content

        .. code-block:: python

            '\n{\n  "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\n}'

    Image input:
        .. code-block:: python

            import base64
            import httpx
            from langchain_core.messages import HumanMessage

            image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
            image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
            message = HumanMessage(
                content=[
                    {"type": "text", "text": "describe the weather in this image"},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                    },
                ]
            )
            ai_msg = llm.invoke([message])
            ai_msg.content

        .. code-block:: python

            "The weather in the image appears to be quite pleasant. The sky is mostly clear"

    Token usage:
        .. code-block:: python

            ai_msg = llm.invoke(messages)
            ai_msg.usage_metadata

        .. code-block:: python

            {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}

    Logprobs:
        .. code-block:: python

            logprobs_llm = llm.bind(logprobs=True)
            ai_msg = logprobs_llm.invoke(messages)
            ai_msg.response_metadata["logprobs"]

        .. code-block:: python

            {
                "content": [
                    {
                        "token": "J",
                        "bytes": [74],
                        "logprob": -4.9617593e-06,
                        "top_logprobs": [],
                    },
                    {
                        "token": "'adore",
                        "bytes": [39, 97, 100, 111, 114, 101],
                        "logprob": -0.25202933,
                        "top_logprobs": [],
                    },
                    {
                        "token": " la",
                        "bytes": [32, 108, 97],
                        "logprob": -0.20141791,
                        "top_logprobs": [],
                    },
                    {
                        "token": " programmation",
                        "bytes": [
                            32,
                            112,
                            114,
                            111,
                            103,
                            114,
                            97,
                            109,
                            109,
                            97,
                            116,
                            105,
                            111,
                            110,
                        ],
                        "logprob": -1.9361265e-07,
                        "top_logprobs": [],
                    },
                    {
                        "token": ".",
                        "bytes": [46],
                        "logprob": -1.2233183e-05,
                        "top_logprobs": [],
                    },
                ]
            }
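
        Each ``logprob`` above is a natural log-probability; as a small sketch, it
        can be converted to a plain probability with ``math.exp``:

        .. code-block:: python

            import math

            content = ai_msg.response_metadata["logprobs"]["content"]
            [(lp["token"], round(math.exp(lp["logprob"]), 4)) for lp in content]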

    Response metadata:
        .. code-block:: python

            ai_msg = llm.invoke(messages)
            ai_msg.response_metadata

        .. code-block:: python

            {
                "token_usage": {
                    "completion_tokens": 6,
                    "prompt_tokens": 28,
                    "total_tokens": 34,
                },
                "model_name": "gpt-35-turbo",
                "system_fingerprint": None,
                "prompt_filter_results": [
                    {
                        "prompt_index": 0,
                        "content_filter_results": {
                            "hate": {"filtered": False, "severity": "safe"},
                            "self_harm": {"filtered": False, "severity": "safe"},
                            "sexual": {"filtered": False, "severity": "safe"},
                            "violence": {"filtered": False, "severity": "safe"},
                        },
                    }
                ],
                "finish_reason": "stop",
                "logprobs": None,
                "content_filter_results": {
                    "hate": {"filtered": False, "severity": "safe"},
                    "self_harm": {"filtered": False, "severity": "safe"},
                    "sexual": {"filtered": False, "severity": "safe"},
                    "violence": {"filtered": False, "severity": "safe"},
                },
            }
    ZAZURE_OPENAI_ENDPOINTN)default)default_factoryzOptional[str]azure_endpointazure_deployment)r.   aliaszUnion[str, None]deployment_nameapi_versionZOPENAI_API_VERSION)r2   r/   openai_api_versionapi_keyAZURE_OPENAI_API_KEYZOPENAI_API_KEYzOptional[SecretStr]openai_api_keyAZURE_OPENAI_AD_TOKENazure_ad_tokenzUnion[Callable[[], str], None]azure_ad_token_providerz)Union[Callable[[], Awaitable[str]], None]azure_ad_async_token_provider strmodel_versionZOPENAI_API_TYPEazureopenai_api_typeTr'   validate_base_urlmodel
model_namezOptional[Dict[str, Any]]disabled_paramsz	List[str])r)   c                 C  s   g dS )z*Get the namespace of the langchain object.)Z	langchainZchat_modelsZazure_openair%   clsr%   r%   r&   get_lc_namespace<  s    z AzureChatOpenAI.get_lc_namespacezDict[str, str]c                 C  s
   dddS )Nr7   r9   )r8   r:   r%   selfr%   r%   r&   
lc_secretsA  s    zAzureChatOpenAI.lc_secretsc                 C  s   dS )NTr%   rF   r%   r%   r&   is_lc_serializableH  s    z"AzureChatOpenAI.is_lc_serializableafter)moder   c                 C  s  | j dur| j dk rtdn"| j dur@| j dkr@| jr@td| jdu rf| jr\| jdkr\n
ddi| _| jp~tdp~td| _| j}|r| j	rd	|vrtd
| j
rtd| j| j| j
| jr| j nd| jr| j nd| j| j| j| ji | jpi ddi| jd}| jdur$| j|d< | jsXd| ji}tjf i ||| _| jjj| _| jsd| ji}| jr|| j|d< tjf i ||| _ | j jj| _| S )z?Validate that api key and python package exists in environment.N   zn must be at least 1.zn must be 1 when streaming.zgpt-4oZparallel_tool_callsZOPENAI_ORG_IDZOPENAI_ORGANIZATIONz/openaizAs of openai>=1.0.0, Azure endpoints should be specified via the `azure_endpoint` param not `openai_api_base` (or alias `base_url`).a  As of openai>=1.0.0, if `azure_deployment` (or alias `deployment_name`) is specified then `base_url` (or alias `openai_api_base`) should not be. If specifying `azure_deployment`/`deployment_name` then use `azure_endpoint` instead of `base_url`.

For example, you could specify:

azure_endpoint="https://xxx.openai.azure.com/", azure_deployment="my-deployment"

Or you can equivalently specify:

base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"z
User-Agentz%langchain-partner-python-azure-openai)r4   r0   r1   r6   r:   r;   Zorganizationbase_urltimeoutdefault_headersdefault_querymax_retrieshttp_clientr;   )!n
ValueErrorZ	streamingrE   rD   Zopenai_organizationosgetenvopenai_api_baserB   r3   r5   r0   r8   Zget_secret_valuer:   r;   request_timeoutrR   rS   rT   clientrU   openaiZAzureOpenAIZroot_clientZchatZcompletionsZasync_clientZhttp_async_clientr<   ZAsyncAzureOpenAIZroot_async_client)rJ   rZ   Zclient_paramsZsync_specificZasync_specificr%   r%   r&   validate_environmentL  sr    






z$AzureChatOpenAI.validate_environmentzDict[str, Any]c                   s   i d| j it jS )zGet the identifying parameters.r1   )r3   super_identifying_paramsrI   	__class__r%   r&   r`     s
    z#AzureChatOpenAI._identifying_paramsc                 C  s   dS )Nzazure-openai-chatr%   rI   r%   r%   r&   	_llm_type  s    zAzureChatOpenAI._llm_typec                 C  s   | j | jdS )NrA   r5   rd   rI   r%   r%   r&   lc_attributes  s    zAzureChatOpenAI.lc_attributeszOptional[List[str]]r   r   )stopkwargsr)   c                   sr   t  jf d|i|}d|d< | jr^| jrR| j| jvrR| jd | jd |d< qn| j|d< n| jrn| j|d< |S )z,Get the parameters used to invoke the model.rf   r@   Zls_provider-Zls_model_name)r_   _get_ls_paramsrD   r?   lstripr3   )rJ   rf   rg   paramsra   r%   r&   ri     s    
zAzureChatOpenAI._get_ls_paramszUnion[dict, openai.BaseModel]zOptional[Dict]r   )responsegeneration_infor)   c                   s   t  ||}t|ts | }|d D ]}|dd dkr(tdq(d|v r|d }| jrl| d| j }|jpti |_||jd< d|v r|jpi |_|d |jd< t	|j
|d D ]&\}}|jpi |_|d	i |jd	< q|S )
NchoicesZfinish_reasonZcontent_filterzKAzure has not provided the response due to a content filter being triggeredrC   rh   rD   Zprompt_filter_resultsZcontent_filter_results)r_   _create_chat_resultr*   dictZ
model_dumpgetrW   r?   Z
llm_outputzipZgenerationsrm   )rJ   rl   rm   Zchat_resultresrC   Zchat_genZresponse_choicera   r%   r&   ro     s6    



z#AzureChatOpenAI._create_chat_resultZjson_schemaFmethodinclude_rawstrictzOptional[_DictOrPydanticClass]z7Literal['function_calling', 'json_mode', 'json_schema']zOptional[bool]z-Runnable[LanguageModelInput, _DictOrPydantic])schemaru   rv   rw   rg   r)   c                  s   t  j|f|||d|S )a9  Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:

                - a JSON Schema,
                - a TypedDict class,
                - a Pydantic class,
                - or an OpenAI function/tool schema.

                If ``schema`` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            method: The method for steering model generation, one of:

                - "json_schema":
                    Uses OpenAI's Structured Output API:
                    https://platform.openai.com/docs/guides/structured-outputs
                    Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later
                    models.
                - "function_calling":
                    Uses OpenAI's tool-calling (formerly called function calling)
                    API: https://platform.openai.com/docs/guides/function-calling
                - "json_mode":
                    Uses OpenAI's JSON mode. Note that if using JSON mode then you
                    must include instructions for formatting the output into the
                    desired schema into the model call:
                    https://platform.openai.com/docs/guides/structured-outputs/json-mode

                Learn more about the differences between the methods and which models
                support which methods here:

                - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode
                - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".
            strict:

                - True:
                    Model output is guaranteed to exactly match the schema.
                    The input schema will also be validated according to
                    https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
                - False:
                    Input schema will not be validated and model output will not be
                    validated.
                - None:
                    ``strict`` argument will not be passed to the model.

                If schema is specified via TypedDict or JSON schema, ``strict`` is not
                enabled by default. Pass ``strict=True`` to enable it.

                Note: ``strict`` can only be non-null if ``method`` is
                ``"json_schema"`` or ``"function_calling"``.

            kwargs: Additional keyword args aren't supported.

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

            | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

            | If ``include_raw`` is True, then Runnable outputs a dict with keys:

            - "raw": BaseMessage
            - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
            - "parsing_error": Optional[BaseException]

        .. versionchanged:: 0.1.20

            Added support for TypedDict class ``schema``.

        .. versionchanged:: 0.1.21

            Support for ``strict`` argument added.
            Support for ``method="json_schema"`` added.

        .. versionchanged:: 0.3.0

            ``method`` default changed from "function_calling" to "json_schema".

        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False, strict=True

            Note that OpenAI has a number of restrictions on what types of schemas can
            be provided if ``strict=True``. When using Pydantic, the model cannot
            specify any Field metadata (like min/max constraints) and fields cannot
            have default values.

            See all constraints here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas

            .. code-block:: python

                from typing import Optional

                from langchain_openai import AzureChatOpenAI
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Optional[str] = Field(
                        default=..., description="A justification for the answer."
                    )


                llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False, strict=False

            .. code-block:: python

                from typing import Optional

                from langchain_openai import AzureChatOpenAI
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Optional[str] = Field(
                        default=..., description="A justification for the answer."
                    )


                llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, method="function_calling"
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True

            .. code-block:: python

                from langchain_openai import AzureChatOpenAI
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
                #     'parsing_error': None
                # }

        .. dropdown:: Example: schema=TypedDict class, method="json_schema", include_raw=False, strict=False

            .. code-block:: python

                from typing_extensions import Annotated, TypedDict

                from langchain_openai import AzureChatOpenAI


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        .. dropdown:: Example: schema=OpenAI function schema, method="json_schema", include_raw=False

            .. code-block:: python

                from langchain_openai import AzureChatOpenAI

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                        'required': ['answer']
                    }
                }

                llm = AzureChatOpenAI(
                    azure_deployment="...",
                    model="gpt-4o",
                    temperature=0,
                )
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        .. dropdown:: Example: schema=Pydantic class, method="json_mode", include_raw=True

            .. code-block::

                from langchain_openai import AzureChatOpenAI
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = AzureChatOpenAI(
                    azure_deployment="...",
                    model="gpt-4o",
                    temperature=0,
                )
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification,
                    method="json_mode",
                    include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n    "answer": "They are both the same weight.",\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
                #     'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
                #     'parsing_error': None
                # }

        .. dropdown:: Example: schema=None, method="json_mode", include_raw=True

            .. code-block::

                structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n    "answer": "They are both the same weight.",\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
                #     'parsed': {
                #         'answer': 'They are both the same weight.',
                #         'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
                #     },
                #     'parsing_error': None
                # }
        """
        return super().with_structured_output(
            schema, method=method, include_raw=include_raw, strict=strict, **kwargs
        )