a
    _gA                    @  s  d dl mZ d dlZd dlmZmZmZmZ d dlm	Z	 d dlm
Z
mZ d dlZddlmZ dd	lmZmZmZmZmZmZ dd
lmZmZmZmZmZ ddlmZmZmZm Z  ddl!m"Z" ddl#m$Z$m%Z% ddl&m'Z'm(Z( ddl)m*Z*m+Z+ ddl,m-Z-m.Z. ddl/m0Z0m1Z1 ddl2m3Z3m4Z4m5Z5m6Z6m7Z7m8Z8 ddl9m:Z: ddl;m<Z<m=Z=m>Z>m?Z? ddl@mAZA ddlBmCZC ddlDmEZE ddlFmGZG ddlHmIZI ddlJmKZK ddlLmMZM ddgZNG dd de$ZOG dd de%ZPG d d! d!ZQG d"d# d#ZRG d$d% d%ZSG d&d' d'ZTdS )(    )annotationsN)ListUnionIterableOptional)partial)Literaloverload   )_legacy_response   )Steps
AsyncStepsStepsWithRawResponseAsyncStepsWithRawResponseStepsWithStreamingResponseAsyncStepsWithStreamingResponse)	NOT_GIVENBodyQueryHeadersNotGiven)is_givenrequired_argsmaybe_transformasync_maybe_transform)cached_property)SyncAPIResourceAsyncAPIResource)to_streamed_response_wrapper"async_to_streamed_response_wrapper)StreamAsyncStream)SyncCursorPageAsyncCursorPage)AsyncPaginatormake_request_options)AssistantEventHandlerAssistantEventHandlerTAssistantStreamManagerAsyncAssistantEventHandlerAsyncAssistantEventHandlerTAsyncAssistantStreamManager)	ChatModel)run_list_paramsrun_create_paramsrun_update_paramsrun_submit_tool_outputs_params)Run)Metadata)AssistantToolParam)AssistantStreamEvent)RunStepInclude)AssistantToolChoiceOptionParam)"AssistantResponseFormatOptionParamRuns	AsyncRunsc                   @  sL  e Zd ZeddddZeddddZeddd	d
Zeeeeeeeeeeeeeeeeeedddeddddddddddddddddddddddddd d!d"d#Z	eeeeeeeeeeeeeeeeeddded$ddd%ddddddddddddddddddddd&d'd(d#Z	eeeeeeeeeeeeeeeeeddded$ddd)ddddddddddddddddddddd*d'd+d#Z	e
d,gd,d-geeeeeeeeeeeeeeeeedddeddddddddddddddd.dddddddddd*d!d/d#Z	ddded0ddddddd d1d2d3Zeddded4dddddddd d5d6d7Zeeeeddded8dd9d9d:d;ddddd<d=
d>d?Zddded0ddddddd d1d@dAZeeeeeeeeeeeeeeeeedddedBdddddddddddddddddd:dddddd dCdDdEZeedFeeeeeeeeeeeeeeedddedGddddddddddddddddddddddHdIdJdKZeedFeeeeeeeeeeeeeeedddedGddddddddddddddddddLdddddMdNdOdKZedFeeeeeeeeeeeeeeeddddedPddddddddddddddddddQdddddRdNdSdKZdddeefddddddd:d dTdUdVZeeeeeeeeeeeeeeeeeddded$dddddddddddddddddddddddHdWdXdYZeeeeeeeeeeeeeeeeeddded$dddddddddddddddddddLdddddMdZd[dYZeeeeeeeeeeeeeeeedddded\dddddddddddddddddddQdddddRdZd]dYZeeddded^ddd_dddddd d`	dadbZeddded0ddd%d_ddddd&dc	dddbZeddded0ddd)d_ddddd*dc	dedbZe
dfdggg dheddded^ddd_d.ddddd*d`	didbZedddedjd_ddd:ddddd dk	dldmZeddded0d_dddddddHdndodpZeddded0d_dddLdddddMdq	drdpZddddedsd_dddQdddddRdq	dtdpZdS )ur9   r   returnc                 C  s
   t | jS N)r   _clientself rA   u/var/www/html/cobodadashboardai.evdpl.com/venv/lib/python3.9/site-packages/openai/resources/beta/threads/runs/runs.pysteps=   s    z
Runs.stepsRunsWithRawResponsec                 C  s   t | S a  
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        )rD   r?   rA   rA   rB   with_raw_responseA   s    zRuns.with_raw_responseRunsWithStreamingResponsec                 C  s   t | S z
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        )rG   r?   rA   rA   rB   with_streaming_responseK   s    zRuns.with_streaming_responseNincludeadditional_instructionsadditional_messagesinstructionsmax_completion_tokensmax_prompt_tokensmetadatamodelparallel_tool_callsreasoning_effortresponse_formatstreamtemperaturetool_choicetoolstop_ptruncation_strategyextra_headersextra_query
extra_bodytimeoutstrList[RunStepInclude] | NotGivenOptional[str] | NotGivenBOptional[Iterable[run_create_params.AdditionalMessage]] | NotGivenOptional[int] | NotGivenOptional[Metadata] | NotGiven&Union[str, ChatModel, None] | NotGivenbool | NotGiven5Optional[Literal['low', 'medium', 'high']] | NotGiven7Optional[AssistantResponseFormatOptionParam] | NotGiven#Optional[Literal[False]] | NotGivenOptional[float] | NotGiven3Optional[AssistantToolChoiceOptionParam] | NotGiven1Optional[Iterable[AssistantToolParam]] | NotGiven9Optional[run_create_params.TruncationStrategy] | NotGivenHeaders | NoneQuery | NoneBody | None'float | httpx.Timeout | None | NotGivenr2   	thread_idassistant_idrK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r<   c                C  s   dS a  
        Create a run.

        Args:
          assistant_id: The ID of the
              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
              execute this run.

          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
              is useful for modifying the behavior on a per-run basis without overriding other
              instructions.

          additional_messages: Adds additional messages to the thread before creating the run.

          instructions: Overrides the
              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
              of the assistant. This is useful for modifying the behavior on a per-run basis.

          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
              run. The run will make a best effort to use only the number of completion tokens
              specified, across multiple turns of the run. If the run exceeds the number of
              completion tokens specified, the run will end with status `incomplete`. See
              `incomplete_details` for more info.

          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
              The run will make a best effort to use only the number of prompt tokens
              specified, across multiple turns of the run. If the run exceeds the number of
              prompt tokens specified, the run will end with status `incomplete`. See
              `incomplete_details` for more info.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
              be used to execute this run. If a value is provided here, it will override the
              model associated with the assistant. If not, the model associated with the
              assistant will be used.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          reasoning_effort: **o1 and o3-mini models only**

              Constrains effort on reasoning for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
              result in faster responses and fewer tokens used on reasoning in a response.

          response_format: Specifies the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tools and instead generates a message. `auto` is the default value
              and means the model can pick between generating a message or calling one or more
              tools. `required` means the model must call one or more tools before responding
              to the user. Specifying a particular tool like `{"type": "file_search"}` or
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

          tools: Override the tools the assistant can use for this run. This is useful for
              modifying the behavior on a per-run basis.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or temperature but not both.

          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
              control the intial context window of the run.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        NrA   r@   rt   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   rA   rA   rB   createT   s     zRuns.createrK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r\   r]   r^   r_   Literal[True]zStream[AssistantStreamEvent]rt   ru   rV   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r<   c                C  s   dS a  
        Create a run.

        Args:
          assistant_id: The ID of the
              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
              execute this run.

          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
              is useful for modifying the behavior on a per-run basis without overriding other
              instructions.

          additional_messages: Adds additional messages to the thread before creating the run.

          instructions: Overrides the
              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
              of the assistant. This is useful for modifying the behavior on a per-run basis.

          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
              run. The run will make a best effort to use only the number of completion tokens
              specified, across multiple turns of the run. If the run exceeds the number of
              completion tokens specified, the run will end with status `incomplete`. See
              `incomplete_details` for more info.

          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
              The run will make a best effort to use only the number of prompt tokens
              specified, across multiple turns of the run. If the run exceeds the number of
              prompt tokens specified, the run will end with status `incomplete`. See
              `incomplete_details` for more info.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
              be used to execute this run. If a value is provided here, it will override the
              model associated with the assistant. If not, the model associated with the
              assistant will be used.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          reasoning_effort: **o1 and o3-mini models only**

              Constrains effort on reasoning for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
              result in faster responses and fewer tokens used on reasoning in a response.

          response_format: Specifies the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tools and instead generates a message. `auto` is the default value
              and means the model can pick between generating a message or calling one or more
              tools. `required` means the model must call one or more tools before responding
              to the user. Specifying a particular tool like `{"type": "file_search"}` or
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

          tools: Override the tools the assistant can use for this run. This is useful for
              modifying the behavior on a per-run basis.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or temperature but not both.

          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
              control the intial context window of the run.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        NrA   r@   rt   ru   rV   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r\   r]   r^   r_   rA   rA   rB   rx      s     boolz"Run | Stream[AssistantStreamEvent]c                C  s   dS r|   rA   r}   rA   rA   rB   rx     s     ru   rV   3Optional[Literal[False]] | Literal[True] | NotGivenc                C  s   |st d|ddi|pi }| jd| dt|||||||	|
|||||||||dtjt||||td|itjdt|pd	tt d
S N8Expected a non-empty value for `thread_id` but received OpenAI-Betaassistants=v2	/threads//runs)ru   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   rK   r\   r]   r^   r_   queryFbodyoptionscast_torV   Z
stream_cls)	
ValueError_postr   r/   RunCreateParamsr&   r2   r!   r5   rw   rA   rA   rB   rx     sJ    
ߩr\   r]   r^   r_   run_idrt   r\   r]   r^   r_   r<   c                C  s\   |st d||s$t d|ddi|p0i }| jd| d| t||||dtdS )	D  
        Retrieves a run.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        r   5Expected a non-empty value for `run_id` but received r   r   r   /runs/r   r   r   r   _getr&   r2   r@   r   rt   r\   r]   r^   r_   rA   rA   rB   retrieve[  s    zRuns.retrieverQ   r\   r]   r^   r_   r   rt   rQ   r\   r]   r^   r_   r<   c             	   C  sj   |st d||s$t d|ddi|p0i }| jd| d| td|itjt||||dtd	S )
  
        Modifies a run.

        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        r   r   r   r   r   r   rQ   r   r   r   r   )r   r   r   r0   RunUpdateParamsr&   r2   r@   r   rt   rQ   r\   r]   r^   r_   rA   rA   rB   update  s     zRuns.updateafterbeforelimitorderr\   r]   r^   r_   str | NotGivenint | NotGiven!Literal['asc', 'desc'] | NotGivenzSyncCursorPage[Run]
rt   r   r   r   r   r\   r]   r^   r_   r<   c          
      C  s`   |st d|ddi|pi }| jd| dtt t||||	t||||dtjdtdS 	a!  
        Returns a list of runs belonging to a thread.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        r   r   r   r   r   )r   r   r   r   r   )pager   rR   )r   _get_api_listr#   r2   r&   r   r.   RunListParams
r@   rt   r   r   r   r   r\   r]   r^   r_   rA   rA   rB   list  s,    +
z	Runs.listc                C  s^   |st d||s$t d|ddi|p0i }| jd| d| dt||||dtd	S )
X  
        Cancels a run that is `in_progress`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        r   r   r   r   r   r   /cancelr   r   r   r   r&   r2   r   rA   rA   rB   cancel  s    zRuns.cancelrK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   poll_interval_msr\   r]   r^   r_   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r   rt   r\   r]   r^   r_   r<   c                C  sR   | j ||||||||||	||||
|d|||||||d}| j|j||||||dS )
        A helper to create a run an poll for a terminal state. More information on Run
        lifecycles can be found here:
        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
        Frt   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rU   rW   rX   rS   rT   rV   rY   r[   rZ   r\   r]   r^   r_   rt   r\   r]   r^   r   r_   rx   pollidr@   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r   rt   r\   r]   r^   r_   runrA   rA   rB   create_and_poll  sD    "zRuns.create_and_polluse `stream` insteadrL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r\   r]   r^   r_   z-AssistantStreamManager[AssistantEventHandler]ru   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r\   r]   r^   r_   r<   c                C  s   dS Create a Run streamNrA   r@   ru   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r\   r]   r^   r_   rA   rA   rB   create_and_stream\  s    zRuns.create_and_streamr(   z.AssistantStreamManager[AssistantEventHandlerT])ru   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   event_handlerr\   r]   r^   r_   r<   c                C  s   dS r   rA   )r@   ru   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   rA   rA   rB   r   |  s    )rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r   r\   r]   r^   r_   zAssistantEventHandlerT | Nonez^AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]c                C  s   |st d|dd|rdndd|p*i }t| jd| dt|||||||||||d	|||	|
|d
tjt||||dtd	tt	 d}t
||pt dS )r   r   r   threads.runs.create_and_streamtruefalser   zX-Stainless-Stream-Helperz X-Stainless-Custom-Event-Handlerr   r   T)ru   rL   rM   rN   rO   rP   rQ   rR   rU   rW   rX   rV   rY   r[   rS   rT   rZ   r   r   r   r   r   r   r   r/   r   r&   r2   r!   r5   r)   r'   )r@   ru   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   make_requestrA   rA   rB   r     sP    

 r   rt   r\   r]   r^   r_   r   r<   c                 C  s   ddi|pi }t |r$t||d< h d}| jj||||||d}	|	 }
|
j|v rZ|
S t |s|	jd}|durt|}nd}| 	|d  q,dS )	
        A helper to poll a run status until it reaches a terminal state. More
        information on Run lifecycles can be found here:
        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
        X-Stainless-Poll-Helperr    X-Stainless-Custom-Poll-Interval>   
incompletefailedexpired	completedrequires_action	cancelledrt   r   r\   r^   r]   r_   openai-poll-after-msN  
r   r`   rF   r   parsestatusheadersgetintZ_sleepr@   r   rt   r\   r]   r^   r_   r   Zterminal_statesresponser   Zfrom_headerrA   rA   rB   r     s*    	

z	Runs.poll)ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r\   r]   r^   r_   r<   c                C  s   dS r   rA   )r@   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r\   r]   r^   r_   rA   rA   rB   rV     s    zRuns.streamru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   r<   c                C  s   dS r   rA   r@   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   rA   rA   rB   rV   3  s    rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   r   r\   r]   r^   r_   c                C  s   |st d|dd|rdndd|p*i }t| jd| dt||||||||	|||d	||
|||d
tjt||||td|itjdtd	tt	 d}t
||pt dS r   r   r   r   r   r   r   r   r   T)ru   rL   rM   rN   rO   rP   rQ   rR   rU   rW   rX   rV   rY   rS   rT   r[   rZ   rK   r   r   r   r   )r@   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   r   rA   rA   rB   rV   T  sX    

$rV   r\   r]   r^   r_   3Iterable[run_submit_tool_outputs_params.ToolOutput]	r   rt   tool_outputsrV   r\   r]   r^   r_   r<   c          	      C  s   dS am  
        When a run has the `status: "requires_action"` and `required_action.type` is
        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
        tool calls once they're all completed. All outputs must be submitted in a single
        request.

        Args:
          tool_outputs: A list of tools for which the outputs are being submitted.

          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        NrA   	r@   r   rt   r   rV   r\   r]   r^   r_   rA   rA   rB   submit_tool_outputs  s    $zRuns.submit_tool_outputs	r   rt   rV   r   r\   r]   r^   r_   r<   c          	      C  s   dS am  
        When a run has the `status: "requires_action"` and `required_action.type` is
        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
        tool calls once they're all completed. All outputs must be submitted in a single
        request.

        Args:
          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          tool_outputs: A list of tools for which the outputs are being submitted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        NrA   	r@   r   rt   rV   r   r\   r]   r^   r_   rA   rA   rB   r     s    $c          	      C  s   dS r   rA   r   rA   rA   rB   r     s    $rt   r   rt   rV   r   c          	   	   C  sz   |st d||s$t d|ddi|p0i }| jd| d| dt||dtjt||||d	t|pnd
tt dS Nr   r   r   r   r   r   /submit_tool_outputsr   rV   r   Fr   )	r   r   r   r1   RunSubmitToolOutputsParamsr&   r2   r!   r5   r   rA   rA   rB   r     s(    r   r\   r]   r^   r_   	r   r   rt   r   r\   r]   r^   r_   r<   c          
   
   C  s4   | j |||d||||d}	| j|	j||||||dS )
        A helper to submit a tool output to a run and poll for a terminal run state.
        More information on Run lifecycles can be found here:
        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
        Fr   rt   r   rV   r\   r]   r^   r_   r   rt   r\   r]   r^   r_   r   r   r   r   
r@   r   r   rt   r   r\   r]   r^   r_   r   rA   rA   rB   submit_tool_outputs_and_poll8  s&    
z!Runs.submit_tool_outputs_and_pollr   r   rt   r\   r]   r^   r_   r<   c                C  s   dS 
        Submit the tool outputs from a previous run and stream the run to a terminal
        state. More information on Run lifecycles can be found here:
        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
        NrA   r@   r   r   rt   r\   r]   r^   r_   rA   rA   rB   submit_tool_outputs_stream_  s    zRuns.submit_tool_outputs_stream	r   r   rt   r   r\   r]   r^   r_   r<   c          	      C  s   dS r   rA   	r@   r   r   rt   r   r\   r]   r^   r_   rA   rA   rB   r  t  s    r   r\   r]   r^   r_   c          
   
   C  s   |st d||s$t d|dd|r0dndd|p<i }t| jd| d	| d
t|ddtjt||||dtdtt	 d}	t
|	|pt dS r  r   r   r   z'threads.runs.submit_tool_outputs_streamr   r   r   r   r   r   Tr   r   r   r   )r   r   r   r   r1   r   r&   r2   r!   r5   r)   r'   
r@   r   r   rt   r   r\   r]   r^   r_   requestrA   rA   rB   r    s6    
__name__
__module____qualname__r   rC   rF   rI   r	   r   rx   r   r   r   r   r   r   typing_extensions
deprecatedr   r   rV   r   r   r  rA   rA   rA   rB   r9   <   s  	B B B BK* 2$J)@F@B@L-@B$@M$%
$%
$%$*"'	"
$c                   @  s<  e Zd ZeddddZeddddZeddd	d
Zeeeeeeeeeeeeeeeeeedddeddddddddddddddddddddddddd d!d"d#Z	eeeeeeeeeeeeeeeeeddded$ddd%ddddddddddddddddddddd&d'd(d#Z	eeeeeeeeeeeeeeeeeddded$ddd)ddddddddddddddddddddd*d'd+d#Z	e
d,gd,d-geeeeeeeeeeeeeeeeedddeddddddddddddddd.dddddddddd*d!d/d#Z	ddded0ddddddd d1d2d3Zeddded4dddddddd d5d6d7Zeeeeddded8dd9d9d:d;ddddd<d=
d>d?Zddded0ddddddd d1d@dAZeeeeeeeeeeeeeeeeedddedBdddddddddddddddddd:dddddd dCdDdEZeedFeeeeeeeeeeeeeedddedGdddddddddddddddddddddHdIdJdKZeedFeeeeeeeeeeeeeedddedGdddddddddddddddddLdddddMdNdOdKZedFeeeeeeeeeeeeeeddddedPdddddddddddddddddQdddddRdNdSdKZdddeefddddddd:d dTdUdVZeeeeeeeeeeeeeeeedddedWddddddddddddddddddddddHdXdYdZZeeeeeeeeeeeeeeeeeddded$dddddddddddddddddddLdddddMd[d\dZZeeeeeeeeeeeeeeeedddded]dddddddddddddddddddQdddddRd[d^dZZeeddded_ddd`dddddd da	dbdcZeddded0ddd%d`ddddd&dd	dedcZeddded0ddd)d`ddddd*dd	dfdcZe
dgdhgg dieddded_ddd`d.ddddd*da	djdcZedddedkd`ddd:ddddd dl	dmdnZeddded0d`dddddddHdodpdqZeddded0d`dddLdddddMdr	dsdqZddddedtd`dddQdddddRdr	dudqZdS )vr:   r   r;   c                 C  s
   t | jS r=   )r   r>   r?   rA   rA   rB   rC     s    zAsyncRuns.stepsAsyncRunsWithRawResponsec                 C  s   t | S rE   )r  r?   rA   rA   rB   rF     s    zAsyncRuns.with_raw_responseAsyncRunsWithStreamingResponsec                 C  s   t | S rH   )r  r?   rA   rA   rB   rI     s    z!AsyncRuns.with_streaming_responseNrJ   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   r2   rs   c                  s   dS rv   rA   rw   rA   rA   rB   rx     s     zAsyncRuns.createry   rz   z!AsyncStream[AssistantStreamEvent]r{   c                  s   dS r|   rA   r}   rA   rA   rB   rx   k  s     r~   z'Run | AsyncStream[AssistantStreamEvent]c                  s   dS r|   rA   r}   rA   rA   rB   rx     s     ru   rV   r   c                  s   |st d|ddi|pi }| jd| dt|||||||	|
|||||||||dtjI d H t||||td|itjI d H dt|pd	tt d
I d H S r   )	r   r   r   r/   r   r&   r2   r"   r5   rw   rA   rA   rB   rx     sJ    
r   r   c                  sb   |st d||s$t d|ddi|p0i }| jd| d| t||||dtdI d	H S )
r   r   r   r   r   r   r   r   r   Nr   r   rA   rA   rB   r     s    zAsyncRuns.retriever   r   c             	     sv   |st d||s$t d|ddi|p0i }| jd| d| td|itjI dH t||||d	td
I dH S )r   r   r   r   r   r   r   rQ   Nr   r   )r   r   r   r0   r   r&   r2   r   rA   rA   rB   r     s     zAsyncRuns.updater   r   r   r   z)AsyncPaginator[Run, AsyncCursorPage[Run]]r   c          
      C  s`   |st d|ddi|pi }| jd| dtt t||||	t||||dtjdtdS r   )r   r   r$   r2   r&   r   r.   r   r   rA   rA   rB   r   /  s,    +
zAsyncRuns.listc                  sd   |st d||s$t d|ddi|p0i }| jd| d| dt||||dtd	I d
H S )r   r   r   r   r   r   r   r   r   r   Nr   r   rA   rA   rB   r   r  s    zAsyncRuns.cancelr   r   c                  s^   | j ||||||||||	||||
|d|||||||dI dH }| j|j||||||dI dH S )r   Fr   Nr   r   r   rA   rA   rB   r     sD    "zAsyncRuns.create_and_pollr   )rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   r\   r]   r^   r_   z7AsyncAssistantStreamManager[AsyncAssistantEventHandler])ru   rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   rt   r\   r]   r^   r_   r<   c                C  s   dS r   rA   )r@   ru   rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   rt   r\   r]   r^   r_   rA   rA   rB   r     s    zAsyncRuns.create_and_streamr+   z8AsyncAssistantStreamManager[AsyncAssistantEventHandlerT])ru   rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   r<   c                C  s   dS r   rA   )r@   ru   rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   rA   rA   rB   r     s    )rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   r   r\   r]   r^   r_   z"AsyncAssistantEventHandlerT | NonezrAsyncAssistantStreamManager[AsyncAssistantEventHandler] | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]c                C  s   |st d|dd|rdndd|p*i }| jd| dt|||||||||
||d	||||	d
tjt||||dtd	tt d}t	||pt
 dS )r   r   r   r   r   r   r   r   r   T)ru   rL   rM   rN   rO   rP   rQ   rR   rU   rW   rX   rV   rY   r[   rZ   rS   r   r   r   r   r   r   r/   r   r&   r2   r"   r5   r,   r*   )r@   ru   rL   rM   rN   rO   rP   rQ   rR   rS   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   r	  rA   rA   rB   r   	  sL     

r   c                   s   ddi|pi }t |r$t||d< h d}| jj||||||dI dH }	|	 }
|
j|v r`|
S t |s|	jd}|durt|}nd}| 	|d I dH  q,dS )	r   r   r   r   >   r   r   r   r   r   r   r   Nr   r   r   r   rA   rA   rB   r   e	  s*    	

zAsyncRuns.pollr   r   c                C  s   dS r   rA   r   rA   rA   rB   rV   	  s    zAsyncRuns.streamr   c                C  s   dS r   rA   r   rA   rA   rB   rV   	  s    r   c                C  s   |st d|dd|rdndd|p*i }| jd| dt||||||||	|||d	||
|||d
tjt||||td|itjdtd	tt d}t	||pt
 dS r   r  )r@   ru   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   r[   rt   r   r\   r]   r^   r_   r	  rA   rA   rB   rV   	  sV    !

#r   r   r   c          	        s   dS r   rA   r   rA   rA   rB   r   !
  s    $zAsyncRuns.submit_tool_outputsr   c          	        s   dS r   rA   r   rA   rA   rB   r   G
  s    $c          	        s   dS r   rA   r   rA   rA   rB   r   m
  s    $rt   r   r   c          	   	     s   |st d||s$t d|ddi|p0i }| jd| d| dt||dtjI d H t||||d	t|ptd
tt dI d H S r   )	r   r   r   r1   r   r&   r2   r"   r5   r   rA   rA   rB   r   
  s(    r   r   c          
   
     s@   | j |||d||||dI dH }	| j|	j||||||dI dH S )r   Fr   Nr   r   r   rA   rA   rB   r   
  s&    
z&AsyncRuns.submit_tool_outputs_and_pollr   c                C  s   dS r   rA   r  rA   rA   rB   r  
  s    z$AsyncRuns.submit_tool_outputs_streamr  c          	      C  s   dS r   rA   r  rA   rA   rB   r  
  s    r  c          
   	   C  s   |st d||s$t d|dd|r0dndd|p<i }| jd| d	| d
t|ddtjt||||dtdtt d}	t	|	|pt
 dS r  )r   r   r   r1   r   r&   r2   r"   r5   r,   r*   r  rA   rA   rB   r  
  s4    
r
  rA   rA   rA   rB   r:     s  	B B B BK* 2$J)@F>@>L->B$@O$%
$%
$%$*"'	"
$c                   @  s.   e Zd ZdddddZedddd	Zd
S )rD   r9   Nonerunsr<   c                 C  s^   || _ t|j| _t|j| _t|j| _t|j| _t|j| _t|j| _d S r=   )	_runsr   Zto_raw_response_wrapperrx   r   r   r   r   r   r@   r  rA   rA   rB   __init__@  s&    zRunsWithRawResponse.__init__r   r;   c                 C  s   t | jjS r=   )r   r  rC   r?   rA   rA   rB   rC   V  s    zRunsWithRawResponse.stepsNr  r  r  r  r   rC   rA   rA   rA   rB   rD   ?  s   rD   c                   @  s.   e Zd ZdddddZedddd	Zd
S )r  r:   r  r  c                 C  s^   || _ t|j| _t|j| _t|j| _t|j| _t|j| _t|j| _d S r=   )	r  r   Zasync_to_raw_response_wrapperrx   r   r   r   r   r   r  rA   rA   rB   r  \  s&    z!AsyncRunsWithRawResponse.__init__r   r;   c                 C  s   t | jjS r=   )r   r  rC   r?   rA   rA   rB   rC   r  s    zAsyncRunsWithRawResponse.stepsNr  rA   rA   rA   rB   r  [  s   r  c                   @  s.   e Zd ZdddddZedddd	Zd
S )rG   r9   r  r  c                 C  sR   || _ t|j| _t|j| _t|j| _t|j| _t|j| _t|j| _d S r=   )r  r   rx   r   r   r   r   r   r  rA   rA   rB   r  x  s&    z"RunsWithStreamingResponse.__init__r   r;   c                 C  s   t | jjS r=   )r   r  rC   r?   rA   rA   rB   rC     s    zRunsWithStreamingResponse.stepsNr  rA   rA   rA   rB   rG   w  s   rG   c                   @  s.   e Zd ZdddddZedddd	Zd
S )r  r:   r  r  c                 C  sR   || _ t|j| _t|j| _t|j| _t|j| _t|j| _t|j| _d S r=   )r  r    rx   r   r   r   r   r   r  rA   rA   rB   r    s&    z'AsyncRunsWithStreamingResponse.__init__r   r;   c                 C  s   t | jjS r=   )r   r  rC   r?   rA   rA   rB   rC     s    z$AsyncRunsWithStreamingResponse.stepsNr  rA   rA   rA   rB   r    s   r  )U
__future__r   r  typingr   r   r   r   	functoolsr   r   r	   Zhttpx r   rC   r   r   r   r   r   r   _typesr   r   r   r   r   _utilsr   r   r   r   Z_compatr   Z	_resourcer   r   	_responser   r    Z
_streamingr!   r"   Z
paginationr#   r$   Z_base_clientr%   r&   Zlib.streamingr'   r(   r)   r*   r+   r,   Ztypes.chat_modelr-   Ztypes.beta.threadsr.   r/   r0   r1   Ztypes.beta.threads.runr2   Ztypes.shared_params.metadatar3   Ztypes.beta.assistant_tool_paramr4   Z!types.beta.assistant_stream_eventr5   Z(types.beta.threads.runs.run_step_includer6   Z-types.beta.assistant_tool_choice_option_paramr7   Z1types.beta.assistant_response_format_option_paramr8   __all__r9   r:   rD   r  rG   r  rA   rA   rA   rB   <module>   sl                           