from __future__ import annotations

from typing import TYPE_CHECKING, Dict, List, Union, Optional, overload
from typing_extensions import Literal

import httpx

from ..types import Completion, completion_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import required_args, maybe_transform
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_raw_response_wrapper, async_to_raw_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options

if TYPE_CHECKING:
    from .._client import OpenAI, AsyncOpenAI

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    with_raw_response: CompletionsWithRawResponse

    def __init__(self, client: OpenAI) -> None:
        super().__init__(client)
        self.with_raw_response = CompletionsWithRawResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[
            str,
            Literal[
                "babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003",
                "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001",
                "text-babbage-001", "text-ada-001",
            ],
        ],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Request-level options that are passed straight through to the underlying HTTP call.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
              convert text to token IDs. Mathematically, the bias is added to the logits
              generated by the model prior to sampling. The exact effect will vary per model,
              but values between -1 and 1 should decrease or increase likelihood of selection;
              values like -100 or 100 should result in a ban or exclusive selection of the
              relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well as the
              chosen tokens. For example, if `logprobs` is 5, the API will return a list of
              the 5 most likely tokens. The API will always return the `logprob` of the
              sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) to generate in the completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          suffix: The suffix that comes after a completion of inserted text.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
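
          Example (an illustrative sketch, not an exhaustive reference; it assumes a client
          configured with a valid API key and that the chosen model is available to your
          account):

              from openai import OpenAI

              client = OpenAI()  # reads OPENAI_API_KEY from the environment by default
              completion = client.completions.create(
                  model="gpt-3.5-turbo-instruct",
                  prompt="Say hello in one short sentence.",
                  max_tokens=16,
              )
              print(completion.choices[0].text)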
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[
            str,
            Literal[
                "babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003",
                "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001",
                "text-babbage-001", "text-ada-001",
            ],
        ],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
              convert text to token IDs. Mathematically, the bias is added to the logits
              generated by the model prior to sampling. The exact effect will vary per model,
              but values between -1 and 1 should decrease or increase likelihood of selection;
              values like -100 or 100 should result in a ban or exclusive selection of the
              relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely tokens, as well as the
              chosen tokens. For example, if `logprobs` is 5, the API will return a list of
              the 5 most likely tokens. The API will always return the `logprob` of the
              sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) to generate in the completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          suffix: The suffix that comes after a completion of inserted text.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
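
          Example (an illustrative sketch; it assumes a configured client and consumes the
          stream as the chunks arrive):

              from openai import OpenAI

              client = OpenAI()
              stream = client.completions.create(
                  model="gpt-3.5-turbo-instruct",
                  prompt="Write a haiku about the sea.",
                  stream=True,
              )
              for chunk in stream:
                  print(chunk.choices[0].text, end="", flush=True)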
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[
            str,
            Literal[
                "babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003",
                "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001",
                "text-babbage-001", "text-ada-001",
            ],
        ],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        ...

    # `required_args` enforces at call time that one of the listed argument sets is present:
    # plain requests need `model` and `prompt`; streaming requests must also pass `stream`.
    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[
            str,
            Literal[
                "babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003",
                "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001",
                "text-babbage-001", "text-ada-001",
            ],
        ],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            # `NotGiven` is falsy, so this collapses "not requested" into a concrete `False`.
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    with_raw_response: AsyncCompletionsWithRawResponse

    def __init__(self, client: AsyncOpenAI) -> None:
        super().__init__(client)
        self.with_raw_response = AsyncCompletionsWithRawResponse(self)

    # Mirrors `Completions.create` above: a plain call returns a `Completion`, while
    # `stream=True` returns an `AsyncStream[Completion]` of partial results.
    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[
            str,
            Literal[
                "babbage-002", "davinci-002", "gpt-3.5-turbo-instruct", "text-davinci-003",
                "text-davinci-002", "text-davinci-001", "code-davinci-002", "text-curie-001",
                "text-babbage-001", "text-ada-001",
            ],
        ],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        return await self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self.create = to_raw_response_wrapper(completions.create)


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self.create = async_to_raw_response_wrapper(completions.create)
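

# Illustrative usage sketch (guarded so it never runs on import): shows how the raw-response
# wrappers defined above are reached through a client instance. It assumes `OPENAI_API_KEY`
# is set in the environment and that the account has access to the model named below.
if __name__ == "__main__":
    from openai import OpenAI

    client = OpenAI()

    # `.with_raw_response.create(...)` goes through `to_raw_response_wrapper`, returning the
    # HTTP response wrapper; `.parse()` yields the regular `Completion` object.
    response = client.completions.with_raw_response.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say hello.",
        max_tokens=8,
    )
    print(response.headers.get("x-request-id"))
    print(response.parse().choices[0].text)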