import json
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional

import requests
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import Field

logger = logging.getLogger(__name__)


class TextGen(LLM):
    """Text generation models from WebUI.

    To use, you should have the text-generation-webui installed, a model loaded,
    and --api added as a command-line option.

    Suggested installation, use one-click installer for your OS:
    https://github.com/oobabooga/text-generation-webui#one-click-installers

    Parameters below taken from text-generation-webui api example:
    https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py

    Example:
        .. code-block:: python

            from langchain_community.llms import TextGen
            llm = TextGen(model_url="http://localhost:8500")
    	model_urlNpreset   max_new_tokensT	do_sample)aliasg?temperatureg?top_p   	typical_pr   epsilon_cutoff
eta_cutoffgzG?repetition_penalty(   top_k
min_lengthno_repeat_ngram_size	num_beamspenalty_alphalength_penaltyFearly_stoppingseedadd_bos_tokeni   truncation_lengthban_eos_tokenskip_special_tokensstopping_strings	streaming)returnc                 C   sZ   | j | j| j| j| j| j| j| j| j| j	| j
| j| j| j| j| j| j| j| j| j| jdS )z/Get the default parameters for calling textgen.r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r$   r%   r&   r'   r(   r)   r,   self r/   n/var/www/html/cobodadashboardai.evdpl.com/venv/lib/python3.9/site-packages/langchain_community/llms/textgen.py_default_paramsv   s,    zTextGen._default_paramsc                 C   s   i d| j i| jS )zGet the identifying parameters.r   )r   r1   r-   r/   r/   r0   _identifying_params   s    zTextGen._identifying_paramsc                 C   s   dS )zReturn type of llm.Ztextgenr/   r-   r/   r/   r0   	_llm_type   s    zTextGen._llm_type)stopr+   c                 C   sH   | j r|durtd| jdu r(| j}n
d| ji}| j p>|p>g |d< |S )a  
        Performs sanity check, preparing parameters in format needed by textgen.

        Args:
            stop (Optional[List[str]]): List of stop sequences for textgen.

        Returns:
            Dictionary containing the combined parameters.
        """
        if self.stopping_strings and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")

        if self.preset is None:
            params = self._default_params
        else:
            params = {"preset": self.preset}

        # Ensure stopping strings are always set, defaulting to an empty list.
        params["stopping_strings"] = self.stopping_strings or stop or []

        return params

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the textgen web API and return the output.

        Args:
            prompt: The prompt to use for generation.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                from langchain_community.llms import TextGen
                llm = TextGen(model_url="http://localhost:5000")
                llm.invoke("Write a story about llamas.")
         r8   r4   r9   /api/v1/generater8   json   resultsr   textERROR: response: )r*   _streamrC   r   r7   copyrequestspoststatus_coder@   printr.   r8   r4   r9   r:   Zcombined_text_outputchunkresulturlr6   requestresponser/   r/   r0   _call   s&    


zTextGen._callc                    s   | j rBd}| jf |||d|2 z3 dH W }||j7 }q"6 |}nf| j d}| |}	|	 }
||
d< tj||
d}|jdkr|	 d d	 d
 }nt
d|  d}|S )r;   r<   r=   Nr>   r8   r?   rA   rB   r   rC   rD   )r*   _astreamrC   r   r7   rF   rG   rH   rI   r@   rJ   rK   r/   r/   r0   _acall   s&    

zTextGen._acallc                 k   s   zddl }W n ty&   tdY n0 i | ||}| j d}| }||d< | }	|	| |	t	| |	
 }
t|
}
|
d dkrt|
d dd	}|r|j|jd
 |V  qx|
d dkrx|	  dS qxdS a  Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            A dictionary like objects containing a string token and metadata.
            See text-generation-webui docs and below for more.

        Example:
            .. code-block:: python

                from langchain_community.llms import TextGen
                llm = TextGen(
                    model_url = "ws://localhost:5005"
                    streaming=True
                )
                for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
                        stop=["'","
"]):
                    print(chunk, end='', flush=True)  # noqa: T201

        """
        try:
            import websocket
        except ImportError:
            raise ImportError(
                "The `websocket-client` package is required for streaming."
            )

        params = {**self._get_parameters(stop), **kwargs}

        url = f"{self.model_url}/api/v1/stream"

        request = params.copy()
        request["prompt"] = prompt

        websocket_client = websocket.WebSocket()

        websocket_client.connect(url)

        websocket_client.send(json.dumps(request))

        while True:
            result = websocket_client.recv()
            result = json.loads(result)

            if result["event"] == "text_stream":
                chunk = GenerationChunk(
                    text=result["text"],
                    generation_info=None,
                )
                if run_manager:
                    run_manager.on_llm_new_token(token=chunk.text)
                yield chunk
            elif result["event"] == "stream_end":
                websocket_client.close()
                return

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        """Yields results objects as they are generated in real time.

        Async variant of `_stream`: see its docstring above. It drives the same
        websocket endpoint, awaiting the callback manager's on_llm_new_token.
        """
        try:
            import websocket
        except ImportError:
            raise ImportError(
                "The `websocket-client` package is required for streaming."
            )

        params = {**self._get_parameters(stop), **kwargs}

        url = f"{self.model_url}/api/v1/stream"

        request = params.copy()
        request["prompt"] = prompt

        websocket_client = websocket.WebSocket()

        websocket_client.connect(url)

        websocket_client.send(json.dumps(request))

        while True:
            result = websocket_client.recv()
            result = json.loads(result)

            if result["event"] == "text_stream":
                chunk = GenerationChunk(
                    text=result["text"],
                    generation_info=None,
                )
                if run_manager:
                    await run_manager.on_llm_new_token(token=chunk.text)
                yield chunk
            elif result["event"] == "stream_end":
                websocket_client.close()
                return
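

# A minimal usage sketch, not part of the library itself. It assumes a local
# text-generation-webui instance started with the --api flag, reachable at the
# hypothetical addresses below (HTTP on port 5000 for blocking calls, a
# websocket on port 5005 for streaming). Adjust `model_url` to your setup.
if __name__ == "__main__":
    # Blocking generation: POSTs the prompt plus sampling parameters to
    # {model_url}/api/v1/generate and returns the completed text.
    llm = TextGen(model_url="http://localhost:5000")
    print(llm.invoke("Write a story about llamas."))

    # Token-by-token streaming over {model_url}/api/v1/stream; requires the
    # `websocket-client` package and streaming=True.
    streaming_llm = TextGen(model_url="ws://localhost:5005", streaming=True)
    for chunk in streaming_llm.stream("Name three llama facts:", stop=["\n\n"]):
        print(chunk, end="", flush=True)  # noqa: T201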