import logging
from typing import Any, List, Mapping, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM

from langchain_community.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


class ChatGLM(LLM):
    """ChatGLM LLM service.

    Example:
        .. code-block:: python

            from langchain_community.llms import ChatGLM
            endpoint_url = (
                "http://127.0.0.1:8000"
            )
            ChatGLM_llm = ChatGLM(
                endpoint_url=endpoint_url
            )
    """

    endpoint_url: str = "http://127.0.0.1:8000/"
    """Endpoint URL to use."""
    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model."""
    max_token: int = 20000
    """Max tokens allowed to pass to the model."""
    temperature: float = 0.1
    """LLM model temperature."""
    history: List[List] = []
    """History of the conversation."""
    top_p: float = 0.7
    """Top P for nucleus sampling from 0 to 1."""
    with_history: bool = False
    """Whether to use history or not."""

    @property
    def _llm_type(self) -> str:
        return "chat_glm"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint_url": self.endpoint_url},
            **{"model_kwargs": _model_kwargs},
        }

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to a ChatGLM LLM inference endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = chatglm_llm.invoke("Who are you?")
        """
        _model_kwargs = self.model_kwargs or {}

        # HTTP headers for invocation.
        headers = {"Content-Type": "application/json"}

        payload = {
            "prompt": prompt,
            "temperature": self.temperature,
            "history": self.history,
            "max_length": self.max_token,
            "top_p": self.top_p,
        }
        payload.update(_model_kwargs)
        payload.update(kwargs)

        logger.debug(f"ChatGLM payload: {payload}")

        # Call the inference endpoint.
        try:
            response = requests.post(self.endpoint_url, headers=headers, json=payload)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        logger.debug(f"ChatGLM response: {response}")

        if response.status_code != 200:
            raise ValueError(f"Failed with response: {response}")

        try:
            parsed_response = response.json()

            # Check that the response payload actually carries content.
            if isinstance(parsed_response, dict):
                content_keys = "response"
                if content_keys in parsed_response:
                    text = parsed_response[content_keys]
                else:
                    raise ValueError(f"No content in response: {parsed_response}")
            else:
                raise ValueError(f"Unexpected response type: {parsed_response}")

        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised during decoding response from inference endpoint: {e}."
                f"\nResponse: {response.text}"
            )

        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        if self.with_history:
            self.history = parsed_response["history"]
        return text
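
# --- Usage sketch (illustrative, not part of the library) ---
# A minimal, hedged example of exercising the wrapper above. It assumes a
# ChatGLM-compatible HTTP server is already listening at the endpoint URL
# below; the URL, the prompt, and this `__main__` entry point are
# illustrative assumptions, not something this module defines.
if __name__ == "__main__":
    llm = ChatGLM(endpoint_url="http://127.0.0.1:8000", with_history=False)
    # `invoke` is the standard LangChain Runnable entry point for LLMs;
    # it routes through `_call` defined above.
    print(llm.invoke("Who are you?"))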
