U
    9%e!3                     @   s   d dl Z d dlmZmZmZmZ ddlmZmZm	Z	m
Z
 ddlmZmZ e rVd dlZe	 rdd dlZe
eZG dd dZeed	G d
d deZdS )    N)AnyDictListUnion   )add_end_docstringsis_tf_availableis_torch_availablelogging   )PIPELINE_INIT_ARGSPipelinec                   @   s   e Zd ZdZd$eeeeeef  f ej	dddZ
dd Zeeef dd	d
Zd%eedddZedddZdd Zdd Zdd Zdd Zdd Zdd Zdd Zed d! Zed"d# ZdS )&Conversationa  
    Utility class containing a conversation and its history. This class is meant to be used as an input to the
    [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user
    inputs and generated model responses.

    Arguments:
        messages (Union[str, List[Dict[str, str]]], *optional*):
            The initial messages to start the conversation, either a string, or a list of dicts containing "role" and
            "content" keys. If a string is passed, it is interpreted as a single message with the "user" role.
        conversation_id (`uuid.UUID`, *optional*):
            Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
            conversation.

    Usage:

    ```python
    conversation = Conversation("Going to the movies tonight - any suggestions?")
    conversation.add_message({"role": "assistant", "content": "The Big lebowski."})
    conversation.add_message({"role": "user", "content": "Is it good?"})
    ```N)messagesconversation_idc           	      K   s  |st  }|d kr<|dd }|d k	r6d|dg}qRg }nt|trRd|dg}|dd }|dd }|d k	r|d krtd|d k	rg }|d krg }ttt|t|gD ]D}|t|k r|	d|| d |t|k r|	d|| d q|| }|| _ || _
d S )Ntextuserrolecontentgenerated_responsespast_user_inputsz>generated_responses cannot be passed without past_user_inputs!	assistant)uuiduuid4pop
isinstancestr
ValueErrorrangemaxlenappendr   )	selfr   r   Zdeprecated_kwargsr   r   r   Zlegacy_messagesi r%   d/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/transformers/pipelines/conversational.py__init__(   s2    

zConversation.__init__c                 C   s&   t |tsdS | j|jkp$| j|jkS )NF)r   r   r   r   )r#   otherr%   r%   r&   __eq__L   s    
zConversation.__eq__)messagec                 C   s@   t | ddhkstd|d dkr0td| j| d S )Nr   r   z6Message should contain only 'role' and 'content' keys!)r   r   systemzBOnly 'user', 'assistant' and 'system' roles are supported for now!)setkeysr   r   r"   r#   r*   r%   r%   r&   add_messageQ   s
    zConversation.add_messageF)r   	overwritec                 C   s   t | dkrp| d d dkrp|rNtd| d d  d| d || d d< qtd| d d  d	| d
 n| jd|d dS )a3  
        Add a user input to the conversation for the next round. This is a legacy method that assumes that inputs must
        alternate user/assistant/user/assistant, and so will not add multiple user messages in succession. We recommend
        just using `add_message` with role "user" instead.
        r   r   r   z8User input added while unprocessed input was existing: "r   z" was overwritten with: "z".z" new input ignored: "z>". Set `overwrite` to True to overwrite unprocessed user inputr   N)r!   loggerwarningr   r"   )r#   r   r0   r%   r%   r&   add_user_inputX   s    zConversation.add_user_input)responsec                 C   s   | j d|d dS )zr
        This is a legacy method. We recommend just using `add_message` with an appropriate role instead.
        r   r   N)r   r"   )r#   r5   r%   r%   r&   append_responsem   s    zConversation.append_responsec                 C   s   dS )z
        This is a legacy method that no longer has any effect, as the Conversation no longer distinguishes between
        processed and unprocessed user input.
        Nr%   r#   r%   r%   r&   mark_processeds   s    zConversation.mark_processedc                 c   s   | j D ]
}|V  qd S Nr   r.   r%   r%   r&   __iter__z   s    
zConversation.__iter__c                 C   s
   | j | S r9   r:   )r#   itemr%   r%   r&   __getitem__~   s    zConversation.__getitem__c                 C   s   || j |< d S r9   r:   )r#   keyvaluer%   r%   r&   __setitem__   s    zConversation.__setitem__c                 C   s
   t | jS r9   )r!   r   r7   r%   r%   r&   __len__   s    zConversation.__len__c                 C   s:   d| j  d}| jD ] }||d  d|d  d7 }q|S )a  
        Generates a string representation of the conversation.

        Returns:
            `str`:

        Example:
            Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user: Going to the movies tonight - any suggestions?
            bot: The Big Lebowski
        zConversation id: 
r   z: r   )r   r   )r#   outputr*   r%   r%   r&   __repr__   s    
zConversation.__repr__c                 c   s&   | j D ]}|d dk|d fV  qd S )Nr   r   r   r:   r.   r%   r%   r&   
iter_texts   s    
zConversation.iter_textsc                 C   s   dd | j D S )Nc                 S   s    g | ]}|d  dkr|d qS )r   r   r   r%   .0r*   r%   r%   r&   
<listcomp>   s      z1Conversation.past_user_inputs.<locals>.<listcomp>r:   r7   r%   r%   r&   r      s    zConversation.past_user_inputsc                 C   s   dd | j D S )Nc                 S   s    g | ]}|d  dkr|d qS )r   r   r   r%   rF   r%   r%   r&   rH      s      z4Conversation.generated_responses.<locals>.<listcomp>r:   r7   r%   r%   r&   r      s    z Conversation.generated_responses)NN)F)__name__
__module____qualname____doc__r   r   r   r   r   UUIDr'   r)   r/   boolr4   r6   r8   r;   r=   r@   rA   rD   rE   propertyr   r   r%   r%   r%   r&   r      s,       $
r   a  
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    c                       sx   e Zd ZdZ fddZdddZdeeee f d fd	d
Z	dee
eef dddZdddZdddZ  ZS )ConversationalPipelinea  
    Multi-turn conversational pipeline.

    Example:

    ```python
    >>> from transformers import pipeline, Conversation

    >>> chatbot = pipeline(model="microsoft/DialoGPT-medium")
    >>> conversation = Conversation("Going to the movies tonight - any suggestions?")
    >>> conversation = chatbot(conversation)
    >>> conversation.generated_responses[-1]
    'The Big Lebowski'

    >>> conversation.add_user_input("Is it an action movie?")
    >>> conversation = chatbot(conversation)
    >>> conversation.generated_responses[-1]
    "It's a comedy."
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"conversational"`.

    The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
    currently: *'microsoft/DialoGPT-small'*, *'microsoft/DialoGPT-medium'*, *'microsoft/DialoGPT-large'*. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=conversational).
    c                    s*   t  j|| | jjd kr&| jj| j_d S r9   )superr'   	tokenizerZpad_token_idZ	eos_tokenZ	pad_token)r#   argskwargs	__class__r%   r&   r'      s    zConversationalPipeline.__init__Nc                 K   sh   i }i }i }|d k	r||d< |d k	r,||d< d|kr@|d |d< |d k	rP||d< |r^| | |||fS )Nmin_length_for_responseminimum_tokens
max_lengthclean_up_tokenization_spaces)update)r#   rW   rX   rZ   generate_kwargsZpreprocess_paramsZforward_paramsZpostprocess_paramsr%   r%   r&   _sanitize_parameters   s    
z+ConversationalPipeline._sanitize_parametersr   )conversationsc                    s:   t  j|fd|i|}t|tr6t|dkr6|d S |S )a,  
        Generate responses for the conversation(s) given as inputs.

        Args:
            conversations (a [`Conversation`] or a list of [`Conversation`]):
                Conversations to generate responses for.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Returns:
            [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those
            containing a new user input.
        num_workersr   r   )rQ   __call__r   listr!   )r#   r^   r_   rT   outputsrU   r%   r&   r`      s    zConversationalPipeline.__call__    )conversationreturnc                 C   sD   | j |}| jdkr$t|g}n| jdkr:t|g}||dS )Npttf)	input_idsrd   )rR   Zapply_chat_templateZ	frameworktorchZ
LongTensorrg   Zconstant)r#   rd   rW   rh   r%   r%   r&   
preprocess  s    

z!ConversationalPipeline.preprocess
   c           
      K   s   | d| jjj}|d jd }|| |k rtd| d||  d || }|d d d | d f |d< d|kr|d d d | d f |d< |d}||d< | jjf ||}| jjj	rd}	n|}	|d d |	d f |d	S )
NrY   rh   r   z Conversation input is too long (z), trimming it to z> tokens. Consider increasing `max_length` to avoid truncation.Zattention_maskrd   )
output_idsrd   )
getmodelconfigrY   shaper2   r3   r   generateZis_encoder_decoder)
r#   Zmodel_inputsrX   r\   rY   nZtrimrd   rl   Zstart_positionr%   r%   r&   _forward  s"    

zConversationalPipeline._forwardTc                 C   s:   |d }| j j|d d|d}|d }|d|d |S )Nrl   r   T)Zskip_special_tokensrZ   rd   r   r   )rR   decoder/   )r#   Zmodel_outputsrZ   rl   Zanswerrd   r%   r%   r&   postprocess&  s    z"ConversationalPipeline.postprocess)NNN)r   )rc   )rk   )T)rI   rJ   rK   rL   r'   r]   r   r   r   r`   r   r   r   rj   rs   ru   __classcell__r%   r%   rU   r&   rP      s   
     
 	
rP   )r   typingr   r   r   r   utilsr   r   r	   r
   baser   r   Z
tensorflowrg   ri   Z
get_loggerrI   r2   r   rP   r%   r%   r%   r&   <module>   s   
 	