import enum
import warnings

from ..utils import add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="gpt2")
    >>> generator("I can't believe you did such a ", do_sample=False)
    [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]

    >>> # These parameters will return four suggestions, and only the newly generated text, which makes it easier to build prompting suggestions.
    >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
    generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
    text generation parameters in [Text generation strategies](../generation_strategies) and [Text
    generation](text_generation).

    This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"text-generation"`.

    The models that this pipeline can use are models that have been trained with an autoregressive language modeling
    objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models
    on [huggingface.co/models](https://huggingface.co/models?filter=text-generation).
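
    The pipeline can also be created by naming the task explicitly (the checkpoint here is illustrative; any causal
    LM checkpoint from the Hub works):

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline("text-generation", model="gpt2")
    ```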
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default". It also defines both some preprocess_kwargs and generate_kwargs,
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        add_special_tokens=False,
        **generate_kwargs,
    ):
        preprocess_params = {"add_special_tokens": add_special_tokens}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            # Tokenize the prefix once so `_forward` can account for its length when computing generation budgets.
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    # Overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments.
    def _parse_and_tokenize(self, *args, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """
        Complete the prompt(s) given as inputs.

        Args:
            args (`str` or `List[str]`):
                One or several prompts (or one list of prompts) to complete.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to return the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to return the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False`, only the added text is returned; otherwise the full text is returned. Only
                meaningful if *return_text* is set to `True`.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            prefix (`str`, *optional*):
                Prefix added to prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (prompts that, in one form or another,
                exceed the model's maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This parameter
                provides common strategies to work around the problem, depending on your use case.

                - `None` : default strategy where nothing in particular happens
                - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
                  truncate a lot of the prompt and not suitable when generation exceed the model capacity)
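
                A minimal sketch of the `"hole"` strategy (assumes `generator` is a text-generation pipeline as
                built above, and `long_prompt` is a hypothetical prompt longer than the model window):

                ```python
                >>> # The prompt is truncated from the left so that 100 new tokens still fit the model window
                >>> outputs = generator(long_prompt, handle_long_generation="hole", max_new_tokens=100)
                ```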

            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Returns one of the following dictionaries (cannot return a combination
            of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
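
        Example (outputs below are illustrative; `generator` is the pipeline built in the class-level example):

        ```python
        >>> generator("My tart needs some", return_full_text=False)  # doctest: +SKIP
        [{'generated_text': ' more sugar'}]
        >>> generator("My tart needs some", return_tensors=True)  # doctest: +SKIP
        [{'generated_token_ids': [...]}]
        ```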
        """
        return super().__call__(text_inputs, **kwargs)

    def preprocess(
        self, prompt_text, prefix="", handle_long_generation=None, add_special_tokens=False, **generate_kwargs
    ):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                # Truncate from the left, keeping just enough room for the requested new tokens.
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
""zTextGenerationPipeline._forwardTc                 C   s   |d d }|d }|d }|   }g }|D ]}|tjkrHd|i}	np|tjtjhkr| jj|d|d}
|d krxd}nt| jj|d d|d}|
|d  }|tjkr|| }d|i}	|	|	 q0|S )	NrH   r   r+   r>   Zgenerated_token_idsT)Zskip_special_tokensr1   Zgenerated_text)
numpytolistr   r   r   r   r2   decoder7   append)r    Zmodel_outputsr0   r1   rH   r+   r>   recordssequencerecordtextZprompt_lengthZall_textr   r   r   postprocess  s:    


z"TextGenerationPipeline.postprocess)	NNNNNNNNF)r=   NF)r   r   r   __doc__r   r   r   r;   r<   rE   rK   r   r   rT   __classcell__r   r   r&   r   r      s*   "
         
=
,     
'r   )enumr8   utilsr   r   r   baser   r   Zmodels.auto.modeling_autor	   Z
tensorflowr   Zmodels.auto.modeling_tf_autor
   Enumr   r   r   r   r   r   <module>   s   