import copy
import dataclasses
import warnings
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Union

import numpy as np
from packaging import version

from ..utils import TensorType, is_torch_available, is_vision_available, logging
from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size


if TYPE_CHECKING:
    from ..configuration_utils import PretrainedConfig
    from ..feature_extraction_utils import FeatureExtractionMixin
    from ..image_processing_utils import ImageProcessingMixin
    from ..tokenization_utils_base import PreTrainedTokenizerBase


if is_vision_available():
    from PIL import Image

logger = logging.get_logger(__name__)


DEFAULT_ONNX_OPSET = 11

# 2 Gb
EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024


@dataclasses.dataclass
class PatchingSpec:
    """
    Data class that holds patching specifications.

    Args:
        o: Module / object where the op to patch is located
        name: Name of the op to monkey patch
        custom_op: Custom op that patches the original op
        orig_op: Original op that is being patched
        op_wrapper: Wrapper (optional) that wraps both the original and custom ops.
            It is useful for ops that are class or static methods for instance.
    """

    o: Any
    name: str
    custom_op: Callable
    orig_op: Optional[Callable] = None
    op_wrapper: Optional[Callable] = None
  e Zd ZdZdZdZdZedZ	e
dddd	ie
d
ddd	ie
dddd	ie
ddd	ddd	ddd	de
dddd	ie
dddd	ie
dddiie
ddd	ddd	de
ddd	ddd	de
ddddddie
dddd	ie
dddiie
dddd	ie
dddd	ie
dddd	idZdPdeee dddZedQded dddZeeeeeeef f ddd Zeeeeeef f dd!d"Zeeeeef  dd#d$Zeedd%d&Zeedd'd(Zeedd)d*Zeedd+d,Zeedd-d.Z ee!dd/d0Z"e#ee!d1d2d3Z$dReeeed6d7d8Z%dSeeeed<d=d>Z&dTe'dA eeee!ee( eeeeeedBeeef dCdDdEZ)eeef eeef dFdGdHZ*dIdJ Z+dKdL Z,eee-e e.eef dMdNdOZ/dS )U
OnnxConfigzv
    Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.
    r         z1.8logitsbatchsequencer   r   Zlast_hidden_state)r3   
pred_boxesZ
pred_masksr   )r3   r7   )Zstart_logitsZ
end_logitsZ
num_labelsheightwidth)r   r   r      decoder_sequence)z	causal-lmdefaultzimage-classificationzimage-segmentationz	masked-imz	masked-lmmultiple-choicezobject-detectionzquestion-answeringzsemantic-segmentationz
seq2seq-lmzsequence-classificationztoken-classificationzvision2seq-lmzspeech2seq-lmr<   Nr   )configtaskpatching_specsc                 C   s   || _ || jkr(t| d| j  || _g | _|d k	r@|ng D ]6}|}|jd krntj|t	|j
|jd}| j| qDd S )Nz+ is not a supported task, supported tasks: )r&   )_config_tasks_to_common_outputs
ValueErrorkeysr?   _patching_specsr&   dataclassesreplacegetattrr#   r$   append)selfr>   r?   r@   specZ
final_specr.   r.   r/   __init__o   s    

zOnnxConfig.__init__r>   r?   returnc                 C   s   | ||dS )z

    @classmethod
    def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig":
        """
        Instantiate a OnnxConfig for a specific model

        Args:
            config: The model's configuration to use when exporting to ONNX

        Returns:
            OnnxConfig for this model
        """
        return cls(config, task=task)

    @property
    @abstractmethod
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """
        Mapping containing the axis definition of the input tensors to provide to the model

        Returns:
            For each input: its name associated to the axes symbolic name and the axis position within the tensor
        """
        raise NotImplementedError()

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """
        Mapping containing the axis definition of the output tensors to provide to the model

        Returns:
            For each output: its name associated to the axes symbolic name and the axis position within the tensor
        """
        common_outputs = self._tasks_to_common_outputs[self.task]
        return copy.deepcopy(common_outputs)

    @property
    def values_override(self) -> Optional[Mapping[str, Any]]:
        """
        Dictionary of keys to override in the model's config before exporting

        Returns:
            Dictionary with the keys (and their corresponding values) to override
        """
        if hasattr(self._config, "use_cache"):
            return {"use_cache": False}

        return None

    @property
    def default_batch_size(self) -> int:
        """
        The default batch size to use if no other indication

        Returns:
            Integer > 0
        """
        # Using 2 avoids ONNX making assumptions about single sample batches
        return OnnxConfig.default_fixed_batch

    @property
    def default_sequence_length(self) -> int:
        """
        The default sequence length to use if no other indication

        Returns:
            Integer > 0
        """
        return OnnxConfig.default_fixed_sequence

    @property
    def default_num_choices(self) -> int:
        """
        The default number of choices to use if no other indication

        Returns:
            Integer > 0
        """
        return OnnxConfig.default_fixed_num_choices

    @property
    def default_onnx_opset(self) -> int:
        """
        Which onnx opset to use when exporting the model

        Returns:
            Integer ONNX Opset version
        """
        return DEFAULT_ONNX_OPSET

    @property
    def atol_for_validation(self) -> float:
        """
        What absolute tolerance value to use during model conversion validation.

        Returns:
            Float absolute tolerance value.
        """
        return 1e-5

    @property
    def is_torch_support_available(self) -> bool:
        """
        The minimum PyTorch version required to export the model.

        Returns:
            `bool`: Whether the installed version of PyTorch is compatible with the model.
        """
        if is_torch_available():
            from transformers.utils import get_torch_version

            return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version
        else:
            return False

    @staticmethod
    def use_external_data_format(num_parameters: int) -> bool:
        """
        Flag indicating if the model requires using external data format

        Args:
            num_parameters: Number of parameters on the model

        Returns:
            True if model.num_parameters() * size_of(float32) >= 2Gb, False otherwise
        """
        return (
            compute_serialized_parameters_size(num_parameters, ParameterFormat.Float)
            >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT
        )
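
    # Worked example: with float32 parameters the 2 Gb limit above is reached at
    # 2 * 1024**3 / 4 = 536,870,912 parameters, so for instance:
    #
    #     OnnxConfig.use_external_data_format(124_000_000)  # ~0.5 Gb -> False
    #     OnnxConfig.use_external_data_format(600_000_000)  # ~2.4 Gb -> True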

    def _generate_dummy_images(
        self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40
    ):
        images = []
        for _ in range(batch_size):
            # Random noise image in [0, 255], converted to an 8-bit RGB PIL image
            data = np.random.rand(image_height, image_width, num_channels) * 255
            images.append(Image.fromarray(data.astype("uint8")).convert("RGB"))
        return images

    def _generate_dummy_audio(
        self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220
    ):
        audio_data = []
        for _ in range(batch_size):
            # Time variable
            t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False)

            # Generate a pure sine wave at `frequency` Hz
            audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t))

        return audio_data

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        """
        Generate inputs to provide to the ONNX exporter for the specific framework

        Args:
            preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]):
                The preprocessor associated with this model configuration.
            batch_size (`int`, *optional*, defaults to -1):
                The batch size to export the model for (-1 means dynamic axis).
            num_choices (`int`, *optional*, defaults to -1):
                The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
            seq_length (`int`, *optional*, defaults to -1):
                The sequence length to export the model for (-1 means dynamic axis).
            is_pair (`bool`, *optional*, defaults to `False`):
                Indicate if the input is a pair (sentence 1, sentence 2).
            framework (`TensorType`, *optional*, defaults to `None`):
                The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
            num_channels (`int`, *optional*, defaults to 3):
                The number of channels of the generated images.
            image_width (`int`, *optional*, defaults to 40):
                The width of the generated images.
            image_height (`int`, *optional*, defaults to 40):
                The height of the generated images.
            sampling_rate (`int`, *optional*, defaults to 22050):
                The sampling rate for audio data generation.
            time_duration (`float`, *optional*, defaults to 5.0):
                Total seconds of sampling for audio data generation.
            frequency (`int`, *optional*, defaults to 220):
                The desired natural frequency of generated audio.

        Returns:
            Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
        """
        from ..feature_extraction_utils import FeatureExtractionMixin
        from ..image_processing_utils import ImageProcessingMixin
        from ..tokenization_utils_base import PreTrainedTokenizerBase

        if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
            raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.")
        if tokenizer is not None:
            warnings.warn(
                "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
                " `preprocessor` instead.",
                FutureWarning,
            )
            logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.")
            preprocessor = tokenizer
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to the computed batch and sequence
            input_token = (
                preprocessor.unk_token
                if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0)
                else "0"
            )
            dummy_input = [" ".join([input_token]) * seq_length] * batch_size
            if self.task == "multiple-choice":
                # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid
                # optimizations made by ONNX
                num_choices = compute_effective_axis_dimension(
                    num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0
                )
                dummy_input = dummy_input * num_choices
                # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length]
                tokenized_input = preprocessor(dummy_input, text_pair=dummy_input)
                # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length]
                for k, v in tokenized_input.items():
                    tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)]
                return dict(tokenized_input.convert_to_tensors(tensor_type=framework))
            return dict(preprocessor(dummy_input, return_tensors=framework))
        elif isinstance(preprocessor, ImageProcessingMixin):
            if preprocessor.model_input_names[0] != "pixel_values":
                raise ValueError(
                    f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects"
                    f' `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}'
                )
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            return dict(preprocessor(images=dummy_input, return_tensors=framework))
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            return dict(preprocessor(images=dummy_input, return_tensors=framework))
        elif (
            isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features"
        ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency)
            return dict(preprocessor(dummy_input, return_tensors=framework))
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
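
    # Usage sketch (not part of the upstream module; assumes a BERT-like
    # checkpoint and a hypothetical `MyBertOnnxConfig` subclass defining `inputs`):
    #
    #     from transformers import AutoConfig, AutoTokenizer
    #
    #     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    #     onnx_config = MyBertOnnxConfig.from_model_config(
    #         AutoConfig.from_pretrained("bert-base-uncased")
    #     )
    #     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    #     # With the dynamic-axis defaults (-1), dummy["input_ids"].shape is typically
    #     # (2, 8): the fixed fallback batch of 2 and sequence of 8 (special tokens included).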

    def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:
        """
        Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq
        models which have the encoder and decoder exported as separate ONNX files.

        Args:
            reference_model_inputs ([`Mapping[str, Tensor]`]):
                Reference inputs for the model.

        Returns:
            `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function
        """
        return reference_model_inputs
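
    # Sketch of a typical override for a two-file (encoder/decoder) seq2seq export;
    # the key renaming below is illustrative, not upstream behaviour:
    #
    #     def generate_dummy_inputs_onnxruntime(self, reference_model_inputs):
    #         reference_model_inputs["input_ids"] = reference_model_inputs.pop("decoder_input_ids")
    #         return reference_model_inputs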

    def patch_ops(self):
        for spec in self._patching_specs:
            custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op)
            setattr(spec.o, spec.name, custom_op)

    def restore_ops(self):
        for spec in self._patching_specs:
            orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op)
            setattr(spec.o, spec.name, orig_op)

    @classmethod
    def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]:
        """
        Flatten any potential nested structure expanding the name of the field with the index of the element within the
        structure.

        Args:
            name: The name of the nested structure
            field: The structure to, potentially, be flattened

        Returns:
            (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure.

        """
        from itertools import chain

        return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))}


class OnnxConfigWithPast(OnnxConfig, ABC):
    def __init__(
        self,
        config: "PretrainedConfig",
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs)
        self.use_past = use_past

    @classmethod
    def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast":
        """
        Instantiate a OnnxConfig with `use_past` attribute set to True

        Args:
            config: The underlying model's config to use when exporting to ONNX

        Returns:
            OnnxConfig with `.use_past = True`
        """
        return cls(config, task=task, use_past=True)
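
    # Usage sketch (hypothetical subclass; assumes the model config defines `use_cache`):
    #
    #     onnx_config = MyDecoderOnnxConfig.with_past(model.config)
    #     assert onnx_config.use_past
    #     assert onnx_config.values_override == {"use_cache": True}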

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        common_outputs = super().outputs
        if self.use_past:
            self.fill_with_past_key_values_(common_outputs, direction="outputs")

        return common_outputs

    @property
    def values_override(self) -> Optional[Mapping[str, Any]]:
        if hasattr(self._config, "use_cache"):
            return {"use_cache": self.use_past}

        return None

    @property
    def num_layers(self) -> int:
        """
        The number of layers attribute retrieved from the model config. Override this for model configs where the
        number of layers attribute is not called `num_layers`.
        """
        if not hasattr(self._config, "num_layers"):
            raise AttributeError(
                "could not find the number of layers attribute in the model configuration, override the num_layers"
                " property of the model OnnxConfig to solve this"
            )
        return self._config.num_layers

    @property
    def num_attention_heads(self) -> int:
        """
        The number of attention heads attribute retrieved from the model config. Override this for model configs where
        the number of attention heads attribute is not called `num_attention_heads`.
        """
        if not hasattr(self._config, "num_attention_heads"):
            raise AttributeError(
                "could not find the number of attention heads attribute in the model configuration, override the"
                " num_attention_heads property of the model OnnxConfig to solve this"
            )
        return self._config.num_attention_heads

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )

                if "attention_mask" in common_inputs:
                    mask_dtype = common_inputs["attention_mask"].dtype
                    common_inputs["attention_mask"] = torch.cat(
                        [
                            common_inputs["attention_mask"],
                            torch.ones(batch, past_key_values_length, dtype=mask_dtype),
                        ],
                        dim=1,
                    )

                common_inputs["past_key_values"] = []
                for _ in range(self.num_layers):
                    common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))

        return common_inputs

    def fill_with_past_key_values_(
        self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False
    ):
        """
        Fill the input_or_outputs mapping with past_key_values dynamic axes.

        Args:
            inputs_or_outputs: The mapping to fill.
            direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the
                output mapping, this is important for axes naming.
            inverted_values_shape:
                If `True`, store values on dynamic axis 1, else on axis 2.

        """
        if direction not in ["inputs", "outputs"]:
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')

        name = "past_key_values" if direction == "inputs" else "present"
        for i in range(self.num_layers):
            inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
            if inverted_values_shape:
                inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"}
            else:
                inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
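
    # For a 2-layer decoder and direction="inputs", the mapping gains entries such as:
    #
    #     "past_key_values.0.key":   {0: "batch", 2: "past_sequence + sequence"}
    #     "past_key_values.0.value": {0: "batch", 2: "past_sequence + sequence"}
    #     "past_key_values.1.key":   {0: "batch", 2: "past_sequence + sequence"}
    #     "past_key_values.1.value": {0: "batch", 2: "past_sequence + sequence"}
    #
    # (the prefix becomes "present" when direction="outputs").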

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        flattened_output[f"{name}.{idx}.key"] = t[0]
        flattened_output[f"{name}.{idx}.value"] = t[1]

    def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]:
        flattened_output = {}
        if name in ["present", "past_key_values"]:
            for idx, t in enumerate(field):
                self._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super().flatten_output_collection_property(name, field)

        return flattened_output


class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        common_outputs = super(OnnxConfigWithPast, self).outputs
        # Renaming the outputs axes properly.
        for name, axes_names in common_outputs.items():
            sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence"
            for axis_idx, name in axes_names.items():
                if "sequence" in name:
                    axes_names[axis_idx] = sequence_name
                else:
                    # We reset the value as the order in common_outputs (OrderedDict) is lost otherwise
                    axes_names[axis_idx] = name
        if self.use_past:
            self.fill_with_past_key_values_(common_outputs, direction="outputs")

        return common_outputs

    @property
    def num_layers(self) -> Tuple[int]:
        try:
            num_layers = super().num_layers
            num_layers = (num_layers, num_layers)
        except AttributeError:
            if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"):
                num_layers = (self._config.encoder_layers, self._config.decoder_layers)
            else:
                raise AttributeError(
                    "could not find the number of encoder and decoder layers attributes in the model configuration,"
                    " override the num_layers property of the model OnnxConfig to solve this"
                )

        return num_layers

    @property
    def num_attention_heads(self) -> Tuple[int]:
        try:
            num_attention_heads = super().num_attention_heads
            num_attention_heads = (num_attention_heads, num_attention_heads)
        except AttributeError:
            if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"):
                num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
            else:
                raise AttributeError(
                    "could not find the number of attention heads for the encoder and the decoder attributes in the"
                    " model configuration, override the num_attention_heads property of the model OnnxConfig to"
                    " solve this"
                )
        return num_attention_heads

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch = common_inputs["input_ids"].shape[0]
            encoder_seq_length = common_inputs["input_ids"].shape[1]
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                # Not using the same length for past_key_values
                decoder_seq_length + 3,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                # For encoder-decoder models, past_key_values contains pre-computed values for both the encoder
                # and the decoder layers, hence a tuple of 4 tensors instead of 2
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )

            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))

        return common_inputs

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        if direction not in ["inputs", "outputs"]:
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')

        name = "past_key_values" if direction == "inputs" else "present"

        # If the number of encoder and decoder layers are present in the model configuration, both are considered
        num_encoder_layers, num_decoder_layers = self.num_layers
        min_num_layers = min(num_encoder_layers, num_decoder_layers)
        max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
        remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

        encoder_sequence = "past_encoder_sequence"
        decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"

        for i in range(min_num_layers):
            inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
            inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}

        for i in range(min_num_layers, max_num_layers):
            if remaining_side_name == "encoder":
                axes_info = {0: "batch", 2: encoder_sequence}
            else:
                axes_info = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        flattened_output[f"{name}.{idx}.decoder.key"] = t[0]
        flattened_output[f"{name}.{idx}.decoder.value"] = t[1]
        flattened_output[f"{name}.{idx}.encoder.key"] = t[2]
        flattened_output[f"{name}.{idx}.encoder.value"] = t[3]
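

# End-to-end sketch (not part of the upstream module; assumes the `transformers`
# GPT-2 checkpoint and its bundled `GPT2OnnxConfig` are available):
#
#     from transformers import AutoConfig, AutoTokenizer, TensorType
#     from transformers.models.gpt2 import GPT2OnnxConfig
#
#     config = AutoConfig.from_pretrained("gpt2")
#     onnx_config = GPT2OnnxConfig.from_model_config(config, task="causal-lm")
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(list(onnx_config.inputs), list(onnx_config.outputs))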