""" PyTorch SpeechT5 model."""

import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, L1Loss

from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
    Seq2SeqSpectrogramOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_speecht5 import SpeechT5Config, SpeechT5HifiGanConfig


logger = logging.get_logger(__name__)

_HIDDEN_STATES_START_POSITION = 1

# General docstring
_CONFIG_FOR_DOC = "SpeechT5Config"

SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/speecht5_asr",
    "microsoft/speecht5_tts",
    "microsoft/speecht5_vc",
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
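# Illustrative example (values chosen for demonstration only): with
# pad_token_id=1 and decoder_start_token_id=2, the labels [[5, 6, 7]] become
# the decoder inputs [[2, 5, 6]] -- every token moves one step to the right
# and the final label is dropped.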
def shift_spectrograms_right(input_values: torch.Tensor, reduction_factor: int = 1):
    """
    Shift input spectrograms one timestep to the right. Also applies the reduction factor to the sequence length.
    """
    # thin out frames for reduction factor
    if reduction_factor > 1:
        input_values = input_values[:, reduction_factor - 1 :: reduction_factor]

    shifted_input_values = input_values.new_zeros(input_values.shape)
    shifted_input_values[:, 1:] = input_values[:, :-1].clone()

    # replace possible -100 values in labels by zeros
    shifted_input_values.masked_fill_(shifted_input_values == -100.0, 0.0)

    return shifted_input_values
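# Illustrative example: with reduction_factor=2, frames [f0, f1, f2, f3] are
# first thinned to [f1, f3] (every second frame, starting at index 1) and then
# shifted right, so the decoder sees [0, f1] as its inputs.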
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
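# Illustrative example: for tgt_len=3 and no cache, the mask (before being
# broadcast to [bsz, 1, 3, 3]) is
#   [[0, m, m],
#    [0, 0, m],
#    [0, 0, 0]]
# with m = torch.finfo(dtype).min, so each position can only attend to itself
# and to earlier positions.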
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)


# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    r   z&`mask_length` has to be bigger than 0.zO`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: z and `sequence_length`: `c                    sX   t |     }t|}| kr2 }| d  |k rTt| d  d}|S )z;Given input length, compute how many spans should be maskedr   r   )intmax)input_lengthnum_masked_spanepsilonrG   rF   rI   sequence_lengthr#   r$   compute_num_masked_span   s    
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        # Picking first sample just pads those vectors twice.
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
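# Illustrative example: shape=(2, 100), mask_prob=0.05, mask_length=10 yields
# int(0.05 * 100 / 10 + epsilon) masked spans per row, i.e. 0 or 1 spans of 10
# steps depending on the random epsilon -- roughly 5% of the time axis as an
# upper bound, since sampled spans may overlap.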
class SpeechT5NoLayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


class SpeechT5LayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)

        hidden_states = self.activation(hidden_states)
        return hidden_states


class SpeechT5GroupNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


class SpeechT5SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward, put the weights on the correct dtype and device of the parameter
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.weights = nn.Parameter(emb_weights)
        self.weights.requires_grad = False
        self.weights.detach_()

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0

        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len = input_ids.size()
        # Create the position ids from the input token ids. Any padded tokens remain padded.
        position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
            input_ids.device
        )

        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)

        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()

    def create_position_ids_from_input_ids(
        self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
    ):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            x: torch.Tensor x:
        Returns: torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx


class SpeechT5PositionalConvEmbedding(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = weight_norm(self.conv, name="weight", dim=2)

        self.padding = SpeechT5SamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states


class SpeechT5ScaledPositionalEncoding(nn.Module):
    """
    Scaled positional encoding, see §3.2 in https://arxiv.org/abs/1809.08895
    """

    def __init__(self, dropout, dim, max_len=5000):
        pe = torch.zeros(max_len, dim)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))
        pe[:, 0::2] = torch.sin(position.float() * div_term)
        pe[:, 1::2] = torch.cos(position.float() * div_term)
        pe = pe.unsqueeze(0)
        super().__init__()
        self.register_buffer("pe", pe, persistent=False)
        self.dropout = nn.Dropout(p=dropout)
        self.dim = dim
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))

    def forward(self, emb):
        emb = emb + self.alpha * self.pe[:, : emb.size(1)]
        emb = self.dropout(emb)
        return emb
class SpeechT5RelativePositionalEncoding(torch.nn.Module):
    def __init__(self, dim, max_length=1000):
        super().__init__()
        self.dim = dim
        self.max_length = max_length
        self.pe_k = torch.nn.Embedding(2 * max_length, dim)

    def forward(self, hidden_states):
        seq_len = hidden_states.shape[1]
        pos_seq = torch.arange(0, seq_len).long().to(hidden_states.device)
        pos_seq = pos_seq[:, None] - pos_seq[None, :]

        pos_seq[pos_seq < -self.max_length] = -self.max_length
        pos_seq[pos_seq >= self.max_length] = self.max_length - 1
        pos_seq = pos_seq + self.max_length

        return self.pe_k(pos_seq)


class SpeechT5SamePadLayer(nn.Module):
    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove > 0:
            hidden_states = hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states
zSpeechT5SamePadLayer.forwardr   r#   r#   r   r$   r     s   r   c                       s0   e Zd ZdZ fddZdd Zdd Z  ZS )SpeechT5FeatureEncoderz.Construct the features from raw audio waveformc                    s   t     jdkr@t ddg fddt jd D  }n6 jdkrd fddt jD }ntd	 j d
t|| _	d| _
d| _d S )Ngroupr   r   c                    s   g | ]}t  |d  dqS )r   r   )rl   rU   ir   r#   r$   rW     s    z3SpeechT5FeatureEncoder.__init__.<locals>.<listcomp>r   layerc                    s   g | ]}t  |d qS )r   )r   r   r   r#   r$   rW     s    z`config.feat_extract_norm` is z), but has to be one of ['group', 'layer']FT)rq   rr   Zfeat_extract_normr   ra   Znum_feat_extract_layersr!   r   
ModuleListconv_layersgradient_checkpointing_requires_grad)r~   r   r   r   r   r$   rr     s    




zSpeechT5FeatureEncoder.__init__c                 C   s   |   D ]
}d|_qd| _d S NF)
parametersr   r   )r~   paramr#   r#   r$   _freeze_parameters  s    z)SpeechT5FeatureEncoder._freeze_parametersc                 C   sj   |d d d f }| j r"| jr"d|_| jD ]<}| j r\| jr\| jr\dd }tjj|||}q(||}q(|S )NTc                    s    fdd}|S )Nc                     s    |  S r   r#   inputsmoduler#   r$   custom_forward  s    zUSpeechT5FeatureEncoder.forward.<locals>.create_custom_forward.<locals>.custom_forwardr#   r   r   r#   r   r$   create_custom_forward  s    z=SpeechT5FeatureEncoder.forward.<locals>.create_custom_forward)r   trainingr   r   r   r3   r   
checkpoint)r~   r&   r   Z
conv_layerr   r#   r#   r$   r     s    

zSpeechT5FeatureEncoder.forward)r   r   r   r   rr   r   r   r   r#   r#   r   r$   r     s   r   c                       s$   e Zd Z fddZdd Z  ZS )SpeechT5FeatureProjectionc                    sJ   t    tj|jd |jd| _t|jd |j| _	t
|j| _d S )Nr   Zeps)rq   rr   r   r   rs   layer_norm_epsr   Linearr   
projectionr   Zfeat_proj_dropoutr   r~   r   r   r#   r$   rr     s    
z"SpeechT5FeatureProjection.__init__c                 C   s&   |  |}| |}| |}||fS r   )r   r   r   )r~   r   Znorm_hidden_statesr#   r#   r$   r   $  s    


z!SpeechT5FeatureProjection.forwardr   r#   r#   r   r$   r     s   r   c                       s   e Zd Z fddZdd Zdejeej eej	 dddZ
eejd	d
dZeejef dddZdej	eej	 eej dddZ  ZS )SpeechT5SpeechEncoderPrenetc                    s|   t    || _t|| _t|| _|jdks8|jdkrPt	
t|j | _t|| _t|j|j d |j|j| _d S )Nr)   r   )rq   rr   r   r   feature_encoderr   feature_projectionmask_time_probmask_feature_probr   r   r3   FloatTensorr   uniform_masked_spec_embedr   pos_conv_embedr   max_speech_positionsr   pos_sinusoidal_embedr   r   r#   r$   rr   -  s    



z$SpeechT5SpeechEncoderPrenet.__init__c                 C   s   | j   d S r   )r   r   r~   r#   r#   r$   freeze_feature_encoder>  s    z2SpeechT5SpeechEncoderPrenet.freeze_feature_encoderN)r&   rH   mask_time_indicesc           	      C   s   |  |}|dd}|d k	r0| |jd |}| |\}}| j|||d}| |}|| }|d k	rx|d }nt	j
|jd d t	j|jd}| |}|| }||fS )Nr   r   )r   rH   r0   )r   r   "_get_feature_vector_attention_maskr   r   _mask_hidden_statesr   r   r   r3   r<   r-   r   )	r~   r&   rH   r   Zextract_featuresr   Zpositional_conv_embeddingpadding_maskZ positional_sinusoidal_embeddingsr#   r#   r$   r   A  s*    
  

z#SpeechT5SpeechEncoderPrenet.forward)feature_vector_lengthrH   c                 C   s   |j ddd d df }| |tj}|jd }tj||f|j|jd}d|tj	|jd |jd|d f< |
dg d
dg }|S )Nr   r1   r   r0   r   r/   )r    _get_feat_extract_output_lengthsr:   r3   r   r   r<   r,   r-   r7   fliprC   )r~   r  rH   Znon_padded_lengthsoutput_lengthsrh   r#   r#   r$   r   d  s    
  "z>SpeechT5SpeechEncoderPrenet._get_feature_vector_attention_mask)ri   c                 C   s4   dd }t | jj| jjD ]\}}||||}q|S )zH
        Computes the output length of the convolutional layers
        c                 S   s   t j| | |ddd S )Nfloor)Zrounding_moder   )r3   div)rN   rn   ro   r#   r#   r$   _conv_out_lengthy  s    zVSpeechT5SpeechEncoderPrenet._get_feat_extract_output_lengths.<locals>._conv_out_length)zipr   rw   rx   )r~   ri   r  rn   ro   r#   r#   r$   r  t  s    z<SpeechT5SpeechEncoderPrenet._get_feat_extract_output_lengths)r   r   rH   c                 C   s  t | jdds|S | \}}}|dk	r<| j|j||< nZ| jjdkr| jrt||f| jj| jj	|| jj
d}tj||jtjd}| j|j||< | jjdkr| jrt||f| jj| jj| jjd}tj||jtjd}|dddf d|d}d||< |S )	z
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        Zapply_spec_augmentTNr   )rF   rG   rH   rI   )r-   r,   )rF   rG   rI   r   )getattrr   r8   r   r:   r,   r   r   rk   Zmask_time_lengthZmask_time_min_masksr3   r   r-   rC   r   Zmask_feature_lengthZmask_feature_min_masksr=   )r~   r   r   rH   rh   rR   r   Zmask_feature_indicesr#   r#   r$   r     s4    z/SpeechT5SpeechEncoderPrenet._mask_hidden_states)NN)NN)r   r   r   rr   r   r3   r   r   
LongTensorr   r   rL   r   r   r  r   r   r#   r#   r   r$   r   ,  s$     #  r   c                       s6   e Zd Z fddZdejeej dddZ  ZS )SpeechT5SpeechDecoderPrenetc                    sr   t     | _t fddt jD | _t j	 j
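# Illustrative example, assuming the default feature-encoder geometry
# (conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_stride=(5, 2, 2, 2, 2, 2, 2)):
# one second of 16 kHz audio (16000 samples) is reduced to 49 feature frames,
# i.e. roughly one frame every 20 ms.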
| _t j j
 j| _t j j
  j
| _d S )Nc                    s*   g | ]"}t |d kr jn j jqS )r   )r   r   num_mel_binsspeech_decoder_prenet_unitsr   r   r#   r$   rW     s
    def forward(
        self,
        input_values: torch.Tensor,
        speaker_embeddings: Optional[torch.Tensor] = None,
    ):
        inputs_embeds = input_values
        for layer in self.layers:
            inputs_embeds = nn.functional.relu(layer(inputs_embeds))
            # dropout is applied even when evaluating, matching the prenet behavior of Tacotron-style decoders
            inputs_embeds = nn.functional.dropout(
                inputs_embeds, self.config.speech_decoder_prenet_dropout, training=True
            )

        inputs_embeds = self.final_layer(inputs_embeds)
        inputs_embeds = self.encode_positions(inputs_embeds)

        if speaker_embeddings is not None:
            speaker_embeddings = nn.functional.normalize(speaker_embeddings)
            speaker_embeddings = speaker_embeddings.unsqueeze(1)
            speaker_embeddings = speaker_embeddings.expand(-1, inputs_embeds.size(1), -1)
            inputs_embeds = torch.cat([inputs_embeds, speaker_embeddings], dim=-1)
            inputs_embeds = nn.functional.relu(self.speaker_embeds_layer(inputs_embeds))

        return inputs_embeds
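# Note on the speaker conditioning above: assuming the default
# config.speaker_embedding_dim == 512 (sized for x-vectors), a (batch, 512)
# speaker embedding is L2-normalized, repeated across the time axis,
# concatenated to every prenet frame, and projected back to hidden_size.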
class SpeechT5BatchNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()

        if layer_id == 0:
            in_conv_dim = config.num_mel_bins
        else:
            in_conv_dim = config.speech_decoder_postnet_units

        if layer_id == config.speech_decoder_postnet_layers - 1:
            out_conv_dim = config.num_mel_bins
        else:
            out_conv_dim = config.speech_decoder_postnet_units

        self.conv = nn.Conv1d(
            in_conv_dim,
            out_conv_dim,
            kernel_size=config.speech_decoder_postnet_kernel,
            stride=1,
            padding=(config.speech_decoder_postnet_kernel - 1) // 2,
            bias=False,
        )
        self.batch_norm = nn.BatchNorm1d(out_conv_dim)

        if layer_id < config.speech_decoder_postnet_layers - 1:
            self.activation = nn.Tanh()
        else:
            self.activation = None

        self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout)
    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.batch_norm(hidden_states)
        if self.activation is not None:
            hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class SpeechT5SpeechDecoderPostnet(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config

        self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
        self.prob_out = nn.Linear(config.hidden_size, config.reduction_factor)

        self.layers = nn.ModuleList(
            [SpeechT5BatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)]
        )
    def forward(self, hidden_states: torch.Tensor):
        outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins)
        outputs_after_postnet = self.postnet(outputs_before_postnet)
        logits = self.prob_out(hidden_states).view(hidden_states.size(0), -1)
        return outputs_before_postnet, outputs_after_postnet, logits

    def postnet(self, hidden_states: torch.Tensor):
        layer_output = hidden_states.transpose(1, 2)
        for layer in self.layers:
            layer_output = layer(layer_output)
        return hidden_states + layer_output.transpose(1, 2)
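# Illustrative example: with reduction_factor=2 and num_mel_bins=80, a decoder
# output of shape (1, 50, hidden_size) produces spectrograms of shape
# (1, 100, 80) and stop-token logits of shape (1, 100) -- each decoder step
# emits reduction_factor spectrogram frames.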
class SpeechT5TextEncoderPrenet(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
        self.encode_positions = SpeechT5ScaledPositionalEncoding(
            config.positional_dropout,
            config.hidden_size,
            config.max_text_positions,
        )

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def forward(self, input_ids: torch.Tensor):
        inputs_embeds = self.embed_tokens(input_ids)
        inputs_embeds = self.encode_positions(inputs_embeds)
        return inputs_embeds
ej  dd	d
Z  ZS )SpeechT5TextDecoderPrenetc                    sn   t    || _t|j| _|jr0t	|j
nd| _t|j|j
|j| _t|j|j d |j
|j| _d S )NrB   r   )rq   rr   r   r   r   r  r   Zscale_embeddingr   sqrtr   embed_scaler   r#  r   r$  r   r%  embed_positionsr   r   r#   r$   rr   @  s    
z"SpeechT5TextDecoderPrenet.__init__c                 C   s   | j S r   r&  r   r#   r#   r$   r'  N  s    z.SpeechT5TextDecoderPrenet.get_input_embeddingsc                 C   s
   || _ d S r   r&  r(  r#   r#   r$   r*  Q  s    z.SpeechT5TextDecoderPrenet.set_input_embeddingsN)r   rH   past_key_valuesc                 C   s~   |d k	r"|  }|d|d }ntd|d k	rD|d d jd nd}| ||}| || j }||7 }| |}||fS )Nr   z'You have to specify `decoder_input_ids`r   r   )r8   r9   r!   r   r.  r$  r-  r   )r~   r   rH   r/  input_shaper.   Z	positionsr  r#   r#   r$   r   T  s    
z!SpeechT5TextDecoderPrenet.forward)NN)r   r   r   rr   r'  r*  r3   r   r   r
  r   r   r   r   r#   r#   r   r$   r+  ?  s     r+  c                       s<   e Zd Z fddZejdddZdd Zdd	 Z  Z	S )
class SpeechT5TextDecoderPostnet(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    def forward(self, hidden_states: torch.Tensor):
        return self.lm_head(hidden_states)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings


class SpeechT5Attention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper with relative position bias (see
    https://aclanthology.org/N18-2074.pdf)
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        position_bias: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # relative attention bias
        if position_bias is not None:
            reshape_q = query_states.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0, 1)
            rel_pos_bias = torch.matmul(reshape_q, position_bias.transpose(-2, -1))
            rel_pos_bias = rel_pos_bias.transpose(0, 1).view(
                bsz * self.num_heads, position_bias.size(0), position_bias.size(1)
            )
            attn_weights += rel_pos_bias

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to make sure that attn_weights
            # keeps its gradient. In order to do so, attn_weights have to be reshaped twice and
            # have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
zSpeechT5Attention.forward)r)   FT)NNNNNF)r   r   r   r   rL   r   rC   rr   r3   r   rC  r   r   r   r   r#   r#   r   r$   r8  z  s8   	         r8  c                       s$   e Zd Z fddZdd Z  ZS )SpeechT5FeedForwardc                    sl   t    t|j| _t|j|| _t	|j
trBt|j
 | _n|j
| _t||j| _t|j| _d S r   )rq   rr   r   r   Zactivation_dropoutintermediate_dropoutr   r   intermediate_dense
isinstanceZ
hidden_actstrr   intermediate_act_fnoutput_densehidden_dropoutoutput_dropout)r~   r   Zintermediate_sizer   r#   r$   rr     s    
zSpeechT5FeedForward.__init__c                 C   s6   |  |}| |}| |}| |}| |}|S r   )rM  rP  rL  rQ  rS  r   r#   r#   r$   r   (  s    




zSpeechT5FeedForward.forwardr   r#   r#   r   r$   rK    s   rK  c                       sN   e Zd Zed fddZd	ejeej eej eej edddZ	  Z
S )
SpeechT5EncoderLayerr   c                    sj   t    t|j|j|jdd| _t|j	| _
tj|j|jd| _t||j| _tj|j|jd| _d S )NFr9  r:  r   r;  r   )rq   rr   r8  r   encoder_attention_headsattention_dropout	attentionr   r   rR  r   r   r   r   rK  Zencoder_ffn_dimfeed_forwardfinal_layer_normr   r   r#   r$   rr   3  s    
zSpeechT5EncoderLayer.__init__NFr   rH   rF  rG  rH  c           
      C   sj   |}| j |||||d\}}}| |}|| }| |}|| | }| |}|f}	|rf|	|f7 }	|	S )as  
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, hidden_size)`
            attention_mask (`torch.FloatTensor`):
                attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
                large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(config.encoder_attention_heads,)`.
            position_bias (`torch.FloatTensor`):
                relative position embeddings of size `(seq_len, seq_len, hidden_size // encoder_attention_heads)`
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            position_bias=position_bias,
            output_attentions=output_attentions,
        )

        hidden_states = self.dropout(hidden_states)
        hidden_states = residual + hidden_states

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
zSpeechT5EncoderLayer.forward)NNNF)r   r   r   r   rr   r3   r   r   rC   r   r   r#   r#   r   r$   rT  2  s       rT  c                       st   e Zd Zed fddZd
ejeej eej eej eej eej eeej  ee	 ee	 d	dd	Z
  ZS )SpeechT5DecoderLayerr   c                    s   t    t|j|j|jdd| _t|j	| _
tj|j|jd| _t|j|j|jdd| _tj|j|jd| _t||j| _tj|j|jd| _d S )NTrU  r   )r   r;  )rq   rr   r8  r   Zdecoder_attention_headsrW  	self_attnr   r   rR  r   r   r   self_attn_layer_normencoder_attnencoder_attn_layer_normrK  Zdecoder_ffn_dimrY  rZ  r   r   r#   r$   rr   p  s$    
zSpeechT5DecoderLayer.__init__NFT)	r   rH   encoder_hidden_statesencoder_attention_maskrF  cross_attn_layer_head_maskrE  rH  	use_cachec
                 C   s   |}
|dk	r|dd nd}| j |||||d\}}}| |}|
| }| |}d}d}|dk	r|}
|dk	rz|dd nd}| j||||||d\}}}| |}|
| }| |}|| }|| | }| |}|f}|r|||f7 }|	r||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, hidden_size)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = self.dropout(hidden_states)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


class SpeechT5PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SpeechT5Config
    base_model_prefix = "speecht5"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, SpeechT5PositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, SpeechT5FeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (SpeechT5Encoder, SpeechT5Decoder, SpeechT5FeatureEncoder)):
            module.gradient_checkpointing = value


class SpeechT5Encoder(SpeechT5PreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* layers. Each layer is a [`SpeechT5EncoderLayer`].
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layerdrop = config.encoder_layerdrop

        self.layers = nn.ModuleList([SpeechT5EncoderLayer(config) for _ in range(config.encoder_layers)])

        self.embed_positions = SpeechT5RelativePositionalEncoding(
            config.hidden_size // config.encoder_attention_heads, config.encoder_max_relative_position
        )

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
                Features extracted from the speech or text input by the encoder prenet.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
                `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if attention_mask is not None:
            # expand attention_mask
            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        position_bias = self.embed_positions(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:

                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                        position_bias,
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        position_bias=position_bias,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class SpeechT5EncoderWithSpeechPrenet(SpeechT5PreTrainedModel):
    """
    Wrapper around SpeechT5Encoder that applies SpeechT5SpeechEncoderPrenet to convert the audio waveform data to
    hidden features.
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.prenet = SpeechT5SpeechEncoderPrenet(config)
        self.wrapped_encoder = SpeechT5Encoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        hidden_states, attention_mask = self.prenet(input_values, attention_mask)

        outputs = self.wrapped_encoder(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return outputs


class SpeechT5EncoderWithTextPrenet(SpeechT5PreTrainedModel):
    """
    Wrapper around SpeechT5Encoder that applies SpeechT5TextEncoderPrenet to convert the input_ids to hidden features.
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.prenet = SpeechT5TextEncoderPrenet(config)
        self.wrapped_encoder = SpeechT5Encoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.prenet.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.prenet.set_input_embeddings(value)

    def forward(
        self,
        input_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        hidden_states = self.prenet(input_values)

        outputs = self.wrapped_encoder(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return outputs


class SpeechT5EncoderWithoutPrenet(SpeechT5PreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with
    [`SpeechT5Model`].
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.wrapped_encoder = SpeechT5Encoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        return self.wrapped_encoder(
            hidden_states=input_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class SpeechT5Decoder(SpeechT5PreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SpeechT5DecoderLayer`]
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.layerdrop = config.decoder_layerdrop

        self.layers = nn.ModuleList([SpeechT5DecoderLayer(config) for _ in range(config.decoder_layers)])

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
                Features extracted from the speech or text input by the decoder prenet.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_shape = hidden_states.size()[:-1]

        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, hidden_states, past_key_values_length
        )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, hidden_states.dtype, tgt_len=input_shape[-1])

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
            if skip_the_layer and not deepspeed_zero3_is_enabled:
                continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class SpeechT5DecoderWithSpeechPrenet(SpeechT5PreTrainedModel):
    """
    Wrapper around SpeechT5Decoder that applies SpeechT5SpeechDecoderPrenet to convert log-mel filterbanks to hidden
    features.
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.prenet = SpeechT5SpeechDecoderPrenet(config)
        self.wrapped_decoder = SpeechT5Decoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        speaker_embeddings: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        decoder_hidden_states = self.prenet(input_values, speaker_embeddings)

        outputs = self.wrapped_decoder(
            hidden_states=decoder_hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return outputs


class SpeechT5DecoderWithTextPrenet(SpeechT5PreTrainedModel):
    """
    Wrapper around SpeechT5Decoder that applies SpeechT5TextDecoderPrenet to convert input tokens to hidden features.
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.prenet = SpeechT5TextDecoderPrenet(config)
        self.wrapped_decoder = SpeechT5Decoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.prenet.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.prenet.set_input_embeddings(value)

    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        decoder_hidden_states, attention_mask = self.prenet(input_values, attention_mask, past_key_values)

        outputs = self.wrapped_decoder(
            hidden_states=decoder_hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return outputs


class SpeechT5DecoderWithoutPrenet(SpeechT5PreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with
    [`SpeechT5Model`].
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.wrapped_decoder = SpeechT5Decoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        outputs = self.wrapped_decoder(
            hidden_states=input_values,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        return outputs


class SpeechT5GuidedMultiheadAttentionLoss(nn.Module):
    """
    Guided attention loss from the paper [Efficiently Trainable Text-to-Speech System Based on Deep Convolutional
    Networks with Guided Attention](https://arxiv.org/abs/1710.08969), adapted for multi-head attention.
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__()
        self.sigma = config.guided_attention_loss_sigma
        self.scale = config.guided_attention_loss_scale

    def forward(
        self, attentions: torch.FloatTensor, input_masks: torch.BoolTensor, output_masks: torch.BoolTensor
    ) -> torch.Tensor:
        """
        Compute the attention loss.

        Args:
            attentions (`torch.FloatTensor` of shape `(batch_size, layers * heads, output_sequence_length, input_sequence_length)`):
                Batch of multi-head attention weights
            input_masks (`torch.BoolTensor` of shape `(batch_size, input_sequence_length)`):
                Input attention mask as booleans.
            output_masks (`torch.BoolTensor` of shape `(batch_size, output_sequence_length)`):
                Target attention mask as booleans.

        Returns:
            `torch.Tensor` with the loss value
        """
        guided_attn_masks = self._make_guided_attention_masks(input_masks, output_masks, attentions.device)
        masks = output_masks.unsqueeze(-1) & input_masks.unsqueeze(-2)
        masks = masks.to(attentions.device).unsqueeze(1)

        losses = guided_attn_masks * attentions
        loss = torch.mean(losses.masked_select(masks))
        return self.scale * loss

    def _make_guided_attention_masks(self, input_masks, output_masks, device):
        input_lengths = input_masks.sum(-1)
        output_lengths = output_masks.sum(-1)

        guided_attn_masks = torch.zeros((len(input_masks), output_masks.shape[1], input_masks.shape[1]), device=device)

        for idx, (ilen, olen) in enumerate(zip(input_lengths, output_lengths)):
            guided_attn_masks[idx, :olen, :ilen] = self._make_guided_attention_mask(ilen, olen, self.sigma, device)

        return guided_attn_masks.unsqueeze(1)

    @staticmethod
    def _make_guided_attention_mask(input_length, output_length, sigma, device):
        grid_y, grid_x = torch.meshgrid(
            torch.arange(input_length, device=device),
            torch.arange(output_length, device=device),
            indexing="xy",
        )
        grid_x = grid_x.float() / output_length
        grid_y = grid_y.float() / input_length
        return 1.0 - torch.exp(-((grid_y - grid_x) ** 2) / (2 * (sigma**2)))


class SpeechT5SpectrogramLoss(nn.Module):
    """
    Loss computation used by SpeechT5ForTextToSpeech.
    r   c                    sP   t    |j| _|j| _|j| _t | _tt	dd| _
| jrLt|| _d S )Ng      @)Z
pos_weight)rq   rr   use_guided_attention_lossguided_attention_loss_num_headsr'   r	   l1_criterionr   r3   r   bce_criterionr  attn_criterionr   r   r#   r$   rr     s    
z SpeechT5SpectrogramLoss.__init__N)rH   r  r   r!  labelsr  rJ   c                    s@  |dk}| |}| |}| |} || || }|d d d d df }	tj|	 d t|	dd|	jgdd}
|
d d dd f  |	}
| |	} ||
}|| } j	r<tj fdd|D dd}|dk}|d d d d df } j
dkr&|d d  j
d d  j
f } |||}||7 }|S )Nr(   r   rB   r   r1   c                    s"   g | ]}|d d d  j f qS r   )r  )rU   xr   r#   r$   rW     s     z3SpeechT5SpectrogramLoss.forward.<locals>.<listcomp>)r  r  r3   r;   rd   r8   r:   r-   r  r  r'   r  )r~   rH   r  r   r!  r  r  r   Zl1_lossr  stop_labelsZbce_lossr  Zattnr  r  Z	attn_lossr#   r   r$   r     s(    	


.
zSpeechT5SpectrogramLoss.forward)N)r   r   r   r   r   rr   r3   r


SPEECHT5_BASE_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`SpeechT5Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        encoder ([`SpeechT5EncoderWithSpeechPrenet`] or [`SpeechT5EncoderWithTextPrenet`] or `None`):
            The Transformer encoder module that applies the appropriate speech or text encoder prenet. If `None`,
            [`SpeechT5EncoderWithoutPrenet`] will be used and the `input_values` are assumed to be hidden states.
        decoder ([`SpeechT5DecoderWithSpeechPrenet`] or [`SpeechT5DecoderWithTextPrenet`] or `None`):
            The Transformer decoder module that applies the appropriate speech or text decoder prenet. If `None`,
            [`SpeechT5DecoderWithoutPrenet`] will be used and the `decoder_input_values` are assumed to be hidden
            states.
"""

SPEECHT5_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`SpeechT5Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

SPEECHT5_INPUTS_DOCSTRING = r"""
    Args:
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
            1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            <Tip warning={true}>

            `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
            True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
            **not** be passed to avoid degraded performance when doing batched inference. For such models
            `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
            models also yield slightly different results depending on whether `input_values` is padded or not.

            </Tip>

        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will
            also be used by default.

            If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

        head_mask (`torch.FloatTensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.FloatTensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.

        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_values` (those
            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_values` of shape `(batch_size, sequence_length)`.

        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_values` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_values` indices into associated vectors than the model's internal embedding lookup matrix.

        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets.",
    SPEECHT5_BASE_START_DOCSTRING,
)
class SpeechT5Model(SpeechT5PreTrainedModel):
    def __init__(
        self, config: SpeechT5Config, encoder: Optional[nn.Module] = None, decoder: Optional[nn.Module] = None
    ):
        super().__init__(config)
        self.config = config
        self.encoder = SpeechT5EncoderWithoutPrenet(config) if encoder is None else encoder
        self.decoder = SpeechT5DecoderWithoutPrenet(config) if decoder is None else decoder

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet):
            return self.encoder.get_input_embeddings()
        if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet):
            return self.decoder.get_input_embeddings()
        return None

    def set_input_embeddings(self, value):
        if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet):
            self.encoder.set_input_embeddings(value)
        if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet):
            self.decoder.set_input_embeddings(value)

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        if isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet):
            self.encoder.prenet.freeze_feature_encoder()

    @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_values: Optional[torch.Tensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        speaker_embeddings: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        input_values (`torch.Tensor` of shape `(batch_size, sequence_length)`):
            Depending on which encoder is being used, the `input_values` are either: float values of the input raw
            speech waveform, or indices of input sequence tokens in the vocabulary, or hidden states.

        decoder_input_values (`torch.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Depending on which decoder is being used, the `decoder_input_values` are either: float values of log-mel
            filterbank features extracted from the raw speech waveform, or indices of decoder input sequence tokens in
            the vocabulary, or hidden states.

        speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
            Tensor containing the speaker embeddings.

        Returns:
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_values=input_values,
                attention_mask=attention_mask,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # downsample encoder attention mask (only for encoders with speech input)
        if attention_mask is not None and isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet):
            encoder_attention_mask = self.encoder.prenet._get_feature_vector_attention_mask(
                encoder_outputs[0].shape[1], attention_mask
            )
        else:
            encoder_attention_mask = attention_mask

        if isinstance(self.decoder, SpeechT5DecoderWithSpeechPrenet):
            decoder_args = {"speaker_embeddings": speaker_embeddings}
        else:
            decoder_args = {}

        decoder_outputs = self.decoder(
            input_values=decoder_input_values,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=encoder_attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **decoder_args,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
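

# Hedged usage sketch (illustrative, not in the original file): without explicit prenets, `SpeechT5Model`
# treats `input_values` and `decoder_input_values` as already-embedded hidden states. Shapes are made up.
def _example_bare_speecht5_model():
    config = SpeechT5Config()
    model = SpeechT5Model(config).eval()
    encoder_hidden = torch.randn(1, 10, config.hidden_size)
    decoder_hidden = torch.randn(1, 4, config.hidden_size)
    with torch.no_grad():
        out = model(input_values=encoder_hidden, decoder_input_values=decoder_hidden)
    assert out.last_hidden_state.shape == (1, 4, config.hidden_size)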


@add_start_docstrings(
    """SpeechT5 Model with a speech encoder and a text decoder.""",
    SPEECHT5_START_DOCSTRING,
)
class SpeechT5ForSpeechToText(SpeechT5PreTrainedModel):
    _tied_weights_keys = ["text_decoder_postnet.lm_head.weight"]

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that does not define the"
                " vocabulary size of the language model head. Please instantiate the model as follows:"
                " `SpeechT5ForSpeechToText.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of"
                " your model's configuration."
            )

        speech_encoder = SpeechT5EncoderWithSpeechPrenet(config)
        text_decoder = SpeechT5DecoderWithTextPrenet(config)
        self.speecht5 = SpeechT5Model(config, speech_encoder, text_decoder)

        self.text_decoder_postnet = SpeechT5TextDecoderPostnet(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.speecht5.get_encoder()

    def get_decoder(self):
        return self.speecht5.get_decoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.get_encoder().prenet.freeze_feature_encoder()

    def get_output_embeddings(self):
        return self.text_decoder_postnet.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.text_decoder_postnet.set_output_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
            into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install
            soundfile*). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding
            and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details.

        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`SpeechT5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            SpeechT5 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
            or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
            only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            Label indices can be obtained using [`SpeechT5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

        Returns:

        Example:

        ```python
        >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToText
        >>> from datasets import load_dataset

        >>> dataset = load_dataset(
        ...     "hf-internal-testing/librispeech_asr_demo", "clean", split="validation"
        ... )  # doctest: +IGNORE_RESULT
        >>> dataset = dataset.sort("id")
        >>> sampling_rate = dataset.features["audio"].sampling_rate

        >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
        >>> model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")

        >>> # audio file is decoded on the fly
        >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
        >>> predicted_ids = model.generate(**inputs, max_length=100)

        >>> # transcribe speech
        >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
        >>> transcription[0]
        'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
        ```

        ```python
        >>> inputs["labels"] = processor(text_target=dataset[0]["text"], return_tensors="pt").input_ids

        >>> # compute loss
        >>> loss = model(**inputs).loss
        >>> round(loss.item(), 2)
        19.68
        ```
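
        A hedged sketch (not part of the original examples): beam search also works here, since the model keeps
        its decoder cache aligned across beams via `_reorder_cache`. `num_beams` is the standard generation
        argument and is assumed here, not anything SpeechT5-specific:

        ```python
        >>> predicted_ids = model.generate(**inputs, max_length=100, num_beams=4)  # doctest: +SKIP
        ```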
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.speecht5(
            input_values=input_values,
            attention_mask=attention_mask,
            decoder_input_values=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )

        logits = self.text_decoder_postnet(outputs[0])

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


def _generate_speech(
    model: SpeechT5PreTrainedModel,
    input_values: torch.FloatTensor,
    speaker_embeddings: Optional[torch.FloatTensor] = None,
    threshold: float = 0.5,
    minlenratio: float = 0.0,
    maxlenratio: float = 20.0,
    vocoder: Optional[nn.Module] = None,
    output_cross_attentions: bool = False,
) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
    encoder_attention_mask = torch.ones_like(input_values)

    encoder_out = model.speecht5.encoder(
        input_values=input_values,
        attention_mask=encoder_attention_mask,
        return_dict=True,
    )

    encoder_last_hidden_state = encoder_out.last_hidden_state

    # downsample encoder attention mask (only needed when the encoder has a speech prenet)
    if isinstance(model.speecht5.encoder, SpeechT5EncoderWithSpeechPrenet):
        encoder_attention_mask = model.speecht5.encoder.prenet._get_feature_vector_attention_mask(
            encoder_out[0].shape[1], encoder_attention_mask
        )

    maxlen = int(encoder_last_hidden_state.size(1) * maxlenratio / model.config.reduction_factor)
    minlen = int(encoder_last_hidden_state.size(1) * minlenratio / model.config.reduction_factor)

    # Start the output sequence with a mel spectrum that is all zeros.
    output_sequence = encoder_last_hidden_state.new_zeros(1, 1, model.config.num_mel_bins)

    spectrogram = []
    cross_attentions = []
    past_key_values = None
    idx = 0

    while True:
        idx += 1

        # Run the decoder prenet on the entire output sequence.
        decoder_hidden_states = model.speecht5.decoder.prenet(output_sequence, speaker_embeddings)

        # Run the decoder layers on the last element of the prenet output.
        decoder_out = model.speecht5.decoder.wrapped_decoder(
            hidden_states=decoder_hidden_states[:, -1:],
            attention_mask=None,
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=True,
            output_attentions=output_cross_attentions,
            return_dict=True,
        )

        if output_cross_attentions:
            cross_attentions.append(torch.cat(decoder_out.cross_attentions, dim=0))

        last_decoder_output = decoder_out.last_hidden_state[0, -1]
        past_key_values = decoder_out.past_key_values

        # Predict the new mel spectrum for this step in the sequence.
        spectrum = model.speech_decoder_postnet.feat_out(last_decoder_output)
        spectrum = spectrum.view(model.config.reduction_factor, model.config.num_mel_bins)
        spectrogram.append(spectrum)

        # Extend the output sequence with the new mel spectrum.
        output_sequence = torch.cat(
            (output_sequence, spectrum[-1].view(1, 1, model.config.num_mel_bins)), dim=1
        )

        # Predict the probability that this is the stop token.
        prob = torch.sigmoid(model.speech_decoder_postnet.prob_out(last_decoder_output))

        # Finished when stop token or maximum length is reached.
        if idx >= minlen and (int(sum(prob >= threshold)) > 0 or idx >= maxlen):
            spectrogram = torch.cat(spectrogram, dim=0).unsqueeze(0)
            spectrogram = model.speech_decoder_postnet.postnet(spectrogram)
            spectrogram = spectrogram.squeeze(0)
            break

    if vocoder is not None:
        outputs = vocoder(spectrogram)
    else:
        outputs = spectrogram

    if output_cross_attentions:
        cross_attentions = torch.cat(cross_attentions, dim=2)
        outputs = (outputs, cross_attentions)

    return outputs
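

# Hedged arithmetic sketch (illustrative): how `_generate_speech` turns the length ratios into hard bounds.
# With 120 encoder frames, `reduction_factor=2` and the default ratios, the loop may run up to 1200 steps.
def _example_generation_length_bounds():
    encoder_frames, reduction_factor = 120, 2
    minlenratio, maxlenratio = 0.0, 20.0
    minlen = int(encoder_frames * minlenratio / reduction_factor)
    maxlen = int(encoder_frames * maxlenratio / reduction_factor)
    assert (minlen, maxlen) == (0, 1200)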
r  z8SpeechT5 Model with a text encoder and a speech decoder.c                       s~  e Zd ZdZed fddZdd Zdd Zee	e
eed	deej eej eej eej eej eej eej eeeej   eeeej   ee ee ee ee eej eej eej eeef dddZe dejeej eeeeej eeejeejejf f dddZe dejeej eeeeej eeejeejejf f dddZ  ZS )SpeechT5ForTextToSpeechr   r   c                    s\   t  | |jd kr(td| j dt|}t|}t|||| _t	|| _
|   d S )Nr  a    with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `SpeechT5ForTextToSpeech.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.)rq   rr   r#  r!   r   r  r  r  rh  r  r  rz  )r~   r   Ztext_encoderspeech_decoderr   r#   r$   rr   H
  s    

z SpeechT5ForTextToSpeech.__init__c                 C   s
   | j  S r   r  r   r#   r#   r$   r  \
  s    z#SpeechT5ForTextToSpeech.get_encoderc                 C   s
   | j  S r   r  r   r#   r#   r$   r  _
  s    z#SpeechT5ForTextToSpeech.get_decoderr  N)r   rH   r  r  r{  r  r  r  r/  rf  rH  r|  r}  r  r  r  rJ   c                 C   s
  |dk	r|n| j j}|dk	r(tdt |dk	rR|dkrFt|| j j}| j jrRd}| j|||||||||	|
|||dd}| 	|d \}}}d}|dk	rt
| j }|||||||j}|s|f|dd  }|dk	r|f| S |S t|||j|j|j|j|j|j|jd	S )a  
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. The `batch_size` should be 1 currently.

            Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
            [`~PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`):
            Float values of input mel spectrogram.

            SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see
            `past_key_values`).
        speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
            Tensor containing the speaker embeddings.
        labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*):
            Float values of target mel spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss
            computation. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`]
            for details.

        Returns:

        Example:

        ```python
        >>> from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, set_seed
        >>> import torch

        >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
        >>> model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
        >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

        >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
        >>> speaker_embeddings = torch.zeros((1, 512))  # or load xvectors from a file

        >>> set_seed(555)  # make deterministic

        >>> # generate speech
        >>> speech = model.generate(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
        >>> speech.shape
        torch.Size([15872])
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if stop_labels is not None:
            warnings.warn(
                "The argument `stop_labels` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

        if labels is not None:
            if decoder_input_values is None:
                decoder_input_values = shift_spectrograms_right(labels, self.config.reduction_factor)
            if self.config.use_guided_attention_loss:
                output_attentions = True

        outputs = self.speecht5(
            input_values=input_ids,
            attention_mask=attention_mask,
            decoder_input_values=decoder_input_values,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            use_cache=use_cache,
            speaker_embeddings=speaker_embeddings,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )

        outputs_before_postnet, outputs_after_postnet, logits = self.speech_decoder_postnet(outputs[0])

        loss = None
        if labels is not None:
            criterion = SpeechT5SpectrogramLoss(self.config)
            loss = criterion(
                attention_mask,
                outputs_before_postnet,
                outputs_after_postnet,
                logits,
                labels,
                outputs.cross_attentions,
            )

        if not return_dict:
            output = (outputs_after_postnet,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqSpectrogramOutput(
            loss=loss,
            spectrogram=outputs_after_postnet,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    @torch.no_grad()
    def generate(
        self,
        input_ids: torch.LongTensor,
        speaker_embeddings: Optional[torch.FloatTensor] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 20.0,
        vocoder: Optional[nn.Module] = None,
        output_cross_attentions: bool = False,
        **kwargs,
    ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
        r"""
	zSpeechT5ForTextToSpeech.forwardr  r)   r  F)r   r  r  r  r  r  r  rJ   c           	   	   K   s   t | |||||||S )a	  
        Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a
        speech waveform using a vocoder.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. The `batch_size` should be 1 currently.

                Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
                [`~PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
                Tensor containing the speaker embeddings.
            threshold (`float`, *optional*, defaults to 0.5):
                The generated sequence ends when the predicted stop token probability exceeds this value.
            minlenratio (`float`, *optional*, defaults to 0.0):
                Used to calculate the minimum required length for the output sequence.
            maxlenratio (`float`, *optional*, defaults to 20.0):
                Used to calculate the maximum allowed length for the output sequence.
            vocoder (`nn.Module`, *optional*):
                The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
                spectrogram.
            output_cross_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of the decoder's cross-attention layers.

        Returns:
            `tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
            - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
              `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
            - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
              `(num_frames,)` -- The predicted speech waveform.
            - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor`
              of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length,
              input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
        """
        return _generate_speech(
            self,
            input_ids,
            speaker_embeddings,
            threshold,
            minlenratio,
            maxlenratio,
            vocoder,
            output_cross_attentions,
        )

    @torch.no_grad()
    def generate_speech(
        self,
        input_ids: torch.LongTensor,
        speaker_embeddings: Optional[torch.FloatTensor] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 20.0,
        vocoder: Optional[nn.Module] = None,
        output_cross_attentions: bool = False,
    ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
        r"""
        Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a
        speech waveform using a vocoder.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. The `batch_size` should be 1 currently.

                Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
                [`~PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
                Tensor containing the speaker embeddings.
            threshold (`float`, *optional*, defaults to 0.5):
                The generated sequence ends when the predicted stop token probability exceeds this value.
            minlenratio (`float`, *optional*, defaults to 0.0):
                Used to calculate the minimum required length for the output sequence.
            maxlenratio (`float`, *optional*, defaults to 20.0):
                Used to calculate the maximum allowed length for the output sequence.
            vocoder (`nn.Module`, *optional*, defaults to `None`):
                The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
                spectrogram.
            output_cross_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of the decoder's cross-attention layers.

        Returns:
            `tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
            - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
              `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
            - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
              `(num_frames,)` -- The predicted speech waveform.
            - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor`
              of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length,
              input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
        """
        return _generate_speech(
            self,
            input_ids,
            speaker_embeddings,
            threshold,
            minlenratio,
            maxlenratio,
            vocoder,
            output_cross_attentions,
        )


@add_start_docstrings(
    """SpeechT5 Model with a speech encoder and a speech decoder.""",
    SPEECHT5_START_DOCSTRING,
)
class SpeechT5ForSpeechToSpeech(SpeechT5PreTrainedModel):
    def __init__(self, config: SpeechT5Config):
        super().__init__(config)

        speech_encoder = SpeechT5EncoderWithSpeechPrenet(config)
        speech_decoder = SpeechT5DecoderWithSpeechPrenet(config)
        self.speecht5 = SpeechT5Model(config, speech_encoder, speech_decoder)

        self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.speecht5.get_encoder()

    def get_decoder(self):
        return self.speecht5.get_decoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.get_encoder().prenet.freeze_feature_encoder()

    @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqSpectrogramOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_values: Optional[torch.FloatTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        speaker_embeddings: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.FloatTensor] = None,
        stop_labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, Seq2SeqSpectrogramOutput]:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
            into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install
            soundfile*). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding
            and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details.
        decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`):
            Float values of input mel spectrogram.

            SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see
            `past_key_values`).
        speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
            Tensor containing the speaker embeddings.
        labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*):
            Float values of target mel spectrogram. Spectrograms can be obtained using [`SpeechT5Processor`]. See
            [`SpeechT5Processor.__call__`] for details.

        Returns:

        Example:

        ```python
        >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech, SpeechT5HifiGan, set_seed
        >>> from datasets import load_dataset
        >>> import torch

        >>> dataset = load_dataset(
        ...     "hf-internal-testing/librispeech_asr_demo", "clean", split="validation"
        ... )  # doctest: +IGNORE_RESULT
        >>> dataset = dataset.sort("id")
        >>> sampling_rate = dataset.features["audio"].sampling_rate

        >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
        >>> model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
        >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

        >>> # audio file is decoded on the fly
        >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")

        >>> speaker_embeddings = torch.zeros((1, 512))  # or load xvectors from a file

        >>> set_seed(555)  # make deterministic

        >>> # generate speech
        >>> speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder)
        >>> speech.shape
        torch.Size([77824])
        ```
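
        A hedged follow-up (illustrative, not from the original example; assumes the `soundfile` library and the
        model's 16 kHz output rate):

        ```python
        >>> import soundfile as sf

        >>> sf.write("converted_speech.wav", speech.numpy(), samplerate=16000)  # doctest: +SKIP
        ```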
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if stop_labels is not None:
            warnings.warn(
                "The argument `stop_labels` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

        if labels is not None:
            if decoder_input_values is None:
                decoder_input_values = shift_spectrograms_right(labels, self.config.reduction_factor)

        outputs = self.speecht5(
            input_values=input_values,
            attention_mask=attention_mask,
            decoder_input_values=decoder_input_values,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            use_cache=use_cache,
            speaker_embeddings=speaker_embeddings,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )

        _, spectrogram, logits = self.speech_decoder_postnet(outputs[0])

        loss = None

        if not return_dict:
            output = (spectrogram,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqSpectrogramOutput(
            loss=loss,
            spectrogram=spectrogram,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    @torch.no_grad()
    def generate_speech(
        self,
        input_values: torch.FloatTensor,
        speaker_embeddings: Optional[torch.FloatTensor] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 20.0,
        vocoder: Optional[nn.Module] = None,
        output_cross_attentions: bool = False,
    ) -> torch.FloatTensor:
        r"""
        Converts a raw speech waveform into a sequence of mel spectrograms, which are subsequently turned back into a
        speech waveform using a vocoder.

        Args:
            input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
                Float values of input raw speech waveform. The `batch_size` should be 1 currently.

                Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `List[float]` or
                a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array
                into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor
                of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details.
            speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
                Tensor containing the speaker embeddings.
            threshold (`float`, *optional*, defaults to 0.5):
                The generated sequence ends when the predicted stop token probability exceeds this value.
            minlenratio (`float`, *optional*, defaults to 0.0):
                Used to calculate the minimum required length for the output sequence.
            maxlenratio (`float`, *optional*, defaults to 20.0):
                Used to calculate the maximum allowed length for the output sequence.
            vocoder (`nn.Module`, *optional*, defaults to `None`):
                The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
                spectrogram.
            output_cross_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of the decoder's cross-attention layers.

        Returns:
            `tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
            - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
              `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
            - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
              `(num_frames,)` -- The predicted speech waveform.
            - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor`
              of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length,
              input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
        """
        if speaker_embeddings is None:
            speaker_embeddings = torch.zeros((1, 512), device=input_values.device)

        return _generate_speech(
            self,
            input_values,
            speaker_embeddings,
            threshold,
            minlenratio,
            maxlenratio,
            vocoder,
            output_cross_attentions,
        )


HIFIGAN_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`SpeechT5HifiGanConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


class HifiGanResidualBlock(nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
        super().__init__()
        self.leaky_relu_slope = leaky_relu_slope

        # dilated convolutions followed by plain convolutions, both with "same" padding
        self.convs1 = nn.ModuleList(
            [
                nn.Conv1d(channels, channels, kernel_size, stride=1, dilation=d, padding=self.get_padding(kernel_size, d))
                for d in dilation
            ]
        )
        self.convs2 = nn.ModuleList(
            [
                nn.Conv1d(channels, channels, kernel_size, stride=1, dilation=1, padding=self.get_padding(kernel_size, 1))
                for _ in dilation
            ]
        )

    def get_padding(self, kernel_size, dilation=1):
        return (kernel_size * dilation - dilation) // 2

    def apply_weight_norm(self):
        for layer in self.convs1:
            nn.utils.weight_norm(layer)
        for layer in self.convs2:
            nn.utils.weight_norm(layer)

    def remove_weight_norm(self):
        for layer in self.convs1:
            nn.utils.remove_weight_norm(layer)
        for layer in self.convs2:
            nn.utils.remove_weight_norm(layer)

    def forward(self, hidden_states):
        for conv1, conv2 in zip(self.convs1, self.convs2):
            residual = hidden_states
            hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
            hidden_states = conv1(hidden_states)
            hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
            hidden_states = conv2(hidden_states)
            hidden_states = hidden_states + residual
        return hidden_states


@add_start_docstrings(
    """HiFi-GAN vocoder.""",
    HIFIGAN_START_DOCSTRING,
)
class SpeechT5HifiGan(PreTrainedModel):
    config_class = SpeechT5HifiGanConfig
    main_input_name = "spectrogram"

    def __init__(self, config: SpeechT5HifiGanConfig):
        super().__init__(config)
        self.num_kernels = len(config.resblock_kernel_sizes)
        self.num_upsamples = len(config.upsample_rates)
        self.conv_pre = nn.Conv1d(config.model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3)

        self.upsampler = nn.ModuleList()
        for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
            self.upsampler.append(
                nn.ConvTranspose1d(
                    config.upsample_initial_channel // (2**i),
                    config.upsample_initial_channel // (2 ** (i + 1)),
                    kernel_size=kernel_size,
                    stride=upsample_rate,
                    padding=(kernel_size - upsample_rate) // 2,
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.upsampler)):
            channels = config.upsample_initial_channel // (2 ** (i + 1))
            for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
                self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))

        self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)

        self.register_buffer("mean", torch.zeros(config.model_in_dim))
        self.register_buffer("scale", torch.ones(config.model_in_dim))

        # Initialize weights and apply final processing
        self.post_init()

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def apply_weight_norm(self):
        nn.utils.weight_norm(self.conv_pre)
        for layer in self.upsampler:
            nn.utils.weight_norm(layer)
        for layer in self.resblocks:
            layer.apply_weight_norm()
        nn.utils.weight_norm(self.conv_post)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv_pre)
        for layer in self.upsampler:
            nn.utils.remove_weight_norm(layer)
        for layer in self.resblocks:
            layer.remove_weight_norm()
        nn.utils.remove_weight_norm(self.conv_post)

    def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
        r"""
        Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
        of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
        waveform.

        Args:
            spectrogram (`torch.FloatTensor`):
                Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
                config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.

        Returns:
            `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
            shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
        """
        if self.config.normalize_before:
            spectrogram = (spectrogram - self.mean) / self.scale

        is_batched = spectrogram.dim() == 3
        if not is_batched:
            spectrogram = spectrogram.unsqueeze(0)

        hidden_states = spectrogram.transpose(2, 1)

        hidden_states = self.conv_pre(hidden_states)
        for i in range(self.num_upsamples):
            hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
            hidden_states = self.upsampler[i](hidden_states)

            # average the outputs of all residual blocks at this resolution
            res_state = self.resblocks[i * self.num_kernels](hidden_states)
            for j in range(1, self.num_kernels):
                res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
            hidden_states = res_state / self.num_kernels

        hidden_states = nn.functional.leaky_relu(hidden_states)
        hidden_states = self.conv_post(hidden_states)
        hidden_states = torch.tanh(hidden_states)

        if not is_batched:
            # remove batch dim and collapse tensor to 1-d audio waveform
            waveform = hidden_states.squeeze(0).transpose(1, 0).flatten()
        else:
            # remove seq-len dim since this collapses to 1
            waveform = hidden_states.squeeze(1)

        return waveform
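

# Hedged sketch (illustrative addition): the "same" padding used throughout `HifiGanResidualBlock` keeps the
# time axis unchanged for any kernel/dilation pair, since padding = (kernel_size * dilation - dilation) // 2.
def _example_hifigan_same_padding():
    kernel_size, dilation = 3, 5
    padding = (kernel_size * dilation - dilation) // 2
    conv = nn.Conv1d(4, 4, kernel_size, stride=1, dilation=dilation, padding=padding)
    waveform_like = torch.randn(1, 4, 100)
    assert conv(waveform_like).shape == waveform_like.shape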