""" PyTorch WavLM model."""

import math
import warnings
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
    BaseModelOutput,
    CausalLMOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
    Wav2Vec2BaseModelOutput,
    XVectorOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_wavlm import WavLMConfig


logger = logging.get_logger(__name__)


_HIDDEN_STATES_START_POSITION = 2

# General docstring
_CONFIG_FOR_DOC = "WavLMConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "patrickvonplaten/wavlm-libri-clean-100h-base-plus"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]

# CTC docstring
_CTC_EXPECTED_OUTPUT = "'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'"
_CTC_EXPECTED_LOSS = 12.51

# Frame class docstring
_FRAME_CLASS_CHECKPOINT = "microsoft/wavlm-base-plus-sd"
_FRAME_EXPECTED_OUTPUT = [0, 0]

# Speaker Verification docstring
_XVECTOR_CHECKPOINT = "microsoft/wavlm-base-plus-sv"
_XVECTOR_EXPECTED_OUTPUT = 0.97

WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/wavlm-base",
    "microsoft/wavlm-base-plus",
    "microsoft/wavlm-large",
]


def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method
    for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be
    run on CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad the vector
        # to ensure the same dimension for all batches due to probabilistic rounding
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
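# Illustrative sketch (not part of the module's API): `_compute_mask_indices` returns a boolean
# numpy mask that callers convert to a torch tensor before overwriting the masked time steps,
# as `WavLMModel._mask_hidden_states` does further below. Shapes and probabilities here are
# made up for the example.
#
#     mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.05, mask_length=10)
#     mask = torch.tensor(mask, dtype=torch.bool)  # (batch_size=2, sequence_length=100)
#     hidden_states[mask] = masked_spec_embed.to(hidden_states.dtype)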

class WavLMNoLayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states

class WavLMLayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)

        hidden_states = self.activation(hidden_states)
        return hidden_states
zWavLMLayerNormConvLayer.forward)r   rb   r&   r&   r\   r'   rg      s   rg   c                       s&   e Zd Zd fdd	Zdd Z  ZS )WavLMGroupNormConvLayerr   c                    s   t    |dkr |j|d  nd| _|j| | _tj| j| j|j| |j| |j	d| _
t|j | _tj| j| jdd| _d S )Nr   r   rH   T)
num_groupsZnum_channelsZaffine)rL   rM   rN   rO   rP   r   rQ   rR   rS   rT   rU   r   rV   rW   	GroupNormri   rX   r\   r&   r'   rM      s    
z WavLMGroupNormConvLayer.__init__c                 C   s"   |  |}| |}| |}|S r^   )rU   ri   rW   r_   r&   r&   r'   ra     s    


zWavLMGroupNormConvLayer.forward)r   rb   r&   r&   r\   r'   rl      s   rl   c                       s$   e Zd Z fddZdd Z  ZS )WavLMPositionalConvEmbeddingc              	      s   t    tj|j|j|j|jd |jd| _tjj	}t
tjjdrNtjjj	}t rdd l}|jj| jjdd || jddd| _W 5 Q R X |j| | jj |j| | jj n|| jddd| _t|j| _t|j | _d S )Nr   )rI   paddinggroupsweight_normr   )Zmodifier_rankweight)namedim)rL   rM   r   rQ   hidden_sizenum_conv_pos_embeddingsZnum_conv_pos_embedding_groupsrU   utilsrr   hasattrZparametrizationsr	   	deepspeedzeroZGatheredParametersrs   Zregister_external_parameterZweight_vZweight_gWavLMSamePadLayerrp   r   rV   rW   )rY   rZ   rr   rz   r\   r&   r'   rM     s(    

z%WavLMPositionalConvEmbedding.__init__c                 C   s:   | dd}| |}| |}| |}| dd}|S Nr   r   )rk   rU   rp   rW   r_   r&   r&   r'   ra   +  s    


z$WavLMPositionalConvEmbedding.forwardrb   r&   r&   r\   r'   ro     s   ro   c                       s$   e Zd Z fddZdd Z  ZS )r|   c                    s$   t    |d dkrdnd| _d S Nr   r   r   )rL   rM   num_pad_remove)rY   rw   r\   r&   r'   rM   8  s    
zWavLMSamePadLayer.__init__c                 C   s,   | j dkr(|d d d d d | j  f }|S )Nr   )r   r_   r&   r&   r'   ra   <  s    
zWavLMSamePadLayer.forwardrb   r&   r&   r\   r'   r|   7  s   r|   c                       s0   e Zd ZdZ fddZdd Zdd Z  ZS )WavLMFeatureEncoderz.Construct the features from raw audio waveformc                    s   t     jdkr@t ddg fddt jd D  }n6 jdkrd fddt jD }ntd	 j d
t|| _	d| _
d| _d S )Ngroupr   r[   c                    s   g | ]}t  |d  dqS )r   r   )rG   r+   irZ   r&   r'   r-   J  s    z0WavLMFeatureEncoder.__init__.<locals>.<listcomp>r   layerc                    s   g | ]}t  |d qS )r   )rg   r   r   r&   r'   r-   N  s     z`config.feat_extract_norm` is z), but has to be one of ['group', 'layer']FT)rL   rM   Zfeat_extract_normrl   r9   Znum_feat_extract_layersr1   r   
ModuleListconv_layersgradient_checkpointing_requires_grad)rY   rZ   r   r\   r   r'   rM   F  s    



zWavLMFeatureEncoder.__init__c                 C   s   |   D ]
}d|_qd| _d S )NF)
parametersrequires_gradr   rY   paramr&   r&   r'   _freeze_parametersW  s    z&WavLMFeatureEncoder._freeze_parametersc                 C   sj   |d d d f }| j r"| jr"d|_| jD ]<}| j r\| jr\| jr\dd }tjj|||}q(||}q(|S )NTc                    s    fdd}|S )Nc                     s    |  S r^   r&   inputsmoduler&   r'   custom_forwardg  s    zRWavLMFeatureEncoder.forward.<locals>.create_custom_forward.<locals>.custom_forwardr&   r   r   r&   r   r'   create_custom_forwardf  s    z:WavLMFeatureEncoder.forward.<locals>.create_custom_forward)r   trainingr   r   r   torchrx   
checkpoint)rY   input_valuesr`   Z
conv_layerr   r&   r&   r'   ra   \  s    

zWavLMFeatureEncoder.forward)rc   rd   re   __doc__rM   r   ra   rf   r&   r&   r\   r'   r   C  s   r   c                       s   e Zd Z fddZ  ZS )WavLMFeatureExtractorc                    s8   t  | td| jj d| jjd j dt d S )NzThe class `zD` has been depreciated and will be removed in Transformers v5. Use `r   z
` instead.)rL   rM   warningswarnr]   rc   	__bases__FutureWarningrY   rZ   r\   r&   r'   rM   w  s
    zWavLMFeatureExtractor.__init__)rc   rd   re   rM   rf   r&   r&   r\   r'   r   v  s   r   c                       s$   e Zd Z fddZdd Z  ZS )WavLMFeatureProjectionc                    sJ   t    tj|jd |jd| _t|jd |j| _	t

class WavLMFeatureProjection(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states, norm_hidden_states
ej	 e
ej	 eeej	e
ej	 e
eej	  f dddZejeejejf ejeejejfdddZeeejdddZejejdddZ  ZS )WavLMAttentionz=Multi-headed attention from 'Attention Is All You Need' paper        @     T	embed_dim	num_headsr   num_bucketsmax_distancehas_relative_position_biasc                    s   t    || _|| _|| _|| | _| j| | jkrNtd| j d| d| jd | _t	||| _
t	||| _t	||| _t	||| _|| _|| _ttd| jdd| _t	| jd| _|rt| j| j| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      r      )rL   rM   r   r   r   Zhead_dimr1   Zscalingr   r   k_projv_projq_projout_projr   r   	Parameterr   r?   gru_rel_pos_constgru_rel_pos_linearZ	Embeddingrel_attn_embed)rY   r   r   r   r   r   r   r\   r&   r'   rM     s(    	

zWavLMAttention.__init__NFr   )r`   r   position_biasoutput_attentionsr   c                 C   s  |  \}}}|dkrH| ||}|d|ddd|| j ||}||jdd | jdf }	|	dddd}	| |	}
|
|	jdd d 	d}
t
|
jddd\}}||| j d	  d
 }||| j dd| }|d||f}| ||||\}}|||fS )z'Attention layer with relative attentionNr   r   r)   r   r   )r      ru         ?g       @)sizecompute_bias	unsqueezerepeatviewr   r   permuter   r6   r   Zsigmoidchunkr   torch_multi_head_self_attention)rY   r`   r   r   r   indexZbszZtgt_lenr,   Zgated_hidden_statesZrelative_position_projZgate_aZgate_bZgate_outputgated_position_biasattn_outputattn_weightsr&   r&   r'   ra     s(    	$
   zWavLMAttention.forward)r`   r   r   r   r   c                 C   s   | dd } }}|dk	r&|dnd}d }	}
d}tj|||| j| jtdgt| j	j
| jj
| jj
f|	|
|| j| jj| jj
| j|||d| j	j| jj| jjd\}}| dd}|dk	r|dddf |jdd | jf |jdd  }||fS )zCsimple wrapper around torch's multi_head_attention_forward functionr   r   NFT)Zuse_separate_proj_weightZq_proj_weightZk_proj_weightZv_proj_weight)rk   neFZmulti_head_attention_forwardr   r   r   emptycatr   rK   r   r   r   r   rs   r   rB   r   )rY   r`   r   r   r   querykeyvalueZkey_padding_maskZbias_kZbias_vZadd_zero_attnr   r   r&   r&   r'   r     sB    	

"z.WavLMAttention.torch_multi_head_self_attention)query_length
key_lengthr   c                 C   sx   t j|t jdd d d f }t j|t jdd d d f }|| }| |}|| jjj}| |}|dddg}|S )Nr.   r   r   r   )	r   r=   long_relative_positions_buckettor   rs   devicer   )rY   r   r   Zcontext_positionZmemory_positionZrelative_positionZrelative_position_bucketvaluesr&   r&   r'   r     s    

zWavLMAttention.compute_bias)relative_positionsr   c                 C   s   | j d }|dktj| }t|}|d }||k }t| | }|t| j|  }|||  }|| tj}t	|t
||d }|t|||7 }|S r~   )r   r   r   r   abslogfloatmathr   minZ	full_likewhere)rY   r   r   Zrelative_bucketsZ	max_exactZis_smallZrelative_positions_if_largeZrelative_position_if_larger&   r&   r'   r   "  s    

 z)WavLMAttention._relative_positions_bucket)r   r   r   T)NNFr   )rc   rd   re   r   r   r   r;   rM   r   Tensorr   r   ra   FloatTensorr   
LongTensorZ
BoolTensorr   r   r   rf   r&   r&   r\   r'   r     s@       '    +
7
r   c                       s$   e Zd Z fddZdd Z  ZS )WavLMFeedForwardc                    sp   t    t|j| _t|j|j| _	t

class WavLMFeedForward(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)

        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states
zWavLMFeedForward.forwardrb   r&   r&   r\   r'   r   8  s   r   c                       s0   e Zd Zd
eed fddZddd	Z  ZS )WavLMEncoderLayerTrZ   r   c                    sn   t    t|j|j|j|j|j|d| _t	
|j| _t	j|j|jd| _t|| _t	j|j|jd| _d S Nr   r   rL   rM   r   rv   Znum_attention_headsZattention_dropoutr   Zmax_bucket_distance	attentionr   r   r   r   rh   r   ri   r   feed_forwardfinal_layer_normrY   rZ   r   r\   r&   r'   rM   Q  s    

zWavLMEncoderLayer.__init__NFr   c           	      C   sl   |}| j |||||d\}}}| |}|| }| |}|| | }| |}||f}|rh||f7 }|S )Nr   r   r   r   )r   r   ri   r   r   )	rY   r`   r   r   r   r   attn_residualr   outputsr&   r&   r'   ra   `  s"    



zWavLMEncoderLayer.forward)T)NNFr   rc   rd   re   r   r;   rM   ra   rf   r&   r&   r\   r'   r   P  s   r   c                       s0   e Zd Zd	eed fddZd
ddZ  ZS ) WavLMEncoderLayerStableLayerNormTr   c                    sn   t    t|j|j|j|j|j|d| _t	
|j| _t	j|j|jd| _t|| _t	j|j|jd| _d S r   r   r   r\   r&   r'   rM   z  s    

z)WavLMEncoderLayerStableLayerNorm.__init__NFc                 C   sf   |}|  |}| j||||d\}}}| |}|| }|| | | }||f}|rb||f7 }|S )N)r   r   r   )ri   r   r   r   r   )rY   r`   r   r   r   r   r   r   r&   r&   r'   ra     s    


z(WavLMEncoderLayerStableLayerNorm.forward)T)NNFr   r&   r&   r\   r'   r   y  s   r   c                       s&   e Zd Z fddZdddZ  ZS )	WavLMEncoderc                    sf   t     | _t | _tj j jd| _	t

class WavLMEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = WavLMPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask] = 0.0

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        position_bias = None

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:

                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                        position_bias,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        position_bias=position_bias,
                        output_attentions=output_attentions,
                        index=i,
                    )

                hidden_states, position_bias = layer_outputs[:2]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[-1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
zWavLMEncoder.forward)NFFTrb   r&   r&   r\   r'   r     s       r   c                       s&   e Zd Z fddZdddZ  ZS )	WavLMEncoderStableLayerNormc                    sf   t     | _t | _tj j jd| _	t
 j| _t fddt jD | _d| _d S )Nr   c                    s   g | ]}t  |d kdqS r   )r   r   r   r&   r'   r-     s   z8WavLMEncoderStableLayerNorm.__init__.<locals>.<listcomp>Fr   r   r\   r   r'   rM     s    


z$WavLMEncoderStableLayerNorm.__init__NFTc                    sP  |rdnd } rdnd }|d k	r*d|| < |  |}|| }| |}t }	d }
t| jD ]\}}|rp||f }tg }| jo|dko|| jj	k }|r|	r| j
r| jr· fdd}tjj|||||
}n||| |
d}|d d \}}
|rd} rZ||d f }qZ| |}|r$||f }|sBtdd	 |||fD S t|||d
S )Nr&   r   c                    s    fdd}|S )Nc                     s    | f S r^   r&   r   r  r&   r'   r   )  s    zZWavLMEncoderStableLayerNorm.forward.<locals>.create_custom_forward.<locals>.custom_forwardr&   r   r  r   r'   r   (  s    zBWavLMEncoderStableLayerNorm.forward.<locals>.create_custom_forward)r   r   r   r   r  c                 s   s   | ]}|d k	r|V  qd S r^   r&   r  r&   r&   r'   r  I  s      z6WavLMEncoderStableLayerNorm.forward.<locals>.<genexpr>r  )r   r   r	   r
  r   r   r4   r   rZ   r  r   rx   r   ri   r  r
   r  r&   r  r'   ra     sX    







class WavLMGumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group

        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible by"
                f" `config.num_codevector_groups` {self.num_groups} for concatenation."
            )

        # storage for codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)

        # can be decayed for training
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs):
        marginal_probs = probs.mean(dim=0)
        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states):
        batch_size, sequence_length, hidden_size = hidden_states.shape

        # project to codevector dim
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)

        if self.training:
            # sample code vector probs via gumbel in a differentiable way
            codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True)
            codevector_probs = codevector_probs.type_as(hidden_states)

            # compute perplexity
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist)
        else:
            # take argmax in a non-differentiable way
            # compute hard codevector distribution (one hot)
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)

            perplexity = self._compute_perplexity(codevector_probs)

        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)

        return codevectors, perplexity

class WavLMAdapter(nn.Module):
    def __init__(self, config):
        super().__init__()

        # feature dim might need to be down-projected
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None

        self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers))
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        # down-project hidden_states if necessary
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj(hidden_states)
            hidden_states = self.proj_layer_norm(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)

        for layer in self.layers:
            layerdrop_prob = np.random.random()
            if not self.training or (layerdrop_prob > self.layerdrop):
                hidden_states = layer(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
zWavLMAdapter.forwardrb   r&   r&   r\   r'   r     s   r   c                       s$   e Zd Z fddZdd Z  ZS )r!  c                    s0   t    tj|jd|j |j|jdd| _d S )Nr   r   )rJ   rp   )rL   rM   r   rQ   r"  Zadapter_kernel_sizeadapter_striderU   r   r\   r&   r'   rM     s    
zWavLMAdapterLayer.__init__c                 C   s   |  |}tjj|dd}|S )Nr   r   )rU   r   r  Zglur_   r&   r&   r'   ra     s    
zWavLMAdapterLayer.forwardrb   r&   r&   r\   r'   r!    s   

class WavLMPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = WavLMConfig
    base_model_prefix = "wavlm"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # gumbel softmax requires special init
        if isinstance(module, WavLMGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, WavLMPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, WavLMFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(
        self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
    ):
        """
        Computes the output length of the convolutional layers
        """

        add_adapter = self.config.add_adapter if add_adapter is None else add_adapter

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        if add_adapter:
            for _ in range(self.config.num_adapter_layers):
                input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)

        return input_lengths

    def _get_feature_vector_attention_mask(
        self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
    ):
        # Effectively attention_mask.sum(-1), but not in-place to be able to run in inference mode.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]

        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
        output_lengths = output_lengths.to(torch.long)

        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations make sure that all values before the output lengths indices are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureEncoder)):
            module.gradient_checkpointing = value

WAVLM_START_DOCSTRING = r"""
    WavLM was proposed in [WavLM: Unified Speech Representation Learning with Labeled and Unlabeled
    Data](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo
    Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian,
    Jian Wu, Michael Zeng, Xiangzhan Yu, Furu Wei.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving etc.).

    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`WavLMConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
WAVLM_INPUTS_DOCSTRING = r"""
    Args:
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
            soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
            conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
            1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            <Tip warning={true}>

            `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
            True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
            **not** be passed to avoid degraded performance when doing batched inference. For such models
            `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
            models also yield slightly different results depending on whether `input_values` is padded or not.

            </Tip>

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
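
# Usage sketch for the models below (assumes the Hub checkpoint named in `_CHECKPOINT_FOR_DOC`
# is reachable; the dummy waveform stands in for real 16 kHz audio):
#
#     from transformers import AutoProcessor, WavLMModel
#
#     processor = AutoProcessor.from_pretrained("patrickvonplaten/wavlm-libri-clean-100h-base-plus")
#     model = WavLMModel.from_pretrained("patrickvonplaten/wavlm-libri-clean-100h-base-plus")
#
#     raw_speech = torch.randn(16000).numpy()  # 1 second of dummy audio at 16 kHz
#     inputs = processor(raw_speech, sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         last_hidden_state = model(**inputs).last_hidden_state
#
# With the default feature-encoder config (conv_kernel=(10, 3, 3, 3, 3, 2, 2) and
# conv_stride=(5, 2, 2, 2, 2, 2, 2)), each layer maps L -> floor((L - kernel) / stride) + 1, so
# 16000 samples become 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49 frames, i.e.
# `last_hidden_state` has shape (1, 49, hidden_size), roughly one frame every 20 ms.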

@add_start_docstrings(
    "The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.",
    WAVLM_START_DOCSTRING,
)
class WavLMModel(WavLMPreTrainedModel):
    def __init__(self, config: WavLMConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = WavLMFeatureEncoder(config)
        self.feature_projection = WavLMFeatureProjection(config)

        # model only needs masking vector if mask prob is > 0.0
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())

        if config.do_stable_layer_norm:
            self.encoder = WavLMEncoderStableLayerNorm(config)
        else:
            self.encoder = WavLMEncoder(config)

        self.adapter = WavLMAdapter(config) if config.add_adapter else None

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.feature_extractor._freeze_parameters()

    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        """

        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states

        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)

        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0

        return hidden_states

    @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Wav2Vec2BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(
                extract_features.shape[1], attention_mask, add_adapter=False
            )

        hidden_states, extract_features = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(
            hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
        )

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]

        if self.adapter is not None:
            hidden_states = self.adapter(hidden_states)

        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]

        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            extract_features=extract_features,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
 Zdd Z	e
eeeeeeeddeej eej ee ee ee eej eeef dddZ  ZS )WavLMForCTCN)target_langc                    s~   t  | t|| _t|j| _|| _|j	d krFt
d| j dt|dr\|jr\|jn|j}t||j	| _|   d S )NzYou are trying to instantiate z with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.r2  )rL   rM   r?  r(  r   r   Zfinal_dropoutr   ra  
vocab_sizer1   r]   ry   r2  r"  rv   r   lm_headrG  )rY   rZ   ra  r"  r\   r&   r'   rM     s    

zWavLMForCTC.__init__c                 C   sr   | j }|dk	r2t| jdddkr2td| dn<|dkrXt| jdddk	rXtd n|dk	rn| j|dd dS )a'  
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        NZadapter_attn_dimzCannot pass `target_lang`: z- if `config.adapter_attn_dim` is not defined.z)By default `target_lang` is set to 'eng'.T)Z
force_load)ra  rQ  rZ   r1   loggerinfoZload_adapter)rY   ra  r&   r&   r'   tie_weights  s    zWavLMForCTC.tie_weightsc                 C   s   t dt |   dS rO  rI  NrJ  rL  r&   r&   r'   rM  '  s
    z$WavLMForCTC.freeze_feature_extractorc                 C   s   | j j  dS rN  r(  r@  r   rL  r&   r&   r'   rK  3  s    z"WavLMForCTC.freeze_feature_encoderc                 C   s   | j  D ]
}d|_q
dS z
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        FNr(  r   r   r   r&   r&   r'   freeze_base_model:  s    zWavLMForCTC.freeze_base_model)r   rV  r>  rX  Zexpected_lossr   r   r   r  r  labelsr   c              
   C   sf  |dk	r|n| j j}| j|||||d}|d }| |}| |}	d}
|dk	r"| | j jkrttd| j j |dk	r|ntj	|tj
d}| |dtj
}|dk}|d}||}tjj|	dtjddd}tjjjd	d
, tjj||||| j j| j j| j jd}
W 5 Q R X |sR|	f|td  }|
dk	rN|
f| S |S t|
|	|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        NrY  r   z$Label values must be <= vocab_size: r.   r)   )ru   r/   r   F)enabled)blankZ	reductionZzero_infinitylosslogitsr`   r	  )rZ   r[  r(  r   rc  r    rb  r1   r   Z	ones_liker   r8  r6   r   Zmasked_selectr   r  Zlog_softmaxZfloat32rk   backendsZcudnnflagsZctc_lossZpad_token_idZctc_loss_reductionZctc_zero_infinity_HIDDEN_STATES_START_POSITIONr   r`   r	  )rY   r   r   r   r  r  rm  r   r`   rr  rq  rD   Zlabels_maskZtarget_lengthsZflattened_targetsZ	log_probsoutputr&   r&   r'   ra   B  sR    





   zWavLMForCTC.forward)N)NNNNN)rc   rd   re   r   r   rM   rf  rM  rK  rk  r   r\  r   r]  r   r^  _CTC_EXPECTED_OUTPUT_CTC_EXPECTED_LOSSr   r   r;   r   r   ra   rf   r&   r&   r\   r'   r`    s6   
     
r`  z
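
# Greedy CTC decoding sketch for `WavLMForCTC` (checkpoint from `_CHECKPOINT_FOR_DOC` above;
# `raw_speech` is any 16 kHz waveform array):
#
#     from transformers import AutoProcessor, WavLMForCTC
#
#     processor = AutoProcessor.from_pretrained("patrickvonplaten/wavlm-libri-clean-100h-base-plus")
#     model = WavLMForCTC.from_pretrained("patrickvonplaten/wavlm-libri-clean-100h-base-plus")
#
#     inputs = processor(raw_speech, sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_ids = torch.argmax(logits, dim=-1)
#     transcription = processor.batch_decode(predicted_ids)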

@add_start_docstrings(
    """
    WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """,
    WAVLM_START_DOCSTRING,
)
class WavLMForSequenceClassification(WavLMPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)"
            )
        self.wavlm = WavLMModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.wavlm.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wavlm.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.wavlm(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

@add_start_docstrings(
    """
    WavLM Model with a frame classification head on top for tasks like Speaker Diarization.
    """,
    WAVLM_START_DOCSTRING,
)
class WavLMForAudioFrameClassification(WavLMPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)"
            )
        self.wavlm = WavLMModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.num_labels = config.num_labels

        self.init_weights()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.wavlm.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wavlm.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_FRAME_CLASS_CHECKPOINT,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_FRAME_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.wavlm(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

class AMSoftmaxLoss(nn.Module):
    def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
        super(AMSoftmaxLoss, self).__init__()
        self.scale = scale
        self.margin = margin
        self.num_labels = num_labels
        self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
        self.loss = nn.CrossEntropyLoss()

    def forward(self, hidden_states, labels):
        labels = labels.flatten()
        weight = nn.functional.normalize(self.weight, dim=0)
        hidden_states = nn.functional.normalize(hidden_states, dim=1)
        cos_theta = torch.mm(hidden_states, weight)
        # additive margin: subtract the margin from the cosine of the target class
        psi = cos_theta - self.margin

        onehot = nn.functional.one_hot(labels, self.num_labels)
        logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
        loss = self.loss(logits, labels)
        return loss

class TDNNLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
        self.out_conv_dim = config.tdnn_dim[layer_id]
        self.kernel_size = config.tdnn_kernel[layer_id]
        self.dilation = config.tdnn_dilation[layer_id]

        self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
        self.activation = nn.ReLU()

    def forward(self, hidden_states):
        # implement the time-delay convolution as unfold + linear layer
        hidden_states = hidden_states.unsqueeze(1)
        hidden_states = nn.functional.unfold(
            hidden_states,
            (self.kernel_size, self.in_conv_dim),
            stride=(1, self.in_conv_dim),
            dilation=(self.dilation, 1),
        )
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.kernel(hidden_states)

        hidden_states = self.activation(hidden_states)
        return hidden_states

@add_start_docstrings(
    """
    WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.
    """,
    WAVLM_START_DOCSTRING,
)
class WavLMForXVector(WavLMPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.wavlm = WavLMModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])

        tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
        self.tdnn = nn.ModuleList(tdnn_layers)

        self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
        self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)

        self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)

        self.init_weights()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.wavlm.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wavlm.parameters():
            param.requires_grad = False

    def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the TDNN layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return (input_length - kernel_size) // stride + 1

        for kernel_size in self.config.tdnn_kernel:
            input_lengths = _conv_out_length(input_lengths, kernel_size, 1)

        return input_lengths

    @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_XVECTOR_CHECKPOINT,
        output_type=XVectorOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_XVECTOR_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, XVectorOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.wavlm(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)

        for tdnn_layer in self.tdnn:
            hidden_states = tdnn_layer(hidden_states)

        # Statistic Pooling
        if attention_mask is None:
            mean_features = hidden_states.mean(dim=1)
            std_features = hidden_states.std(dim=1)
        else:
            feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
            tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
            mean_features = []
            std_features = []
            for i, length in enumerate(tdnn_output_lengths):
                mean_features.append(hidden_states[i, :length].mean(dim=0))
                std_features.append(hidden_states[i, :length].std(dim=0))
            mean_features = torch.stack(mean_features)
            std_features = torch.stack(std_features)
        statistic_pooling = torch.cat([mean_features, std_features], dim=-1)

        output_embeddings = self.feature_extractor(statistic_pooling)
        logits = self.classifier(output_embeddings)

        loss = None
        if labels is not None:
            loss = self.objective(logits, labels)

        if not return_dict:
            output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return XVectorOutput(
            loss=loss,
            logits=logits,
            embeddings=output_embeddings,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
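
# Speaker-verification sketch for `WavLMForXVector` (checkpoint from `_XVECTOR_CHECKPOINT` above;
# the 0.86 decision threshold is an assumption for the example, not a library constant):
#
#     from transformers import AutoFeatureExtractor, WavLMForXVector
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sv")
#     model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv")
#
#     inputs = feature_extractor([audio1, audio2], sampling_rate=16000, padding=True, return_tensors="pt")
#     with torch.no_grad():
#         embeddings = model(**inputs).embeddings
#     embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
#     similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
#     is_same_speaker = similarity >= 0.86  # assumed threshold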
   r   r   r   r   r   Zmodeling_utilsr   rx   r   r   r   r   Zconfiguration_wavlmr   Z
get_loggerrc   rd  ru  r^  r]  r_  rw  rx  r  r  r  r  Z#WAVLM_PRETRAINED_MODEL_ARCHIVE_LISTr   r   r   ZndarrayrF   ModulerG   rg   rl   ro   r|   r   r   r   r   r   r   r   r   r  r  r   r!  r'  ZWAVLM_START_DOCSTRINGr\  r?  r`  ry  r  r  r  r  r&   r&   r&   r'   <module>   s    

  
x(3 ()%XYG \%  vk