""" PyTorch Pop2Piano model."""

import copy
import math
from typing import Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint

from transformers.generation import GenerationConfig

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_torch_fx_proxy,
    logging,
    replace_return_docstrings,
)
from .configuration_pop2piano import Pop2PianoConfig


logger = logging.get_logger(__name__)

_load_pop2piano_layer_norm = True

try:
    from apex.normalization import FusedRMSNorm

    _load_pop2piano_layer_norm = False

    logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pop2PianoLayerNorm")
except ImportError:
    # using the normal Pop2PianoLayerNorm
    pass
except Exception:
    logger.warning("Discovered apex but it failed to load, falling back to Pop2PianoLayerNorm")
    pass

_CONFIG_FOR_DOC = "Pop2PianoConfig"
_CHECKPOINT_FOR_DOC = "sweetcocoa/pop2piano"

POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sweetcocoa/pop2piano",
]


POP2PIANO_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Pop2Piano is a model with relative position embeddings
            so you should be able to pad the inputs on both the right and the left. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining
            take a look at [Pop2Piano Training](./Pop2Piano#training).
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are decoder input IDs?](../glossary#decoder-input-ids) Pop2Piano uses the `pad_token_id` as the
            starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last
            `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare
            `decoder_input_ids` for pretraining take a look at [Pop2Piano Training](./Pop2Piano#training).
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
            1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
            1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
                `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
            Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
            the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Does the same task as `inputs_embeds`. If `inputs_embeds` is not present but `input_features` is present
            then `input_features` will be considered as `inputs_embeds`.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If
            `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of
            `inputs_embeds`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class Pop2PianoLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the Pop2Piano style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Pop2Piano uses a layer norm which only scales and doesn't shift (Root Mean Square Layer Normalization,
        # https://arxiv.org/abs/1910.07467), so the variance is computed without the mean and there is no bias.
        # The accumulation for half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert back to half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


if not _load_pop2piano_layer_norm:
    Pop2PianoLayerNorm = FusedRMSNorm  # noqa

ALL_LAYERNORM_LAYERS.append(Pop2PianoLayerNorm)


class Pop2PianoDenseActDense(nn.Module):
    def __init__(self, config: Pop2PianoConfig):
        super().__init__()
        self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_states = self.wi(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # `self.wo` may be kept in fp32; cast the activations to its dtype unless it is quantized to int8
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class Pop2PianoDenseGatedActDense(nn.Module):
    def __init__(self, config: Pop2PianoConfig):
        super().__init__()
        self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        # `self.wo` may be kept in fp32; cast the activations to its dtype unless it is quantized to int8
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class Pop2PianoLayerFF(nn.Module):
    def __init__(self, config: Pop2PianoConfig):
        super().__init__()
        if config.is_gated_act:
            self.DenseReluDense = Pop2PianoDenseGatedActDense(config)
        else:
            self.DenseReluDense = Pop2PianoDenseActDense(config)

        self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        forwarded_states = self.layer_norm(hidden_states)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class Pop2PianoAttention(nn.Module):
    def __init__(self, config: Pop2PianoConfig, has_relative_attention_bias=False):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.key_value_proj_dim

        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)

        if self.has_relative_attention_bias:
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()
        self.gradient_checkpointing = False

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
        )
        # Prune linear layers
        self.q = prune_linear_layer(self.q, index)
        self.k = prune_linear_layer(self.k, index)
        self.v = prune_linear_layer(self.v, index)
        self.o = prune_linear_layer(self.o, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.inner_dim = self.key_value_proj_dim * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same
        bucket. This should allow for more graceful generalization to longer sequences than the model has been
        trained on.

        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer

        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)

        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact

        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )

        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets

    def compute_bias(self, query_length, key_length, device=None):
        """Compute binned relative position bias"""
        if device is None:
            device = self.relative_attention_bias.weight.device
        context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
        memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
        relative_position = memory_position - context_position  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = self.relative_attention_bias(relative_position_bucket)  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
        return values

    def forward(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_value=None,
        layer_head_mask=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
    ):
        """
        Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
        # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
        batch_size, seq_length = hidden_states.shape[:2]

        real_seq_length = seq_length

        if past_key_value is not None:
            if len(past_key_value) != 2:
                raise ValueError(
                    f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
                )
            real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length

        key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]

        def shape(states):
            """projection"""
            return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)

        def unshape(states):
            """reshape"""
            return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)

        def project(hidden_states, proj_layer, key_value_states, past_key_value):
            """projects hidden states correctly to key/query states"""
            if key_value_states is None:
                # self-attn: (batch_size, n_heads, seq_length, dim_per_head)
                hidden_states = shape(proj_layer(hidden_states))
            elif past_key_value is None:
                # cross-attn: (batch_size, n_heads, seq_length, dim_per_head)
                hidden_states = shape(proj_layer(key_value_states))

            if past_key_value is not None:
                if key_value_states is None:
                    # self-attn: (batch_size, n_heads, key_length, dim_per_head)
                    hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
                elif past_key_value.shape[2] != key_value_states.shape[1]:
                    # cross-attn with a `past_key_value` whose length differs from `key_value_states`
                    hidden_states = shape(proj_layer(key_value_states))
                else:
                    # cross-attn
                    hidden_states = past_key_value
            return hidden_states

        # get query states: (batch_size, n_heads, seq_length, dim_per_head)
        query_states = shape(self.q(hidden_states))

        # get key/value states
        key_states = project(
            hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
        )
        value_states = project(
            hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
        )

        # compute scores
        scores = torch.matmul(query_states, key_states.transpose(3, 2))

        if position_bias is None:
            if not self.has_relative_attention_bias:
                position_bias = torch.zeros(
                    (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
                )
                if self.gradient_checkpointing and self.training:
                    position_bias.requires_grad = True
            else:
                position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)

            # if key and values are already calculated we want only the last query position bias
            if past_key_value is not None:
                position_bias = position_bias[:, :, -hidden_states.size(1) :, :]

            if mask is not None:
                position_bias = position_bias + mask  # (batch_size, n_heads, seq_length, key_length)

        if self.pruned_heads:
            mask = torch.ones(position_bias.shape[1])
            mask[list(self.pruned_heads)] = 0
            position_bias_masked = position_bias[:, mask.bool()]
        else:
            position_bias_masked = position_bias

        scores += position_bias_masked
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask

        attn_output = unshape(torch.matmul(attn_weights, value_states))  # (batch_size, seq_length, dim)
        attn_output = self.o(attn_output)

        present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
        outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)

        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs


class Pop2PianoLayerSelfAttention(nn.Module):
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.SelfAttention = Pop2PianoAttention(config, has_relative_attention_bias=has_relative_attention_bias)
        self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            normed_hidden_states,
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0])
        outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
        return outputs


class Pop2PianoLayerCrossAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.EncDecAttention = Pop2PianoAttention(config, has_relative_attention_bias=False)
        self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            normed_hidden_states,
            mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
        )
        layer_output = hidden_states + self.dropout(attention_output[0])
        outputs = (layer_output,) + attention_output[1:]  # add attentions if we output them
        return outputs


class Pop2PianoBlock(nn.Module):
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.layer = nn.ModuleList()
        self.layer.append(Pop2PianoLayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
        if self.is_decoder:
            self.layer.append(Pop2PianoLayerCrossAttention(config))

        self.layer.append(Pop2PianoLayerFF(config))

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        return_dict=True,
    ):
        if past_key_value is not None:
            if not self.is_decoder:
                logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
            expected_num_past_key_values = 2 if encoder_hidden_states is None else 4

            if len(past_key_value) != expected_num_past_key_values:
                raise ValueError(
                    f"There should be {expected_num_past_key_values} past states. "
                    f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
                    f"Got {len(past_key_value)} past key / value states"
                )

            self_attn_past_key_value = past_key_value[:2]
            cross_attn_past_key_value = past_key_value[2:]
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None

        self_attention_outputs = self.layer[0](
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states, present_key_value_state = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # Keep self-attention outputs and relative position weights

        # clamp inf values to enable fp16 training
        if hidden_states.dtype == torch.float16:
            clamp_value = torch.where(
                torch.isinf(hidden_states).any(),
                torch.finfo(hidden_states.dtype).max - 1000,
                torch.finfo(hidden_states.dtype).max,
            )
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        do_cross_attention = self.is_decoder and encoder_hidden_states is not None
        if do_cross_attention:
            # the actual query length is unknown for cross attention if using past key value states;
            # need to inject it here
            if present_key_value_state is not None:
                query_length = present_key_value_state[0].shape[2]
            else:
                query_length = None

            cross_attention_outputs = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                query_length=query_length,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states = cross_attention_outputs[0]

            # clamp inf values to enable fp16 training
            if hidden_states.dtype == torch.float16:
                clamp_value = torch.where(
                    torch.isinf(hidden_states).any(),
                    torch.finfo(hidden_states.dtype).max - 1000,
                    torch.finfo(hidden_states.dtype).max,
                )
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

            # Combine self attn and cross attn key value states
            if present_key_value_state is not None:
                present_key_value_state = present_key_value_state + cross_attention_outputs[1]

            # Keep cross-attention outputs and relative position weights
            attention_outputs = attention_outputs + cross_attention_outputs[2:]

        # Apply Feed Forward layer
        hidden_states = self.layer[-1](hidden_states)

        # clamp inf values to enable fp16 training
        if hidden_states.dtype == torch.float16:
            clamp_value = torch.where(
                torch.isinf(hidden_states).any(),
                torch.finfo(hidden_states.dtype).max - 1000,
                torch.finfo(hidden_states.dtype).max,
            )
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if use_cache:
            outputs = outputs + (present_key_value_state,) + attention_outputs
        else:
            outputs = outputs + attention_outputs

        # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights),
        # (cross-attention position bias), (cross-attention weights)
        return outputs


class Pop2PianoPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Pop2PianoConfig
    base_model_prefix = "transformer"
    is_parallelizable = False
    supports_gradient_checkpointing = True
    _no_split_modules = ["Pop2PianoBlock"]
    _keep_in_fp32_modules = ["wo"]

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor  # Used for testing weights initialization
        if isinstance(module, Pop2PianoLayerNorm):
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, Pop2PianoConcatEmbeddingToMel):
            module.embedding.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, Pop2PianoForConditionalGeneration):
            # Mesh TensorFlow embeddings initialization
            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
            if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
                module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, Pop2PianoDenseActDense):
            # Mesh TensorFlow FF initialization
            module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi, "bias") and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, Pop2PianoDenseGatedActDense):
            module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
                module.wi_0.bias.data.zero_()
            module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, Pop2PianoAttention):
            # Mesh TensorFlow attention initialization to avoid scaling before softmax
            d_model = self.config.d_model
            key_value_proj_dim = self.config.d_kv
            n_heads = self.config.num_heads
            module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
            module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Pop2PianoAttention, Pop2PianoStack)):
            module.gradient_checkpointing = value

    def _shift_right(self, input_ids):
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id

        if decoder_start_token_id is None:
            raise ValueError(
                "self.model.config.decoder_start_token_id has to be defined. In Pop2Piano it is usually set to the"
                " pad_token_id."
            )

        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id

        if pad_token_id is None:
            raise ValueError("self.model.config.pad_token_id has to be defined.")

        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

        return shifted_input_ids


class Pop2PianoStack(Pop2PianoPreTrainedModel):
    def __init__(self, config, embed_tokens=None):
        super().__init__(config)

        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder

        self.block = nn.ModuleList(
            [Pop2PianoBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
        )
        self.final_layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

        # Initialize weights and apply final processing
        self.post_init()
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")

        if inputs_embeds is None:
            if self.embed_tokens is None:
                raise ValueError("You have to initialize the model with valid token embeddings")
            inputs_embeds = self.embed_tokens(input_ids)

        batch_size, seq_length = input_shape

        # required mask seq length can be calculated via length of past
        mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length

        if use_cache is True:
            if not self.is_decoder:
                raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")

        if attention_mask is None:
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
        if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
            encoder_seq_length = encoder_hidden_states.shape[1]
            encoder_attention_mask = torch.ones(
                batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
            )

        # initialize past_key_values with `None` if past does not exist
        if past_key_values is None:
            past_key_values = [None] * len(self.block)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
        present_key_value_states = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and self.is_decoder) else None
        position_bias = None
        encoder_decoder_position_bias = None

        hidden_states = self.dropout(inputs_embeds)

        for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
            layer_head_mask = head_mask[i]
            cross_attn_layer_head_mask = cross_attn_head_mask[i]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return tuple(module(*inputs, use_cache, output_attentions))

                    return custom_forward

                layer_outputs = checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    extended_attention_mask,
                    position_bias,
                    encoder_hidden_states,
                    encoder_extended_attention_mask,
                    encoder_decoder_position_bias,
                    layer_head_mask,
                    cross_attn_layer_head_mask,
                    None,  # past_key_value is always None with gradient checkpointing
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask=extended_attention_mask,
                    position_bias=position_bias,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_extended_attention_mask,
                    encoder_decoder_position_bias=encoder_decoder_position_bias,
                    layer_head_mask=layer_head_mask,
                    cross_attn_layer_head_mask=cross_attn_layer_head_mask,
                    past_key_value=past_key_value,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            # layer_outputs is a tuple with:
            # hidden-states, key-value-states, (self-attention position bias), (self-attention weights),
            # (cross-attention position bias), (cross-attention weights)
            if use_cache is False:
                layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]

            hidden_states, present_key_value_state = layer_outputs[:2]

            # We share the position biases between the layers - the first layer stores them
            position_bias = layer_outputs[2]
            if self.is_decoder and encoder_hidden_states is not None:
                encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
            # append next layer key value states
            if use_cache:
                present_key_value_states = present_key_value_states + (present_key_value_state,)

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[3],)
                if self.is_decoder:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[5],)

        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    present_key_value_states,
                    all_hidden_states,
                    all_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=present_key_value_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            cross_attentions=all_cross_attentions,
        )


class Pop2PianoConcatEmbeddingToMel(nn.Module):
    """Embedding Matrix for `composer` tokens."""

    def __init__(self, config):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings=config.composer_vocab_size, embedding_dim=config.d_model)

    def forward(self, feature, index_value, embedding_offset):
        index_shifted = index_value - embedding_offset
        composer_embedding = self.embedding(index_shifted).unsqueeze(1)
        inputs_embeds = torch.cat([composer_embedding, feature], dim=1)
        return inputs_embeds


Pop2Piano_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`Pop2PianoConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings("""Pop2Piano Model with a `language modeling` head on top.""", Pop2Piano_START_DOCSTRING)
class Pop2PianoForConditionalGeneration(Pop2PianoPreTrainedModel):
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]

    def __init__(self, config: Pop2PianoConfig):
        super().__init__(config)
        self.config = config
        self.model_dim = config.d_model

        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        self.mel_conditioner = Pop2PianoConcatEmbeddingToMel(config)

        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False

        self.encoder = Pop2PianoStack(encoder_config, self.shared)

        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = Pop2PianoStack(decoder_config, self.shared)

        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def get_mel_conditioner_outputs(
        self,
        input_features: torch.FloatTensor,
        composer: str,
        generation_config: GenerationConfig,
        attention_mask: torch.FloatTensor = None,
    ):
        """
        This method is used to concatenate mel conditioner tokens at the front of the input_features in order to
        control the type of MIDI token generated by the model.

        Args:
            input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                input features extracted from the feature extractor.
            composer (`str`):
                composer token which determines the type of MIDI tokens to be generated.
            generation_config (`~generation.GenerationConfig`):
                The generation config is used to get the composer-feature_token pair.
            attention_mask (`torch.Tensor`, *optional*):
                For batched generation `input_features` are padded to have the same shape across all examples.
                `attention_mask` helps to determine which areas were padded and which were not.
                - 1 for tokens that are **not padded**,
                - 0 for tokens that are **padded**.
        """
        composer_to_feature_token = generation_config.composer_to_feature_token
        if composer not in composer_to_feature_token.keys():
            raise ValueError(
                f"Please choose a composer from {list(composer_to_feature_token.keys())}. "
                f"Composer received - {composer}"
            )
        composer_value = composer_to_feature_token[composer]
        composer_value = torch.tensor(composer_value, device=self.device)
        composer_value = composer_value.repeat(input_features.shape[0])

        embedding_offset = min(composer_to_feature_token.values())

        input_features = self.mel_conditioner(
            feature=input_features,
            index_value=composer_value,
            embedding_offset=embedding_offset,
        )
        if attention_mask is not None:
            input_features[~attention_mask[:, 0].bool()] = 0.0

            # since self.mel_conditioner adds a new array at the front of inputs_embeds we need to do the same
            # for attention_mask to keep the shapes the same
            attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1)
            return input_features, attention_mask

        return input_features, None

    @add_start_docstrings_to_model_forward(POP2PIANO_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
            labels in `[0, ..., config.vocab_size]`
        Returns:
        """
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if inputs_embeds is not None and input_features is not None:
            raise ValueError("Both `inputs_embeds` and `input_features` received! Please provide only one of them")
        elif input_features is not None and inputs_embeds is None:
            inputs_embeds = input_features

        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        hidden_states = encoder_outputs[0]

        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = decoder_outputs[0]

        if self.config.tie_word_embeddings:
            # Rescale output before projecting on vocab
            sequence_output = sequence_output * (self.model_dim**-0.5)

        lm_logits = self.lm_head(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    @torch.no_grad()
    def generate(
        self,
        input_features,
        attention_mask=None,
        composer="composer1",
        generation_config=None,
        **kwargs,
    ):
        """
        Generates token ids for midi outputs.

        <Tip warning={true}>

        Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
        model's default generation configuration. You can override any `generation_config` by passing the corresponding
        parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation
        strategies and code examples, check out the [following guide](./generation_strategies).

        </Tip>

        Parameters:
            input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                This is the featurized version of audio generated by `Pop2PianoFeatureExtractor`.
            attention_mask:
                For batched generation `input_features` are padded to have the same shape across all examples.
                `attention_mask` helps to determine which areas were padded and which were not.
                - 1 for tokens that are **not padded**,
                - 0 for tokens that are **padded**.
            composer (`str`, *optional*, defaults to `"composer1"`):
                This value is passed to `Pop2PianoConcatEmbeddingToMel` to generate different embeddings for each
                `"composer"`. Please make sure that the composet value is present in `composer_to_feature_token` in
                `generation_config`. For an example please see
                https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json .
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which had the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            kwargs:
                Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
        Return:
            [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.
                Since Pop2Piano is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:
                    - [`~generation.GreedySearchEncoderDecoderOutput`],
                    - [`~generation.SampleEncoderDecoderOutput`],
                    - [`~generation.BeamSearchEncoderDecoderOutput`],
                    - [`~generation.BeamSampleEncoderDecoderOutput`]
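
        Example (a minimal usage sketch, not part of the original file; it assumes the `sweetcocoa/pop2piano`
        checkpoint, its `Pop2PianoProcessor`, and `librosa` are available, and `"audio.wav"` is a hypothetical input
        file):

        ```python
        >>> import librosa
        >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor

        >>> audio, sr = librosa.load("audio.wav", sr=44100)  # load the pop audio to be converted
        >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
        >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano")

        >>> # extract log-mel features and generate MIDI token ids conditioned on a composer token
        >>> inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt")
        >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1")

        >>> # decode the token ids back into a pretty_midi object and write it to disk
        >>> tokenizer_output = processor.batch_decode(
        ...     token_ids=model_output, feature_extractor_output=inputs
        ... )["pretty_midi_objects"][0]
        >>> tokenizer_output.write("./midi_output.mid")
        ```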
        """
        if generation_config is None:
            generation_config = self.generation_config
        generation_config.update(**kwargs)

        # check for composer_to_feature_token
        if not hasattr(generation_config, "composer_to_feature_token"):
            raise ValueError(
                "`composer_to_feature_token` was not found! Please refer to "
                "https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json "
                "and parse a dict like that."
            )

        if len(generation_config.composer_to_feature_token) != self.config.composer_vocab_size:
            raise ValueError(
                "config.composer_vocab_size must be same as the number of keys in "
                "generation_config.composer_to_feature_token! "
                f"Found {self.config.composer_vocab_size} vs {len(generation_config.composer_to_feature_token)}."
            )

        # to control the variation of generated MIDI tokens we concatenate mel-conditioner tokens
        # (which depend on composer_token) at the front of input_features.
        input_features, attention_mask = self.get_mel_conditioner_outputs(
            input_features=input_features,
            attention_mask=attention_mask,
            composer=composer,
            generation_config=generation_config,
        )

        return super().generate(
            inputs=None,
            inputs_embeds=input_features,
            attention_mask=attention_mask,
            generation_config=generation_config,
            **kwargs,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        return {
            "decoder_input_ids": input_ids,
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return self._shift_right(labels)

    def _reorder_cache(self, past_key_values, beam_idx):
        # if decoder past is not included in output
        # speedy decoding is disabled and no need to reorder
        if past_key_values is None:
            logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
            return past_key_values

        reordered_decoder_past = ()
        for layer_past_states in past_key_values:
            # get the correct batch idx from layer past batch dim; batch dim of `past` is at the 1st position
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # need to set correct `past` for each of the four key / value states
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
                )

            if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
                raise ValueError(
                    f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} "
                    f"and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
                )
            if len(reordered_layer_past_states) != len(layer_past_states):
                raise ValueError(
                    f"length of reordered_layer_past_states {len(reordered_layer_past_states)} "
                    f"and length of layer_past_states {len(layer_past_states)} mismatched"
                )

            reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
        return reordered_decoder_past