"""Tokenization class for Pop2Piano."""

import json
import os
from typing import List, Optional, Tuple, Union

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...tokenization_utils import AddedToken, BatchEncoding, PaddingStrategy, PreTrainedTokenizer, TruncationStrategy
from ...utils import TensorType, is_pretty_midi_available, logging, requires_backends, to_numpy


if is_pretty_midi_available():
    import pretty_midi

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab": "vocab.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab": {
        "sweetcocoa/pop2piano": "https://huggingface.co/sweetcocoa/pop2piano/blob/main/vocab.json",
    },
}


def token_time_to_note(number, cutoff_time_idx, current_idx):
    # A TOKEN_TIME token advances the running beat index; clip it at the cutoff if one is given.
    current_idx += number
    if cutoff_time_idx is not None:
        current_idx = min(current_idx, cutoff_time_idx)

    return current_idx


def token_note_to_note(number, current_velocity, default_velocity, note_onsets_ready, current_idx, notes):
    if note_onsets_ready[number] is not None:
        # This pitch already has a pending onset, so the current index closes the note.
        onset_idx = note_onsets_ready[number]
        if onset_idx < current_idx:
            offset_idx = current_idx
            notes.append([onset_idx, offset_idx, number, default_velocity])
            onsets_ready = None if current_velocity == 0 else current_idx
            note_onsets_ready[number] = onsets_ready
    else:
        # No pending onset for this pitch yet, so register one at the current index.
        note_onsets_ready[number] = current_idx
    return notes


class Pop2PianoTokenizer(PreTrainedTokenizer):
    """
    Constructs a Pop2Piano tokenizer. This tokenizer does not require training.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab (`str`):
            Path to the vocab file which contains the vocabulary.
        default_velocity (`int`, *optional*, defaults to 77):
            Determines the default velocity to be used while creating midi Notes.
        num_bars (`int`, *optional*, defaults to 2):
            Determines the `cutoff_time_idx` used for each token.
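
    Example (an illustrative usage sketch rather than part of the original documentation; it assumes the
    `sweetcocoa/pop2piano` checkpoint and the optional `pretty_midi` dependency are available):

    ```python
    >>> import numpy as np
    >>> from transformers import Pop2PianoTokenizer

    >>> tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")
    >>> # each row is one note given as [onset idx, offset idx, pitch, velocity]
    >>> notes = np.array([[0, 2, 60, 70], [1, 3, 64, 70]])
    >>> encoding = tokenizer(notes)
    ```

    Ids generated by `Pop2PianoForConditionalGeneration` can be turned back into notes and `pretty_midi`
    objects with [`~Pop2PianoTokenizer.batch_decode`].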
    """

    model_input_names = ["token_ids", "attention_mask"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab,
        default_velocity=77,
        num_bars=2,
        unk_token="-1",
        eos_token="1",
        pad_token="0",
        bos_token="2",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token

        self.default_velocity = default_velocity
        self.num_bars = num_bars

        # Load the vocabulary, which maps midi tokens of the form "<value>_<TOKEN_TYPE>" to ids.
        with open(vocab, "rb") as file:
            self.encoder = json.load(file)

        # Reverse mapping used when decoding ids back to midi tokens.
        self.decoder = {v: k for k, v in self.encoder.items()}

        super().__init__(
            unk_token=unk_token,
            eos_token=eos_token,
            pad_token=pad_token,
            bos_token=bos_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        """Returns the vocabulary size of the tokenizer."""
        return len(self.encoder)

    def get_vocab(self):
        """Returns the vocabulary of the tokenizer."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def _convert_id_to_token(self, token_id: int) -> list:
        """
        Decodes the token ids generated by the transformer into notes.

        Args:
            token_id (`int`):
                This denotes the ids generated by the transformer to be converted to Midi tokens.

        Returns:
            `List`: A list consisting of token_type (`str`) and value (`int`).
        """
        token_type_value = self.decoder.get(token_id, f"{self.unk_token}_TOKEN_TIME")
        token_type_value = token_type_value.split("_")
        token_type, value = "_".join(token_type_value[1:]), int(token_type_value[0])

        return [token_type, value]

    def _convert_token_to_id(self, token, token_type="TOKEN_TIME") -> int:
        """
        Encodes the Midi tokens to transformer generated token ids.

        Args:
            token (`int`):
                This denotes the token value.
            token_type (`str`):
                This denotes the type of the token. There are four types of midi tokens such as "TOKEN_TIME",
                "TOKEN_VELOCITY", "TOKEN_NOTE" and "TOKEN_SPECIAL".

        Returns:
            `int`: returns the id of the token.
        """
        return self.encoder.get(f"{token}_{token_type}", int(self.unk_token))

    def relative_batch_tokens_ids_to_notes(
        self,
        tokens: np.ndarray,
        beat_offset_idx: int,
        bars_per_batch: int,
        cutoff_time_idx: int,
    ):
        """
        Converts relative tokens to notes which are then used to generate pretty midi object.

        Args:
            tokens (`numpy.ndarray`):
                Tokens to be converted to notes.
            beat_offset_idx (`int`):
                Denotes beat offset index for each note in generated Midi.
            bars_per_batch (`int`):
                A parameter to control the Midi output generation.
            cutoff_time_idx (`int`):
                Denotes the cutoff time index for each note in generated Midi.
        """
        notes = None

        for index in range(len(tokens)):
            _tokens = tokens[index]
            _start_idx = beat_offset_idx + index * bars_per_batch * 4
            _cutoff_time_idx = cutoff_time_idx + _start_idx
            _notes = self.relative_tokens_ids_to_notes(
                _tokens,
                start_idx=_start_idx,
                cutoff_time_idx=_cutoff_time_idx,
            )

            if len(_notes) == 0:
                pass
            elif notes is None:
                notes = _notes
            else:
                notes = np.concatenate((notes, _notes), axis=0)

        if notes is None:
            return []
        return notes

    def relative_batch_tokens_ids_to_midi(
        self,
        tokens: np.ndarray,
        beatstep: np.ndarray,
        beat_offset_idx: int = 0,
        bars_per_batch: int = 2,
        cutoff_time_idx: int = 12,
    ):
        """
        Converts tokens to Midi. This method calls `relative_batch_tokens_ids_to_notes` method to convert batch tokens
        to notes then uses `notes_to_midi` method to convert them to Midi.

        Args:
            tokens (`numpy.ndarray`):
                Denotes tokens which alongside beatstep will be converted to Midi.
            beatstep (`np.ndarray`):
                We get beatstep from feature extractor which is also used to get Midi.
            beat_offset_idx (`int`, *optional*, defaults to 0):
                Denotes beat offset index for each note in generated Midi.
            bars_per_batch (`int`, *optional*, defaults to 2):
                A parameter to control the Midi output generation.
            cutoff_time_idx (`int`, *optional*, defaults to 12):
                Denotes the cutoff time index for each note in generated Midi.
        """
        beat_offset_idx = 0 if beat_offset_idx is None else beat_offset_idx
        notes = self.relative_batch_tokens_ids_to_notes(
            tokens=tokens,
            beat_offset_idx=beat_offset_idx,
            bars_per_batch=bars_per_batch,
            cutoff_time_idx=cutoff_time_idx,
        )
        midi = self.notes_to_midi(notes, beatstep, offset_sec=beatstep[beat_offset_idx])
        return midi

    def relative_tokens_ids_to_notes(self, tokens: np.ndarray, start_idx: float, cutoff_time_idx: float = None):
        """
        Converts relative tokens to notes which will then be used to create Pretty Midi objects.

        Args:
            tokens (`numpy.ndarray`):
                Relative Tokens which will be converted to notes.
            start_idx (`float`):
                A parameter which denotes the starting index.
            cutoff_time_idx (`float`, *optional*):
                A parameter used while converting tokens to notes.
        """
        words = [self._convert_id_to_token(token) for token in tokens]

        current_idx = start_idx
        current_velocity = 0
        # one slot per pitch in the vocabulary, holding the onset index of a note that is still open
        note_onsets_ready = [None for i in range(sum([k.endswith("NOTE") for k in self.encoder]) + 1)]
        notes = []
        for token_type, number in words:
            if token_type == "TOKEN_SPECIAL":
                if number == 1:
                    break
            elif token_type == "TOKEN_TIME":
                current_idx = token_time_to_note(
                    number=number, cutoff_time_idx=cutoff_time_idx, current_idx=current_idx
                )
            elif token_type == "TOKEN_VELOCITY":
                current_velocity = number
            elif token_type == "TOKEN_NOTE":
                notes = token_note_to_note(
                    number=number,
                    current_velocity=current_velocity,
                    default_velocity=self.default_velocity,
                    note_onsets_ready=note_onsets_ready,
                    current_idx=current_idx,
                    notes=notes,
                )
            else:
                raise ValueError("Token type not understood!")

        for pitch, note_onset in enumerate(note_onsets_ready):
            # force an offset for every pitch that never received one
            if note_onset is not None:
                if cutoff_time_idx is None:
                    cutoff = note_onset + 1
                else:
                    cutoff = max(cutoff_time_idx, note_onset + 1)

                offset_idx = max(current_idx, cutoff)
                notes.append([note_onset, offset_idx, pitch, self.default_velocity])

        if len(notes) == 0:
            return []
        else:
            notes = np.array(notes)
            note_order = notes[:, 0] * 128 + notes[:, 1]
            notes = notes[note_order.argsort()]
            return notes

    def notes_to_midi(self, notes: np.ndarray, beatstep: np.ndarray, offset_sec: int = 0.0):
        """
        Converts notes to Midi.

        Args:
            notes (`numpy.ndarray`):
                This is used to create Pretty Midi objects.
            beatstep (`numpy.ndarray`):
                This is the extrapolated beatstep that we get from feature extractor.
            offset_sec (`int`, *optional*, defaults to 0.0):
                This represents the offset in seconds which is used while creating each Pretty Midi Note.
        """
        requires_backends(self, ["pretty_midi"])

        new_pm = pretty_midi.PrettyMIDI(resolution=384, initial_tempo=120.0)
        new_inst = pretty_midi.Instrument(program=0)
        new_notes = []

        for onset_idx, offset_idx, pitch, velocity in notes:
            new_note = pretty_midi.Note(
                velocity=velocity,
                pitch=pitch,
                start=beatstep[onset_idx] - offset_sec,
                end=beatstep[offset_idx] - offset_sec,
            )
            new_notes.append(new_note)
        new_inst.notes = new_notes
        new_pm.instruments.append(new_inst)
        new_pm.remove_invalid_notes()
        return new_pm

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Saves the tokenizer's vocabulary dictionary to the provided save_directory.

        Args:
            save_directory (`str`):
                A path to the directory where the vocabulary will be saved. It will be created if it doesn't exist.
            filename_prefix (`Optional[str]`, *optional*):
                A prefix to add to the names of the files saved by the tokenizer.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return None

        # Save the encoder.
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"]
        )
        with open(out_vocab_file, "w") as file:
            file.write(json.dumps(self.encoder))

        return (out_vocab_file,)

    def encode_plus(
        self,
        notes: Union[np.ndarray, List[pretty_midi.Note]],
        truncation_strategy: Optional[TruncationStrategy] = None,
        max_length: Optional[int] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This is the `encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer
        generated token ids. It only works on a single batch, to process multiple batches please use
        `batch_encode_plus` or `__call__` method.

        Args:
            notes (`numpy.ndarray` of shape `[sequence_length, 4]` or `list` of `pretty_midi.Note` objects):
                This represents the midi notes. If `notes` is a `numpy.ndarray`:
                    - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`.
                If `notes` is a `list` containing `pretty_midi.Note` objects:
                    - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`.
            truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*):
                Indicates the truncation strategy that is going to be used during truncation.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).

        Returns:
            `BatchEncoding` containing the token ids.
        """
        requires_backends(self, ["pretty_midi"])

        # check if notes is a list of pretty_midi.Note objects, if yes then extract the attributes
        if isinstance(notes[0], pretty_midi.Note):
            notes = np.array(
                [[each_note.start, each_note.end, each_note.pitch, each_note.velocity] for each_note in notes]
            ).reshape(-1, 4)

        # round all the values to the closest int values
        notes = np.round(notes).astype(np.int32)
        max_time_idx = notes[:, :2].max()

        # gather, for every time index, the pitches that switch on (velocity > 0) or off (velocity 0) there
        times = [[] for i in range(max_time_idx + 1)]
        for onset, offset, pitch, velocity in notes:
            times[onset].append([pitch, velocity])
            times[offset].append([pitch, 0])

        tokens = []
        current_velocity = 0
        for i, time in enumerate(times):
            if len(time) == 0:
                continue
            tokens.append(self._convert_token_to_id(i, "TOKEN_TIME"))
            for pitch, velocity in time:
                velocity = int(velocity > 0)
                if current_velocity != velocity:
                    current_velocity = velocity
                    tokens.append(self._convert_token_to_id(velocity, "TOKEN_VELOCITY"))
                tokens.append(self._convert_token_to_id(pitch, "TOKEN_NOTE"))

        total_len = len(tokens)

        # truncation
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
            tokens, _, _ = self.truncate_sequences(
                ids=tokens,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                **kwargs,
            )

        return BatchEncoding({"token_ids": tokens})

    def batch_encode_plus(
        self,
        notes: Union[np.ndarray, List[pretty_midi.Note]],
        truncation_strategy: Optional[TruncationStrategy] = None,
        max_length: Optional[int] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This is the `batch_encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer
        generated token ids. It works on multiple batches by calling `encode_plus` multiple times in a loop.

        Args:
            notes (`numpy.ndarray` of shape `[batch_size, sequence_length, 4]` or `list` of `pretty_midi.Note` objects):
                This represents the midi notes. If `notes` is a `numpy.ndarray`:
                    - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`.
                If `notes` is a `list` containing `pretty_midi.Note` objects:
                    - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`.
            truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*):
                Indicates the truncation strategy that is going to be used during truncation.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).

        Returns:
            `BatchEncoding` containing the token ids.
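
        Example (an illustrative sketch rather than part of the original documentation; it assumes the
        `sweetcocoa/pop2piano` checkpoint and the optional `pretty_midi` dependency are installed, and the exact
        ids depend on that checkpoint's `vocab.json`):

        ```python
        >>> import numpy as np
        >>> from transformers import Pop2PianoTokenizer

        >>> tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")
        >>> # each row of a sequence is one note given as [onset idx, offset idx, pitch, velocity]
        >>> batch = [np.array([[0, 2, 60, 70], [1, 3, 64, 70]]), np.array([[0, 1, 67, 70]])]
        >>> encoding = tokenizer.batch_encode_plus(batch)
        >>> len(encoding["token_ids"])
        2
        ```

        In practice `__call__` is the usual entry point, since it also handles padding and tensor conversion.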
        """
        encoded_batch_token_ids = []
        for i in range(len(notes)):
            encoded_batch_token_ids.append(
                self.encode_plus(
                    notes[i],
                    truncation_strategy=truncation_strategy,
                    max_length=max_length,
                    **kwargs,
                )["token_ids"]
            )

        return BatchEncoding({"token_ids": encoded_batch_token_ids})

    def __call__(
        self,
        notes: Union[
            np.ndarray,
            List[pretty_midi.Note],
            List[List[pretty_midi.Note]],
        ],
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        This is the `__call__` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer generated
        token ids.

        Args:
            notes (`numpy.ndarray` of shape `[batch_size, max_sequence_length, 4]` or `list` of `pretty_midi.Note` objects):
                This represents the midi notes.

                If `notes` is a `numpy.ndarray`:
                    - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`.
                If `notes` is a `list` containing `pretty_midi.Note` objects:
                    - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`.
            padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
                  to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
                `None`, this will use the predefined model maximum length if a maximum length is required by one of the
                truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
                truncation/padding to a maximum length will be deactivated.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.

        Returns:
            `BatchEncoding` containing the token_ids.
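
        Example (an illustrative sketch rather than part of the original documentation; it assumes the
        `sweetcocoa/pop2piano` checkpoint and the optional `pretty_midi` dependency are installed):

        ```python
        >>> import numpy as np
        >>> from transformers import Pop2PianoTokenizer

        >>> tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")
        >>> # a single sequence of notes, each given as [onset idx, offset idx, pitch, velocity]
        >>> notes = np.array([[0, 2, 60, 70], [1, 3, 64, 70]])
        >>> encoding = tokenizer(notes, padding="max_length", max_length=64, return_tensors="np")
        ```

        The returned `BatchEncoding` holds the padded `token_ids` together with the corresponding `attention_mask`.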
        """
        # check if it is batched or not
        # it is batched if it is either a list of lists or a np.ndarray of shape [batch_size, seq_len, 4]
        is_batched = notes.ndim == 3 if isinstance(notes, np.ndarray) else isinstance(notes[0], list)

        # get the truncation and padding strategies
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        if is_batched:
            # If the user has not explicitly set `return_attention_mask` to False, we change it to True
            return_attention_mask = True if return_attention_mask is None else return_attention_mask
            token_ids = self.batch_encode_plus(
                notes=notes,
                truncation_strategy=truncation_strategy,
                max_length=max_length,
                **kwargs,
            )
        else:
            token_ids = self.encode_plus(
                notes=notes,
                truncation_strategy=truncation_strategy,
                max_length=max_length,
                **kwargs,
            )

        # since we already have truncated sequences we are just left to do padding
        token_ids = self.pad(
            token_ids,
            padding=padding_strategy,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return token_ids

    def batch_decode(
        self,
        token_ids,
        feature_extractor_output: BatchFeature,
        return_midi: bool = True,
    ):
        """
        This is the `batch_decode` method for `Pop2PianoTokenizer`. It converts the token_ids generated by the
        transformer to midi_notes and returns them.

        Args:
            token_ids (`Union[np.ndarray, torch.Tensor, tf.Tensor]`):
                Output token_ids of `Pop2PianoConditionalGeneration` model.
            feature_extractor_output (`BatchFeature`):
                Denotes the output of `Pop2PianoFeatureExtractor.__call__`. It must contain `"beatstep"` and
                `"extrapolated_beatstep"`. Also `"attention_mask_beatsteps"` and
                `"attention_mask_extrapolated_beatstep"`
                 should be present if they were returned by the feature extractor.
            return_midi (`bool`, *optional*, defaults to `True`):
                Whether to return midi object or not.
        Returns:
            If `return_midi` is True:
                - `BatchEncoding` containing both `notes` and `pretty_midi.pretty_midi.PrettyMIDI` objects.
            If `return_midi` is False:
                - `BatchEncoding` containing `notes`.
        r&   attention_mask_beatsteps$attention_mask_extrapolated_beatstep	beatstepsr   rP   zattention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep must be present for batched inputs! But one of them were not present.Nextrapolated_beatstepzbLength mistamtch between token_ids, beatsteps and extrapolated_beatstep! Found token_ids length - z, beatsteps shape - z$ and extrapolated_beatsteps shape - z!Found attention_mask of length - z but token_ids of length - zLength mistamtch of beatsteps and extrapolated_beatstep! Since attention_mask is not present the number of examples must be 1, But found beatsteps length - z", extrapolated_beatsteps length - .r_   )r\   rh   r^   r   )r    Zpretty_midi_objectsr    )boolhasattrshaperv   rt   rc   whererw   rx   rT   r5   r   rk   r:   r   r    r   r   r   r	   )rC   r%   r   r   Zattention_masks_presentZ	batch_idxZ
        """
        # check whether the feature extractor returned attention masks
        attention_masks_present = bool(
            hasattr(feature_extractor_output, "attention_mask")
            and hasattr(feature_extractor_output, "attention_mask_beatsteps")
            and hasattr(feature_extractor_output, "attention_mask_extrapolated_beatstep")
        )

        # if we are processing batched inputs then we must have the attention masks
        if not attention_masks_present and feature_extractor_output["beatsteps"].shape[0] > 1:
            raise ValueError(
                "attention_mask, attention_mask_beatsteps and attention_mask_extrapolated_beatstep must be present "
                "for batched inputs! But one of them were not present."
            )

        # check for length mismatch between inputs
        if attention_masks_present:
            # since we know the number of examples in token_ids from the attention_mask
            if (
                sum(feature_extractor_output["attention_mask"][:, 0] == 0)
                != feature_extractor_output["beatsteps"].shape[0]
                or feature_extractor_output["beatsteps"].shape[0]
                != feature_extractor_output["extrapolated_beatstep"].shape[0]
            ):
                raise ValueError(
                    "Length mismatch between token_ids, beatsteps and extrapolated_beatstep! Found token_ids"
                    f" length - {token_ids.shape[0]}, beatsteps shape -"
                    f" {feature_extractor_output['beatsteps'].shape[0]} and extrapolated_beatsteps shape -"
                    f" {feature_extractor_output['extrapolated_beatstep'].shape[0]}"
                )
            if feature_extractor_output["attention_mask"].shape[0] != token_ids.shape[0]:
                raise ValueError(
                    f"Found attention_mask of length - {feature_extractor_output['attention_mask'].shape[0]} but"
                    f" token_ids of length - {token_ids.shape[0]}"
                )
        else:
            # since there is no attention mask present the number of examples must be 1
            if (
                feature_extractor_output["beatsteps"].shape[0] != 1
                or feature_extractor_output["extrapolated_beatstep"].shape[0] != 1
            ):
                raise ValueError(
                    "Length mismatch of beatsteps and extrapolated_beatstep! Since attention_mask is not present the"
                    " number of examples must be 1, But found beatsteps length -"
                    f" {feature_extractor_output['beatsteps'].shape[0]}, extrapolated_beatsteps length -"
                    f" {feature_extractor_output['extrapolated_beatstep'].shape[0]}."
                )

        if attention_masks_present:
            # check for zeros, since token_ids of the different examples are separated by zero arrays
            batch_idx = np.where(feature_extractor_output["attention_mask"][:, 0] == 0)[0]
        else:
            batch_idx = [token_ids.shape[0]]

        notes_list = []
        pretty_midi_objects_list = []
        start_idx = 0
        for index, end_idx in enumerate(batch_idx):
            each_tokens_ids = token_ids[start_idx:end_idx]
            # check where the whole example ended by searching for eos_token_id and getting the upper bound
            each_tokens_ids = each_tokens_ids[:, : np.max(np.where(each_tokens_ids == int(self.eos_token))[1]) + 1]
            beatsteps = feature_extractor_output["beatsteps"][index]
            extrapolated_beatstep = feature_extractor_output["extrapolated_beatstep"][index]

            # if attention mask is present then mask out the padded part of each array/tensor
            if attention_masks_present:
                attention_mask_beatsteps = feature_extractor_output["attention_mask_beatsteps"][index]
                attention_mask_extrapolated_beatstep = feature_extractor_output[
                    "attention_mask_extrapolated_beatstep"
                ][index]
                beatsteps = beatsteps[: np.max(np.where(attention_mask_beatsteps == 1)[0]) + 1]
                extrapolated_beatstep = extrapolated_beatstep[
                    : np.max(np.where(attention_mask_extrapolated_beatstep == 1)[0]) + 1
                ]

            each_tokens_ids = to_numpy(each_tokens_ids)
            beatsteps = to_numpy(beatsteps)
            extrapolated_beatstep = to_numpy(extrapolated_beatstep)

            pretty_midi_object = self.relative_batch_tokens_ids_to_midi(
                tokens=each_tokens_ids,
                beatstep=extrapolated_beatstep,
                bars_per_batch=self.num_bars,
                cutoff_time_idx=(self.num_bars + 1) * 4,
            )

            for note in pretty_midi_object.instruments[0].notes:
                note.start += beatsteps[0]
                note.end += beatsteps[0]
                notes_list.append(note)

            pretty_midi_objects_list.append(pretty_midi_object)
            start_idx += end_idx + 1  # 1 represents the zero array

        if return_midi:
            return BatchEncoding({"notes": notes_list, "pretty_midi_objects": pretty_midi_objects_list})

        return BatchEncoding({"notes": notes_list})