""" MusicGen model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


logger = logging.get_logger(__name__)

MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/musicgen-small": "https://huggingface.co/facebook/musicgen-small/resolve/main/config.json",
}


class MusicgenDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MusicgenDecoder`]. It is used to instantiate a
    MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MusicGen
    [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 2048):
            Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be
            represented by the `input_ids` passed when calling [`MusicgenDecoder`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of decoder layers.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer block.
        ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically, set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_factor (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models).
        num_codebooks (`int`, *optional*, defaults to 4):
            The number of parallel codebooks forwarded to the model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether input and output word embeddings should be tied.
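
    Example:

    A minimal construction sketch; it only uses arguments documented above, and the values shown mirror the
    documented defaults:

    ```python
    >>> from transformers import MusicgenDecoderConfig

    >>> # Initialize a decoder configuration in the facebook/musicgen-small style
    >>> decoder_config = MusicgenDecoderConfig(hidden_size=1024, num_hidden_layers=24, num_codebooks=4)
    >>> decoder_config.num_codebooks
    4
    ```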
    Zmusicgen_decoderZpast_key_values                    Tgelu   皙?{Gz?F   Nc                    sz   || _ || _|	| _|| _|| _|| _|
| _|| _|| _|| _	|| _
|| _|| _|| _|| _t jf ||||d| d S )N)pad_token_idbos_token_ideos_token_idtie_word_embeddings)
vocab_sizemax_position_embeddingshidden_sizeffn_dimnum_hidden_layersnum_attention_headsdropoutattention_dropoutactivation_dropoutactivation_functioninitializer_factor	layerdrop	use_cachescale_embeddingnum_codebookssuper__init__)selfr   r   r   r   r   r    r!   r   r   r   r   r   r   r"   r#   r   r   r   r   kwargs	__class__ r/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/transformers/models/musicgen/configuration_musicgen.pyr%   R   s.    zMusicgenDecoderConfig.__init__)r   r   r   r	   r
   r   Tr   r   r   r   r   r   Fr   r   r   NF)__name__
__module____qualname____doc__
model_typeZkeys_to_ignore_at_inferencer%   __classcell__r*   r*   r(   r+   r      s.   0                   r   c                       sJ   e Zd ZdZdZdZ fddZeeee	dddZ
ed	d
 Z  ZS )MusicgenConfigaj	  
    This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a
    MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder
    configs.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        kwargs (*optional*):
            Dictionary of keyword arguments. Notably:

                - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
                  defines the text encoder config.
                - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
                  defines the audio encoder config.
                - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
                  the decoder config.

    Example:

    ```python
    >>> from transformers import (
    ...     MusicgenConfig,
    ...     MusicgenDecoderConfig,
    ...     T5Config,
    ...     EncodecConfig,
    ...     MusicgenForConditionalGeneration,
    ... )

    >>> # Initializing text encoder, audio encoder, and decoder model configurations
    >>> text_encoder_config = T5Config()
    >>> audio_encoder_config = EncodecConfig()
    >>> decoder_config = MusicgenDecoderConfig()

    >>> configuration = MusicgenConfig.from_sub_models_config(
    ...     text_encoder_config, audio_encoder_config, decoder_config
    ... )

    >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration
    >>> model = MusicgenForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> config_text_encoder = model.config.text_encoder
    >>> config_audio_encoder = model.config.audio_encoder
    >>> config_decoder = model.config.decoder

    >>> # Saving the model, including its configuration
    >>> model.save_pretrained("musicgen-model")

    >>> # loading model and config from pretrained folder
    >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model")
    >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config)
    ```ZmusicgenTc                    s   t  jf | d|ks&d|ks&d|kr.td|d}|d}|d}|d}|d}tj|f|| _tj|f|| _tf || _	d| _
d S )Ntext_encoderaudio_encoderdecoderzPConfig has to be initialized with text_encoder, audio_encoder and decoder configr0   T)r$   r%   
ValueErrorpopr   Z	for_modelr3   r4   r   r5   Zis_encoder_decoder)r&   r'   text_encoder_configZtext_encoder_model_typeaudio_encoder_configZaudio_encoder_model_typedecoder_configr(   r*   r+   r%      s    




zMusicgenConfig.__init__)r8   r9   r:   c                 K   s"   | f |  |  |  d|S )z
        Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder
        configurations.

        Returns:
            [`MusicgenConfig`]: An instance of a configuration object
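
        Example:

        A brief sketch mirroring the class-level example above; each sub-configuration is built with its library
        defaults:

        ```python
        >>> from transformers import T5Config, EncodecConfig, MusicgenDecoderConfig, MusicgenConfig

        >>> config = MusicgenConfig.from_sub_models_config(T5Config(), EncodecConfig(), MusicgenDecoderConfig())
        >>> config.decoder.num_codebooks
        4
        ```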
        """

        return cls(
            text_encoder=text_encoder_config.to_dict(),
            audio_encoder=audio_encoder_config.to_dict(),
            decoder=decoder_config.to_dict(),
            **kwargs,
        )

    @property
    def sampling_rate(self):
        # The sampling rate is taken from the audio encoder (codec) configuration.
        return self.audio_encoder.sampling_rate