""" LayoutLMv2 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import is_detectron2_available, logging


logger = logging.get_logger(__name__)

LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json",
    "layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json",
}

# soft dependency: detectron2 is only imported when it is available
if is_detectron2_available():
    import detectron2


class LayoutLMv2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LayoutLMv2Model`]. It is used to instantiate an
    LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LayoutLMv2
    [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`LayoutLMv2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv2Model`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
            large just in case (e.g., 1024).
        max_rel_pos (`int`, *optional*, defaults to 128):
            The maximum number of relative positions to be used in the self-attention mechanism.
        rel_pos_bins (`int`, *optional*, defaults to 32):
            The number of relative position bins to be used in the self-attention mechanism.
        fast_qkv (`bool`, *optional*, defaults to `True`):
            Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.
        max_rel_2d_pos (`int`, *optional*, defaults to 256):
            The maximum number of relative 2D positions in the self-attention mechanism.
        rel_2d_pos_bins (`int`, *optional*, defaults to 64):
            The number of 2D relative position bins in the self-attention mechanism.
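        convert_sync_batchnorm (`bool`, *optional*, defaults to `True`):
            Whether or not to convert the `BatchNorm` layers of the visual backbone to `SyncBatchNorm` layers
            when the model is trained in a distributed setting.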
        image_feature_pool_shape (`List[int]`, *optional*, defaults to [7, 7, 256]):
            The shape of the average-pooled feature map.
        coordinate_size (`int`, *optional*, defaults to 128):
            Dimension of the coordinate embeddings.
        shape_size (`int`, *optional*, defaults to 128):
            Dimension of the width and height embeddings.
        has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a relative attention bias in the self-attention mechanism.
        has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a spatial attention bias in the self-attention mechanism.
        has_visual_segment_embedding (`bool`, *optional*, defaults to `False`):
            Whether or not to add visual segment embeddings.
        detectron2_config_args (`dict`, *optional*):
            Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to [this
            file](https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py)
            for details regarding default values.

    Example:

    ```python
    >>> from transformers import LayoutLMv2Config, LayoutLMv2Model

    >>> # Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration
    >>> configuration = LayoutLMv2Config()

    >>> # Initializing a model (with random weights) from the microsoft/layoutlmv2-base-uncased style configuration
    >>> model = LayoutLMv2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
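
    >>> # Illustrative addition, not from the original docstring: the Detectron2 visual backbone
    >>> # defaults are stored as a flat dict of dotted keys; a dict passed via `detectron2_config_args`
    >>> # replaces these defaults wholesale rather than being merged into them
    >>> detectron2_args = LayoutLMv2Config.get_default_detectron2_config()
    >>> detectron2_args["MODEL.RESNETS.DEPTH"]
    101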
    ```"""

    model_type = "layoutlmv2"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        max_2d_position_embeddings=1024,
        max_rel_pos=128,
        rel_pos_bins=32,
        fast_qkv=True,
        max_rel_2d_pos=256,
        rel_2d_pos_bins=64,
        convert_sync_batchnorm=True,
        image_feature_pool_shape=[7, 7, 256],
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        has_spatial_attention_bias=True,
        has_visual_segment_embedding=False,
        detectron2_config_args=None,
        **kwargs
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.max_rel_pos = max_rel_pos
        self.rel_pos_bins = rel_pos_bins
        self.fast_qkv = fast_qkv
        self.max_rel_2d_pos = max_rel_2d_pos
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.convert_sync_batchnorm = convert_sync_batchnorm
        self.image_feature_pool_shape = image_feature_pool_shape
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.has_visual_segment_embedding = has_visual_segment_embedding
        # fall back to the default Detectron2 settings when no explicit config is passed
        self.detectron2_config_args = (
            detectron2_config_args if detectron2_config_args is not None else self.get_default_detectron2_config()
        )

    @classmethod
    def get_default_detectron2_config(self):
        # default visual-backbone settings, stored as a flat dict of dotted Detectron2 keys
        return {
            "MODEL.MASK_ON": True,
            "MODEL.PIXEL_STD": [57.375, 57.120, 58.395],
            "MODEL.BACKBONE.NAME": "build_resnet_fpn_backbone",
            "MODEL.FPN.IN_FEATURES": ["res2", "res3", "res4", "res5"],
            "MODEL.ANCHOR_GENERATOR.SIZES": [[32], [64], [128], [256], [512]],
            "MODEL.RPN.IN_FEATURES": ["p2", "p3", "p4", "p5", "p6"],
            "MODEL.RPN.PRE_NMS_TOPK_TRAIN": 2000,
            "MODEL.RPN.PRE_NMS_TOPK_TEST": 1000,
            "MODEL.RPN.POST_NMS_TOPK_TRAIN": 1000,
            "MODEL.POST_NMS_TOPK_TEST": 1000,
            "MODEL.ROI_HEADS.NAME": "StandardROIHeads",
            "MODEL.ROI_HEADS.NUM_CLASSES": 5,
            "MODEL.ROI_HEADS.IN_FEATURES": ["p2", "p3", "p4", "p5"],
            "MODEL.ROI_BOX_HEAD.NAME": "FastRCNNConvFCHead",
            "MODEL.ROI_BOX_HEAD.NUM_FC": 2,
            "MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION": 14,
            "MODEL.ROI_MASK_HEAD.NAME": "MaskRCNNConvUpsampleHead",
            "MODEL.ROI_MASK_HEAD.NUM_CONV": 4,
            "MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION": 7,
            "MODEL.RESNETS.DEPTH": 101,
            "MODEL.RESNETS.SIZES": [[32], [64], [128], [256], [512]],
            "MODEL.RESNETS.ASPECT_RATIOS": [[0.5, 1.0, 2.0]],
            "MODEL.RESNETS.OUT_FEATURES": ["res2", "res3", "res4", "res5"],
            "MODEL.RESNETS.NUM_GROUPS": 32,
            "MODEL.RESNETS.WIDTH_PER_GROUP": 8,
            "MODEL.RESNETS.STRIDE_IN_1X1": False,
        }

    def get_detectron2_config(self):
        detectron2_config = detectron2.config.get_cfg()
        for k, v in self.detectron2_config_args.items():
            attributes = k.split(".")
            to_set = detectron2_config
            # walk down the nested config tree to the parent node of the final attribute
            for attribute in attributes[:-1]:
                to_set = getattr(to_set, attribute)
            setattr(to_set, attributes[-1], v)

        return detectron2_config
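

# Usage sketch (illustrative, not part of the original module): when detectron2 is installed,
# `get_detectron2_config()` expands the flat dotted keys of `detectron2_config_args` into
# detectron2's nested `CfgNode`, e.g. the "MODEL.RESNETS.DEPTH" entry becomes cfg.MODEL.RESNETS.DEPTH:
#
#     config = LayoutLMv2Config()
#     if is_detectron2_available():
#         cfg = config.get_detectron2_config()
#         assert cfg.MODEL.RESNETS.DEPTH == 101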