""" PyTorch Swin Transformer model."""

import collections.abc
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_swin import SwinConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "SwinConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/swin-tiny-patch4-window7-224"
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/swin-tiny-patch4-window7-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/swin-tiny-patch4-window7-224",
    # See all Swin models at https://huggingface.co/models?filter=swin
]


@dataclass
class SwinEncoderOutput(ModelOutput):
    """
    Swin encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    Nlast_hidden_statehidden_states
attentionsreshaped_hidden_states)__name__
__module____qualname____doc__r   torchFloatTensor__annotations__r   r   r   r   r    r'   r'   e/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/transformers/models/swin/modeling_swin.pyr   C   s
   
r   c                   @   st   e Zd ZU dZdZejed< dZe	ej ed< dZ
e	eej  ed< dZe	eej  ed< dZe	eej  ed< dS )SwinModelOutputaT  
    Swin model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    Nr   pooler_outputr   r   r   )r    r!   r"   r#   r   r$   r%   r&   r*   r   r   r   r   r   r'   r'   r'   r(   r)   d   s   
r)   c                   @   s   e Zd ZU dZdZeej ed< dZ	ejed< dZ
eeej  ed< dZeeej  ed< dZeeej  ed< edd	 ZdS )
SwinMaskedImageModelingOutputa  
    Swin masked image model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling (MIM) loss.
        reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Reconstructed pixel values.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    Nlossreconstructionr   r   r   c                 C   s   t dt | jS )Nzlogits attribute is deprecated and will be removed in version 5 of Transformers. Please use the reconstruction attribute to retrieve the final output instead.)warningswarnFutureWarningr-   selfr'   r'   r(   logits   s
    z$SwinMaskedImageModelingOutput.logits)r    r!   r"   r#   r,   r   r$   r%   r&   r-   r   r   r   r   propertyr3   r'   r'   r'   r(   r+      s   
r+   c                   @   st   e Zd ZU dZdZeej ed< dZ	ejed< dZ
eeej  ed< dZeeej  ed< dZeeej  ed< dS )SwinImageClassifierOutputa  
    Swin outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
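
    Example (an illustrative sketch; `model` and `inputs` are assumed to come from
    `SwinForImageClassification` and an image processor, they are not defined here):

    ```python
    >>> outputs = model(**inputs)  # outputs is a SwinImageClassifierOutput
    >>> predicted_class = outputs.logits.argmax(-1).item()  # logits: (batch_size, config.num_labels)
    ```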
    Nr,   r3   r   r   r   )r    r!   r"   r#   r,   r   r$   r%   r&   r3   r   r   r   r   r'   r'   r'   r(   r5      s   
r5   c                 C   sR   | j \}}}}| ||| ||| ||} | dddddd d|||}|S )z2
    Partitions the given input into windows.
    r   r   r
            shapeviewpermute
contiguous)input_featurewindow_size
batch_sizeheightwidthnum_channelswindowsr'   r'   r(   window_partition   s         $rF   c                 C   sN   | j d }| d|| || |||} | dddddd d|||} | S )z?
    Merges windows to produce higher resolution features.
    r9   r   r   r
   r6   r7   r8   r:   )rE   r@   rB   rC   rD   r'   r'   r(   window_reverse   s    
$rG   c                       sH   e Zd ZdZd	 fdd	Zd
eej eej e	ej
 dddZ  ZS )SwinEmbeddingszW
    Construct the patch and position embeddings. Optionally, also the mask token.
    Fc                    s   t    t|| _| jj}| jj| _|r@tt	
dd|jnd | _|jrjtt	
d|d |j| _nd | _t|j| _t|j| _d S )Nr   )super__init__SwinPatchEmbeddingspatch_embeddingsnum_patches	grid_size
patch_gridr   	Parameterr$   zeros	embed_dim
mask_tokenZuse_absolute_embeddingsposition_embeddings	LayerNormnormDropouthidden_dropout_probdropout)r2   configuse_mask_tokenrM   	__class__r'   r(   rJ      s    


 zSwinEmbeddings.__init__N)pixel_valuesbool_masked_posreturnc           
      C   s   |  |\}}| |}| \}}}|d k	rb| j||d}|d|}	|d|	  ||	  }| jd k	rv|| j }| |}||fS )Nr9         ?)	rL   rV   sizerS   expand	unsqueezeZtype_asrT   rY   )
r2   r^   r_   
embeddingsoutput_dimensionsrA   Zseq_len_Zmask_tokensmaskr'   r'   r(   forward  s    



zSwinEmbeddings.forward)F)N)r    r!   r"   r#   rJ   r   r$   r%   
BoolTensorr   Tensorri   __classcell__r'   r'   r\   r(   rH      s     rH   c                       sL   e Zd ZdZ fddZdd Zeej e	ej
e	e f dddZ  ZS )	rK   z
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    c                    s   t    |j|j }}|j|j }}t|tjj	r8|n||f}t|tjj	rR|n||f}|d |d  |d |d   }|| _|| _|| _|| _
|d |d  |d |d  f| _tj||||d| _d S )Nr   r   )kernel_sizeZstride)rI   rJ   
image_size
patch_sizerD   rR   
isinstancecollectionsabcIterablerM   rN   r   Conv2d
projection)r2   rZ   rn   ro   rD   hidden_sizerM   r\   r'   r(   rJ      s    
 "zSwinPatchEmbeddings.__init__c                 C   s   || j d  dkr<d| j d || j d   f}tj||}|| j d  dkr|ddd| j d || j d   f}tj||}|S )Nr   r   )ro   r   
functionalpad)r2   r^   rB   rC   
pad_valuesr'   r'   r(   	maybe_pad/  s     zSwinPatchEmbeddings.maybe_pad)r^   r`   c                 C   sh   |j \}}}}|| jkr td| |||}| |}|j \}}}}||f}|ddd}||fS )NzeMake sure that the channel dimension of the pixel values match with the one set in the configuration.r6   r   )r;   rD   
ValueErrorrz   ru   flatten	transpose)r2   r^   rg   rD   rB   rC   re   rf   r'   r'   r(   ri   8  s    

zSwinPatchEmbeddings.forward)r    r!   r"   r#   rJ   rz   r   r$   r%   r   rk   intri   rl   r'   r'   r\   r(   rK     s   	rK   c                       s^   e Zd ZdZejfee eejdd fddZ	dd Z
ejeeef ejdd	d
Z  ZS )SwinPatchMerginga'  
    Patch Merging Layer.

    Args:
        input_resolution (`Tuple[int]`):
            Resolution of input feature.
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    N)input_resolutiondim
norm_layerr`   c                    sB   t    || _|| _tjd| d| dd| _|d| | _d S )Nr7   r6   Fbias)rI   rJ   r   r   r   Linear	reductionrV   )r2   r   r   r   r\   r'   r(   rJ   U  s
    
zSwinPatchMerging.__init__c                 C   sF   |d dkp|d dk}|rBddd|d d|d f}t j||}|S )Nr6   r   r   )r   rw   rx   )r2   r?   rB   rC   Z
should_padry   r'   r'   r(   rz   \  s
    zSwinPatchMerging.maybe_pad)r?   input_dimensionsr`   c                 C   s   |\}}|j \}}}|||||}| |||}|d d dd ddd dd d f }|d d dd ddd dd d f }	|d d dd ddd dd d f }
|d d dd ddd dd d f }t||	|
|gd}||dd| }| |}| |}|S )Nr   r6   r   r9   r7   )r;   r<   rz   r$   catrV   r   )r2   r?   r   rB   rC   rA   r   rD   Zinput_feature_0Zinput_feature_1Zinput_feature_2Zinput_feature_3r'   r'   r(   ri   d  s    $$$$

zSwinPatchMerging.forward)r    r!   r"   r#   r   rU   r   r~   ModulerJ   rz   r$   rk   ri   rl   r'   r'   r\   r(   r   H  s   $r           F)input	drop_probtrainingr`   c                 C   sd   |dks|s| S d| }| j d fd| jd   }|tj|| j| jd }|  | || }|S )aF  


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class SwinDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class SwinSelfAttention(nn.Module):
    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.window_size = (
            window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
        )

        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
        )

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index)

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        batch_size, dim, num_channels = hidden_states.shape
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        relative_position_bias = relative_position_bias.view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the SwinLayer forward)
            mask_shape = attention_mask.shape[0]
            attention_scores = attention_scores.view(
                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
            )
            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class SwinSelfOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class SwinAttention(nn.Module):
    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        self.self = SwinSelfAttention(config, dim, num_heads, window_size)
        self.output = SwinSelfOutput(config, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class SwinIntermediate(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class SwinOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class SwinLayer(nn.Module):
    def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.shift_size = shift_size
        self.window_size = config.window_size
        self.input_resolution = input_resolution
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = SwinAttention(config, dim, num_heads, window_size=self.window_size)
        self.drop_path = SwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = SwinIntermediate(config, dim)
        self.output = SwinOutput(config, dim)

    def set_shift_and_window_size(self, input_resolution):
        if min(input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(input_resolution)

    def get_attn_mask(self, height, width, dtype):
        if self.shift_size > 0:
            # calculate attention mask for shifted window multi-head self-attention (SW-MSA)
            img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
            height_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            width_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    img_mask[:, height_slice, width_slice, :] = count
                    count += 1

            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        return attn_mask

    def maybe_pad(self, hidden_states, height, width):
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
        hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if not always_partition:
            self.set_shift_and_window_size(input_dimensions)

        height, width = input_dimensions
        batch_size, _, channels = hidden_states.size()
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states)
        hidden_states = hidden_states.view(batch_size, height, width, channels)

        # pad hidden_states to multiples of window size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
        _, height_pad, width_pad, _ = hidden_states.shape

        # cyclic shift
        if self.shift_size > 0:
            shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_hidden_states = hidden_states

        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
        hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
        attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
        if attn_mask is not None:
            attn_mask = attn_mask.to(hidden_states_windows.device)

        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
        )

        attention_output = attention_outputs[0]

        attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
        shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)

        # reverse cyclic shift
        if self.shift_size > 0:
            attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            attention_windows = shifted_windows

        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :].contiguous()

        attention_windows = attention_windows.view(batch_size, height * width, channels)

        hidden_states = shortcut + self.drop_path(attention_windows)

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = hidden_states + self.output(layer_output)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs


class SwinStage(nn.Module):
    def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        self.blocks = nn.ModuleList(
            [
                SwinLayer(
                    config=config,
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    shift_size=0 if (i % 2 == 0) else config.window_size // 2,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        height, width = input_dimensions
        for i, layer_module in enumerate(self.blocks):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            layer_outputs = layer_module(
                hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
            )

            hidden_states = layer_outputs[0]

        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, height_downsampled, width_downsampled)
            hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
        else:
            output_dimensions = (height, width, height, width)

        stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs


class SwinEncoder(nn.Module):
    def __init__(self, config, grid_size):
        super().__init__()
        self.num_layers = len(config.depths)
        self.config = config
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.layers = nn.ModuleList(
            [
                SwinStage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=SwinPatchMerging if (i_layer < self.num_layers - 1) else None,
                )
                for i_layer in range(self.num_layers)
            ]
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        always_partition: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, SwinEncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange b (h w) c -> b c h w
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask
                )
            else:
                layer_outputs = layer_module(
                    hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
                )

            hidden_states = layer_outputs[0]
            hidden_states_before_downsampling = layer_outputs[1]
            output_dimensions = layer_outputs[2]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])

            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange b (h w) c -> b c h w, using the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange b (h w) c -> b c h w
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[3:]

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return SwinEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )


class SwinPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SwinConfig
    base_model_prefix = "swin"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, SwinEncoder):
            module.gradient_checkpointing = value


SWIN_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`SwinConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

SWIN_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
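

# How the token grid evolves through the stages (an illustrative sketch, assuming the
# swin-tiny defaults of image_size=224, patch_size=4, embed_dim=96 and four stages;
# `_demo_stage_geometry` is a hypothetical helper, not part of the public API):
def _demo_stage_geometry():
    height = width = 224 // 4  # 56x56 patches after the patch embedding
    dim = 96
    for stage in range(4):
        print(f"stage {stage + 1}: {height}x{width} tokens, {dim} channels")
        if stage < 3:  # SwinPatchMerging runs after every stage except the last
            height, width, dim = (height + 1) // 2, (width + 1) // 2, dim * 2
    # stage 4 ends at 7x7 tokens with 768 channels, matching _EXPECTED_OUTPUT_SHAPE [1, 49, 768]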


@add_start_docstrings(
    "The bare Swin Model transformer outputting raw hidden-states without any specific head on top.",
    SWIN_START_DOCSTRING,
)
class SwinModel(SwinPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
        super().__init__(config)
        self.config = config
        self.num_layers = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))

        self.embeddings = SwinEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = SwinEncoder(config, self.embeddings.patch_grid)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SwinModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SwinModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed; 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, len(self.config.depths))

        embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)

        pooled_output = None
        if self.pooler is not None:
            pooled_output = self.pooler(sequence_output.transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]

            return output

        return SwinModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    """Swin Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """,
    SWIN_START_DOCSTRING,
)
class SwinForMaskedImageModeling(SwinPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.swin = SwinModel(config, add_pooling_layer=False, use_mask_token=True)

        num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SwinMaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, SwinForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-base-simmim-window6-192")
        >>> model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 192, 192]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.swin(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return SwinMaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )
    Swin Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    c                       sv   e Zd Z fddZeeeeee	e
ddeej eej eej ee ee ee eeef dddZ  ZS )	SwinForImageClassificationc                    sP   t  | |j| _t|| _|jdkr:t| jj|jnt | _	| 
  d S r   )rI   rJ   
num_labelsr  r  r   r   r  r   
classifierr  )r2   rZ   r\   r'   r(   rJ     s    
"z#SwinForImageClassification.__init__)r  r  r  r  N)r^   r   labelsr   r   r   r`   c                 C   sn  |dk	r|n| j j}| j|||||d}|d }| |}	d}
|dk	r&| j jdkr| jdkrhd| j _n4| jdkr|jtjks|jtj	krd| j _nd| j _| j jdkrt
 }| jdkr||	 | }
n
||	|}
nN| j jdkrt }||	d| j|d}
n| j jdkr&t }||	|}
|sV|	f|dd  }|
dk	rR|
f| S |S t|
|	|j|j|jd	S )
a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Nr  r   Z
regressionZsingle_label_classificationZmulti_label_classificationr9   r6   )r,   r3   r   r   r   )rZ   r  r  r*  Zproblem_typer)  r   r$   longr~   r	   Zsqueezer   r<   r   r5   r   r   r   )r2   r^   r   r+  r   r   r   r   r  r3   r,   Zloss_fctr   r'   r'   r(   ri     sN    



"


z"SwinForImageClassification.forward)NNNNNN)r    r!   r"   rJ   r   r   r   _IMAGE_CLASS_CHECKPOINTr5   r"  _IMAGE_CLASS_EXPECTED_OUTPUTr   r$   r%   Z
LongTensorr   r   r   ri   rl   r'   r'   r\   r(   r(    s.         
r(  zM
    Swin backbone, to be used with frameworks like DETR and MaskFormer.
    c                       sP   e Zd Zed fddZdd Zd
ejee	 ee	 ee	 e
ddd	Z  ZS )SwinBackbonerZ   c                    s   t    t     jg fddtt jD  | _t | _	t
 | j	j| _i }t| j| jD ]\}}t|||< qjt|| _|   d S )Nc                    s   g | ]}t  jd |  qS )r6   )r~   rR   r   r0  r'   r(   r     s     z)SwinBackbone.__init__.<locals>.<listcomp>)rI   rJ   Z_init_backbonerR   r   r   r   r  rH   re   r   rO   r  zipZ_out_featuresr   r   rU   Z
ModuleDicthidden_states_normsr  )r2   rZ   r2  stagerD   r\   r0  r(   rJ     s    &
zSwinBackbone.__init__c                 C   s   | j jS r   r  r1   r'   r'   r(   r    s    z!SwinBackbone.get_input_embeddingsN)r^   r   r   r   r`   c              
   C   s<  |dk	r|n| j j}|dk	r |n| j j}|dk	r4|n| j j}| |\}}| j||d|ddddd}|j}d}	t| j|D ]\}
}|
| j	krz|j
\}}}}|dddd }|||| |}| j|
 |}|||||}|dddd }|	|f7 }	qz|s |	f}|r||jf7 }|S t|	|r0|jnd|jd	S )
aK  
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
        >>> model = AutoBackbone.from_pretrained(
        ...     "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 7, 7]
        ```NT)r   r   r   r   r   r   r'   r   r6   r
   r   )feature_mapsr   r   )rZ   r  r   r   re   r  r   r1  Zstage_namesZout_featuresr;   r=   r>   r<   r2  r   r   r   )r2   r^   r   r   r   r  r   r   r   r4  r3  Zhidden_staterA   rD   rB   rC   r   r'   r'   r(   ri      sH     
zSwinBackbone.forward)NNN)r    r!   r"   r   rJ   r  r$   rk   r   r   r   ri   rl   r'   r'   r\   r(   r/    s      r/  )r   F)Nr#   collections.abcrq   r   r.   dataclassesr   typingr   r   r   r$   Ztorch.utils.checkpointr   Ztorch.nnr   r   r	   Zactivationsr   Zmodeling_outputsr   Zmodeling_utilsr   Zpytorch_utilsr   r   r   r  r   r   r   r   r   r   Zutils.backbone_utilsr   Zconfiguration_swinr   Z
get_loggerr    loggerr"  r!  r#  r-  r.  Z"SWIN_PRETRAINED_MODEL_ARCHIVE_LISTr   r)   r+   r5   rF   rG   r   rH   rK   r   rk   r   r   r   r   r   r   r   r   r   r   r   r   r  ZSWIN_START_DOCSTRINGr   r  r$  r(  r/  r'   r'   r'   r(   <module>   s    

 #,#
*/7d&{;ga	hW
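

# An end-to-end usage sketch (illustrative; downloading the documented
# "microsoft/swin-tiny-patch4-window7-224" checkpoint requires network access, and
# `_demo_swin_model_usage` is a hypothetical helper, not part of the public API):
def _demo_swin_model_usage():
    from transformers import AutoImageProcessor, SwinModel

    model = SwinModel.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
    processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

    image = torch.randint(0, 256, (224, 224, 3), dtype=torch.uint8).numpy()  # stand-in image
    pixel_values = processor(images=image, return_tensors="pt").pixel_values  # (1, 3, 224, 224)

    outputs = model(pixel_values)
    # 224/4 = 56 initial grid, halved three times -> 7x7 = 49 tokens with 96 * 2**3 = 768 channels
    assert outputs.last_hidden_state.shape == (1, 49, 768)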