""" PyTorch Swinv2 Transformer model."""

import collections.abc
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_swinv2 import Swinv2Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "Swinv2Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/swinv2-tiny-patch4-window8-256"
_EXPECTED_OUTPUT_SHAPE = [1, 64, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/swinv2-tiny-patch4-window8-256"
_IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"

SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/swinv2-tiny-patch4-window8-256",
    # See all Swinv2 models at https://huggingface.co/models?filter=swinv2
]

@dataclass
class Swinv2EncoderOutput(ModelOutput):
    """
    Swinv2 encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class Swinv2ModelOutput(ModelOutput):
    """
    Swinv2 model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class Swinv2MaskedImageModelingOutput(ModelOutput):
    """
    Swinv2 masked image model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling (MLM) loss.
        reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Reconstructed pixel values.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    reconstruction: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None

    @property
    def logits(self):
        warnings.warn(
            "logits attribute is deprecated and will be removed in version 5 of Transformers."
            " Please use the reconstruction attribute to retrieve the final output instead.",
            FutureWarning,
        )
        return self.reconstruction

@dataclass
class Swinv2ImageClassifierOutput(ModelOutput):
    """
    Swinv2 outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None

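# Illustrative sketch (not part of the original file): the four output dataclasses above are plain
# `ModelOutput` containers, so they support both attribute and tuple-style access. The shapes below
# assume a hypothetical batch of two images run through the tiny checkpoint (64 final patches,
# hidden size 768); they are example values only.
def _example_output_dataclass_usage():
    last_hidden_state = torch.randn(2, 64, 768)
    pooler_output = torch.randn(2, 768)
    outputs = Swinv2ModelOutput(last_hidden_state=last_hidden_state, pooler_output=pooler_output)
    # ModelOutput subclasses can be indexed like tuples; index 0 is the first populated field.
    assert outputs[0] is outputs.last_hidden_state
    return outputs
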
def window_partition(input_feature, window_size):
    """
    Partitions the given input into windows.
    """
    batch_size, height, width, num_channels = input_feature.shape
    input_feature = input_feature.view(
        batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
    )
    windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
    return windows

def window_reverse(windows, window_size, height, width):
    """
    Merges windows to produce higher resolution features.
    """
    num_channels = windows.shape[-1]
    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
    windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
    return windows

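# Illustrative sketch (not part of the original file): `window_partition` and `window_reverse` are
# exact inverses whenever `height` and `width` are multiples of `window_size`, which the callers
# below guarantee via padding. The sizes here are arbitrary example values.
def _example_window_roundtrip():
    batch_size, height, width, num_channels, window_size = 2, 8, 8, 96, 4
    features = torch.randn(batch_size, height, width, num_channels)
    windows = window_partition(features, window_size)
    # (8 // 4) * (8 // 4) = 4 windows per image, so 2 * 4 = 8 windows in total.
    assert windows.shape == (8, window_size, window_size, num_channels)
    restored = window_reverse(windows, window_size, height, width)
    assert torch.equal(features, restored)
    return restored
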
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class Swinv2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)

class Swinv2Embeddings(nn.Module):
    """
    Construct the patch and position embeddings. Optionally, also the mask token.
    """

    def __init__(self, config, use_mask_token=False):
        super().__init__()

        self.patch_embeddings = Swinv2PatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.patch_grid = self.patch_embeddings.grid_size
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None

        if config.use_absolute_embeddings:
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
        else:
            self.position_embeddings = None

        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
    ) -> Tuple[torch.Tensor]:
        embeddings, output_dimensions = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)
        batch_size, seq_len, _ = embeddings.size()

        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        if self.position_embeddings is not None:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings, output_dimensions

class Swinv2PatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.embed_dim
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def maybe_pad(self, pixel_values, height, width):
        if width % self.patch_size[1] != 0:
            pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
            pixel_values = nn.functional.pad(pixel_values, pad_values)
        if height % self.patch_size[0] != 0:
            pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
            pixel_values = nn.functional.pad(pixel_values, pad_values)
        return pixel_values

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # pad the input to be divisible by self.patch_size, if needed
        pixel_values = self.maybe_pad(pixel_values, height, width)
        embeddings = self.projection(pixel_values)
        _, _, height, width = embeddings.shape
        output_dimensions = (height, width)
        embeddings = embeddings.flatten(2).transpose(1, 2)

        return embeddings, output_dimensions

class Swinv2PatchMerging(nn.Module):
    """
    Patch Merging Layer.

    Args:
        input_resolution (`Tuple[int]`):
            Resolution of input feature.
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(2 * dim)

    def maybe_pad(self, input_feature, height, width):
        should_pad = (height % 2 == 1) or (width % 2 == 1)
        if should_pad:
            pad_values = (0, 0, 0, width % 2, 0, height % 2)
            input_feature = nn.functional.pad(input_feature, pad_values)

        return input_feature

    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
        height, width = input_dimensions
        # `dim` is height * width
        batch_size, dim, num_channels = input_feature.shape

        input_feature = input_feature.view(batch_size, height, width, num_channels)
        # pad input to be divisible by width and height, if needed
        input_feature = self.maybe_pad(input_feature, height, width)
        # each of the four slices below is [batch_size, height/2, width/2, num_channels]
        input_feature_0 = input_feature[:, 0::2, 0::2, :]
        input_feature_1 = input_feature[:, 1::2, 0::2, :]
        input_feature_2 = input_feature[:, 0::2, 1::2, :]
        input_feature_3 = input_feature[:, 1::2, 1::2, :]
        # [batch_size, height/2 * width/2, 4*num_channels]
        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)

        input_feature = self.reduction(input_feature)
        input_feature = self.norm(input_feature)

        return input_feature


class Swinv2SelfAttention(nn.Module):
    def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.window_size = (
            window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
        )
        self.pretrained_window_size = pretrained_window_size
        self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        # mlp to generate continuous relative position bias
        self.continuous_position_bias_mlp = nn.Sequential(
            nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False)
        )

        # get relative_coords_table
        relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
        relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
        relative_coords_table = (
            torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij"))
            .permute(1, 2, 0)
            .contiguous()
            .unsqueeze(0)
        )  # [1, 2*window_height - 1, 2*window_width - 1, 2]
        if pretrained_window_size[0] > 0:
            relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
            relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
        else:
            relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
            relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
        relative_coords_table *= 8  # normalize to -8, 8
        relative_coords_table = (
            torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8)
        )
        self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index, persistent=False)

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        batch_size, dim, num_channels = hidden_states.shape
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # cosine attention
        attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize(
            key_layer, dim=-1
        ).transpose(-2, -1)
        logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp()
        attention_scores = attention_scores * logit_scale
        relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view(
            -1, self.num_attention_heads
        )
        # [window_height*window_width, window_height*window_width, num_attention_heads]
        relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )
        # [num_attention_heads, window_height*window_width, window_height*window_width]
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the model forward() function)
            mask_shape = attention_mask.shape[0]
            attention_scores = attention_scores.view(
                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
            )
            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class Swinv2SelfOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class Swinv2Attention(nn.Module):
    def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
        super().__init__()
        self.self = Swinv2SelfAttention(
            config=config,
            dim=dim,
            num_heads=num_heads,
            window_size=window_size,
            pretrained_window_size=pretrained_window_size
            if isinstance(pretrained_window_size, collections.abc.Iterable)
            else (pretrained_window_size, pretrained_window_size),
        )
        self.output = Swinv2SelfOutput(config, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class Swinv2Intermediate(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class Swinv2Output(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class Swinv2Layer(nn.Module):
    def __init__(self, config, dim, input_resolution, num_heads, shift_size=0, pretrained_window_size=0):
        super().__init__()
        self.input_resolution = input_resolution
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.shift_size = shift_size
        self.window_size = config.window_size
        self.set_shift_and_window_size(input_resolution)
        self.attention = Swinv2Attention(
            config=config,
            dim=dim,
            num_heads=num_heads,
            window_size=self.window_size,
            pretrained_window_size=pretrained_window_size
            if isinstance(pretrained_window_size, collections.abc.Iterable)
            else (pretrained_window_size, pretrained_window_size),
        )
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.drop_path = Swinv2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.intermediate = Swinv2Intermediate(config, dim)
        self.output = Swinv2Output(config, dim)
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)

    def set_shift_and_window_size(self, input_resolution):
        target_window_size = (
            self.window_size
            if isinstance(self.window_size, collections.abc.Iterable)
            else (self.window_size, self.window_size)
        )
        target_shift_size = (
            self.shift_size
            if isinstance(self.shift_size, collections.abc.Iterable)
            else (self.shift_size, self.shift_size)
        )
        window_dim = input_resolution[0].item() if torch.is_tensor(input_resolution[0]) else input_resolution[0]
        self.window_size = window_dim if window_dim <= target_window_size[0] else target_window_size[0]
        self.shift_size = (
            0
            if input_resolution
            <= (
                self.window_size
                if isinstance(self.window_size, collections.abc.Iterable)
                else (self.window_size, self.window_size)
            )
            else target_shift_size[0]
        )

    def get_attn_mask(self, height, width, dtype):
        if self.shift_size > 0:
            # calculate attention mask for shifted window multihead self-attention
            img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
            height_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            width_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    img_mask[:, height_slice, width_slice, :] = count
                    count += 1

            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        return attn_mask

    def maybe_pad(self, hidden_states, height, width):
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
        hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if not always_partition:
            self.set_shift_and_window_size(input_dimensions)

        height, width = input_dimensions
        batch_size, _, channels = hidden_states.size()
        shortcut = hidden_states

        # pad hidden_states to multiples of window size
        hidden_states = hidden_states.view(batch_size, height, width, channels)
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
        _, height_pad, width_pad, _ = hidden_states.shape
        # cyclic shift
        if self.shift_size > 0:
            shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_hidden_states = hidden_states

        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
        hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
        attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
        if attn_mask is not None:
            attn_mask = attn_mask.to(hidden_states_windows.device)

        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
        )

        attention_output = attention_outputs[0]

        attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
        shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)

        # reverse cyclic shift
        if self.shift_size > 0:
            attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            attention_windows = shifted_windows

        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :].contiguous()

        attention_windows = attention_windows.view(batch_size, height * width, channels)
        hidden_states = self.layernorm_before(attention_windows)
        hidden_states = shortcut + self.drop_path(hidden_states)

        layer_output = self.intermediate(hidden_states)
        layer_output = self.output(layer_output)
        layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output))

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs


class Swinv2Stage(nn.Module):
    def __init__(
        self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0
    ):
        super().__init__()
        self.config = config
        self.dim = dim
        self.blocks = nn.ModuleList(
            [
                Swinv2Layer(
                    config=config,
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    shift_size=0 if (i % 2 == 0) else config.window_size // 2,
                    pretrained_window_size=pretrained_window_size,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        height, width = input_dimensions
        for i, layer_module in enumerate(self.blocks):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            layer_outputs = layer_module(
                hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
            )

            hidden_states = layer_outputs[0]

        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, height_downsampled, width_downsampled)
            hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
        else:
            output_dimensions = (height, width, height, width)

        stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs


class Swinv2Encoder(nn.Module):
    def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)):
        super().__init__()
        self.num_layers = len(config.depths)
        self.config = config
        if self.config.pretrained_window_sizes is not None:
            pretrained_window_sizes = config.pretrained_window_sizes
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.layers = nn.ModuleList(
            [
                Swinv2Stage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=Swinv2PatchMerging if (i_layer < self.num_layers - 1) else None,
                    pretrained_window_size=pretrained_window_sizes[i_layer],
                )
                for i_layer in range(self.num_layers)
            ]
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        always_partition: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, Swinv2EncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange b (h w) c -> b c h w
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask
                )
            else:
                layer_outputs = layer_module(
                    hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
                )

            hidden_states = layer_outputs[0]
            hidden_states_before_downsampling = layer_outputs[1]
            output_dimensions = layer_outputs[2]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])

            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange b (h w) c -> b c h w, using the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange b (h w) c -> b c h w
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[3:]

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return Swinv2EncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )

class Swinv2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Swinv2Config
    base_model_prefix = "swinv2"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, Swinv2Encoder):
            module.gradient_checkpointing = value

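# Illustrative sketch (not part of the original file): each `Swinv2Stage` above halves the spatial
# grid via `Swinv2PatchMerging` and doubles the channel dimension, so for an assumed tiny config
# (embed_dim=96, four stages) the per-stage hidden sizes are 96, 192, 384 and 768.
def _example_encoder_stage_geometry():
    embed_dim, num_stages = 96, 4
    hidden_sizes = [embed_dim * 2**stage for stage in range(num_stages)]
    assert hidden_sizes == [96, 192, 384, 768]
    return hidden_sizes
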
SWINV2_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`Swinv2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

SWINV2_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

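# Illustrative sketch (not part of the original file): with the 256x256 image size and patch size 4
# used by the `microsoft/swinv2-tiny-patch4-window8-256` checkpoint, `Swinv2PatchEmbeddings` yields
# a (256 / 4) * (256 / 4) = 4096-token sequence. The config values here are assumed defaults, not
# taken from the original file.
def _example_patch_embedding_shapes():
    config = Swinv2Config(image_size=256, patch_size=4, num_channels=3, embed_dim=96)
    patch_embeddings = Swinv2PatchEmbeddings(config)
    pixel_values = torch.randn(1, 3, 256, 256)
    embeddings, output_dimensions = patch_embeddings(pixel_values)
    assert embeddings.shape == (1, 64 * 64, 96)
    assert output_dimensions == (64, 64)
    return embeddings
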
@add_start_docstrings(
    "The bare Swinv2 Model transformer outputting raw hidden-states without any specific head on top.",
    SWINV2_START_DOCSTRING,
)
class Swinv2Model(Swinv2PreTrainedModel):
    def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
        super().__init__(config)
        self.config = config
        self.num_layers = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))

        self.embeddings = Swinv2Embeddings(config, use_mask_token=use_mask_token)
        self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layers[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Swinv2ModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Swinv2ModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed; 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, len(self.config.depths))

        embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)

        pooled_output = None
        if self.pooler is not None:
            pooled_output = self.pooler(sequence_output.transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]
            return output

        return Swinv2ModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    """Swinv2 Model with a decoder on top for masked image modeling, as proposed in
    [SimMIM](https://arxiv.org/abs/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """,
    SWINV2_START_DOCSTRING,
)
class Swinv2ForMaskedImageModeling(Swinv2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.swinv2 = Swinv2Model(config, add_pooling_layer=False, use_mask_token=True)

        num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Swinv2MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Swinv2MaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, Swinv2ForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
        >>> model = Swinv2ForMaskedImageModeling.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 256, 256]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.swinv2(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return Swinv2MaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    """
    Swinv2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state
    of the [CLS] token) e.g. for ImageNet.
    """,
    SWINV2_START_DOCSTRING,
)
class Swinv2ForImageClassification(Swinv2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.swinv2 = Swinv2Model(config)

        # Classifier head
        self.classifier = (
            nn.Linear(self.swinv2.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=Swinv2ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Swinv2ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.swinv2(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return Swinv2ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )
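

# Illustrative usage sketch (not part of the original file): running a randomly initialized default
# configuration end to end. Loading the pretrained `microsoft/swinv2-tiny-patch4-window8-256`
# weights via `from_pretrained` would follow the same call pattern.
def _example_forward_pass():
    config = Swinv2Config()  # defaults are assumed to match the tiny 256x256 architecture
    model = Swinv2Model(config)
    model.eval()
    pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
    with torch.no_grad():
        outputs = model(pixel_values)
    # 64 = ((256 / 4) / 2**3) ** 2 patches at the final stage, hidden size 96 * 2**3 = 768.
    assert outputs.last_hidden_state.shape == (1, 64, 768)
    return outputs.pooler_output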