import warnings
from typing import Callable, List, Optional

import torch
from torch import Tensor

from ..utils import _log_api_usage_once


interpolate = torch.nn.functional.interpolate


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed

    Args:
        num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
        eps (float): a value added to the denominator for numerical stability. Default: 1e-5
    """

    def __init__(self, num_features: int, eps: float = 1e-5):
        super().__init__()
        _log_api_usage_once(self)
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

    def _load_from_state_dict(
        self,
        state_dict: dict,
        prefix: str,
        local_metadata: dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ):
        # Frozen BN keeps no ``num_batches_tracked`` buffer, so drop that key
        # when loading a checkpoint saved from a regular BatchNorm2d.
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: Tensor) -> Tensor:
        # move reshapes to the beginning to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"


class ConvNormActivation(torch.nn.Sequential):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: Optional[int] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: int = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
        conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
    ) -> None:
        # Infer "same"-style padding from the kernel size and dilation when not given.
        if padding is None:
            padding = (kernel_size - 1) // 2 * dilation
        # A conv bias is redundant when a norm layer immediately follows it.
        if bias is None:
            bias = norm_layer is None

        layers = [
            conv_layer(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )
        ]
        if norm_layer is not None:
            layers.append(norm_layer(out_channels))
        if activation_layer is not None:
            params = {} if inplace is None else {"inplace": inplace}
            layers.append(activation_layer(**params))
        super().__init__(*layers)
        _log_api_usage_once(self)
        self.out_channels = out_channels

        if self.__class__ == ConvNormActivation:
            warnings.warn(
                "Don't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead."
            )


class Conv2dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution2d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default: ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: Optional[int] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: int = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv2d,
        )


class Conv3dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution3d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input video.
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default: ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: Optional[int] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: int = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv3d,
        )
f e	dejj
f dd fddZeeddd	Zeedd
dZ  ZS )SqueezeExcitationaE  
    This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
    Parameters ``activation``, and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.

    Args:
        input_channels (int): Number of channels in the input image
        squeeze_channels (int): Number of squeeze channels
        activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
        scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
    .N)input_channelssqueeze_channels
activationscale_activationr#   c                    sX   t    t|  tjd| _tj||d| _tj||d| _	| | _
| | _d S )Nr$   )r   r   r   r   rK   ZAdaptiveAvgPool2davgpoolrN   fc1fc2rX   rY   )r   rV   rW   rX   rY   r   r   r   r      s    
zSqueezeExcitation.__init__)inputr#   c                 C   s2   |  |}| |}| |}| |}| |S rR   )rZ   r[   rX   r\   rY   r   r]   r)   r   r   r   _scale   s
    



zSqueezeExcitation._scalec                 C   s   |  |}|| S rR   )r_   r^   r   r   r   r*      s    
zSqueezeExcitation.forward)r-   r0   r1   r2   r   rK   rM   ZSigmoidr3   r   rO   r   r   r_   r*   r8   r   r   r   r   rU      s   rU   c                	       sj   e Zd ZdZdejjdddfeee e	e
dejjf  e	e
dejjf  e	e eed fddZ  ZS )	MLPa  This block implements the multi-layer perceptron (MLP) module.

    Args:
        in_channels (int): Number of channels of the input
        hidden_channels (List[int]): List of the hidden channel dimensions
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer wont be used. Default: ``None``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer wont be used. Default: ``torch.nn.ReLU``
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool): Whether to use bias in the linear layer. Default ``True``
        dropout (float): The probability for the dropout layer. Default: 0.0
    NTg        .)r;   hidden_channelsrA   rB   rD   r   dropoutc                    s   |d kri nd|i}g }	|}
|d d D ]\}|	 tjj|
||d |d k	rZ|	 || |	 |f | |	 tjj|f| |}
q(|	 tjj|
|d |d |	 tjj|f| t j|	  t|  d S )NrD   r%   )r   )rF   r   rK   ZLinearZDropoutr   r   r   )r   r;   ra   rA   rB   rD   r   rb   rJ   rI   Zin_dimZ
hidden_dimr   r   r   r     s    zMLP.__init__)r-   r0   r1   r2   r   rK   rM   r3   r   r   r   rO   r7   r4   r   r8   r   r   r   r   r`     s   r`   c                       s:   e Zd ZdZee d fddZeedddZ  Z	S )PermutezThis module returns a view of the tensor input with its dimensions permuted.
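
# A minimal usage sketch (illustrative, sizes arbitrary): a two-layer
# classifier head; the last entry of ``hidden_channels`` is the output size.
#
#   mlp = MLP(784, [256, 10], dropout=0.1)
#   logits = mlp(torch.rand(32, 784))  # -> (32, 10)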


class Permute(torch.nn.Module):
    """This module returns a view of the tensor input with its dimensions permuted.

    Args:
        dims (List[int]): The desired ordering of dimensions
    """

    def __init__(self, dims: List[int]):
        super().__init__()
        self.dims = dims

    def forward(self, x: Tensor) -> Tensor:
        return torch.permute(x, self.dims)
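
# A minimal usage sketch (illustrative, shapes arbitrary): NCHW -> NHWC
# reordering as an nn.Module, convenient inside a Sequential where a bare
# function call cannot be composed.
#
#   to_nhwc = Permute([0, 2, 3, 1])
#   out = to_nhwc(torch.rand(1, 3, 4, 5))  # -> (1, 4, 5, 3)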