import math
import warnings

from torch import Tensor
import torch


# These _no_grad_* helpers wrap the pieces that must run under
# `torch.no_grad()`, keeping the public initializers below small.
def _no_grad_uniform_(tensor, a, b):
    with torch.no_grad():
        return tensor.uniform_(a, b)


def _no_grad_normal_(tensor, mean, std):
    with torch.no_grad():
        return tensor.normal_(mean, std)


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Values are generated by drawing from a truncated uniform distribution
    # and mapping it through the inverse CDF of the normal distribution.
    def norm_cdf(x):
        # Computes the standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [2l-1, 2u-1], then map
        # through the inverse error function to get a truncated standard
        # normal.
        tensor.uniform_(2 * l - 1, 2 * u - 1)
        tensor.erfinv_()

        # Transform to the requested mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure the values are in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def _no_grad_fill_(tensor, val):
    with torch.no_grad():
        return tensor.fill_(val)


def _no_grad_zero_(tensor):
    with torch.no_grad():
        return tensor.zero_()


def calculate_gain(nonlinearity, param=None):
    r"""Return the recommended gain value for the given nonlinearity function.
    The values are as follows:

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky Relu        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
    SELU              :math:`\frac{3}{4}`
    ================= ====================================================

    .. warning::
        In order to implement `Self-Normalizing Neural Networks`_ ,
        you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
        This gives the initial weights a variance of ``1 / N``,
        which is necessary to induce a stable fixed point in the forward pass.
        In contrast, the default gain for ``SELU`` sacrifices the normalisation
        effect for more stable gradient flow in rectangular layers.

    Args:
        nonlinearity: the non-linear function (`nn.functional` name)
        param: optional parameter for the non-linear function

    Examples:
        >>> gain = nn.init.calculate_gain('leaky_relu', 0.2)  # leaky_relu with negative_slope=0.2

    .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
    ZlinearZconv1dZconv2dZconv3dZconv_transpose1dZconv_transpose2dZconv_transpose3dZsigmoidr   tanhg?Zrelur   
leaky_reluN{Gz?z$negative_slope {} not a valid numberr   Zselug      ?zUnsupported nonlinearity {})r   r   
isinstanceboolintfloat
ValueErrorformat)nonlinearityparamZ
linear_fnsZnegative_sloper   r   r   calculate_gainC   s"    !
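

# --- Illustrative sketch, not part of the original module ----------------
# A worked check of the leaky_relu gain formula documented above; the
# `_demo_calculate_gain` name is hypothetical, for illustration only.
def _demo_calculate_gain():
    # gain = sqrt(2 / (1 + negative_slope^2)) for leaky_relu
    gain = calculate_gain('leaky_relu', 0.2)
    assert abs(gain - math.sqrt(2.0 / (1 + 0.2 ** 2))) < 1e-12
    return gain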


def uniform_(tensor: Tensor, a: float = 0., b: float = 1.) -> Tensor:
    r"""Fills the input Tensor with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the lower bound of the uniform distribution
        b: the upper bound of the uniform distribution

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.uniform_(w)
    r   )r   	overrideshas_torch_function_variadichandle_torch_functionr   r   r   r   r   r   r   z   s    r   )r   r   r   r9   c                 C   s0   t j| r$t jjt| f| ||dS t| ||S )az  Fills the input Tensor with values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.normal_(w)
    r   )r   r:   r;   r<   r   r   r   r   r   r   r      s    r          r   )r   r   r   r	   r
   r9   c                 C   s   t | ||||S )a  Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
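

# --- Illustrative sketch, not part of the original module ----------------
# Every draw from trunc_normal_ must land inside [a, b], since the values
# come from mapping U(l, u) through the inverse normal CDF and clamping.
# `_demo_trunc_normal` is hypothetical, for illustration only.
def _demo_trunc_normal():
    w = torch.empty(10000)
    trunc_normal_(w, mean=0., std=1., a=-2., b=2.)
    assert float(w.min()) >= -2. and float(w.max()) <= 2.
    return w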
   r   r   r   trunc_normal_   s    r>   )r   r'   r9   c                 C   s,   t j| r"t jjt| f| |dS t| |S )zFills the input Tensor with the value :math:`\text{val}`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        val: the value to fill the tensor with

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.constant_(w, 0.3)
    r&   )r   r:   r;   r<   	constant_r(   r&   r   r   r   r?      s    r?   )r   r9   c                 C   s
   t | dS )zFills the input Tensor with the scalar value `1`.

    Args:
        tensor: an n-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.ones_(w)
    r   )r(   r*   r   r   r   ones_   s    
r@   c                 C   s   t | S )zFills the input Tensor with the scalar value `0`.

    Args:
        tensor: an n-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.zeros_(w)
    """
    return _no_grad_zero_(tensor)


def eye_(tensor):
    r"""Fills the 2-dimensional input `Tensor` with the identity
    matrix. Preserves the identity of the inputs in `Linear` layers, where as
    many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.eye_(w)
    r   ,Only tensors with 2 dimensions are supported)outrequires_grad)
ndimensionr3   r   r   eyeshaperD   r*   r   r   r   eye_   s
    
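

# --- Illustrative sketch, not part of the original module ----------------
# eye_ preserves inputs through a Linear layer: with W set to the identity,
# x @ W.t() reproduces x. `_demo_eye` is hypothetical, for illustration only.
def _demo_eye():
    w = torch.empty(3, 3)
    eye_(w)
    x = torch.randn(4, 3)
    assert torch.allclose(x @ w.t(), x)
    return w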


def dirac_(tensor, groups=1):
    r"""Fills the {3, 4, 5}-dimensional input `Tensor` with the Dirac
    delta function. Preserves the identity of the inputs in `Convolutional`
    layers, where as many input channels are preserved as possible. In case
    of groups>1, each group of channels preserves identity

    Args:
        tensor: a {3, 4, 5}-dimensional `torch.Tensor`
        groups (optional): number of groups in the conv layer (default: 1)
    Examples:
        >>> w = torch.empty(3, 16, 5, 5)
        >>> nn.init.dirac_(w)
        >>> w = torch.empty(3, 24, 5, 5)
        >>> nn.init.dirac_(w, 3)
    )         z5Only tensors with 3, 4, or 5 dimensions are supportedr   z!dim 0 must be divisible by groupsr   rI   r   rJ   )rE   r3   sizer   r   r   r)   range)r   groups
dimensionsZsizesZout_chans_per_grpZmin_dimgdr   r   r   dirac_   s2    
" rR   c                 C   sp   |   }|dk rtd| d}| d}d}|   dkrX| jdd  D ]}||9 }qJ|| }|| }||fS )Nr   zNFan in and fan out can not be computed for tensor with fewer than 2 dimensionsr   r   )Zdimr3   rL   rG   )r   rO   Znum_input_fmapsZnum_output_fmapsZreceptive_field_sizesfan_infan_outr   r   r   _calculate_fan_in_and_fan_out  s    


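

# --- Illustrative sketch, not part of the original module ----------------
# Worked example of the fan computation above for a Conv2d-shaped weight
# (out_channels=8, in_channels=4, kH=3, kW=3): the receptive field size is
# 3 * 3 = 9, so fan_in = 4 * 9 = 36 and fan_out = 8 * 9 = 72. The
# `_demo_fan` name is hypothetical, for illustration only.
def _demo_fan():
    w = torch.empty(8, 4, 3, 3)
    fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
    assert (fan_in, fan_out) == (36, 72)
    return fan_in, fan_out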


def xavier_uniform_(tensor: Tensor, gain: float = 1.) -> Tensor:
    r"""Fills the input `Tensor` with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-a, a)` where

    .. math::
        a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
    r         @)rV   r   r   r2   r   )r   rW   rT   rU   r   r	   r   r   r   xavier_uniform_/  s    rY   c                 C   s2   t | \}}|tdt||   }t| d|S )a  Fills the input `Tensor` with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal
    distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_normal_(w)
    r   r8   )rV   r   r   r2   r   )r   rW   rT   rU   r   r   r   r   xavier_normal_J  s    rZ   c                 C   sD   |  }ddg}||kr(td||t| \}}|dkr@|S |S )NrT   rU   z+Mode {} not supported, please use one of {})lowerr3   r4   rV   )r   modeZvalid_modesrT   rU   r   r   r   _calculate_correct_fand  s    r]   rT   r-   r   r	   r\   r5   c              
   C   s   t j| r&t jjt| f| |||dS d| jkr>td | S t| |}t	||}|t
| }t
d| }t   | | |W  5 Q R  S Q R X dS )a  Fills the input `Tensor` with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
    uniform distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            kaiming_uniform_,
            (tensor,),
            tensor=tensor,
            a=a,
            mode=mode,
            nonlinearity=nonlinearity)

    if 0 in tensor.shape:
        warnings.warn("Initializing zero-element tensors is a no-op")
        return tensor
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std  # uniform bounds from the standard deviation
    with torch.no_grad():
        return tensor.uniform_(-bound, bound)
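

# --- Illustrative sketch, not part of the original module ----------------
# kaiming_uniform_ with nonlinearity='relu' samples U(-bound, bound) where
# bound = gain * sqrt(3 / fan) = sqrt(2) * sqrt(3 / fan_in). The
# `_demo_kaiming_uniform` name is hypothetical, for illustration only.
def _demo_kaiming_uniform():
    w = torch.empty(100, 50)  # fan_in = 50 for a 2D weight
    kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
    bound = math.sqrt(2.0) * math.sqrt(3.0 / 50)
    assert float(w.abs().max()) <= bound + 1e-6
    return w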


def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Fills the input `Tensor` with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
    normal distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
    r   r_   N)
rG   r    r!   r]   r7   r   r   r   r   r   )r   r	   r\   r5   ra   rW   r   r   r   r   kaiming_normal_  s    




rc   c           	   	   C   s   |   dk rtd|  dkr$| S | d}|  | }| ||dd}||k r^|  tj	|\}}t
|d}| }||9 }||k r|  t   | || | | W 5 Q R X | S )a!  Fills the input `Tensor` with a (semi) orthogonal matrix, as
    described in `Exact solutions to the nonlinear dynamics of learning in deep
    linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
    at least 2 dimensions, and for tensors with more than 2 dimensions the
    trailing dimensions are flattened.

    Args:
        tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
        gain: optional scaling factor

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.orthogonal_(w)
    r   z4Only tensors with 2 or more dimensions are supportedr   r   )rE   r3   ZnumelrL   newr   Zt_r   ZlinalgZqrZdiagsignr   Zview_asZcopy_r"   )	r   rW   rowscolsZ	flattenedqrrQ   phr   r   r   orthogonal_  s&    

rk   r.   c           	   	   C   s   |   dkrtd| j\}}tt|| }t B | d| t	|D ]&}t
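

# --- Illustrative sketch, not part of the original module ----------------
# With rows <= cols, orthogonal_ leaves the rows orthonormal, so
# w @ w.t() is the identity. `_demo_orthogonal` is hypothetical,
# for illustration only.
def _demo_orthogonal():
    w = torch.empty(3, 5)
    orthogonal_(w)
    assert torch.allclose(w @ w.t(), torch.eye(3), atol=1e-5)
    return w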


def sparse_(tensor, sparsity, std=0.01):
    r"""Fills the 2D input `Tensor` as a sparse matrix, where the
    non-zero elements will be drawn from the normal distribution
    :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via
    Hessian-free optimization` - Martens, J. (2010).

    Args:
        tensor: an n-dimensional `torch.Tensor`
        sparsity: The fraction of elements in each column to be set to zero
        std: the standard deviation of the normal distribution used to generate
            the non-zero values

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.sparse_(w, sparsity=0.1)
    r   rB   r   N)rE   r3   rG   r1   r   ceilr   r   r   rM   Zrandperm)	r   Zsparsityr   rf   rg   Z	num_zerosZcol_idxZrow_indicesZzero_indicesr   r   r   sparse_  s    


rm   c                    s<    j d d  fdd}djd|_|_ |S )Nc                     s    t jddd  | |S )Nz4nn.init.{} is now deprecated in favor of nn.init.{}.r   r   )r    r!   r4   )argskwargsmethnew_nameold_namer   r   deprecated_init  s     z(_make_deprecate.<locals>.deprecated_initz
    {old_name}(...)

    .. warning::
        This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.

    See :func:`~torch.nn.init.{new_name}` for details.""".format(
        old_name=old_name, new_name=new_name)
    deprecated_init.__name__ = old_name
    return deprecated_init


# Deprecated, un-suffixed aliases: each warns once and forwards to the
# in-place variant defined above.
uniform = _make_deprecate(uniform_)
normal = _make_deprecate(normal_)
constant = _make_deprecate(constant_)
eye = _make_deprecate(eye_)
dirac = _make_deprecate(dirac_)
xavier_uniform = _make_deprecate(xavier_uniform_)
xavier_normal = _make_deprecate(xavier_normal_)
kaiming_uniform = _make_deprecate(kaiming_uniform_)
kaiming_normal = _make_deprecate(kaiming_normal_)
orthogonal = _make_deprecate(orthogonal_)
sparse = _make_deprecate(sparse_)
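

# --- Illustrative sketch, not part of the original module ----------------
# sparse_ zeroes ceil(sparsity * rows) entries in every column; the
# remaining entries keep their N(0, std^2) draws. `_demo_sparse` is
# hypothetical, for illustration only.
def _demo_sparse():
    w = torch.empty(10, 4)
    sparse_(w, sparsity=0.5)
    for col in range(4):
        # ceil(0.5 * 10) = 5 rows per column are forced to zero
        assert int((w[:, col] == 0.).sum()) >= 5
    return w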