r""" Functional interface (quantized)."""

from typing import List, Optional
import warnings

import torch
from torch import Tensor
from torch.nn.modules.utils import _pair, _triple
from torch.nn.quantized.modules.utils import _pair_from_first
from torch.jit.annotations import BroadcastingList2


def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
               count_include_pad=True, divisor_override=None):
    r"""
    Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
    :math:`sH \times sW` steps. The number of output features is equal to the number of
    input planes.

    .. note:: The input quantization parameters propagate to the output.

    See :class:`~torch.nn.quantized.AvgPool2d` for details and output shape.

    Args:
        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
        kernel_size: size of the pooling region. Can be a single number or a
          tuple `(kH, kW)`
        stride: stride of the pooling operation. Can be a single number or a
          tuple `(sH, sW)`. Default: :attr:`kernel_size`
        padding: implicit zero paddings on both sides of the input. Can be a
          single number or a tuple `(padH, padW)`. Default: 0
        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
            to compute the output shape. Default: ``False``
        count_include_pad: when True, will include the zero-padding in the
            averaging calculation. Default: ``True``
        divisor_override: if specified, it will be used as divisor, otherwise
             size of the pooling region will be used. Default: None
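
    Example (a minimal sketch; the tensor contents, scale, and zero_point
    are illustrative only, mirroring the conv examples below)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 4, 4), 1.0, 0, torch.quint8)
        >>> qF.avg_pool2d(qx, kernel_size=2, stride=2).shape
        torch.Size([1, 3, 2, 2])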
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
    return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding,
                                          ceil_mode, count_include_pad,
                                          divisor_override)


def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
               count_include_pad=True, divisor_override=None):
    r"""
    Applies 3D average-pooling operation in :math:`kD \times kH \times kW` regions by step size
    :math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
    input planes.

    .. note:: The input quantization parameters propagate to the output.

    Args:
        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
        kernel_size: size of the pooling region. Can be a single number or a
          tuple `(kD, kH, kW)`
        stride: stride of the pooling operation. Can be a single number or a
          tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
        padding: implicit zero paddings on both sides of the input. Can be a
          single number or a tuple `(padD, padH, padW)`. Default: 0
        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
            to compute the output shape. Default: ``False``
        count_include_pad: when True, will include the zero-padding in the
            averaging calculation. Default: ``True``
        divisor_override: if specified, it will be used as divisor, otherwise
             size of the pooling region will be used. Default: None
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
    return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding,
                                          ceil_mode, count_include_pad,
                                          divisor_override)


def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
    r"""
    Applies a 2D adaptive average pooling over a quantized input signal composed
    of several quantized input planes.

    .. note:: The input quantization parameters propagate to the output.

    See :class:`~torch.nn.quantized.AdaptiveAvgPool2d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
                     double-integer tuple)
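
    Example (a minimal sketch; shapes and quantization parameters are
    illustrative only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 8, 8), 1.0, 0, torch.quint8)
        >>> qF.adaptive_avg_pool2d(qx, (2, 2)).shape
        torch.Size([1, 3, 2, 2])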
    """
    if not input.is_quantized:
        raise ValueError(
            "Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!")
    return torch.nn.functional.adaptive_avg_pool2d(input, output_size)


def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
    r"""
    Applies a 3D adaptive average pooling over a quantized input signal composed
    of several quantized input planes.

    .. note:: The input quantization parameters propagate to the output.

    See :class:`~torch.nn.quantized.AdaptiveAvgPool3d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
                     double-integer tuple)
    """
    if not input.is_quantized:
        raise ValueError(
            "Input to 'quantized.functional.adaptive_avg_pool3d' must be quantized!")
    return torch.nn.functional.adaptive_avg_pool3d(input, output_size)


def conv1d(input, weight, bias,
           stride=1, padding=0, dilation=1, groups=1,
           padding_mode='zeros',
           scale=1.0, zero_point=0,
           dtype=torch.quint8):
    r"""
    Applies a 1D convolution over a quantized 1D input composed of several input
    planes.

    See :class:`~torch.nn.quantized.Conv1d` for details and output shape.

    Args:
        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , iW)`
        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
        stride: the stride of the convolving kernel. Can be a single number or a
          tuple `(sW,)`. Default: 1
        padding: implicit paddings on both sides of the input. Can be a
          single number or a tuple `(padW,)`. Default: 0
        dilation: the spacing between kernel elements. Can be a single number or
          a tuple `(dW,)`. Default: 1
        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
          number of groups. Default: 1
        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
        scale: quantization scale for the output. Default: 1.0
        zero_point: quantization zero_point for the output. Default: 0
        dtype: quantization data type to use. Default: ``torch.quint8``

    Examples::

        >>> from torch.nn.quantized import functional as qF
        >>> filters = torch.randn(33, 16, 3, dtype=torch.float)
        >>> inputs = torch.randn(20, 16, 50, dtype=torch.float)
        >>> bias = torch.randn(33, dtype=torch.float)
        >>>
        >>> scale, zero_point = 1.0, 0
        >>> dtype_inputs = torch.quint8
        >>> dtype_filters = torch.qint8
        >>>
        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
        >>> qF.conv1d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
    """
    if padding_mode != 'zeros':
        raise NotImplementedError("Only zero-padding is supported!")
    if input.dtype != torch.quint8:
        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
    if weight.dtype != torch.qint8:
        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
    if input.ndim != 3:
        raise ValueError("Input shape must be `(N, C, L)`!")
    stride = _pair_from_first(stride)
    padding = _pair_from_first(padding)
    dilation = _pair_from_first(dilation)

    packed_params = torch.ops.quantized.conv1d_prepack(
        weight, bias, stride, padding, dilation, groups)
    return torch.ops.quantized.conv1d(input, packed_params, scale, zero_point)


def conv2d(input, weight, bias,
           stride=1, padding=0, dilation=1, groups=1,
           padding_mode='zeros',
           scale=1.0, zero_point=0,
           dtype=torch.quint8):
    r"""
    Applies a 2D convolution over a quantized 2D input composed of several input
    planes.

    See :class:`~torch.nn.quantized.Conv2d` for details and output shape.

    Args:
        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
        stride: the stride of the convolving kernel. Can be a single number or a
          tuple `(sH, sW)`. Default: 1
        padding: implicit paddings on both sides of the input. Can be a
          single number or a tuple `(padH, padW)`. Default: 0
        dilation: the spacing between kernel elements. Can be a single number or
          a tuple `(dH, dW)`. Default: 1
        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
          number of groups. Default: 1
        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
        scale: quantization scale for the output. Default: 1.0
        zero_point: quantization zero_point for the output. Default: 0
        dtype: quantization data type to use. Default: ``torch.quint8``

    Examples::

        >>> from torch.nn.quantized import functional as qF
        >>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
        >>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
        >>> bias = torch.randn(8, dtype=torch.float)
        >>>
        >>> scale, zero_point = 1.0, 0
        >>> dtype_inputs = torch.quint8
        >>> dtype_filters = torch.qint8
        >>>
        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
        >>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
    """
    if padding_mode != 'zeros':
        raise NotImplementedError("Only zero-padding is supported!")
    if input.dtype != torch.quint8:
        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
    if weight.dtype != torch.qint8:
        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
    if input.ndim != 4:
        raise ValueError("Input shape must be `(N, C, H, W)`!")
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)

    packed_params = torch.ops.quantized.conv2d_prepack(
        weight, bias, stride, padding, dilation, groups)
    return torch.ops.quantized.conv2d(input, packed_params, scale, zero_point)


def conv3d(input, weight, bias,
           stride=1, padding=0, dilation=1, groups=1,
           padding_mode='zeros',
           scale=1.0, zero_point=0,
           dtype=torch.quint8):
    r"""
    Applies a 3D convolution over a quantized 3D input composed of several input
    planes.

    See :class:`~torch.nn.quantized.Conv3d` for details and output shape.

    Args:
        input: quantized input tensor of shape
          :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
        weight: quantized filters of shape
          :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
        bias: **non-quantized** bias tensor of shape
          :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
        stride: the stride of the convolving kernel. Can be a single number or a
          tuple `(sD, sH, sW)`. Default: 1
        padding: implicit paddings on both sides of the input. Can be a
          single number or a tuple `(padD, padH, padW)`. Default: 0
        dilation: the spacing between kernel elements. Can be a single number or
          a tuple `(dD, dH, dW)`. Default: 1
        groups: split input into groups, :math:`\text{in\_channels}` should be
          divisible by the number of groups. Default: 1
        padding_mode: the padding mode to use. Only "zeros" is supported for
          quantized convolution at the moment. Default: "zeros"
        scale: quantization scale for the output. Default: 1.0
        zero_point: quantization zero_point for the output. Default: 0
        dtype: quantization data type to use. Default: ``torch.quint8``

    Examples::

        >>> from torch.nn.quantized import functional as qF
        >>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
        >>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
        >>> bias = torch.randn(8, dtype=torch.float)
        >>>
        >>> scale, zero_point = 1.0, 0
        >>> dtype_inputs = torch.quint8
        >>> dtype_filters = torch.qint8
        >>>
        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
        >>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
    """
    if padding_mode != 'zeros':
        raise NotImplementedError("Only zero-padding is supported!")
    if input.dtype != torch.quint8:
        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
    if weight.dtype != torch.qint8:
        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
    if input.ndim != 5:
        raise ValueError("Input shape must be `(N, C, D, H, W)`!")
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)

    packed_params = torch.ops.quantized.conv3d_prepack(
        weight, bias, stride, padding, dilation, groups)
    return torch.ops.quantized.conv3d(input, packed_params, scale, zero_point)


def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    r"""Down/up samples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`

    See :func:`torch.nn.functional.interpolate` for implementation details.

    The input dimensions are interpreted in the form:
    `mini-batch x channels x [optional depth] x [optional height] x width`.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D/3D input is supported for quantized inputs

    .. note:: Only the following modes are supported for the quantized inputs:

        - `bilinear`
        - `nearest`

    Args:
        input (Tensor): the input tensor
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
            output spatial size.
        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str): algorithm used for upsampling:
            ``'nearest'`` | ``'bilinear'``
        align_corners (bool, optional): Geometrically, we consider the pixels of the
            input and output as squares rather than points.
            If set to ``True``, the input and output tensors are aligned by the
            center points of their corner pixels, preserving the values at the corner pixels.
            If set to ``False``, the input and output tensors are aligned by the corner
            points of their corner pixels, and the interpolation uses edge value padding
            for out-of-boundary values, making this operation *independent* of input size
            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
            is ``'bilinear'``.
            Default: ``False``
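
    Example (a minimal sketch; shapes and quantization parameters are
    illustrative only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 4, 4), 1.0, 0, torch.quint8)
        >>> qF.interpolate(qx, scale_factor=2, mode='nearest').shape
        torch.Size([1, 3, 8, 8])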
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.interpolate' must be quantized!")
    return torch.nn.functional.interpolate(input, size, scale_factor, mode,
                                           align_corners)


def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None,
           scale: Optional[float] = None, zero_point: Optional[int] = None) -> Tensor:
    r"""
    Applies a linear transformation to the incoming quantized data:
    :math:`y = xA^T + b`.
    See :class:`~torch.nn.quantized.Linear`

    .. note::

      Current implementation packs weights on every call, which has penalty on performance.
      If you want to avoid the overhead, use :class:`~torch.nn.quantized.Linear`.

    Args:
      input (Tensor): Quantized input of type `torch.quint8`
      weight (Tensor): Quantized weight of type `torch.qint8`
      bias (Tensor): None or fp32 bias of type `torch.float`
      scale (double): output scale. If None, derived from the input scale
      zero_point (long): output zero point. If None, derived from the input zero_point

    Shape:
        - Input: :math:`(N, *, in\_features)` where `*` means any number of
          additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
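
    Example (a minimal sketch; shapes and quantization parameters are
    illustrative, mirroring the conv examples above)::

        >>> from torch.nn.quantized import functional as qF
        >>> q_input = torch.quantize_per_tensor(torch.randn(2, 3), 1.0, 0, torch.quint8)
        >>> q_weight = torch.quantize_per_tensor(torch.randn(4, 3), 1.0, 0, torch.qint8)
        >>> bias = torch.randn(4, dtype=torch.float)
        >>> qF.linear(q_input, q_weight, bias, scale=1.0, zero_point=0).shape
        torch.Size([2, 4])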
    N)Zq_scaleZq_zero_pointr   r)   r*   Zlinear_prepacklinear)r   r-   r.   r1   r2   Z_packed_paramsr   r   r   r>   J  s    r>   c              	   C   sB   |rt d|dkr&tjtt g }tjjj| ||||||dS )zApplies a 1D max pooling over a quantized input signal composed of
    several quantized input planes.

    .. note:: The input quantization parameters are propagated to the output.

    See :class:`~torch.nn.quantized.MaxPool1d` for details.
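
    Example (a minimal sketch; shapes and quantization parameters are
    illustrative only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(1, 3, 8), 1.0, 0, torch.quint8)
        >>> qF.max_pool1d(qx, kernel_size=2, stride=2).shape
        torch.Size([1, 3, 4])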
    &return_indices is not yet implemented!Nr   return_indices)	r$   r   jitannotater   intr   r   
max_pool1dr   r   r   r   r/   r   rA   r   r   r   rE   m  s    	  rE   c              	   C   sB   |rt d|dkr&tjtt g }tjjj| ||||||dS )zApplies a 2D max pooling over a quantized input signal composed of
    several quantized input planes.

    .. note:: The input quantization parameters are propagated to the output.

    See :class:`~torch.nn.quantized.MaxPool2d` for details.
    """
    if return_indices:
        raise NotImplementedError("return_indices is not yet implemented!")
    if stride is None:
        stride = torch.jit.annotate(List[int], [])
    return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
                                          dilation, ceil_mode=ceil_mode,
                                          return_indices=return_indices)


def celu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
    r"""celu(input, scale, zero_point, alpha=1.) -> Tensor

    Applies the quantized CELU function element-wise.

    .. math::
        \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x / \alpha) - 1))

    Args:
        input: quantized input
        alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
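
    Example (a minimal sketch; the quantization parameters are illustrative
    only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 2), 0.1, 128, torch.quint8)
        >>> y = qF.celu(qx, scale=0.1, zero_point=128)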
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.celu' must be quantized!")
    return torch.ops.quantized.celu(input, scale, zero_point, alpha)


def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False,
               scale: Optional[float] = None, zero_point: Optional[int] = None):
    r"""
    Quantized version of
    ``leaky_relu(input, negative_slope=0.01, inplace=False, scale=None, zero_point=None)``.

    Applies element-wise,
    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`

    Args:
        input: Quantized input
        negative_slope: The slope of the negative input
        inplace: Inplace modification of the input tensor
        scale, zero_point: Scale and zero point of the output tensor.

    See :class:`~torch.nn.LeakyReLU` for more details.
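
    Example (a minimal sketch; the quantization parameters are illustrative
    only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 2), 0.1, 128, torch.quint8)
        >>> y = qF.leaky_relu(qx, negative_slope=0.01)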
    NzCannot rescale with `inplace`)r1   r2   r%   )out)
AssertionErrorr   Z_empty_affine_quantizedshaperD   r%   _C_nn
leaky_reluZleaky_relu_)r   rL   rM   r1   r2   outputresultr   r   r   rS     s       rS         )r   min_valmax_valrM   r   c                 C   s6   | j std|r$tjj| ||S tjj| ||S )zLThis is the quantized version of :func:`~torch.nn.functional.hardtanh`.
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
    if inplace:
        return torch._C._nn.hardtanh_(input, min_val, max_val)
    return torch._C._nn.hardtanh(input, min_val, max_val)


def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor:
    r"""This is the quantized version of :func:`~torch.nn.functional.hardswish`.

    Args:
        input: quantized input
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
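
    Example (a minimal sketch; the quantization parameters are illustrative
    only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 2), 0.1, 128, torch.quint8)
        >>> y = qF.hardswish(qx, scale=0.1, zero_point=128)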
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.hardswish' must be quantized!")
    return torch._ops.ops.quantized.hardswish(input, scale, zero_point)


def threshold(input: Tensor, threshold: float, value: float) -> Tensor:
    r"""Applies the quantized version of the threshold function element-wise:

    .. math::
        x = \begin{cases}
                x & \text{if~} x > \text{threshold} \\
                \text{value} & \text{otherwise}
            \end{cases}

    See :class:`~torch.nn.Threshold` for more details.
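
    Example (a minimal sketch; the quantization parameters are illustrative
    only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 2), 0.1, 128, torch.quint8)
        >>> y = qF.threshold(qx, 0.0, -1.0)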
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.threshold' must be quantized!")
    if threshold is None:
        raise ValueError("Input to 'threshold' must be specified!")
    if value is None:
        raise ValueError("Input to 'value' must be specified!")
    return torch._ops.ops.quantized.threshold(input, threshold, value)


def elu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
    r"""This is the quantized version of :func:`~torch.nn.functional.elu`.

    Args:
        input: quantized input
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
        alpha: the alpha constant
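
    Example (a minimal sketch; the quantization parameters are illustrative
    only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 2), 0.1, 128, torch.quint8)
        >>> y = qF.elu(qx, scale=0.1, zero_point=128, alpha=1.)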
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.elu' must be quantized!")
    return torch.ops.quantized.elu(input, scale, zero_point, alpha)


def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
    r"""This is the quantized version of :func:`~torch.nn.functional.hardsigmoid`.
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
    if inplace:
        return torch._C._nn.hardsigmoid_(input)
    return torch._C._nn.hardsigmoid(input)


def clamp(input: Tensor, min_: float, max_: float) -> Tensor:
    r"""clamp(input, min\_, max\_) -> Tensor

    Applies the clamp function element-wise.
    See :class:`~torch.nn.quantized.clamp` for more details.

    Args:
        input: quantized input
        min_: minimum value for clamping
        max_: maximum value for clamping
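
    Example (a minimal sketch; the quantization parameters are illustrative
    only)::

        >>> from torch.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 2), 0.1, 128, torch.quint8)
        >>> y = qF.clamp(qx, -1.0, 1.0)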
    """
    if not input.is_quantized:
        raise ValueError("Input to 'quantized.clamp' must be quantized!")
    return torch.clamp(input, min_, max_)


def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    r"""Upsamples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`

    .. warning::
        This function is deprecated in favor of
        :func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent with ``nn.quantized.functional.interpolate(...)``.

    See :func:`torch.nn.functional.interpolate` for implementation details.

    The input dimensions are interpreted in the form:
    `mini-batch x channels x [optional depth] x [optional height] x width`.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D input is supported for quantized inputs

    .. note:: Only the following modes are supported for the quantized inputs:

        - `bilinear`
        - `nearest`

    Args:
        input (Tensor): quantized input tensor
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
            output spatial size.
        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to be an integer.
        mode (string): algorithm used for upsampling:
            ``'nearest'`` | ``'bilinear'``
        align_corners (bool, optional): Geometrically, we consider the pixels of the
            input and output as squares rather than points.
            If set to ``True``, the input and output tensors are aligned by the
            center points of their corner pixels, preserving the values at the corner pixels.
            If set to ``False``, the input and output tensors are aligned by the corner
            points of their corner pixels, and the interpolation uses edge value padding
            for out-of-boundary values, making this operation *independent* of input size
            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
            is ``'bilinear'``.
            Default: ``False``

    .. warning::
        With ``align_corners = True``, the linearly interpolating modes
        (`bilinear`) don't proportionally align the
        output and input pixels, and thus the output values can depend on the
        input size. This was the default behavior for these modes up to version
        0.3.1. Since then, the default behavior is ``align_corners = False``.
        See :class:`~torch.nn.Upsample` for concrete examples on how this
        affects the outputs.
    """
    # DeprecationWarning is ignored by default
    warnings.warn("nn.quantized.functional.upsample is deprecated. "
                  "Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode, align_corners)


def upsample_bilinear(input, size=None, scale_factor=None):
    r"""Upsamples the input, using bilinear upsampling.

    .. warning::
        This function is deprecated in favor of
        :func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent with
        ``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D inputs are supported

    Args:
        input (Tensor): quantized input
        size (int or Tuple[int, int]): output spatial size.
        scale_factor (int or Tuple[int, int]): multiplier for spatial size
    """
    # DeprecationWarning is ignored by default
    warnings.warn("nn.quantized.functional.upsample_bilinear is deprecated. "
                  "Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)


def upsample_nearest(input, size=None, scale_factor=None):
    r"""Upsamples the input, using nearest neighbours' pixel values.

    .. warning::
        This function is deprecated in favor of
        :func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent with ``nn.quantized.functional.interpolate(..., mode='nearest')``.

    .. note:: The input quantization parameters propagate to the output.

    .. note:: Only 2D inputs are supported

    Args:
        input (Tensor): quantized input
        size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
            size.
        scale_factor (int): multiplier for spatial size. Has to be an integer.
    """
    # DeprecationWarning is ignored by default
    warnings.warn("nn.quantized.functional.upsample_nearest is deprecated. "
                  "Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode='nearest')