r"""Quantized convolution modules."""

from typing import Optional, List, TypeVar

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.qat as nniqat

from torch._ops import ops
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.quantized.modules.utils import _quantize_weight, WeightedQuantizedModule
from torch.nn.utils import fuse_conv_bn_weights

_SUPPORTED_PADDING = {
    'zeros',
    'reflect'
}


def _reverse_repeat_padding(padding: List[int]) -> List[int]:
    _reversed_padding_repeated_twice: List[int] = []
    N = len(padding)
    for idx in range(N):
        for _ in range(2):
            _reversed_padding_repeated_twice.append(padding[N - idx - 1])
    return _reversed_padding_repeated_twice
Zdd Zdd Zdd Zdd Z fddZ	e
jjdd Z fddZe
jjdd Zdd Zdd Zed'dd Zed!d" Zed#d$ Z  ZS )(_ConvNdr   r   Tr   Nc                 C   s   t d S NNotImplementedError)selfin_channelsout_channelskernel_sizestrider   dilationgroupsbiaspadding_modedevicedtyper   r   r   __init__!   s    z_ConvNd.__init__)r   c                    s4  ||d}t t|   ||	 dkr,td||	 dkr@td|| _|| _|| _|| _|| _|| _	|| _
|| _|	| _|tkrtd||| _| j
r||| j g}n||| j g}tj|t| fddtjddd	 | D }|
rtj|fd
tjidd	 | D nd }| || d| _d| _d S )Nr*   r+   r   z'in_channels must be divisible by groupsz(out_channels must be divisible by groupsz;'padding_mode' {} is not supported by quantized convolutionr   )scale
zero_pointr+   c                 S   s   i | ]\}}|d kr||qS r+   r   .0kvr   r   r   
<dictcomp>I   s       z!_ConvNd._init.<locals>.<dictcomp>r+   c                 S   s   i | ]\}}|d kr||qS r0   r   r1   r   r   r   r5   L   s       g      ?)superr   r,   
ValueErrorr"   r#   r$   r%   r   r&   
transposedoutput_paddingr'   _SUPPORTED_PADDINGformatr)   torchZ_empty_affine_quantizedlistqint8itemsr   floatset_weight_biasr.   r/   )r!   r"   r#   r$   r%   r   r&   r8   r9   r'   r(   r)   r*   r+   factory_kwargsZweight_shapeqweight
bias_float	__class__r   r   _init'   sN    

  z_ConvNd._initc                 C   s   t d S r   r   )r!   rC   rD   r   r   r   rA   R   s    z_ConvNd.set_weight_biasc                 C   s   t d S r   r   r!   r   r   r   r(   U   s    z_ConvNd.biasc                 C   s   t d S r   r   rH   r   r   r   _weight_biasX   s    z_ConvNd._weight_biasc                 C   s   d}| j dt| j  kr |d7 }| jdt| j kr<|d7 }| jdt| j krX|d7 }| jdkrj|d7 }|  d kr~|d	7 }|jf | jS )
Nzq{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}, scale={scale}, zero_point={zero_point})r   z, padding={padding})r   z, dilation={dilation}z!, output_padding={output_padding}r   z, groups={groups}z, bias=False)r   r   r&   r9   r'   r(   r;   __dict__)r!   sr   r   r   
extra_repr[   s    
z_ConvNd.extra_reprc                    sd   t t| ||| |  \}}|||d < |||d < t| j||d < t| j||d < d S )Nweightr(   r.   r/   )r6   r   _save_to_state_dictrI   r<   Ztensorr.   r/   )r!   ZdestinationprefixZ	keep_varswbrE   r   r   rN   v   s    z_ConvNd._save_to_state_dictc                 C   sH   |   \}}| j| j| j| j| j| j| j| j| j	| j
||| j| j| jfS r   )rI   r"   r#   r$   r%   r   r&   r8   r9   r'   r)   r.   r/   trainingr!   rP   rQ   r   r   r   __getstate__~   s"    z_ConvNd.__getstate__c              	      s   |  ||d  ||d   ||d  ||d  t||d  | _||d  t||d  | _||d  tt| |||d||| d S )NrM   r(   r.   r/   F)	rA   popr@   r.   intr/   r6   r   _load_from_state_dict)r!   Z
state_dictrO   Zlocal_metadatastrictZmissing_keysZunexpected_keysZ
error_msgsrE   r   r   rW      s&    
 

     z_ConvNd._load_from_state_dictc                 C   s   |d | _ |d | _|d | _|d | _|d | _|d | _|d | _|d | _|d	 | _|d
 | _	| 
|d |d  |d | _|d | _|d | _d S )Nr   r   r                     	   
               )r"   r#   r$   r%   r   r&   r8   r9   r'   r)   rA   r.   r/   rR   )r!   stater   r   r   __setstate__   s    











z_ConvNd.__setstate__c                 C   s6   t | t | }tjj| |  }|| |S r   )type__new__r<   nnModuler,   rT   rf   )r!   memoZnew_instancere   r   r   r   __deepcopy__   s
    
z_ConvNd.__deepcopy__c                 C   s
   |  i S r   )rl   rH   r   r   r   __copy__   s    z_ConvNd.__copy__c              
   C   s   |dkr|j  }||j |jtjks0tdt|j |}| |j|j	|j
|j|j|j|j|jdk	|j	}|||j |dks|jtjkr|S | \}}t||_t||_|S dS )z/Creates a qconv object and returns it.
        N*Weight observer must have a dtype of qint8)qconfigrM   r+   r<   r>   AssertionErrorr
   r@   r"   r#   r$   r%   r   r&   r'   r(   r)   rA   calculate_qparamsr.   rV   r/   )clsmodactivation_post_processweight_post_processrC   qconv	act_scaleact_zpr   r   r   	get_qconv   s,    

    

z_ConvNd.get_qconvc                 C   s   t |drlt|| jkrLt|j|j|jj|jj|jj	|jj|jj\|_|_t |ds^t
d|j}|j}n~t|| jkst
d| j d | jj d tt| t |dst
dt |dsd n|j}t|| jkr|d	 }|j }| |||S )
Nweight_fake_quantrt   z,Input QAT module must have observer attached nnq..from_float only works for z	 but got:ro   -Input float module must have qconfig defined.r   )hasattrrg   _NNIQAT_CONV_BN_MODULEr   rM   r(   ZbnZrunning_meanZrunning_varZepsrp   rz   rt   _FLOAT_MODULE__name__str_NNI_CONV_RELU_MODULEro   ry   )rr   rs   ru   rt   r   r   r   
from_float   sL    
     
 
z_ConvNd.from_floatc                 C   sj   | |j |j|j|j|j|j|j|jdk	|j|j	j
|j	jd}| }|||j t||_t||_|S a  Create a (fbgemm/qnnpack) quantized module from a reference quantized module
        Args:
            ref_module (Module): a reference quantized  module, either produced by torch.ao.quantization
                          utilities or provided by the user
            output_scale (float): scale for output Tensor
            output_zero_point (int): zero point for output Tensor
        Nr-   )r"   r#   r$   r%   r   r&   r'   r(   r)   rM   r*   r+   get_quantized_weightrA   r@   r.   rV   r/   )rr   Z	ref_qconvoutput_scaleoutput_zero_pointrv   rC   r   r   r   from_reference   s$    	

z_ConvNd.from_reference)r   r   r   r   Tr   NN)r   NN)N)r   
__module____qualname__r,   rG   rA   r(   rI   rL   rN   r<   jitexportrT   rW   rf   rl   rm   classmethodry   staticmethodr   r   __classcell__r   r   rE   r   r       sB                

   +


class Conv1d(_ConvNd):
    r"""Applies a 1D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv1d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.


    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv1d` for other attributes.

    Examples::

        >>> m = nn.quantized.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 100)
        >>> # quantize input to quint8
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0,
        ...                                     dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE = nn.Conv1d
    _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn1d
    _NNI_CONV_RELU_MODULE = nni.ConvReLU1d

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: _size_1_t,
                 stride: _size_1_t = 1,
                 padding: _size_1_t = 0,
                 dilation: _size_1_t = 1,
                 groups: int = 1,
                 bias: bool = True,
                 padding_mode: str = 'zeros',
                 device=None,
                 dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = padding if isinstance(padding, str) else _single(padding)
        dilation = _single(dilation)

        # Subclasses of _ConvNd need to call _init rather than __init__.
        super(Conv1d, self)._init(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConv1d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == 'zeros':
            self._packed_params = torch.ops.quantized.conv1d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups)
        else:
            self._packed_params = torch.ops.quantized.conv1d_prepack(
                w, b, self.stride, _pair(0), self.dilation, self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv1d_unpack(self._packed_params)
        return w, b

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Using len(shape) instead of ndim for TorchScript compatibility.
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return ops.quantized.conv1d(input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        return _ConvNd.from_float(cls, mod)
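
# When padding_mode is not 'zeros', forward() above pads the quantized input
# explicitly, and the parameters were prepacked with zero padding (see
# set_weight_bias), so the padding is applied exactly once. Illustrative trace,
# assuming self.padding == (2,) and padding_mode == 'reflect':
#
#     pad = _reverse_repeat_padding(self.padding[:1])   # -> [2, 2]
#     input = F.pad(input, pad, mode=self.padding_mode)
#     out = ops.quantized.conv1d(input, self._packed_params, self.scale, self.zero_point)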
jZd fdd	Zd	d
 Zejeej ddddZdd Zdd Zdd Zdd Zedd Z  ZS )Conv2daZ  Applies a 2D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv2d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.


    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv2d` for other attributes.

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.quantized.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> # quantize input to quint8
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE = nn.Conv2d
    _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn2d
    _NNI_CONV_RELU_MODULE = nni.ConvReLU2d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__.
        super(Conv2d, self)._init(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConv2d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == 'zeros':
            self._packed_params = torch.ops.quantized.conv2d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups)
        else:
            self._packed_params = torch.ops.quantized.conv2d_prepack(
                w, b, self.stride, _pair(0), self.dilation, self.groups)

    def _weight_bias(self):
        return self._packed_params.unpack()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Using len(shape) instead of ndim for TorchScript compatibility.
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return ops.quantized.conv2d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        return _ConvNd.from_float(cls, mod)
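
# The serialization helpers in _ConvNd above store the weight in its regular
# (unpacked) QTensor form, so a state dict can be loaded into a freshly
# constructed module elsewhere. A minimal sketch, assuming `m` is the quantized
# Conv2d from the docstring example above:
#
#     sd = m.state_dict()                # holds 'weight', 'bias', 'scale', 'zero_point'
#     m2 = Conv2d(16, 33, 3, stride=2)   # same constructor arguments
#     m2.load_state_dict(sd)             # re-packs the weight via set_weight_bias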
jZd fdd	Zd	d
 Zejeej ddddZdd Zdd Zdd Zdd Zedd Z  ZS )Conv3dar  Applies a 3D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv3d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.


    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv3d` for other attributes.

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.quantized.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
        >>> input = torch.randn(20, 16, 56, 56, 56)
        >>> # quantize input to quint8
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE = nn.Conv3d
    _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn3d
    _NNI_CONV_RELU_MODULE = nni.ConvReLU3d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__.
        super(Conv3d, self)._init(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _triple(0), groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConv3d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == 'zeros':
            self._packed_params = torch.ops.quantized.conv3d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups)
        else:
            self._packed_params = torch.ops.quantized.conv3d_prepack(
                w, b, self.stride, _triple(0), self.dilation, self.groups)

    def _weight_bias(self):
        return self._packed_params.unpack()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Using len(shape) instead of ndim for TorchScript compatibility.
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return ops.quantized.conv3d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        return _ConvNd.from_float(cls, mod)


# === Transposed Convolutions ===
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
ed	d
 Z  ZS )_ConvTransposeNdNc                    sP   |dkrt d| jj||d}tt| j|||||||||	|
|f| d S )Nr   z-Only "zeros" padding mode is supported for {}r-   )r7   r;   rF   r   r6   r   rG   )r!   r"   r#   r$   r%   r   r&   r8   r9   r'   r(   r)   r*   r+   rB   rE   r   r   r,   >  s$    

        z_ConvTransposeNd.__init__)r$   r&   r   r   c                 C   sN   t jtt g }tt|D ]*}|| || d  ||  }|| q|S r   )r<   r   Zannotater   rV   r   r   r   )r!   r$   r&   r   resZkdxr   r   r   r   _input_paddingK  s
    z_ConvTransposeNd._input_paddingc                 C   s   d| j  d | jj  }t|| jks,t|t|ds>td|j }||j |jtj	ksftdt
|j |}| |j|j|j|j|j|j|j|jdk	|j|j
}|||j t|dr|jjtjkr|S |j \}}t||_t||_|S dS )zCreates a quantized module from a float module or qparams_dict.
        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        r{   r|   ro   r}   rn   Nrt   )r   r   rg   rp   r~   ro   rM   r+   r<   r>   r
   r@   r"   r#   r$   r%   r   r9   r'   r(   r&   r)   rA   rt   rq   r.   rV   r/   )rr   rs   msgru   rC   rv   rw   rx   r   r   r   r   R  s:    

     

z_ConvTransposeNd.from_floatc                 C   sn   | |j |j|j|j|j|j|j|jdk	|j|j	|j
j|j
jd}| }|||j t||_t||_|S r   )r"   r#   r$   r%   r   r9   r'   r(   r&   r)   rM   r*   r+   r   rA   r@   r.   rV   r/   )rr   
ref_qconvtr   r   rv   rC   r   r   r   r   q  s&    	

z_ConvTransposeNd.from_reference)NN)r   r   r   r   r   r,   r   rV   r   r   r   r   r   r   r   r   rE   r   r   :  s      $
r   c                	       sv   e Zd ZdZejZd fdd	Zd	d
 Ze	j
ee	j
 ddddZdd Zdd Zdd Zdd Zedd Z  ZS )ConvTranspose1da  Applies a 1D transposed convolution operator over an input image
    composed of several input planes.
    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose1d`.

    .. note:: Currently only the QNNPACK engine is implemented.
        Please, set the `torch.backends.quantized.engine = 'qnnpack'`

    For special notes, please, see :class:`~torch.nn.quantized.Conv1d`

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose2d` for other attributes.

    Examples::

        >>> torch.backends.quantized.engine = 'qnnpack'
        >>> # With square kernels and equal stride
        >>> m = nnq.ConvTranspose1d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can also be specified as an argument
        >>> input = torch.randn(1, 16, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv1d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12])
    """

    _FLOAT_MODULE = nn.ConvTranspose1d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True,
                 dilation=1, padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        output_padding = _single(output_padding)
        super(ConvTranspose1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConvTranspose1d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose1d_prepack(
            w, b, self.stride, self.padding, self.output_padding, self.dilation,
            self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv_transpose1d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Using len(shape) instead of ndim for TorchScript compatibility.
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        return torch.ops.quantized.conv_transpose1d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
 Ze	j
ee	j
 ddddZdd Zdd Zdd Zdd Zedd Z  ZS )ConvTranspose2da  Applies a 2D transposed convolution operator over an input image
    composed of several input planes.
    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose2d`.

    For special notes, please see :class:`~torch.nn.quantized.Conv2d`

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose2d` for other attributes.

    Examples::

        >>> # QNNPACK or FBGEMM as backend
        >>> torch.backends.quantized.engine = 'qnnpack'
        >>> # With square kernels and equal stride
        >>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can also be specified as an argument
        >>> input = torch.randn(1, 16, 12, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])
    """

    _FLOAT_MODULE = nn.ConvTranspose2d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True,
                 dilation=1, padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        output_padding = _pair(output_padding)
        super(ConvTranspose2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConvTranspose2d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose2d_prepack(
            w, b, self.stride, self.padding, self.output_padding, self.dilation,
            self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv2d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Using len(shape) instead of ndim for TorchScript compatibility.
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        return ops.quantized.conv_transpose2d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
 Ze	j
ee	j
 ddddZdd Zdd Zdd Zdd Zedd Z  ZS )ConvTranspose3da8  Applies a 3D transposed convolution operator over an input image
    composed of several input planes.
    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose3d`.

    .. note:: Currently only the FBGEMM engine is implemented.
        Please set `torch.backends.quantized.engine = 'fbgemm'`

    For special notes, please see :class:`~torch.nn.quantized.Conv3d`

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose3d` for other attributes.

    Examples::

        >>> torch.backends.quantized.engine = 'fbgemm'
        >>> # With cubic kernels and equal stride
        >>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-cubic kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
        >>> input = torch.randn(20, 16, 50, 100, 100)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can also be specified as an argument
        >>> input = torch.randn(1, 16, 12, 12, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12, 12])
    r   r   Tr   Nc                    s^   ||d}t |}t |}t |}t |	}	t |}tt| j||||||	d||||
f| d S r   )r	   r6   r   r,   r   rE   r   r   r,   i  s*    

         zConvTranspose3d.__init__c                 C   s   dS )NZQuantizedConvTranpose3dr   rH   r   r   r   r   w  s    zConvTranspose3d._get_namer   c              	   C   s*   t jj||| j| j| j| j| j| _	d S r   )
r<   r   r   Zconv_transpose3d_prepackr%   r   r9   r&   r'   r   rS   r   r   r   rA   z  s         zConvTranspose3d.set_weight_biasc                 C   s   t jj| j\}}||fS r   )r<   r   r   Zconv3d_unpackr   rS   r   r   r   rI     s    zConvTranspose3d._weight_biasc                 C   s   |   \}}|S r   r   r   r   r   r   rM     s    zConvTranspose3d.weightc                 C   s   |   \}}|S r   r   r   r   r   r   r(     s    zConvTranspose3d.biasc                 C   s.   t |jdkrtdtj|| j| j| jS )Nr[   z&Input shape must be `(N, C, T, H, W)`!)	r   r   r7   r   r   Zconv_transpose3dr   r.   r/   r   r   r   r   r     s       zConvTranspose3d.forwardc                 C   s   t | |||S r   r   r   r   r   r   r     s    zConvTranspose3d.from_reference)	r   r   r   r   Tr   r   NN)r   r   r   r   ri   r   r   r,   r   r<   r   r   rA   rI   rM   r(   r   r   r   r   r   r   rE   r   r   >  s(   (               r   ),r   typingr   r   r   r<   Ztorch.nnri   Ztorch.nn.functionalZ
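
# Note: the prepack/unpack and convolution ops used above dispatch to the
# currently selected quantized backend. A minimal sketch, assuming at least one
# backend is available in the build:
#
#     print(torch.backends.quantized.supported_engines)   # e.g. ['qnnpack', 'fbgemm', 'none']
#     torch.backends.quantized.engine = 'fbgemm'           # select before creating/running modules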