import warnings
from functools import partial
from typing import Any, List, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import inception as inception_module
from torchvision.models.inception import InceptionOutputs, Inception_V3_Weights

from ...transforms._presets import ImageClassification
from .._api import Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableInception3",
    "Inception_V3_QuantizedWeights",
    "inception_v3",
]


class QuantizableBasicConv2d(inception_module.BasicConv2d):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)


class QuantizableInceptionA(inception_module.InceptionA):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        # Concatenate through FloatFunctional so the cat op is quantization-aware.
        return self.myop.cat(outputs, 1)


class QuantizableInceptionB(inception_module.InceptionB):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionC(inception_module.InceptionC):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionD(inception_module.InceptionD):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionE(inception_module.InceptionE):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop1 = nn.quantized.FloatFunctional()
        self.myop2 = nn.quantized.FloatFunctional()
        self.myop3 = nn.quantized.FloatFunctional()

    def _forward(self, x: Tensor) -> List[Tensor]:
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
        branch3x3 = self.myop1.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
        branch3x3dbl = self.myop2.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return outputs

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop3.cat(outputs, 1)


class QuantizableInceptionAux(inception_module.InceptionAux):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)


class QuantizableInception3(inception_module.Inception3):
    def __init__(
        self,
        num_classes: int = 1000,
        aux_logits: bool = True,
        transform_input: bool = False,
    ) -> None:
        super().__init__(
            num_classes=num_classes,
            aux_logits=aux_logits,
            transform_input=transform_input,
            inception_blocks=[
                QuantizableBasicConv2d,
                QuantizableInceptionA,
                QuantizableInceptionB,
                QuantizableInceptionC,
                QuantizableInceptionD,
                QuantizableInceptionE,
                QuantizableInceptionAux,
            ],
        )
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> InceptionOutputs:
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
            return InceptionOutputs(x, aux)
        else:
            return self.eager_outputs(x, aux)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        for m in self.modules():
            if type(m) is QuantizableBasicConv2d:
                m.fuse_model(is_qat)


class Inception_V3_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth",
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.176,
                    "acc@5": 93.354,
                }
            },
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.IMAGENET1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    """Inception v3 model architecture from
 |s|sd|_d|_|S )a  Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.

    .. note::
        **Important**: In contrast to the other models, inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
            weights for the model. See
            :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
        :noindex:
    """
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        if quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress))
        if not quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None

    return model


# Backwards-compatible URL table for the deprecated ``pretrained``/``model_urls`` interface.
from .._utils import _ModelURLs
from ..inception import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        "inception_v3_google_fbgemm": Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)
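

# ----------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream torchvision module): it shows the two ways
# this file is typically exercised, as described in the docstrings above. Part 1 loads the
# pre-quantized fbgemm weights for CPU inference on a 299x299 input; part 2 starts from the float
# weights and runs eager-mode post-training quantization, with ``fuse_model`` as the first step.
# The calibration batch below is random and purely a placeholder; real calibration data should be
# used in practice.
# ----------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # 1) Ready-made int8 model: 8-bit weights, inference only, CPU only.
    qmodel = inception_v3(weights=Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1, quantize=True)
    qmodel.eval()
    x = torch.rand(1, 3, 299, 299)  # inception_v3 expects N x 3 x 299 x 299 inputs
    with torch.inference_mode():
        print(qmodel(x).argmax(dim=1))

    # 2) Manual eager-mode post-training quantization starting from the float weights.
    float_model = inception_v3(weights=Inception_V3_Weights.IMAGENET1K_V1, quantize=False)
    float_model.eval()
    float_model.fuse_model(is_qat=False)  # fold conv+bn+relu before inserting observers
    float_model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
    prepared = torch.ao.quantization.prepare(float_model)
    with torch.no_grad():
        prepared(torch.rand(2, 3, 299, 299))  # calibration pass to populate the observers
    int8_model = torch.ao.quantization.convert(prepared)
    with torch.inference_mode():
        print(int8_model(x).argmax(dim=1))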