import warnings
from functools import partial
from typing import Any, Callable, List, Optional

import torch
from torch import Tensor
from torch import nn

from ..ops.misc import Conv2dNormActivation
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface


__all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]


class _DeprecatedConvBNAct(Conv2dNormActivation):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The ConvBNReLU/ConvBNActivation classes are deprecated since 0.12 and will be removed in 0.14. "
            "Use torchvision.ops.misc.Conv2dNormActivation instead.",
            FutureWarning,
        )
        if kwargs.get("norm_layer", None) is None:
            kwargs["norm_layer"] = nn.BatchNorm2d
        if kwargs.get("activation_layer", None) is None:
            kwargs["activation_layer"] = nn.ReLU6
        super().__init__(*args, **kwargs)


# Backwards-compatible aliases kept for callers of the deprecated names.
ConvBNReLU = _DeprecatedConvBNAct
ConvBNActivation = _DeprecatedConvBNAct


class InvertedResidual(nn.Module):
    def __init__(
        self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        self.stride = stride
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers: List[nn.Module] = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(
                Conv2dNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
            )
        layers.extend(
            [
                # depthwise
                Conv2dNormActivation(
                    hidden_dim,
                    hidden_dim,
                    stride=stride,
                    groups=hidden_dim,
                    norm_layer=norm_layer,
                    activation_layer=nn.ReLU6,
                ),
                # pointwise linear projection
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                norm_layer(oup),
            ]
        )
        self.conv = nn.Sequential(*layers)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
    ) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            dropout (float): The dropout probability

        """
        super().__init__()
        _log_api_usage_once(self)

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t, c, n, s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError(
                f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
            )

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [
            Conv2dNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
        ]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(
            Conv2dNormActivation(
                input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
            )
        )
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs a name other than `forward` that can be accessed in a subclass.
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


_COMMON_META = {
    "num_params": 3504872,
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}


class MobileNet_V2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.878,
                    "acc@5": 90.286,
                }
            },
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.154,
                    "acc@5": 90.822,
                }
            },
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


@handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
def mobilenet_v2(
    *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV2:
    """MobileNetV2 architecture from the `MobileNetV2: Inverted Residuals and Linear
    Bottlenecks <https://arxiv.org/abs/1801.04381>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenetv2.MobileNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
    """
    weights = MobileNet_V2_Weights.verify(weights)

    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MobileNetV2(**kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# Backwards-compatible mapping kept for code that still accesses `model_urls` directly.
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "mobilenet_v2": MobileNet_V2_Weights.IMAGENET1K_V1.url,
    }
)
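

# Illustrative usage sketch (not part of the upstream torchvision source):
# each entry of MobileNet_V2_Weights bundles its matching preprocessing via
# `transforms()`, so a typical inference call (assuming the checkpoint can be
# downloaded) looks like:
#
#     weights = MobileNet_V2_Weights.DEFAULT            # currently IMAGENET1K_V2
#     model = mobilenet_v2(weights=weights).eval()
#     preprocess = weights.transforms()
#     batch = preprocess(img).unsqueeze(0)              # img: PIL image or CHW tensor
#     with torch.no_grad():
#         prediction = model(batch).softmax(dim=1)
#     label = weights.meta["categories"][prediction.argmax(dim=1).item()]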