import warnings
from functools import partial
from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
from torch import Tensor

from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param


__all__ = [
    "MNASNet",
    "MNASNet0_5_Weights",
    "MNASNet0_75_Weights",
    "MNASNet1_0_Weights",
    "MNASNet1_3_Weights",
    "mnasnet0_5",
    "mnasnet0_75",
    "mnasnet1_0",
    "mnasnet1_3",
]


# The paper suggests a BatchNorm momentum of 0.9997 (TensorFlow convention);
# the equivalent PyTorch momentum is 1 - 0.9997.
_BN_MOMENTUM = 1 - 0.9997


class _InvertedResidual(nn.Module):
    def __init__(
        self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1
    ) -> None:
        super().__init__()
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")
        if kernel_size not in [3, 5]:
            raise ValueError(f"kernel_size should be 3 or 5 instead of {kernel_size}")
        mid_ch = in_ch * expansion_factor
        # A residual connection is only possible when the block keeps both the
        # spatial resolution and the channel count.
        self.apply_residual = in_ch == out_ch and stride == 1
        self.layers = nn.Sequential(
            # Pointwise expansion.
            nn.Conv2d(in_ch, mid_ch, 1, bias=False),
            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Depthwise convolution.
            nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, stride=stride, groups=mid_ch, bias=False),
            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Linear pointwise projection. Note that there's no activation here.
            nn.Conv2d(mid_ch, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch, momentum=bn_momentum),
        )

    def forward(self, input: Tensor) -> Tensor:
        if self.apply_residual:
            return self.layers(input) + input
        else:
            return self.layers(input)


def _stack(
    in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, bn_momentum: float
) -> nn.Sequential:
    """Creates a stack of inverted residuals."""
    if repeats < 1:
        raise ValueError(f"repeats should be >= 1, instead got {repeats}")
    # The first block has no skip connection, because the feature map size changes.
    first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum)
    remaining = []
    for _ in range(1, repeats):
        remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum))
    return nn.Sequential(first, *remaining)


def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int:
    """Asymmetric rounding to make `val` divisible by `divisor`. With default
    bias, will round up, unless the number is no more than 10% greater than the
    smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88."""
    if not 0.0 < round_up_bias < 1.0:
        raise ValueError(f"round_up_bias should be greater than 0.0 and smaller than 1.0 instead of {round_up_bias}")
    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
    return new_val if new_val >= round_up_bias * val else new_val + divisor


def _get_depths(alpha: float) -> List[int]:
    """Scales tensor depths as in reference MobileNet code, prefers rounding up
    rather than down."""
    depths = [32, 16, 24, 40, 80, 96, 192, 320]
    return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
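
# For illustration, the scaled depths at alpha=0.5 after asymmetric rounding
# to multiples of 8:
# >>> _get_depths(0.5)
# [16, 8, 16, 24, 40, 48, 96, 160]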
dZ	e
ee
eee ee ee dd fddZ  ZS )r   a  MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This
    implements the B1 variant of the model.
    >>> model = MNASNet(1.0, num_classes=1000)
    >>> x = torch.rand(1, 3, 224, 224)
    >>> y = model(x)
    >>> y.dim()
    2
    >>> y.nelement()
    1000
    r     皙?N)rU   num_classesdropoutr"   c                    s~  t    t|  |dkr(td| || _|| _t|}tjd|d dddddtj	|d t
d	tjd
dtj|d |d ddd|d ddtj	|d t
d	tjd
dtj|d |d dddddtj	|d t
d	t|d |d ddddt
t|d |d ddddt
t|d |d ddddt
t|d |d ddddt
t|d |d ddddt
t|d |d ddddt
tj|d ddddddtj	dt
d	tjd
dg}tj| | _ttj|d
dtd|| _|  D ]}t|tjrtjj|jddd |jd k	rvtj|j n\t|tj	rFtj|j tj|j n0t|tjrtjj|jddd tj|j qd S )NrP   z,alpha should be greater than 0.0 instead of r$   r   r   r   Fr+   r   r&   r'   Tr(   r*   r%            i   )pr)   Zfan_outZrelu)modeZnonlinearityZsigmoid)r-   r.   r
   r/   rU   rf   rc   r1   r3   r4   _BN_MOMENTUMr5   rK   r2   r6   ZDropoutZLinear
classifiermodules
isinstanceinitZkaiming_normal_Zweightr&   Zzeros_Zones_Zkaiming_uniform_)r7   rU   rf   rg   rb   r6   mr8   r:   r;   r.   p   sL    

"

 zMNASNet.__init__)xr"   c                 C   s"   |  |}|ddg}| |S )Nr   r$   )r6   Zmeanro   )r7   rt   r:   r:   r;   r=      s    
zMNASNet.forward)
state_dictprefixlocal_metadatastrictmissing_keysunexpected_keys
error_msgsr"   c                    s  | dd }|dkr"td| |dkr| jdkst| j}	tjdddddd	d
tjdtdtjddtjddddddd	dtjdtdtjddtjdddddd	d
tjdtdt	d|	d ddddtg	}
t
|
D ]\}}|| j|< qd| _tdt t ||||||| d S )Nversionr#   z+version shluld be set to 1 or 2 instead of r   rQ   r$   rV   r   Frh   r'   Tr(   r*   rW   r   a  A new version of MNASNet model has been implemented. Your checkpoint was saved using the previous version. This checkpoint will load and work as before, but you may want to upgrade by training a newer model or transfer learning from an updated ImageNet checkpoint.)getr/   rU   rc   r1   r3   r4   rn   r5   rK   	enumerater6   _versionwarningswarnUserWarningr-   _load_from_state_dict)r7   ru   rv   rw   rx   ry   rz   r{   r|   rb   Zv1_stemidxZlayerr8   r:   r;   r      s>    



	      zMNASNet._load_from_state_dict)rd   re   )r>   r?   r@   __doc__r   rB   rA   r.   r   r=   r   strboolr   r   rC   r:   r:   r8   r;   r   a   s   .)r   r   z(https://github.com/1e100/mnasnet_trainer)Zmin_size


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/1e100/mnasnet_trainer",
}


class MNASNet0_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2218512,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 67.734,
                    "acc@5": 87.490,
                }
            },
            "_docs": """These weights reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class MNASNet0_75_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet0_75-7090bc5f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/6019",
            "num_params": 3170208,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.180,
                    "acc@5": 90.496,
                }
            },
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class MNASNet1_0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 4383312,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 73.456,
                    "acc@5": 91.510,
                }
            },
            "_docs": """These weights reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class MNASNet1_3_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet1_3-a4c69d6f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/6019",
            "num_params": 6282256,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.506,
                    "acc@5": 93.522,
                }
            },
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


def _mnasnet(alpha: float, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> MNASNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MNASNet(alpha, **kwargs)

    if weights:
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


@handle_legacy_interface(weights=("pretrained", MNASNet0_5_Weights.IMAGENET1K_V1))
def mnasnet0_5(*, weights: Optional[MNASNet0_5_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 0.5 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet0_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet0_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet0_5_Weights
        :members:
    """
    weights = MNASNet0_5_Weights.verify(weights)

    return _mnasnet(0.5, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", MNASNet0_75_Weights.IMAGENET1K_V1))
def mnasnet0_75(*, weights: Optional[MNASNet0_75_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 0.75 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet0_75_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet0_75_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet0_75_Weights
        :members:
    """
    weights = MNASNet0_75_Weights.verify(weights)

    return _mnasnet(0.75, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", MNASNet1_0_Weights.IMAGENET1K_V1))
def mnasnet1_0(*, weights: Optional[MNASNet1_0_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 1.0 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet1_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet1_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet1_0_Weights
        :members:
    """
    weights = MNASNet1_0_Weights.verify(weights)

    return _mnasnet(1.0, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", MNASNet1_3_Weights.IMAGENET1K_V1))
def mnasnet1_3(*, weights: Optional[MNASNet1_3_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 1.3 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet1_3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet1_3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet1_3_Weights
        :members:
    """
    weights = MNASNet1_3_Weights.verify(weights)

    return _mnasnet(1.3, weights, progress, **kwargs)