from functools import partial
from typing import Callable, Any, List, Optional

import torch
import torch.nn as nn
from torch import Tensor

from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param


__all__ = [
    "ShuffleNetV2",
    "ShuffleNet_V2_X0_5_Weights",
    "ShuffleNet_V2_X1_0_Weights",
    "ShuffleNet_V2_X1_5_Weights",
    "ShuffleNet_V2_X2_0_Weights",
    "shufflenet_v2_x0_5",
    "shufflenet_v2_x1_0",
    "shufflenet_v2_x1_5",
    "shufflenet_v2_x2_0",
]


def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups

    # reshape
    x = x.view(batchsize, groups, channels_per_group, height, width)

    x = torch.transpose(x, 1, 2).contiguous()

    # flatten
    x = x.view(batchsize, -1, height, width)

    return x
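
# Illustrative sketch (not part of the original torchvision module): a quick check of what
# ``channel_shuffle`` does. With ``groups=2`` the channel order [0, 1, 2, 3] becomes
# [0, 2, 1, 3], which is how the two halves of each ShuffleNet block exchange information.
# The helper name below is hypothetical and only meant for manual experimentation.
def _channel_shuffle_demo() -> None:
    # four channels, each filled with its own index so the permutation is visible
    x = torch.arange(4, dtype=torch.float32).view(1, 4, 1, 1)
    shuffled = channel_shuffle(x, groups=2)
    print(shuffled.view(-1))  # tensor([0., 2., 1., 3.])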


class InvertedResidual(nn.Module):
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        if (self.stride == 1) and (inp != branch_features << 1):
            raise ValueError(
                f"Invalid combination of stride {stride}, inp {inp} and oup {oup} values. If stride == 1 then inp should be equal to oup // 2 << 1."
            )

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(
        i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
    ) -> nn.Conv2d:
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        out = channel_shuffle(out, 2)

        return out


class ShuffleNetV2(nn.Module):
    def __init__(
        self,
        stages_repeats: List[int],
        stages_out_channels: List[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        if len(stages_repeats) != 3:
            raise ValueError("expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError("expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = [f"stage{i}" for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # globalpool
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _shufflenetv2(
    weights: Optional[WeightsEnum],
    progress: bool,
    *args: Any,
    **kwargs: Any,
) -> ShuffleNetV2:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ShuffleNetV2(*args, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/ericsun99/Shufflenet-v2-Pytorch",
}


class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 1366792,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 60.552,
                    "acc@5": 81.746,
                }
            },
            "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2278604,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.362,
                    "acc@5": 88.316,
                }
            },
            "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 3503624,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.996,
                    "acc@5": 91.086,
                }
            },
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 7393996,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.230,
                    "acc@5": 93.006,
                }
            },
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x0_5(
    *, weights: Optional[ShuffleNet_V2_X0_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X0_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
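
# Illustrative sketch (not part of the original torchvision module): running inference with the
# pretrained x0.5 weights via the weights API. The preprocessing pipeline and the class names are
# both carried by the weights enum, so no hard-coded normalization constants are needed. The
# helper name and the dummy uint8 image are hypothetical placeholders; calling the helper
# downloads the checkpoint on first use.
def _pretrained_inference_demo() -> None:
    weights = ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1
    model = shufflenet_v2_x0_5(weights=weights)
    model.eval()

    # stand-in for a real photo: a CxHxW uint8 RGB image tensor
    image = torch.randint(0, 256, (3, 256, 256), dtype=torch.uint8)
    batch = weights.transforms()(image).unsqueeze(0)

    with torch.no_grad():
        probabilities = model(batch).softmax(dim=1)
    score, class_id = torch.max(probabilities[0], dim=0)
    category = weights.meta["categories"][int(class_id)]
    print(f"{category}: {score.item():.3f}")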


@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_0(
    *, weights: Optional[ShuffleNet_V2_X1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
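
# Illustrative sketch (not part of the original torchvision module): adapting the x1.0 variant to
# a new classification task. The classifier is the single ``fc`` layer, so transfer learning only
# needs that layer swapped out; freezing the remaining parameters is optional. The helper name and
# the number of target classes are hypothetical.
def _transfer_learning_demo(num_classes: int = 10) -> ShuffleNetV2:
    model = shufflenet_v2_x1_0(weights=ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1)

    # optionally freeze the pretrained feature extractor
    for param in model.parameters():
        param.requires_grad = False

    # replace the 1000-way ImageNet head with a freshly initialized one
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    return model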


@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_5(
    *, weights: Optional[ShuffleNet_V2_X1_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
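
# Illustrative sketch (not part of the original torchvision module): torchvision's classification
# models, this one included, are kept TorchScript-compatible (see the ``[TorchScript super()]``
# note in ``_forward_impl`` above). The snippet below is a minimal export sketch under that
# assumption; the helper name and output file name are hypothetical.
def _torchscript_export_demo() -> None:
    model = shufflenet_v2_x1_5()
    model.eval()
    scripted = torch.jit.script(model)
    scripted.save("shufflenet_v2_x1_5_scripted.pt")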


@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x2_0(
    *, weights: Optional[ShuffleNet_V2_X2_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X2_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
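
# Illustrative sketch (not part of the original torchvision module): the four builders differ only
# in the per-stage output channels passed to ``ShuffleNetV2`` (the stage repeats are always
# [4, 8, 4]). The helper below instantiates each variant without weights and reports its size;
# the helper name is hypothetical, and the printed counts should match the ``num_params`` entries
# in the weight enums above.
def _compare_variants_demo() -> None:
    builders = {
        "x0.5": shufflenet_v2_x0_5,
        "x1.0": shufflenet_v2_x1_0,
        "x1.5": shufflenet_v2_x1_5,
        "x2.0": shufflenet_v2_x2_0,
    }
    for name, builder in builders.items():
        model = builder()
        num_params = sum(p.numel() for p in model.parameters())
        print(f"shufflenet_v2_{name}: {num_params} parameters")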
)
_ModelURLs)zshufflenetv2_x0.5zshufflenetv2_x1.0zshufflenetv2_x1.5zshufflenetv2_x2.0),	functoolsr   typingr   r   r   r   r    Ztorch.nnr4   r   Ztransforms._presetsr	   utilsr
   Z_apir   r   Z_metar   _utilsr   r   __all__rJ   r&   rc   r'   r   rL   rk   rz   r   r   r   r   r{   r   r   r   r   r   rx   Z
model_urlsr$   r$   r$   r%   <module>   s   =B            
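
# Illustrative sketch (not part of the original torchvision module): a manual smoke test that
# runs a random input through the x1.0 model without downloading any weights. Run this file
# directly (e.g. ``python -m torchvision.models.shufflenetv2``) to execute it; plain imports of
# the module are unaffected.
if __name__ == "__main__":
    model = shufflenet_v2_x1_0()
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])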