from functools import partial
from typing import Any, Callable, Optional

import torch
import torch.nn as nn
from torch import Tensor

from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "ShuffleNetV2",
    "ShuffleNet_V2_X0_5_Weights",
    "ShuffleNet_V2_X1_0_Weights",
    "ShuffleNet_V2_X1_5_Weights",
    "ShuffleNet_V2_X2_0_Weights",
    "shufflenet_v2_x0_5",
    "shufflenet_v2_x1_0",
    "shufflenet_v2_x1_5",
    "shufflenet_v2_x2_0",
]


def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups

    # reshape into (batch, groups, channels_per_group, H, W), swap the two channel
    # axes, then flatten back so channels from different groups are interleaved
    x = x.view(batchsize, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()
    x = x.view(batchsize, num_channels, height, width)

    return x


class InvertedResidual(nn.Module):
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        if (self.stride == 1) and (inp != branch_features << 1):
            raise ValueError(
                f"Invalid combination of stride {stride}, inp {inp} and oup {oup} values. "
                "If stride == 1 then inp should be equal to oup // 2 << 1."
            )

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(
        i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
    ) -> nn.Conv2d:
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        out = channel_shuffle(out, 2)

        return out


class ShuffleNetV2(nn.Module):
    def __init__(
        self,
        stages_repeats: list[int],
        stages_out_channels: list[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        if len(stages_repeats) != 3:
            raise ValueError("expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError("expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = [f"stage{i}" for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # global average pool
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _shufflenetv2(
    weights: Optional[WeightsEnum],
    progress: bool,
    *args: Any,
    **kwargs: Any,
) -> ShuffleNetV2:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ShuffleNetV2(*args, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/ericsun99/Shufflenet-v2-Pytorch",
}


class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 1366792,
            "_metrics": {"ImageNet-1K": {"acc@1": 60.552, "acc@5": 81.746}},
            "_ops": 0.04,
            "_file_size": 5.282,
            "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2278604,
            "_metrics": {"ImageNet-1K": {"acc@1": 69.362, "acc@5": 88.316}},
            "_ops": 0.145,
            "_file_size": 8.791,
            "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 3503624,
            "_metrics": {"ImageNet-1K": {"acc@1": 72.996, "acc@5": 91.086}},
            "_ops": 0.296,
            "_file_size": 13.557,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 7393996,
            "_metrics": {"ImageNet-1K": {"acc@1": 76.230, "acc@5": 93.006}},
            "_ops": 0.583,
            "_file_size": 28.433,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x0_5(
    *, weights: Optional[ShuffleNet_V2_X0_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X0_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_0(
    *, weights: Optional[ShuffleNet_V2_X1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_5(
    *, weights: Optional[ShuffleNet_V2_X1_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x2_0(
    *, weights: Optional[ShuffleNet_V2_X2_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X2_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
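

# Usage sketch (illustrative only; not part of the upstream module). It shows how the
# builders above are typically called through the public torchvision.models namespace,
# assuming torchvision is installed and the pretrained checkpoint can be downloaded.
# The random tensor is a stand-in for a real decoded image.
#
#   import torch
#   from torchvision.models import shufflenet_v2_x1_0, ShuffleNet_V2_X1_0_Weights
#
#   weights = ShuffleNet_V2_X1_0_Weights.DEFAULT
#   model = shufflenet_v2_x1_0(weights=weights).eval()
#
#   preprocess = weights.transforms()  # resize/crop/normalize preset bundled with the weights
#   batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)
#
#   with torch.no_grad():
#       class_id = model(batch).argmax(dim=1).item()
#   print(weights.meta["categories"][class_id])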