import warnings
from functools import partial
from typing import Any, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import inception as inception_module
from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableInception3",
    "Inception_V3_QuantizedWeights",
    "inception_v3",
]


class QuantizableBasicConv2d(inception_module.BasicConv2d):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)


class QuantizableInceptionA(inception_module.InceptionA):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionB(inception_module.InceptionB):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionC(inception_module.InceptionC):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionD(inception_module.InceptionD):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionE(inception_module.InceptionE):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop1 = nn.quantized.FloatFunctional()
        self.myop2 = nn.quantized.FloatFunctional()
        self.myop3 = nn.quantized.FloatFunctional()

    def _forward(self, x: Tensor) -> list[Tensor]:
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
        branch3x3 = self.myop1.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
        branch3x3dbl = self.myop2.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return outputs

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop3.cat(outputs, 1)


class QuantizableInceptionAux(inception_module.InceptionAux):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)


class QuantizableInception3(inception_module.Inception3):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(
            *args,
            inception_blocks=[
                QuantizableBasicConv2d,
                QuantizableInceptionA,
                QuantizableInceptionB,
                QuantizableInceptionC,
                QuantizableInceptionD,
                QuantizableInceptionE,
                QuantizableInceptionAux,
            ],
            **kwargs,
        )
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> InceptionOutputs:
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
            return InceptionOutputs(x, aux)
        else:
            return self.eager_outputs(x, aux)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        for m in self.modules():
            if type(m) is QuantizableBasicConv2d:
                m.fuse_model(is_qat)


class Inception_V3_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pth",
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.176,
                    "acc@5": 93.354,
                }
            },
            "_ops": 5.713,
            "_file_size": 23.146,
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


@register_model(name="quantized_inception_v3")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.IMAGENET1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    r"""Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
            weights for the model. See
            :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
        :noindex:
    """
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        if quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None

    return model