# mypy: allow-untyped-defs
from contextlib import contextmanager

import torch
import torch._custom_ops
from torch._C import DispatchKey
from torch._higher_order_ops.flat_apply import (
    _ConstantFunction,
    flat_apply,
    to_graphable,
)
from torch._higher_order_ops.strict_mode import strict_mode
from torch._higher_order_ops.utils import autograd_not_implemented
from torch._ops import HigherOrderOperator
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import (
    get_proxy_slot,
    PreDispatchTorchFunctionMode,
    ProxyTorchDispatchMode,
    track_tensor_tree,
)
from torch.utils import _pytree as pytree
from torch.utils._python_dispatch import is_traceable_wrapper_subclass_type


class ExportTracepoint(HigherOrderOperator):
    def __init__(self):
        super().__init__("_export_tracepoint")

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
__module____qualname__r   r   __classcell__r   r   r   r   r      s    r   c                 O   s<   t | jj||f\}}| jdt||}t||d | jdS )Ncall_functionconstanttracer)pytreetree_mapr'   unwrap_proxycreate_proxyr   r   )moder   r   p_argsp_kwargsproxyr   r   r   export_tracepoint_dispatch_mode%   s
   r0   c                 O   s,   | 
 |W  d    S 1 sw   Y  d S r   r   )r,   r   r   r   r   r   "export_tracepoint_fake_tensor_mode.   s   $r1   c                 O   sR   |  |}|  |}|   t|i | |W  d    S 1 s"w   Y  d S r   )unwrap_tensorsredispatch_to_nextr   )ctxr   r   unwrapped_argsunwrapped_kwargsr   r   r   export_tracepoint_functional4   s   


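# Usage sketch (illustrative, not executed here): `_export_tracepoint` is an
# identity op that survives tracing, so a call such as
#
#     flat_args = _export_tracepoint(*flat_args, kind="module_call_inputs", path="sub")
#
# leaves the values untouched at runtime but shows up as a call_function node
# targeting _export_tracepoint in the exported graph.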
_export_tracepoint.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(_export_tracepoint, deferred_error=True)
)


@_export_tracepoint.py_impl(DispatchKey.CPU)
def export_tracepoint_cpu(*args, **kwargs):
    return args


def _wrap_submodule(mod, path, module_call_specs):
    assert isinstance(mod, torch.nn.Module)
    assert path != ""
    submodule = torch.fx.graph_module._get_attr(mod, path)

    def update_module_call_signatures(path, in_spec, out_spec):
        if path in module_call_specs:
            assert module_call_specs[path]["in_spec"] == in_spec
            assert module_call_specs[path]["out_spec"] == out_spec
        module_call_specs[path] = {"in_spec": in_spec, "out_spec": out_spec}

    def check_flattened(flat_args):
        for a in flat_args:
            if not (isinstance(a, (torch.Tensor, str, int, float, bool)) or a is None):
                raise AssertionError(
                    f"Only Tensors or scalars are supported as pytree flattened inputs, got: {a}"
                )

    def pre_hook(module, args, kwargs):
        flat_args, in_spec = pytree.tree_flatten((args, kwargs))
        check_flattened(flat_args)
        flat_args = _export_tracepoint(*flat_args, kind="module_call_inputs", path=path)
        args, kwargs = pytree.tree_unflatten(flat_args, in_spec)
        return args, kwargs

    def post_hook(module, args, kwargs, res):
        _, in_spec = pytree.tree_flatten((args, kwargs))
        flat_res, out_spec = pytree.tree_flatten(res)
        check_flattened(flat_res)
        flat_res = _export_tracepoint(*flat_res, kind="module_call_outputs", path=path)
        update_module_call_signatures(path, in_spec, out_spec)
        return pytree.tree_unflatten(flat_res, out_spec)

    pre_handle = submodule.register_forward_pre_hook(pre_hook, with_kwargs=True)
    post_handle = submodule.register_forward_hook(post_hook, with_kwargs=True)
    return pre_handle, post_handle


@contextmanager
def _wrap_submodules(f, preserve_signature, module_call_signatures):
    handles = []

    try:
        for path in preserve_signature:
            handles.extend(_wrap_submodule(f, path, module_call_signatures))
        yield
    finally:
        for handle in handles:
            handle.remove()
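# Usage sketch (illustrative; in practice torch.export drives this via its
# preserve_module_call_signature argument): the hooks bracket a submodule's
# flattened inputs and outputs with tracepoints and record the pytree specs
# needed to reconstruct the submodule's calling convention after tracing.
#
#     module_call_signatures = {}
#     with _wrap_submodules(mod, ["sub"], module_call_signatures):
#         ...  # trace/export mod here
#     # module_call_signatures["sub"] -> {"in_spec": ..., "out_spec": ...}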
def _mark_strict_experimental(cls):
    def call(self, *args):
        return strict_mode(self, args)

    cls.__call__ = call
    return cls


def _register_subclass_spec_proxy_in_tracer(tracer, name, spec):
    """
    This is a wrapper utility method on top of tracer to cache the
    already registered subclass spec attribute. This is useful because
    Subclass.__init__ will be the same for each subclass. By default, fx
    would create multiple attributes/proxies for a given attribute.
    """
    # fx qualifies the first attribute registered under `name` as name + "0",
    # so if that attribute already exists and holds the same spec, reuse it.
    fx_name = name + "0"
    if hasattr(tracer.root, fx_name):
        assert getattr(tracer.root, fx_name) == spec
        return tracer.create_proxy("get_attr", fx_name, (), {})

    qualname = tracer.get_fresh_qualname(name)
    setattr(tracer.root, qualname, spec)
    return tracer.create_proxy("get_attr", qualname, (), {})
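# Caching sketch (illustrative): registering the same spec twice under one
# name yields proxies that target a single root attribute instead of two.
#
#     p1 = _register_subclass_spec_proxy_in_tracer(tracer, "foo_spec", spec)
#     p2 = _register_subclass_spec_proxy_in_tracer(tracer, "foo_spec", spec)
#     # p1 and p2 are both get_attr proxies pointing at "foo_spec0"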
def mark_subclass_constructor_exportable_experimental(constructor_subclass):
    """
    Experimental decorator that makes a subclass traceable in export
    with pre-dispatch IR. To make your subclass traceable in export, you need to:
        1. Implement an __init__ method for your subclass (look at the DTensor implementation)
        2. Decorate your __init__ method with mark_subclass_constructor_exportable_experimental
        3. Add the torch._dynamo_disable decorator to prevent dynamo from peeking into its impl

    Example:

    class FooTensor(torch.Tensor):
        @staticmethod
        def __new__(cls, elem, *, requires_grad=False):
            # ...
            return torch.Tensor._make_subclass(cls, elem, requires_grad=requires_grad)

        @torch._dynamo_disable
        @mark_subclass_constructor_exportable_experimental
        def __init__(self, elem, ...):
            # ...
    """

    def _is_init(fn):
        return callable(fn) and fn.__name__ == "__init__"

    if not _is_init(constructor_subclass):
        raise RuntimeError(
            f"torch._export.wrappers.mark_constructor_exportable_experimental can only be applied on "
            f"subclass tensor.__init__. But you are adding it on {constructor_subclass.__name__}, "
            f"which is not supported. If __init__ doesn't exist on your subclass, please add it. "
            f"Look at the DTensor.__init__ implementation for an example"
        )

    def wrapper(*args, **kwargs):
        if not is_traceable_wrapper_subclass_type(type(args[0])):
            assert constructor_subclass.__qualname__.endswith("__init__")
            obj_name = constructor_subclass.__qualname__[: -len("__init__")]
            raise RuntimeError(
                f"Applying mark_constructor_exportable_experimental on {obj_name} is not valid as it is "
                f"not a traceable tensor subclass. Please look at the DTensor.__init__ implementation "
                f"as an example of proper usage of this API."
            )

        # Always run the real constructor; the tracing below only records it.
        constructor_subclass(*args, **kwargs)

        if not torch._C._is_torch_function_mode_enabled():
            return

        torch_function_mode_stack = torch.overrides._get_current_function_mode_stack()
        pre_dispatch_tf_modes = [
            mode
            for mode in torch_function_mode_stack
            if isinstance(mode, PreDispatchTorchFunctionMode)
        ]
        assert len(pre_dispatch_tf_modes) <= 1, (
            f"Expected only one PreDispatchTorchFunctionMode, found {len(pre_dispatch_tf_modes)}"
        )

        if len(pre_dispatch_tf_modes) == 0:
            return

        mode = pre_dispatch_tf_modes[0]
        tracer = mode.tracer
        subclass = args[0]

        flat_args, in_spec = to_graphable((tuple(args[1:]), kwargs))

        # Stash the input pytree spec on the tracer root and fetch it via get_attr.
        constructor_spec_name = "_".join(
            constructor_subclass.__qualname__.lower().split(".")
        )
        qualname = tracer.get_fresh_qualname(constructor_spec_name)
        setattr(tracer.root, qualname, in_spec)
        spec_proxy = tracer.create_proxy("get_attr", qualname, (), {})
        flat_proxy_args = pytree.tree_map_only(
            torch.Tensor, lambda x: get_proxy_slot(x, tracer).proxy, flat_args
        )

        _, func_spec = torch.utils._pytree.tree_flatten(
            (_ConstantFunction(type(subclass)), ())
        )
        fxable_constructor_call_spec_name = constructor_spec_name + "_const_func_spec"
        # The constructor spec is identical across calls, so go through the
        # caching helper above to avoid registering duplicate attributes.
        func_spec_proxy = _register_subclass_spec_proxy_in_tracer(
            tracer, fxable_constructor_call_spec_name, func_spec
        )

        # Record the whole construction as a single flat_apply call in the graph.
        inner_proxy = tracer.create_proxy(
            "call_function",
            flat_apply,
            (func_spec_proxy, spec_proxy, *flat_proxy_args),
            {},
        )
        track_tensor_tree(subclass, inner_proxy, constant=None, tracer=tracer)
        return

    return wrapper
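# Graph sketch (illustrative; the node names below are made up, real qualnames
# come from tracer.get_fresh_qualname): a decorated Subclass.__init__ is
# recorded as one flat_apply call instead of being traced through, roughly:
#
#     %spec      : get_attr[target=subclass_init0]            (input pytree spec)
#     %func_spec : get_attr[target=subclass_init_const_func_spec]
#     %out       : call_function[target=flat_apply](%func_spec, %spec, *flat_args)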