r"""
This package introduces support for the XPU backend, specifically tailored for
Intel GPU optimization.

This package is lazily initialized, so you can always import it, and use
:func:`is_available()` to determine if your system supports XPU.
"""

import threading
import traceback
from functools import lru_cache
from typing import Any, Callable, Optional, Union

import torch
import torch._C
from torch import device as _device
from torch._utils import _dummy_type, _LazySeedTracker

from ._utils import _get_device_index
from .streams import Event, Stream

_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
# Calls queued here are only invoked once XPU is actually initialized (see _lazy_init).
_queued_calls: list[tuple[Callable[[], None], list[str]]] = []
_is_in_bad_fork = getattr(torch._C, "_xpu_isInBadFork", lambda: False)
_device_t = Union[_device, str, int, None]
_lazy_seed_tracker = _LazySeedTracker()
default_generators: tuple[torch._C.Generator] = ()  # type: ignore[assignment]


def _is_compiled() -> bool:
    r"""Return true if compiled with XPU support."""
    return torch._C._has_xpu


if _is_compiled():
    _XpuDeviceProperties = torch._C._XpuDeviceProperties
    _exchange_device = torch._C._xpu_exchangeDevice
    _maybe_exchange_device = torch._C._xpu_maybeExchangeDevice
else:
    # Fall back to dummies so the module can still be imported without XPU support.
    _XpuDeviceProperties = _dummy_type("_XpuDeviceProperties")  # type: ignore[assignment, misc]

    def _exchange_device(device: int) -> int:
        raise NotImplementedError("PyTorch was compiled without XPU support")

    def _maybe_exchange_device(device: int) -> int:
        raise NotImplementedError("PyTorch was compiled without XPU support")


@lru_cache(maxsize=1)
def device_count() -> int:
    r"""Return the number of XPU devices available."""
    if not _is_compiled():
        return 0
    return torch._C._xpu_getDeviceCount()


def is_available() -> bool:
    r"""Return a bool indicating if XPU is currently available."""
    return device_count() > 0
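

# Illustrative sketch, not part of the module itself: a hypothetical helper that
# combines the availability queries above with get_device_name(), defined later
# in this module. It only touches XPU state when called explicitly.
def _example_list_devices() -> None:
    if not is_available():
        print("No XPU device available")
        return
    for idx in range(device_count()):
        print(f"xpu:{idx}", get_device_name(idx))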


def is_bf16_supported(including_emulation: bool = True) -> bool:
    r"""Return a bool indicating if the current XPU device supports dtype bfloat16."""
    if not is_available():
        return False
    return (
        including_emulation
        or torch.xpu.get_device_properties().has_bfloat16_conversions
    )


def is_initialized():
    r"""Return whether PyTorch's XPU state has been initialized."""
    return _initialized and not _is_in_bad_fork()


def _lazy_call(callable, **kwargs):
    if is_initialized():
        callable()
    elif kwargs.get("seed_all", False):
        _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
    elif kwargs.get("seed", False):
        _lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
    else:
        # Don't store the actual traceback to avoid memory cycles.
        _queued_calls.append((callable, traceback.format_stack()))


def init():
    r"""Initialize PyTorch's XPU state.
    This is a Python API about lazy initialization that avoids initializing
    XPU until the first time it is accessed. Does nothing if the XPU state is
    already initialized.
    """
    _lazy_init()
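

# Illustrative sketch, not part of the module itself (hypothetical helper name):
# init() forces initialization up front, while ordinary use (e.g. allocating a
# tensor on "xpu") triggers the same lazy path automatically.
def _example_eager_init() -> None:
    if is_available():
        init()
        assert is_initialized()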


def _lazy_init() -> None:
    global _initialized, _queued_calls
    if is_initialized() or hasattr(_tls, "is_initializing"):
        return
    with _initialization_lock:
        # Double-check under the lock: another thread may have finished
        # initialization while we were waiting.
        if is_initialized():
            return
        # Stop promptly upon encountering a bad fork error.
        if _is_in_bad_fork():
            raise RuntimeError(
                "Cannot re-initialize XPU in forked subprocess. To use XPU with "
                "multiprocessing, you must use the 'spawn' start method"
            )
        if not _is_compiled():
            raise AssertionError("Torch not compiled with XPU enabled")
        # This call initializes the XPU backend.
        torch._C._xpu_init()
        # Some of the queued calls may reentrantly call _lazy_init(); we need to
        # just return without initializing again in that case.
        _tls.is_initializing = True

        _queued_calls.extend(calls for calls in _lazy_seed_tracker.get_calls() if calls)

        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = (
                        f"XPU call failed lazily at initialization with error: {str(e)}\n\n"
                        f"XPU call was originally invoked at:\n\n{''.join(orig_traceback)}"
                    )
                    raise Exception(msg) from e
        finally:
            delattr(_tls, "is_initializing")
        _initialized = True


class _DeviceGuard:
    def __init__(self, index: int):
        self.idx = index
        self.prev_idx = -1

    def __enter__(self):
        self.prev_idx = torch.xpu._exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)
        return False


class device:
    r"""Context-manager that changes the selected device.

    Args:
        device (torch.device or int or str): device index to select. It's a no-op if
            this argument is a negative integer or ``None``.
    """

    def __init__(self, device: Any):
        self.idx = _get_device_index(device, optional=True)
        self.prev_idx = -1

    def __enter__(self):
        self.prev_idx = torch.xpu._exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)
        return False


class device_of(device):
    r"""Context-manager that changes the current device to that of given object.

    You can use both tensors and storages as arguments. If a given object is
    not allocated on an XPU, this is a no-op.

    Args:
        obj (Tensor or Storage): object allocated on the selected device.
    """

    def __init__(self, obj):
        idx = obj.get_device() if obj.is_xpu else -1
        super().__init__(idx)


def set_device(device: _device_t) -> None:
    r"""Set the current device.

    Args:
        device (torch.device or int or str): selected device. This function is a
            no-op if this argument is negative.
    """
    _lazy_init()
    device = _get_device_index(device)
    if device >= 0:
        torch._C._xpu_setDevice(device)
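

# Illustrative sketch, not part of the module itself (hypothetical helper name):
# set_device() changes the process-wide current device, unlike the scoped
# `device` context manager above.
def _example_switch_device() -> None:
    if device_count() > 1:
        set_device(1)
        assert current_device() == 1
        set_device(0)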


def get_device_name(device: Optional[_device_t] = None) -> str:
    r"""Get the name of a device.

    Args:
        device (torch.device or int or str, optional): device for which to
            return the name. This function is a no-op if this argument is a
            negative integer. It uses the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).

    Returns:
        str: the name of the device
    """
    return get_device_properties(device).name


def get_device_capability(device: Optional[_device_t] = None) -> dict[str, Any]:
    r"""Get the xpu capability of a device.

    Args:
        device (torch.device or int or str, optional): device for which to
            return the device capability. This function is a no-op if this
            argument is a negative integer. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).

    Returns:
        Dict[str, Any]: the xpu capability dictionary of the device
    """
    props = get_device_properties(device)
    return {
        prop: getattr(props, prop)
        for prop in dir(props)
        if not prop.startswith(("__", "_pybind11_"))
    }


def get_device_properties(device: Optional[_device_t] = None) -> _XpuDeviceProperties:
    r"""Get the properties of a device.

    Args:
        device (torch.device or int or str): device for which to return the
            properties of the device.

    Returns:
        _XpuDeviceProperties: the properties of the device
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    return _get_device_properties(device)  # type: ignore[name-defined]


def current_device() -> int:
    r"""Return the index of a currently selected device."""
    _lazy_init()
    return torch._C._xpu_getDevice()
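

# Illustrative sketch, not part of the module itself (hypothetical helper name):
# reading device information both as the raw property struct and as the
# flattened capability dictionary built above. `total_memory` is looked up
# defensively in case the field is named differently on a given build.
def _example_inspect_device(idx: int = 0) -> None:
    props = get_device_properties(idx)
    caps = get_device_capability(idx)
    print(props.name, caps.get("total_memory"))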


def _get_device(device: Union[int, str, torch.device]) -> torch.device:
    r"""Return the torch.device type object from the passed in device.

    Args:
        device (torch.device or int or str): selected device.
    r(   )
isinstancerL   r   r   rj   r   r   r   r   _get_device  s   


r   c                   @   sN   e Zd ZU dZed ed< ded fddZdd Zd	ed
edefddZ	dS )StreamContexta  Context-manager that selects a given stream.

    All XPU kernels queued within its context will be enqueued on a selected
    stream.

    Args:
        Stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device.
    """

    cur_stream: Optional["torch.xpu.Stream"]

    def __init__(self, stream: Optional["torch.xpu.Stream"]):
        self.stream = stream
        self.idx = _get_device_index(None, True)
        if self.idx is None:
            self.idx = -1

    def __enter__(self):
        cur_stream = self.stream
        # No-op if the stream is None or there is no XPU device.
        if cur_stream is None or self.idx == -1:
            return
        self.src_prev_stream = torch.xpu.current_stream(None)

        # If the stream is not on the current device, remember the current
        # stream on the stream's device before switching.
        if self.src_prev_stream.device != cur_stream.device:
            with device(cur_stream.device):
                self.dst_prev_stream = torch.xpu.current_stream(cur_stream.device)
        torch.xpu.set_stream(cur_stream)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        cur_stream = self.stream
        # No-op if the stream is None or there is no XPU device.
        if cur_stream is None or self.idx == -1:
            return
        # Reset the stream on the original device and the destination device.
        if self.src_prev_stream.device != cur_stream.device:
            torch.xpu.set_stream(self.dst_prev_stream)
        torch.xpu.set_stream(self.src_prev_stream)


def stream(stream: Optional["torch.xpu.Stream"]) -> StreamContext:
    r"""Wrap around the Context-manager StreamContext that selects a given stream.

    Arguments:
        stream (Stream): selected stream. This manager is a no-op if it's ``None``.
    """
    return StreamContext(stream)


def _set_stream_by_id(stream_id, device_index, device_type):
    r"""Set the stream specified by the stream id, device index and device type.

    Args: stream_id (int): not visible to the user, used to assign the specific stream.
          device_index (int): selected device index.
          device_type (int): selected device type.
    """
    torch._C._xpu_setStream(
        stream_id=stream_id,
        device_index=device_index,
        device_type=device_type,
    )


def set_stream(stream: Stream):
    r"""Set the current stream. This is a wrapper API to set the stream.
        Usage of this function is discouraged in favor of the ``stream``
        context manager.

    Args:
        stream (Stream): selected stream. This function is a no-op
            if this argument is ``None``.
    """
    if stream is None:
        return
    _lazy_init()
    _set_stream_by_id(
        stream_id=stream.stream_id,
        device_index=stream.device_index,
        device_type=stream.device_type,
    )


def current_stream(device: Optional[_device_t] = None) -> Stream:
    r"""Return the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the currently selected :class:`Stream` for the current device, given
            by :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    """
    _lazy_init()
    streamdata = torch._C._xpu_getCurrentStream(
        _get_device_index(device, optional=True)
    )
    return Stream(
        stream_id=streamdata[0],
        device_index=streamdata[1],
        device_type=streamdata[2],
    )
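

# Illustrative sketch, not part of the module itself (hypothetical helper name):
# issuing work on a side stream via the stream() context manager, then joining
# all streams before reading the result on the host.
def _example_side_stream() -> None:
    side = Stream()  # a new stream on the current device
    with stream(side):
        y = torch.ones(4, device="xpu") * 2
    torch.xpu.synchronize()  # wait for all streams before using `y`
    print(y.cpu())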


def get_stream_from_external(
    data_ptr: int, device: Optional[_device_t] = None
) -> Stream:
    r"""Return a :class:`Stream` from an external SYCL queue.

    This function is used to wrap a SYCL queue created in other libraries in order
    to facilitate data exchange and multi-library interactions.

    .. note:: This function doesn't manage the queue life-cycle; it is the user's
       responsibility to keep the referenced queue alive while this returned stream is
       being used. The different SYCL queue pointers will result in distinct
       :class:`Stream` objects, even if the SYCL queues they dereference are equivalent.

    Args:
        data_ptr(int): Integer representation of the `sycl::queue*` value passed externally.
        device(torch.device or int, optional): the device where the queue was originally created.
            It is the user's responsibility to ensure the device is specified correctly.
    """
    _lazy_init()
    streamdata = torch._C._xpu_getStreamFromExternal(
        data_ptr, _get_device_index(device, optional=True)
    )
    return Stream(
        stream_id=streamdata[0],
        device_index=streamdata[1],
        device_type=streamdata[2],
    )


def synchronize(device: _device_t = None) -> None:
    r"""Wait for all kernels in all streams on an XPU device to complete.

    Args:
        device (torch.device or int, optional): device for which to synchronize.
            It uses the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_synchronize(device)
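

# Illustrative sketch, not part of the module itself (hypothetical helper name):
# host-side timing of asynchronous XPU work; synchronize() makes the wall-clock
# measurement meaningful because kernel launches return immediately.
def _example_timed_matmul(n: int = 1024) -> None:
    import time

    a = torch.randn(n, n, device="xpu")
    b = torch.randn(n, n, device="xpu")
    synchronize()
    start = time.perf_counter()
    c = a @ b
    synchronize()
    print(c.shape, f"{(time.perf_counter() - start) * 1e3:.2f} ms")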


def get_arch_list() -> list[str]:
    r"""Return the list of XPU architectures this library was compiled for."""
    if not _is_compiled():
        return []
    arch_flags = torch._C._xpu_getArchFlags()
    if arch_flags is None:
        return []
    return arch_flags.split()


def get_gencode_flags() -> str:
    r"""Return XPU AOT (ahead-of-time) build flags this library was compiled with."""
    arch_list = get_arch_list()
    if len(arch_list) == 0:
        return ""
    return f'-device {",".join(arch for arch in arch_list)}'


def _get_generator(device: torch.device) -> torch._C.Generator:
    r"""Return the XPU Generator object for the given device.

    Args:
        device (torch.device): selected device.
    N)rT   r   r   r(   r   )r   rW   r   r   r   _get_generator  s   r   r(   offsetc                    s"   t |  fdd}t| dS )a$  Set the random number generator state offset of the specified GPU.

    Args:
        offset (int): The desired offset
        device (torch.device or int, optional): The device to set the RNG state.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).
    """
    final_device = _get_device(device)

    def cb():
        default_generator = _get_generator(final_device)
        default_generator.set_offset(offset)

    _lazy_call(cb)


def _get_rng_state_offset(device: Union[int, str, torch.device] = "xpu") -> int:
    r"""Return the random number generator state offset of the specified GPU.

    Args:
        device (torch.device or int, optional): The device to return the RNG state offset of.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).

    .. warning::
        This function eagerly initializes XPU.
    """
    _lazy_init()
    final_device = _get_device(device)
    default_generator = _get_generator(final_device)
    return default_generator.get_offset()


# Memory management and RNG helpers re-exported at the package level.
from .memory import (
    empty_cache,
    max_memory_allocated,
    max_memory_reserved,
    mem_get_info,
    memory_allocated,
    memory_reserved,
    memory_stats,
    memory_stats_as_nested_dict,
    reset_accumulated_memory_stats,
    reset_peak_memory_stats,
)
from .random import (
    get_rng_state,
    get_rng_state_all,
    initial_seed,
    manual_seed,
    manual_seed_all,
    seed,
    seed_all,
    set_rng_state,
    set_rng_state_all,
)


__all__ = [
    "Event",
    "Stream",
    "StreamContext",
    "current_device",
    "current_stream",
    "default_generators",
    "device",
    "device_of",
    "device_count",
    "empty_cache",
    "get_arch_list",
    "get_device_capability",
    "get_device_name",
    "get_device_properties",
    "get_gencode_flags",
    "get_rng_state",
    "get_rng_state_all",
    "get_stream_from_external",
    "init",
    "initial_seed",
    "is_available",
    "is_bf16_supported",
    "is_initialized",
    "manual_seed",
    "manual_seed_all",
    "max_memory_allocated",
    "max_memory_reserved",
    "mem_get_info",
    "memory_allocated",
    "memory_reserved",
    "memory_stats",
    "memory_stats_as_nested_dict",
    "reset_accumulated_memory_stats",
    "reset_peak_memory_stats",
    "seed",
    "seed_all",
    "set_device",
    "set_rng_state",
    "set_rng_state_all",
    "set_stream",
    "stream",
    "streams",
    "synchronize",
]