r"""
This package adds support for CUDA tensor types, that implement the same
function as CPU tensors, but they utilize GPUs for computation.

It is lazily initialized, so you can always import it, and use
:func:`is_available()` to determine if your system supports CUDA.

:ref:`cuda-semantics` has more details about working with CUDA.
"""

import contextlib
import os
import torch
from torch.types import Device
import traceback
import warnings
import threading
from typing import List, Optional, Tuple, Union, Any
from ._utils import _get_device_index, _dummy_type
from .._utils import classproperty
from .graphs import (
    CUDAGraph,
    graph_pool_handle,
    graph,
    make_graphed_callables,
    is_current_stream_capturing,
)
from .streams import ExternalStream, Stream, Event
from .. import device as _device
import torch._C

try:
    from torch._C import _cudart  # type: ignore[attr-defined]
except ImportError:
    _cudart = None

_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
_queued_calls = []  # don't invoke these until initialization occurs
_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False)
_device_t = Union[_device, str, int, None]


class _LazySeedTracker:
    # Track only the latest seeding callbacks; the order of the latest
    # `manual_seed` / `manual_seed_all` calls is preserved in `call_order`.
    def __init__(self):
        self.manual_seed_all_cb = None
        self.manual_seed_cb = None
        self.call_order = []

    def queue_seed_all(self, cb, traceback):
        self.manual_seed_all_cb = (cb, traceback)
        self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]

    def queue_seed(self, cb, traceback):
        self.manual_seed_cb = (cb, traceback)
        self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]

    def get_calls(self) -> List:
        return self.call_order


_lazy_seed_tracker = _LazySeedTracker()

# Define a dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA.
if hasattr(torch._C, "_CudaDeviceProperties"):
    _CudaDeviceProperties = torch._C._CudaDeviceProperties
else:
    _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties")  # type: ignore[assignment, misc]

# Global variables dynamically populated by native code.
has_magma: bool = False
has_half: bool = False
default_generators: Tuple[torch._C.Generator] = ()  # type: ignore[assignment]


def is_available() -> bool:
    r"""Returns a bool indicating if CUDA is currently available."""
    if not hasattr(torch._C, "_cuda_getDeviceCount"):
        return False
    # This function never throws and returns 0 if the driver is missing or
    # can't be initialized.
    return torch._C._cuda_getDeviceCount() > 0


def is_bf16_supported():
    r"""Returns a bool indicating if the current CUDA device supports dtype bfloat16"""
    cu_vers = torch.version.cuda
    if cu_vers is not None:
        cuda_maj_decide = int(cu_vers.split(".")[0]) >= 11
    else:
        cuda_maj_decide = False
    return torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8 and cuda_maj_decide


def _sleep(cycles):
    torch._C._cuda_sleep(cycles)


def _check_capability():
    incorrect_binary_warn = """
    Found GPU%d %s which requires CUDA_VERSION >= %d to
     work properly, but your PyTorch was compiled
     with CUDA_VERSION %d. Please install the correct PyTorch binary
     using instructions from https://pytorch.org
    """

    old_gpu_warn = """
    Found GPU%d %s which is of cuda capability %d.%d.
    PyTorch no longer supports this GPU because it is too old.
    The minimum cuda capability supported by this library is %d.%d.
    """

    if torch.version.cuda is not None:  # on ROCm we don't want this check
        CUDA_VERSION = torch._C._cuda_getCompiledVersion()
        for d in range(device_count()):
            capability = get_device_capability(d)
            major = capability[0]
            minor = capability[1]
            name = get_device_name(d)
            current_arch = major * 10 + minor
            min_arch = min((int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()), default=35)
            if current_arch < min_arch:
                warnings.warn(old_gpu_warn % (d, name, major, minor, min_arch // 10, min_arch % 10))
            elif CUDA_VERSION <= 9000 and major >= 7 and minor >= 5:
                warnings.warn(incorrect_binary_warn % (d, name, 10000, CUDA_VERSION))


def _check_cubins():
    incompatible_device_warn = """
{} with CUDA capability sm_{} is not compatible with the current PyTorch installation.
The current PyTorch install supports CUDA capabilities {}.
If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/
"""
    if torch.version.cuda is None:  # on ROCm we don't want this check
        return
    arch_list = get_arch_list()
    if len(arch_list) == 0:
        return
    supported_sm = [int(arch.split("_")[1]) for arch in arch_list if "sm_" in arch]
    for idx in range(device_count()):
        cap_major, cap_minor = get_device_capability(idx)
        # NVIDIA GPU compute architectures are backward compatible within a major version.
        supported = any(sm // 10 == cap_major for sm in supported_sm)
        if not supported:
            device_name = get_device_name(idx)
            capability = cap_major * 10 + cap_minor
            warnings.warn(incompatible_device_warn.format(
                device_name, capability, " ".join(arch_list), device_name))


def is_initialized():
    r"""Returns whether PyTorch's CUDA state has been initialized."""
    return _initialized and not _is_in_bad_fork()


def _lazy_call(callable, **kwargs):
    if is_initialized():
        callable()
    else:
        global _lazy_seed_tracker
        if kwargs.get("seed_all", False):
            _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
        elif kwargs.get("seed", False):
            _lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
        else:
            # Don't store the actual traceback to avoid a memory cycle.
            _queued_calls.append((callable, traceback.format_stack()))


_lazy_call(_check_capability)
_lazy_call(_check_cubins)


class DeferredCudaCallError(Exception):
    pass


def init():
    r"""Initialize PyTorch's CUDA state.  You may need to call
    this explicitly if you are interacting with PyTorch via
    its C API, as Python bindings for CUDA functionality will not
    be available until this initialization takes place.  Ordinary users
    should not need this, as all of PyTorch's CUDA methods
    automatically initialize CUDA state on-demand.

    Does nothing if the CUDA state is already initialized.
    """
    _lazy_init()


def _lazy_init():
    global _initialized, _queued_calls
    if is_initialized() or hasattr(_tls, "is_initializing"):
        return
    with _initialization_lock:
        # Double-checked locking: the outer test above was only GIL protected,
        # so re-check under the lock.
        if is_initialized():
            return
        if _is_in_bad_fork():
            raise RuntimeError(
                "Cannot re-initialize CUDA in forked subprocess. To use CUDA with "
                "multiprocessing, you must use the 'spawn' start method")
        if not hasattr(torch._C, "_cuda_getDeviceCount"):
            raise AssertionError("Torch not compiled with CUDA enabled")
        if _cudart is None:
            raise AssertionError(
                "libcudart functions unavailable. It looks like you have a broken build?")
        # This throws if there is a driver initialization error or no GPUs are found.
        torch._C._cuda_init()
        # Queued calls may reentrantly call _lazy_init(); mark that we are
        # initializing so those calls return immediately.
        _tls.is_initializing = True

        for calls in _lazy_seed_tracker.get_calls():
            if calls:
                _queued_calls.append(calls)

        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = (f"CUDA call failed lazily at initialization with error: {str(e)}\n\n"
                           f"CUDA call was originally invoked at:\n\n{orig_traceback}")
                    raise DeferredCudaCallError(msg) from e
        finally:
            delattr(_tls, "is_initializing")
        _initialized = True


def cudart():
    _lazy_init()
    return _cudart


class cudaStatus:
    SUCCESS: int = 0
    ERROR_NOT_READY: int = 34


class CudaError(RuntimeError):
    def __init__(self, code: int) -> None:
        msg = _cudart.cudaGetErrorString(_cudart.cudaError(code))
        super(CudaError, self).__init__("{0} ({1})".format(msg, code))


def check_error(res: int) -> None:
    if res != _cudart.cudaError.success:
        raise CudaError(res)


class device(object):
    r"""Context-manager that changes the selected device.

    Args:
        device (torch.device or int): device index to select. It's a no-op if
            this argument is a negative integer or ``None``.
    """

    def __init__(self, device: Any):
        self.idx = _get_device_index(device, optional=True)
        self.prev_idx = -1

    def __enter__(self):
        if self.idx == -1:
            return
        self.prev_idx = torch.cuda.current_device()
        if self.prev_idx != self.idx:
            torch.cuda.set_device(self.idx)
        if not torch.jit.is_scripting():
            _lazy_init()

    def __exit__(self, type: Any, value: Any, traceback: Any):
        if self.prev_idx != self.idx:
            torch.cuda.set_device(self.prev_idx)
        return False
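

# Illustrative usage sketch (not part of the original module): how the `device`
# context manager above is typically used to run work on a selected GPU and
# restore the previously selected device afterwards. The helper name below is
# hypothetical.
def _example_device_context() -> None:
    if not is_available() or device_count() < 1:
        return
    before = current_device()
    with device(0):
        # Tensors created inside the block land on device 0.
        t = torch.empty(4, device="cuda")
        assert t.device.index == 0
    # The previously selected device is restored on exit.
    assert current_device() == before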


class device_of(device):
    r"""Context-manager that changes the current device to that of given object.

    You can use both tensors and storages as arguments. If a given object is
    not allocated on a GPU, this is a no-op.

    Args:
        obj (Tensor or Storage): object allocated on the selected device.
    """

    def __init__(self, obj):
        idx = obj.get_device() if obj.is_cuda else -1
        super(device_of, self).__init__(idx)


def set_device(device: _device_t) -> None:
    r"""Sets the current device.

    Usage of this function is discouraged in favor of :any:`device`. In most
    cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable.

    Args:
        device (torch.device or int): selected device. This function is a no-op
            if this argument is negative.
    """
    device = _get_device_index(device)
    if device >= 0:
        torch._C._cuda_setDevice(device)


def get_device_name(device: Optional[_device_t] = None) -> str:
    r"""Gets the name of a device.

    Args:
        device (torch.device or int, optional): device for which to return the
            name. This function is a no-op if this argument is a negative
            integer. It uses the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    Returns:
        str: the name of the device
    """
    return get_device_properties(device).name


def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]:
    r"""Gets the cuda capability of a device.

    Args:
        device (torch.device or int, optional): device for which to return the
            device capability. This function is a no-op if this argument is
            a negative integer. It uses the current device, given by
            :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
            (default).

    Returns:
        tuple(int, int): the major and minor cuda capability of the device
    """
    prop = get_device_properties(device)
    return prop.major, prop.minor
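

# Illustrative usage sketch (not part of the original module): querying the
# name and compute capability of each visible device with the two functions
# defined above. The helper name is hypothetical.
def _example_enumerate_devices() -> List[str]:
    if not is_available():
        return []
    summaries = []
    for idx in range(device_count()):
        major, minor = get_device_capability(idx)
        summaries.append(f"cuda:{idx} {get_device_name(idx)} (sm_{major}{minor})")
    return summaries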


def get_device_properties(device: _device_t) -> _CudaDeviceProperties:
    r"""Gets the properties of a device.

    Args:
        device (torch.device or int or str): device for which to return the
            properties of the device.

    Returns:
        _CudaDeviceProperties: the properties of the device
    """
    _lazy_init()  # will define _get_device_properties
    device = _get_device_index(device, optional=True)
    if device < 0 or device >= device_count():
        raise AssertionError("Invalid device id")
    return _get_device_properties(device)  # type: ignore[name-defined]


def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool:
    r"""Checks if peer access between two devices is possible.
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    peer_device = _get_device_index(peer_device)
    if device < 0 or device >= device_count():
        raise AssertionError("Invalid device id")
    if peer_device < 0 or peer_device >= device_count():
        raise AssertionError("Invalid peer device id")
    return torch._C._cuda_canDeviceAccessPeer(device, peer_device)
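

# Illustrative usage sketch (not part of the original module): combining
# get_device_properties() and can_device_access_peer() to inspect device memory
# and peer-to-peer connectivity. The helper name is hypothetical.
def _example_device_topology() -> None:
    if not is_available() or device_count() < 2:
        return
    props = get_device_properties(0)
    print(f"device 0: {props.name}, {props.total_memory / 1024 ** 3:.1f} GiB")
    print("0 <-> 1 peer access:", can_device_access_peer(0, 1))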


class StreamContext(object):
    r"""Context-manager that selects a given stream.

    All CUDA kernels queued within its context will be enqueued on a selected
    stream.

    Args:
        Stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device.
    """
    cur_stream: Optional['torch.cuda.Stream']

    def __init__(self, stream: Optional['torch.cuda.Stream']):
        self.stream = stream
        self.idx = _get_device_index(None, True)
        if not torch.jit.is_scripting():
            if self.idx is None:
                self.idx = -1

        self.src_prev_stream = None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
        self.dst_prev_stream = None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)

    def __enter__(self):
        # Local cur_stream variable for type refinement
        cur_stream = self.stream
        # Return if stream is None or no CUDA device is available
        if cur_stream is None or self.idx == -1:
            return
        self.src_prev_stream = torch.cuda.current_stream(None)

        # If the stream is not on the current device, also remember the
        # current stream on the stream's device so it can be restored on exit.
        if self.src_prev_stream.device != cur_stream.device:
            with device(cur_stream.device):
                self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device)
        torch.cuda.set_stream(cur_stream)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        # Local cur_stream variable for type refinement
        cur_stream = self.stream
        # If stream is None or no CUDA device is available, return
        if cur_stream is None or self.idx == -1:
            return

        # Reset the stream on the destination device and then on the original device
        if self.src_prev_stream.device != cur_stream.device:  # type: ignore[union-attr]
            torch.cuda.set_stream(self.dst_prev_stream)  # type: ignore[arg-type]
        torch.cuda.set_stream(self.src_prev_stream)  # type: ignore[arg-type]


def stream(stream: Optional['torch.cuda.Stream']) -> StreamContext:
    r"""Wrapper around the Context-manager StreamContext that
    selects a given stream.

    Arguments:
        stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: In eager mode stream is of type Stream class while in JIT it is
        an object of the custom class ``torch.classes.cuda.Stream``.
    """
    return StreamContext(stream)
        context manager.

    Args:
        stream (Stream): selected stream. This function is a no-op
            if this argument is ``None``.
    N)r7   r8   Z_cuda_setStream_cdatar   r   r   r   r     s    	r   c                   C   s   t  rtj S dS dS )z%Returns the number of GPUs available.r   N)r9   r7   r8   r5   r   r   r   r   rR     s    
rR   c                  C   s(   t  s
g S tj } | dkr g S |  S )z>Returns list CUDA architectures this library was compiled for.N)r9   r7   r8   Z_cuda_getArchFlagsr@   )Z
arch_flagsr   r   r   rV     s    
rV   c                  C   s8   t  } t| dkrdS dd | D }ddd |D S )z:Returns NVCC gencode flags this library was compiled with.r    c                 S   s   g | ]}| d qS )rG   )r@   rI   r   r   r   r^     s     z%get_gencode_flags.<locals>.<listcomp>ra   c                 S   s&   g | ]\}}d | d| d| qS )z-gencode compute=compute_z,code=rG   r   )rJ   kindrK   r   r   r   r^     s     )rV   rb   re   )rf   Z
arch_list_r   r   r   get_gencode_flags  s
    r   c                   C   s   t   tj S )z1Returns the index of a currently selected device.)rw   r7   r8   Z_cuda_getDevicer   r   r   r   rB     s    rB   c              
   C   s4   t   tj|  tj W  5 Q R  S Q R X dS )a-  Waits for all kernels in all streams on a CUDA device to complete.

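

# Illustrative usage sketch (not part of the original module): inspecting which
# architectures this build was compiled for and which device is selected. The
# helper name is hypothetical.
def _example_build_info() -> None:
    print("compiled for:", get_arch_list())
    print("gencode flags:", get_gencode_flags())
    if is_available():
        print("current device index:", current_device())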


def synchronize(device: _device_t = None) -> None:
    r"""Waits for all kernels in all streams on a CUDA device to complete.

    Args:
        device (torch.device or int, optional): device for which to synchronize.
            It uses the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    _lazy_init()
    with torch.cuda.device(device):
        return torch._C._cuda_synchronize()
        closes shared memory file used for reference counting if there is no
        active counters. Useful when the producer process stopped actively sending
        tensors and want to release unused memory.
    )rw   r7   r8   Z_cuda_ipc_collectr   r   r   r   ipc_collect  s    	r   c                 C   s    t   ttjt| dddS )aT  Returns the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the currently selected :class:`Stream` for the current device, given
            by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
            (default).
    Tr   r   )rw   r   r7   r8   Z_cuda_getCurrentStreamr	   r   r   r   r   r      s    	
r   c                 C   s    t   ttjt| dddS )a>  Returns the default :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the default :class:`Stream` for the current device, given by
            :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
            (default).
    Tr   r   )rw   r   r7   r8   Z_cuda_getDefaultStreamr	   r   r   r   r   r     s    	
r   c                   C   s   t   tj S )z7Returns cublasHandle_t pointer to current cuBLAS handle)rw   r7   r8   Z_cuda_getCurrentBlasHandler   r   r   r   current_blas_handle  s    r   )
debug_moder,   c                 C   sR   t   t| trB| dkrd} n$| dkr,d} n| dkr:d} ntdtj|  dS )	a   Sets the debug mode for cuda synchronizing operations.

    Args:
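

# Illustrative usage sketch (not part of the original module): checking whether
# the stream currently selected on device 0 is still its default stream. The
# helper name is hypothetical.
def _example_is_on_default_stream() -> bool:
    if not is_available():
        return True
    return current_stream(0) == default_stream(0)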


def set_sync_debug_mode(debug_mode: Union[int, str]) -> None:
    r"""Sets the debug mode for cuda synchronizing operations.

    Args:
        debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations,
            if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations.

    Warning:
        This is an experimental feature, and not all synchronizing operations will trigger warning or error. In
        particular, operations in torch.distributed and torch.sparse namespaces are not covered yet.
    """

    _lazy_init()
    if isinstance(debug_mode, str):
        if debug_mode == "default":
            debug_mode = 0
        elif debug_mode == "warn":
            debug_mode = 1
        elif debug_mode == "error":
            debug_mode = 2
        else:
            raise RuntimeError("invalid value of debug_mode, expected one of `default`, `warn`, `error`")

    torch._C._cuda_set_sync_debug_mode(debug_mode)


def get_sync_debug_mode() -> int:
    r"""Returns current value of debug mode for cuda synchronizing operations."""

    _lazy_init()
    return torch._C._cuda_get_sync_debug_mode()


def memory_usage(device: Optional[Union[Device, int]] = None) -> int:
    r"""Returns the percent of time over the past sample period during which global (device)
    memory was being read or written, as given by `nvidia-smi`.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    Warning: Each sample period may be between 1 second and 1/6 second,
    depending on the product being queried.
    """
    try:
        import pynvml  # type: ignore[import]
    except ModuleNotFoundError:
        raise ModuleNotFoundError("pynvml module not found, please install pynvml")
    from pynvml import NVMLError_DriverNotLoaded
    try:
        pynvml.nvmlInit()
    except NVMLError_DriverNotLoaded:
        raise RuntimeError("cuda driver can't be loaded, is cuda enabled?")
    device = _get_device_index(device, optional=True)
    handle = pynvml.nvmlDeviceGetHandleByIndex(device)
    return pynvml.nvmlDeviceGetUtilizationRates(handle).memory


def utilization(device: Optional[Union[Device, int]] = None) -> int:
    r"""Returns the percent of time over the past sample period during which one or
    more kernels was executing on the GPU as given by `nvidia-smi`.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    Warning: Each sample period may be between 1 second and 1/6 second,
    depending on the product being queried.
    """
    try:
        import pynvml  # type: ignore[import]
    except ModuleNotFoundError:
        raise ModuleNotFoundError("pynvml module not found, please install pynvml")
    from pynvml import NVMLError_DriverNotLoaded
    try:
        pynvml.nvmlInit()
    except NVMLError_DriverNotLoaded:
        raise RuntimeError("cuda driver can't be loaded, is cuda enabled?")
    device = _get_device_index(device, optional=True)
    handle = pynvml.nvmlDeviceGetHandleByIndex(device)
    return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu


from .memory import *  # noqa: F403
from .random import *  # noqa: F403


################################################################################
# Define Storage classes
################################################################################


@staticmethod  # type: ignore[misc]
def _lazy_new(cls, *args, **kwargs):
    _lazy_init()
    # We may need to call lazy init again if we are a forked child.
    return super(_CudaBase, cls).__new__(cls, *args, **kwargs)


class _CudaBase(object):
    is_cuda = True
    is_sparse = False

    def type(self, *args, **kwargs):
        with device(self.get_device()):  # type: ignore[attr-defined]
            return super(_CudaBase, self).type(*args, **kwargs)  # type: ignore[misc]

    __new__ = _lazy_new


from torch.storage import _LegacyStorage


class _CudaLegacyStorage(_LegacyStorage):
    @classmethod
    def from_buffer(cls, *args, **kwargs):
        raise RuntimeError('from_buffer: Not available for CUDA storage')

    @classmethod
    def _new_with_weak_ptr(cls, *args, **kwargs):
        raise RuntimeError('_new_with_weak_ptr: Not available for CUDA storage')

    @classmethod
    def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None):
        raise RuntimeError('_new_shared_filename: Not available for CUDA storage')


class ByteStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.uint8


class DoubleStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.double


class FloatStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.float


class HalfStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.half


class LongStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.long


class IntStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.int


class ShortStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.short


class CharStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.int8


class BoolStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.bool


class BFloat16Storage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.bfloat16


class ComplexDoubleStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.cdouble


class ComplexFloatStorage(_CudaLegacyStorage):
    @classproperty
    def dtype(self):
        return torch.cfloat


del _LegacyStorage
del _CudaLegacyStorage

torch._storage_classes.add(DoubleStorage)
torch._storage_classes.add(FloatStorage)
torch._storage_classes.add(LongStorage)
torch._storage_classes.add(IntStorage)
torch._storage_classes.add(ShortStorage)
torch._storage_classes.add(CharStorage)
torch._storage_classes.add(ByteStorage)
torch._storage_classes.add(HalfStorage)
torch._storage_classes.add(BoolStorage)
torch._storage_classes.add(BFloat16Storage)
torch._storage_classes.add(ComplexDoubleStorage)
torch._storage_classes.add(ComplexFloatStorage)

from . import sparse
from . import profiler
from . import nvtx
from . import amp
from . import jiterator
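

# Illustrative usage sketch (not part of the original module): enabling warnings
# for implicitly synchronizing operations and sampling GPU load via
# utilization(), which depends on the optional pynvml package. The helper name
# is hypothetical.
def _example_debug_and_monitor() -> None:
    if not is_available():
        return
    set_sync_debug_mode("warn")  # warn on synchronizing operations
    try:
        print("GPU busy %:", utilization())  # requires pynvml to be installed
    except ModuleNotFoundError:
        pass
    finally:
        set_sync_debug_mode("default")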