"""Utilities for eliminating boilerplate code to handle abstract streams with
CPU device.
"""
from contextlib import contextmanager
from typing import Generator, List, Union, cast

import torch

__all__: List[str] = [
    "CPUStreamType",
    "new_stream",
    "current_stream",
    "default_stream",
    "use_device",
    "use_stream",
    "get_device",
    "wait_stream",
    "record_stream",
    "is_cuda",
    "as_cuda",
]


class CPUStreamType:
    pass


# Placeholder used in place of a CUDA stream when the device is the CPU.
CPUStream = CPUStreamType()

# Represents both CUDA streams and the CPU stream.
AbstractStream = Union[torch.cuda.Stream, CPUStreamType]


def new_stream(device: torch.device) -> AbstractStream:
    """Creates a new stream for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.Stream(device)


def current_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.current_stream` for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.current_stream(device)


def default_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.default_stream` for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.default_stream(device)


@contextmanager
def use_device(device: torch.device) -> Generator[None, None, None]:
    """:func:`torch.cuda.device` for either CPU or CUDA device."""
    if device.type != "cuda":
        yield
        return

    with torch.cuda.device(device):
        yield


@contextmanager
def use_stream(stream: AbstractStream) -> Generator[None, None, None]:
    """:func:`torch.cuda.stream` for either CPU or CUDA stream."""
    if not is_cuda(stream):
        yield
        return

    with torch.cuda.stream(as_cuda(stream)):
        yield


def get_device(stream: AbstractStream) -> torch.device:
    """Gets the device from CPU or CUDA stream."""
    if is_cuda(stream):
        return as_cuda(stream).device
    return torch.device("cpu")


def wait_stream(source: AbstractStream, target: AbstractStream) -> None:
    """:meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It
    makes the source stream wait until the target stream completes work queued.
    """
    if is_cuda(target):
        if is_cuda(source):
            # A CUDA source stream waits for the CUDA target stream.
            as_cuda(source).wait_stream(as_cuda(target))
        else:
            # The CPU "stream" waits by synchronizing on the CUDA target.
            as_cuda(target).synchronize()
    # If the target is the CPU stream, no synchronization is required.


def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
    """:meth:`torch.Tensor.record_stream` for either CPU or CUDA stream."""
    if is_cuda(stream):
        # Record against a zero-sized view of the same storage, so the whole
        # storage (not just this view) is protected from early reallocation.
        tensor = tensor.new_empty([0]).set_(tensor._typed_storage())
        tensor.record_stream(as_cuda(stream))


def is_cuda(stream: AbstractStream) -> bool:
    """Returns ``True`` if the given stream is a valid CUDA stream."""
    return stream is not CPUStream


def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
    """Casts the given stream as :class:`torch.cuda.Stream`."""
    return cast(torch.cuda.Stream, stream)
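
# A minimal usage sketch of the helpers above. This is illustrative only:
# the names `copy_stream`/`main_stream` and the toy workload are assumptions,
# not part of this module's API. It shows how the same code path works whether
# the device is CPU (where every helper degrades to a no-op on CPUStream) or CUDA.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    copy_stream = new_stream(device)       # torch.cuda.Stream on CUDA, CPUStream on CPU
    main_stream = current_stream(device)   # the stream this code is already running on

    # `x` is allocated on `main_stream`; the work below is queued on `copy_stream`.
    x = torch.ones(4, device=device)

    with use_device(device), use_stream(copy_stream):
        y = x * 2

    # `x` was allocated on `main_stream` but consumed on `copy_stream`, so record
    # it there to keep its memory from being reused too early (no-op on CPU).
    record_stream(x, copy_stream)

    # Make the main stream wait until the work queued on `copy_stream` finishes
    # before `y` is used here (also a no-op when the target is the CPU stream).
    wait_stream(main_stream, copy_stream)

    print(get_device(copy_stream), is_cuda(copy_stream), y)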