"""Utilities for eliminating boilerplate code to handle abstract streams with
CPU device.
"""
from contextlib import contextmanager
from typing import Generator, List, Union, cast

import torch

__all__: List[str] = []


class CPUStreamType:
    pass


# Placeholder used in place of a CUDA stream when the device is the CPU.
CPUStream = CPUStreamType()

# An abstract stream is either a real CUDA stream or the CPU placeholder.
AbstractStream = Union[torch.cuda.Stream, CPUStreamType]


def new_stream(device: torch.device) -> AbstractStream:
    """Creates a new stream for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.Stream(device)


def current_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.current_stream` for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.current_stream(device)


def default_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.default_stream` for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.default_stream(device)


@contextmanager
def use_device(device: torch.device) -> Generator[None, None, None]:
    """:func:`torch.cuda.device` for either CPU or CUDA device."""
    if device.type != "cuda":
        yield
        return

    with torch.cuda.device(device):
        yield


@contextmanager
def use_stream(stream: AbstractStream) -> Generator[None, None, None]:
    """:func:`torch.cuda.stream` for either CPU or CUDA stream."""
    if not is_cuda(stream):
        yield
        return

    with torch.cuda.stream(as_cuda(stream)):
        yield
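
# Illustrative note, not from the original source: the two context managers
# above are typically combined so that work is both launched on the right
# device and queued on the right stream. Sketch with a hypothetical ``device``
# and ``batch``::
#
#     stream = new_stream(device)
#     with use_device(device), use_stream(stream):
#         out = batch.to(device, non_blocking=True)
#
# On a CPU device both context managers simply yield without any effect.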


def get_device(stream: AbstractStream) -> torch.device:
    """Gets the device from CPU or CUDA stream."""
    if is_cuda(stream):
        return as_cuda(stream).device
    return torch.device("cpu")


def wait_stream(source: AbstractStream, target: AbstractStream) -> None:
    """:meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It
    makes the source stream wait until the target stream completes work queued.
    """
    if is_cuda(target):
        if is_cuda(source):
            # A CUDA stream waits for another CUDA stream.
            as_cuda(source).wait_stream(as_cuda(target))
        else:
            # The CPU waits for a CUDA stream by synchronizing it.
            as_cuda(target).synchronize()

    # If the target is the CPU stream, no synchronization is required.


def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
    """:meth:`torch.Tensor.record_stream` for either CPU or CUDA stream."""
    if is_cuda(stream):
        # Record on a temporary tensor that shares the same storage, so the
        # whole storage (not just a shifted view) is protected from being
        # reallocated while the stream may still use it.
        tensor = tensor.new_empty([0]).set_(tensor.storage())
        tensor.record_stream(as_cuda(stream))


def is_cuda(stream: AbstractStream) -> bool:
    """Returns ``True`` if the given stream is a valid CUDA stream."""
    return stream is not CPUStream


def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
    """Casts the given stream as :class:`torch.cuda.Stream`."""
    return cast(torch.cuda.Stream, stream)
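

if __name__ == "__main__":
    # Illustrative sketch only, added for exposition and not part of the
    # original module: exercise the helpers above on whichever device is
    # available. On a CPU-only machine every helper degrades to the
    # ``CPUStream`` placeholder and the synchronization calls are no-ops.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    copy_stream = new_stream(device)
    x = torch.ones(4, device=device)

    with use_device(device), use_stream(copy_stream):
        y = x * 2
        # ``x`` was allocated on the current (default) stream but is read on
        # ``copy_stream``; recording it keeps its memory from being reused
        # too early (meaningful only for CUDA streams).
        record_stream(x, copy_stream)

    # Make the current stream wait for the work queued on ``copy_stream``.
    wait_stream(current_stream(device), copy_stream)
    print(get_device(copy_stream), y)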

