U
    d                     @   s   d dl Z d dlmZ d dlZd dlmZmZmZmZm	Z	 d dl
mZmZ d dlmZ eG dd deZe jddd	ed d
ddZdd ZG dd de	ZG dd deZG dd dedZG dd deZe jee d
ddZdS )    N)Iterator)_enable_mode
_push_mode	_ModeInfo
_wrap_initMetaInitErrorInfo)_get_torch_dispatch_mode_set_torch_dispatch_mode)	dataclassc                       s,   e Zd Z fddZdd Zdd Z  ZS )TorchDispatchModeInfoc                    s   t  jdttd d S )Ntorch_dispatch)	mode_nameZ
mode_classZbase_mode_class)super__init__TorchDispatchModeBaseTorchDispatchModeself	__class__ @/tmp/pip-unpacked-wheel-ua33x9lu/torch/utils/_python_dispatch.pyr      s    
zTorchDispatchModeInfo.__init__c                 C   s   t  S N)r   r   r   r   r   get_mode   s    zTorchDispatchModeInfo.get_modec                 C   s   t |S r   )r	   )r   moder   r   r   set_mode   s    zTorchDispatchModeInfo.set_mode)__name__
__module____qualname__r   r   r   __classcell__r   r   r   r   r   
   s   r   F)replaceignore_preexisting)returnc                C   s   t | t ||dS )ah  
    Context manager that causes all pytorch operators to dispatch to the passed-in
    type's __torch_dispatch__ function, including operations that accept no tensors
    but return a tensor.

    This function is non-compositional; if there is already an existing mode,
    it will raise an error

    This function is safe to use inside a ``__torch_dispatch__`` mode handler,
    as the mode is guaranteed to be disabled in this context.  You can use
    this context manager to reinstate the mode so that calls to overridable
    APIs recursively call back into your mode handler (this can easily cause
    infinite loops, so use with care!)

    enable_torch_dispatch_mode is affected by _DisableTorchDispatch.

    Args:
        mode (:class:`TorchDispatchMode`, Tensor-like class, or None): the
            mode to set as current mode.  If you pass a Tensor-like class,
            it will be treated as a non-compositional mode with no state,
            which is convenient if you have an existing tensor subclass
            that you'd like to apply globally in a quick and dirty way.
            Passing None will disable the current mode.
        replace (:class:`TorchDispatchMode` or Tensor-like class): the
            mode to replace.  You can use this argument to change the mode in
            a situation where you know what the current mode is (and you are
            intentionally overwriting it.)  If you don't know what the current
            mode is, use ``ignore_preexisting`` instead.
        ignore_preexisting (bool): if True, ignore any preexisting mode
            and overwrite it with the passed mode.
    )	mode_infor    r!   )r   r   )r   r    r!   r   r   r   enable_torch_dispatch_mode   s    "r$   c                    s   t   fdd}|S )Nc              
      s2   t | j  | f||W  5 Q R  S Q R X d S r   )r$   inner)r   argskwargsfr   r   wrappedB   s    z%_wrap_torch_dispatch.<locals>.wrapped)	functoolswraps)r)   r*   r   r(   r   _wrap_torch_dispatchA   s    r-   c                       s   e Zd Z fddZ  ZS )TorchDispatchMetaInitErrorInfoc                    s   t  jddd d S )Nr   r   )Zmode_class_namer   )r   r   r   r   r   r   r   W   s    z'TorchDispatchMetaInitErrorInfo.__init__)r   r   r   r   r   r   r   r   r   r.   V   s   r.   c                       s    e Zd ZdZ fddZ  ZS )TorchDispatchModeMetaau  
    Metaclass for :class:`TorchDispatchMode`; it does two things:

        * Adds an implicit ``inner`` kwarg to ``__init__``, to
          allow the modes to be chained together to form a stack.

        * Reenables the inner mode, so that by default PyTorch API calls
          will compositionally proceed to the next mode on the stack.

    The default behavior for the second bullet is important, as it is easy to
    accidentally write ``_wrap_torch_dispatch`` implementations that are not
    compositional, and the wrapping here makes the obvious code do the
    right thing (aka, this is why there is a metaclass).
    c                    sF   d|krt |d t |d< d|kr4t|d |d< t | |||S )Nr   __torch_dispatch__)r   r.   r-   r   __new__)metaclsnamebasesdctr   r   r   r1   i   s
    zTorchDispatchModeMeta.__new__)r   r   r   __doc__r1   r   r   r   r   r   r/   Z   s   r/   c                   @   s.   e Zd ZdZdd Zd
ddZedd	 ZdS )r   a  
    A ``TorchDispatchMode`` allows you to override the meaning of all
    ``__torch_dispatch__`` overrideable functions within a dynamic scope,
    without having to actually create a tensor subclass or manually
    monkey-patch functions in the PyTorch API.  Some common situations
    where you should use a mode:

        * You want to override the meaning of factory functions, or other
          functions that do not otherwise take a tensor as an argument
          (these cannot be overridden with tensor subclasses).

        * You want to override the behavior of all functions without needing
          to wrap your inputs in tensor subclasses; e.g., if you are just
          interested in logging intermediate computations.

        * You want to control the order of execution of various tensor
          subclasses explicitly, rather than implicitly via the return of
          ``NotImplemented``.

    Independent subclasses of :class:`TorchDispatchMode` are compositional:
    modes can be pushed onto a stack with :func:`push_torch_dispatch_mode`.
    When you call functions in the PyTorch API inside your
    ``__torch_dispatch__`` implementation, by default, they will forward on to
    the next mode on the mode stack.  If you want recursively call back into
    your current ``__torch_dispatch__`` implementation, either explicitly
    invoke ``self.__torch_dispatch__(...)``, or use the context manager
    ``__torch_dispatch__(self, replace=self.inner)`` to make PyTorch
    API self-referential (beware of infinite loops, in this case!)
    c                 C   s   d S r   r   r   r   r   r   r      s    zTorchDispatchMode.__init__r   Nc                 C   s
   t  d S r   )NotImplementedErrorr   functypesr&   r'   r   r   r   r0      s    z$TorchDispatchMode.__torch_dispatch__c                 O   s   t tj| f||S r   )push_torch_dispatch_moder+   partial)clsr&   r'   r   r   r   push   s    zTorchDispatchMode.push)r   N)r   r   r   r6   r   r0   classmethodr>   r   r   r   r   r   q   s
   
r   )	metaclassc                   @   s   e Zd ZdddZdS )r   r   Nc                 C   s   |d kri }|||S r   r   r8   r   r   r   r0      s    z(BaseTorchDispatchMode.__torch_dispatch__)r   N)r   r   r   r0   r   r   r   r   r      s   r   c                 C   s   t | t dS )N)r#   )r   r   )Zctorr   r   r   r;      s    r;   )
contextlibtypingr   r+   Ztorch.utils._mode_utilsr   r   r   r   r   Ztorch._Cr   r	   Zdataclassesr
   r   contextmanagerr$   r-   r.   typer/   r   r   objectr;   r   r   r   r   <module>   s    $*