import contextlib
from typing import Optional
import warnings

import torch
from torch._C import (
    _len_torch_dispatch_stack,
    _get_dispatch_stack_at,
    _pop_torch_dispatch_stack,
    _push_on_torch_dispatch_stack,
    DispatchKey,
)


class TorchDispatchMode:
    """
    A ``TorchDispatchMode`` allows you to override the meaning of all
    ``__torch_dispatch__`` overrideable functions within a dynamic scope,
    without having to actually create a tensor subclass or manually
    monkey-patch functions in the PyTorch API.  Some common situations
    where you should use a mode:

        * You want to override the meaning of factory functions, or other
          functions that do not otherwise take a tensor as an argument
          (these cannot be overridden with tensor subclasses).

        * You want to override the behavior of all functions without needing
          to wrap your inputs in tensor subclasses; e.g., if you are just
          interested in logging intermediate computations.

        * You want to control the order of execution of various tensor
          subclasses explicitly, rather than implicitly via the return of
          ``NotImplemented``.

    Independent subclasses of :class:`TorchDispatchMode` are compositional:
    modes can be pushed onto a stack using ``with MyMode():``.
    When you call functions in the PyTorch API inside your
    ``__torch_dispatch__`` implementation, by default, they will forward on to
    the next mode on the mode stack.  If you want to recursively call back
    into your current ``__torch_dispatch__`` implementation, either explicitly
    invoke ``self.__torch_dispatch__(...)``, or use the context manager
    ``__torch_dispatch__(self)`` to make the PyTorch API self-referential
    (beware of infinite loops, in this case!).
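
    As a quick illustration, a mode that logs every op it sees might look
    like this (``LoggingMode`` is an illustrative name, not part of this
    module)::

        class LoggingMode(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                kwargs = kwargs or {}
                print(f"dispatching {func}")  # func is an OpOverload, e.g. aten.add.Tensor
                return func(*args, **kwargs)

        with LoggingMode():
            torch.ones(2) + 1  # logs aten.ones.default, then aten.add.Tensor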
    """
    def __init__(self, _dispatch_key=None):
        if _dispatch_key is not None:
            assert isinstance(_dispatch_key, torch._C.DispatchKey)
            self.__dict__['_dispatch_key'] = _dispatch_key

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        raise NotImplementedError()

    def __enter__(self):
        _push_mode(self, self.__dict__.get("_dispatch_key", None))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        _pop_mode(self.__dict__.get("_dispatch_key", None))

    @classmethod
    def push(cls, *args, **kwargs):
        warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`")
        instance = cls(*args, **kwargs)
        return instance


def _get_current_dispatch_mode():
    stack_len = _len_torch_dispatch_stack()
    # Return the mode at the top of the stack, or None if no mode is active.
    return _get_dispatch_stack_at(stack_len - 1) if stack_len > 0 else None


def _get_current_dispatch_mode_stack():
    stack_len = _len_torch_dispatch_stack()
    return [_get_dispatch_stack_at(i) for i in range(stack_len)]


def _push_mode(mode, k: Optional[DispatchKey] = None):
    if k is not None:
        from torch._ops import push_mode_for_key, get_cached_ops
        # Dispatch may already have been cached for ops used before this mode
        # was pushed, so drop the cached entries for every backend key that
        # the functionality key ``k`` maps to.
        ks = torch._C._functionality_to_backend_keys(k)
        for op in get_cached_ops():
            for key in ks:
                op._uncache_dispatch(key)
        push_mode_for_key(k, mode)
    else:
        _push_on_torch_dispatch_stack(mode)


def _pop_mode(k: Optional[DispatchKey] = None):
    if k is not None:
        from torch._ops import pop_mode_for_key
        return pop_mode_for_key(k)
    else:
        return _pop_torch_dispatch_stack()


@contextlib.contextmanager
def _pop_mode_temporarily(k: Optional[DispatchKey] = None):
    old = _pop_mode(k)
    try:
        yield old
    finally:
        _push_mode(old, k)


@contextlib.contextmanager
def _disable_current_modes():
    # Pop every active mode, hand them to the caller, and restore them
    # (in their original order) when the context exits.
    mode_len = _len_torch_dispatch_stack()
    old_modes = [_pop_mode() for _ in range(mode_len)]
    try:
        yield old_modes
    finally:
        for mode in reversed(old_modes):
            _push_mode(mode)
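
# A minimal usage sketch (illustrative only, not an API commitment): code that
# must run without any dispatch-mode interference can do
#
#     with _disable_current_modes():
#         ...  # the mode stack is empty here
#
# and every previously-active mode is restored, in order, on exit.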


class BaseTorchDispatchMode(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        return func(*args, **kwargs)


def is_traceable_wrapper_subclass(t):
    # A wrapper subclass is "traceable" if it is a proper subclass of
    # torch.Tensor that implements the __tensor_flatten__ /
    # __tensor_unflatten__ protocol.
    is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor
    return is_subclass and hasattr(t, "__tensor_flatten__") and hasattr(t, "__tensor_unflatten__")


def transform_subclass(t, callback):
    assert is_traceable_wrapper_subclass(t), f"Expects traceable wrapper subclass but got {type(t)}"
    from torch.utils._pytree import tree_map_only
    # Flatten the wrapper into its inner tensors, apply ``callback`` to each
    # inner tensor, then rebuild a fresh instance of the same subclass.
    flattened_tensors, ctx = t.__tensor_flatten__()
    transformed_tensors = tree_map_only(torch.Tensor, callback, flattened_tensors)
    return type(t).__tensor_unflatten__(transformed_tensors, ctx)
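
# A minimal usage sketch for ``transform_subclass`` (``wrapper`` stands for an
# instance of some hypothetical traceable wrapper subclass, not anything this
# module defines):
#
#     detached = transform_subclass(wrapper, lambda inner: inner.detach())
#
# Each inner tensor returned by ``wrapper.__tensor_flatten__()`` is passed
# through the callback, and ``__tensor_unflatten__`` rebuilds a new instance
# of the same subclass around the transformed tensors.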