import torch
from torch.library import Library
from torch._ops import OpOverload
from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseTy, BaseType
from torch._C import _ExcludeDispatchKeyGuard, DispatchKeySet, DispatchKey
from .autograd import autograd_not_implemented
import torch.utils._pytree as pytree
import weakref


def register_functional_op(
    lib: Library,
    new_op_name: str,
    mutable_op: OpOverload,
) -> None:
    """Given a mutable operator, registers the functional variant.

    This API also correctly links the functional variant with the mutable
    operator for the purposes of functionalization.

    All of the new registrations are performed on the ``lib`` passed in.

    Arguments:
        lib (Library): Should be a torch.library.Library object that has
            the same namespace as ``mutable_op``'s namespace.
            lib will be used to register the new functional op as well
            as a functionalization kernel for the ``mutable_op``.
            If you don't have a library handy, use
            ``torch.library.Library(ns, 'FRAGMENT')`` to construct one.
        new_op_name (str): The name of the functional operator (without the
            namespace). The new functional variant will be
            accessible under ``torch.ops.{lib.ns}.new_op_name``.
        mutable_op (OpOverload): The mutable custom operator. Note
            that you may need to add a `.default` to it, like
            `torch.ops.aten.abs_.default`.
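
    Example (a minimal sketch; the ``mylib`` namespace, the ``scale_inplace``
    op, and its kernel below are hypothetical and only illustrate the call
    pattern)::

        lib = torch.library.Library('mylib', 'FRAGMENT')
        # A mutable op: it writes its result into the first argument.
        lib.define('scale_inplace(Tensor(a!) x, float factor) -> ()')

        def scale_inplace_impl(x, factor):
            x.mul_(factor)

        lib.impl('scale_inplace', scale_inplace_impl, 'CompositeExplicitAutograd')

        register_functional_op(
            lib, 'scale', torch.ops.mylib.scale_inplace.default)

        # torch.ops.mylib.scale(x, 2.0) leaves x untouched and returns the
        # scaled tensor; under functionalization, calls to scale_inplace are
        # rewritten in terms of it.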

    ZCompositeExplicitAutogradZAutogradFunctionalizeN)validatefunctional_schemaZdefineconstruct_functional_implimplgetattrtorchZopsnsdefaultr   "construct_functionalization_kernelweakrefproxy)r   r   r   schemafunctional_implfunctional_opZf_kernel r!   Z/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/torch/_custom_op/functional.pyregister_functional_op   s    

r#   c                    s    fdd}|S )Nc                     s   g }g }t t | D ]4\}}|r@| }|| || q|| q | }|d krdt|S t|trv||S |f|S N)zipmutable_argscloneappendtuple
isinstance)argsnew_argsZ
extra_retsis_writeargZclonedresultr   r!   r"   r   <   s    

z2construct_functional_impl.<locals>.functional_implr!   )r   r   r!   r0   r"   r   ;   s    r   c                    s    fdd}|S )Nc            
   
      s  t tjdd | r>tttj |  W  5 Q R  S Q R X t tjtj| sXt	dg }| D ]D}t
|tjrt|rt| t|}|| q`|| q`tttj  | }W 5 Q R X tjj}t tj|d | }||d  }dd tt| D }t|t|ks$tt||D ]<\}	}tj||	 tj||	 tj| t| q.t|dkr|d S t|dkrd S |S )Nc                 S   s   t |  S r$   )r   _is_functional_tensor)xr!   r!   r"   <lambda>W       zDconstruct_functionalization_kernel.<locals>.kernel.<locals>.<lambda>z={mutable_op}: expected all args to be FunctionalTensorWrapperc                 S   s   g | ]\}}|r|qS r!   r!   ).0r-   r.   r!   r!   r"   
<listcomp>t   s    zFconstruct_functionalization_kernel.<locals>.kernel.<locals>.<listcomp>r   r   )pytreeZtree_all_onlyr   Tensorr	   r
   r   r   r1   RuntimeErrorr*   Z_syncZ_from_functional_tensorr(   len_schemareturnsZtree_mapZ_to_functional_tensorr%   r&   AssertionErrorZ_CZ_propagate_xla_dataZ	_replace_Z_commit_update)
r+   Zunwrapped_argsr.   Z	unwrappedoutputZnum_actual_outputZactual_outputZnew_values_to_propagateZinputs_to_replace	new_valuer    r   r!   r"   kernelT   s@    

 
z2construct_functionalization_kernel.<locals>.kernelr!   )r   r    rA   r!   r@   r"   r   S   s    /r   r0   c                 C   s   t | tstdt|  tt| j}| t	j
ksBtd|jD ]}|jd k	rHtdqH|jjD ]&}|j rh|jttjkrhtdqhd S )Nz]register_functional_op(mutable_op): expected mutable_op to be instance of OpOverload but got zDExpected op to be mutable (as opposed to functional, inplace or out)zNYI: register_functional_op(op) where op returns a mutated or aliased value. Please file an issue (and as a workaround, modify your operator to not return the mutated value or aliases)zbNYI: register_functional_op(op) where op accepts Optional or List of tensors.Please file an issue.)r*   r   	TypeErrortyper   parsestrr;   kindr   Zmutabler9   r<   
annotationNotImplementedError	argumentsZflat_allZis_tensor_liker   r   r8   )r   r   retr.   r!   r!   r"   r      s"    


r   opc                 C   s,   t t|j}| t| }t|S r$   )r   rD   rE   r;   	signature	with_namer   )r   rL   r   r!   r!   r"   r      s    r   c                 C   s   t dd | jjD S )Nc                 s   s$   | ]}|j d krdn|j jV  qd S )NF)Z
alias_infor-   )r5   r.   r!   r!   r"   	<genexpr>   s   zmutable_args.<locals>.<genexpr>)r)   r;   rI   rK   r!   r!   r"   r&      s    r&   )r   Ztorch.libraryr   Z
torch._opsr   Ztorchgen.modelr   r   r   r   r   Ztorch._Cr	   r
   r   Zautogradr   Ztorch.utils._pytreeutilsZ_pytreer7   r   rE   r#   r   r   r   r   r&   r!   r!   r!   r"   <module>   s"   03