"""This file exports ONNX ops for opset 14.

Note [ONNX operators that are added/updated in opset 14]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
    HardSwish, Trilu

Updated operators:
    Reshape
    Add, Sub, Mul, Div
    GRU, LSTM, RNN
    BatchNorm, Cumsum, Relu
"""

from __future__ import annotations

import functools
from typing import Optional

import torch
from torch.onnx import _constants, _type_utils, symbolic_helper
from torch.onnx._globals import GLOBALS
from torch.onnx._internal import _beartype, jit_utils, registration

__all__ = [
    "hardswish",
    "tril",
    "triu",
    "reshape",
    "batch_norm",
    "quantized_hardswish",
    "scaled_dot_product_attention",
]

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=14)


@_onnx_symbolic("aten::hardswish")
@symbolic_helper.parse_args("v")
@_beartype.beartype
def hardswish(g: jit_utils.GraphContext, self):
    return g.op("HardSwish", self)
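

# A hedged usage sketch (not part of the original file): exporting a model
# that hits aten::hardswish with opset_version=14 dispatches to the symbolic
# above and emits the HardSwish operator that is new in this opset.
#
#   import torch
#
#   model = torch.nn.Hardswish()
#   torch.onnx.export(
#       model, (torch.randn(2, 3),), "hardswish.onnx", opset_version=14
#   )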


@_onnx_symbolic("aten::tril")
@_beartype.beartype
def tril(g: jit_utils.GraphContext, self, diagonal, out=None):
    return g.op("Trilu", self, diagonal, upper_i=0)
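

# Illustrative sketch (an assumption, not in the original file): tril above
# and triu below both lower to the same opset-14 Trilu node; only the
# upper_i attribute differs (0 keeps the lower triangle, 1 the upper).
#
#   import torch
#
#   class Tril(torch.nn.Module):
#       def forward(self, x):
#           return torch.tril(x, diagonal=0)
#
#   torch.onnx.export(
#       Tril(), (torch.randn(4, 4),), "tril.onnx", opset_version=14
#   )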


@_onnx_symbolic("aten::triu")
@_beartype.beartype
def triu(g: jit_utils.GraphContext, self, diagonal, out=None):
    return g.op("Trilu", self, diagonal, upper_i=1)


@_onnx_symbolic("aten::reshape")
@symbolic_helper.quantized_args(True)
@symbolic_helper.parse_args("v", "v")
@_beartype.beartype
def reshape(g: jit_utils.GraphContext, self, shape):
    # allowzero is kept at 0 (the opset 14 attribute is not enabled here).
    return symbolic_helper._reshape_helper(g, self, shape, allowzero=0)


@_onnx_symbolic("aten::batch_norm")
@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i")
@_beartype.beartype
def batch_norm(
    g: jit_utils.GraphContext,
    input,
    weight,
    bias,
    running_mean,
    running_var,
    training,
    momentum,
    eps,
    cudnn_enabled,
):
    if (
        torch.is_autocast_enabled()
        and not symbolic_helper.args_have_same_dtype(
            [input, weight, bias, running_mean, running_var]
        )
        and GLOBALS.export_onnx_opset_version < 15
    ):
        return symbolic_helper._onnx_opset_unsupported_detailed(
            "BatchNormalization",
            14,
            15,
            "All input tensors must have the same `dtype`."
            " Turn off Autocast or export using opset version 15.",
            input,
        )

    symbolic_helper.check_training_mode(training, "batch_norm")
    weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper(
        g, input, weight, bias, running_mean, running_var
    )
    out = g.op(
        "BatchNormalization",
        input,
        weight,
        bias,
        running_mean,
        running_var,
        epsilon_f=eps,
        momentum_f=momentum,
        training_mode_i=0 if not training else 1,
        outputs=1 if not training else 3,
    )
    if not training:
        return out
    else:
        res, new_running_mean, new_running_var = out
        new_running_mean.setType(running_mean.type())
        new_running_var.setType(running_var.type())
        return res


@_onnx_symbolic("quantized::hardswish")
@_beartype.beartype
def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point):
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
    output = hardswish(g, x)
    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
z"aten::scaled_dot_product_attentionb        Fztorch._C.ValuezOptional[torch._C.Value]floatbool)r   querykeyvalue	attn_mask	dropout_p	is_causalscalec              
   C  s  |r|rt |stdt |d}t |r:t| |}|rJt| ||}t |}tt|}	|	d |	d  |	d< |	d< | j	d||	d}
| 	d|| 	d|}| 	d|
| 	d|}| 	d	||}t |r|}nt
j|t
jjkr<| j	d
tdgd}| j	d
ttd gd}| 	d|||}| 	d||}n<t
j|t
jjkrb| 	d||}ntdt
j| | j	d|dd}|dkr| 	d|| j	d
tj|tjdd}| 	d	||S )Nz6is_causal and attn_mask cannot be set at the same timer!   Z	Transpose)Zperm_iZMulSqrtZMatMulConstantr.   Zvalue_tinfWhereAddz Unsupported type for attn_mask: ZSoftmaxZaxis_ir   ZDropoutZdtype)r   Z_is_noneAssertionErrorZ_maybe_get_const_attention_scale_causal_attention_maskZ_get_tensor_ranklistranger   r   JitScalarType
from_valueZBOOLr%   tensorr/   FLOAT
ValueError)r   r1   r2   r3   r4   r5   r6   r7   Zkey_shape_builtinZkey_transposed_axesZkey_transposedZquery_scaledZkey_transposed_scaledZmul_qkZ
mul_qk_add
const_zeroconst_neg_infZattn_weightr   r   r   r      s^    






)r   r1   returnc                 C  s   |  d|}|  d|| j dtjdgtjdd| j dtjtjgtjdd}| j d|tj|	 d}| j dtjd	gtj
dd}|  d
||  d|}|S )zCalculate the scale factor for the attention result.

    Args:
        query: Tensor of shape [..., L, E]

    Returns:
        Scalar scale factor := 1 / math.sqrt(query.size(-1))
    """
    query_shape = g.op("Shape", query)
    query_shape_last = g.op(
        "Slice",
        query_shape,
        g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)),
        g.op(
            "Constant",
            value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64),
        ),
    )
    embedding_size = g.op(
        "Cast",
        query_shape_last,
        to_i=_type_utils.JitScalarType.from_value(query).onnx_type(),
    )
    const_one = g.op("Constant", value_t=torch.tensor([1.0], dtype=torch.float))
    scale = g.op("Div", const_one, g.op("Sqrt", embedding_size))
    return scale


@_beartype.beartype
def _causal_attention_mask(
    g: jit_utils.GraphContext, query: torch._C.Value, key: torch._C.Value
) -> torch._C.Value:
    """Create a causal mask for the given query and key tensors.

    Equivalent to::
        mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
        attn_mask = torch.zeros(L, S, dtype=torch.float)
        attn_mask = attn_mask.masked_fill(not mask, -float('inf'))

    Args:
        query: Tensor of shape [..., L, E]
        key: Tensor of shape [..., S, E]

    Returns:
        Tensor of shape [L, S]
    """
    query_shape = g.op("Shape", query)
    key_shape = g.op("Shape", key)

    last_idx = g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))
    second_last_idx = g.op("Constant", value_t=torch.tensor([-2], dtype=torch.int64))
    target_length = g.op("Slice", query_shape, second_last_idx, last_idx)
    source_length = g.op("Slice", key_shape, second_last_idx, last_idx)
    # attn_mask = torch.ones(L, S)
    size = g.op("Concat", target_length, source_length, axis_i=0)
    const_one = g.op("Constant", value_t=torch.tensor([1.0]))
    attn_mask = g.op("Expand", const_one, size)
    attn_mask = g.op("Trilu", attn_mask, upper_i=0)
    # Replace the zeros (upper triangle) with -inf and the ones (lower
    # triangle) with 0 so the mask can be added to the attention scores.
    const_zero = g.op("Constant", value_t=torch.tensor([0.0]))
    const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")]))
    attn_mask = g.op(
        "Where", g.op("Equal", attn_mask, const_zero), const_neg_inf, const_zero
    )
    return attn_mask