import operator
import types

import torch
from torch.fx import GraphModule, Node
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
import torch.nn.functional as F
from torch.nn.utils.fusion import fuse_conv_bn_weights
from typing import Any, Callable, Dict, Optional, Tuple, List, Union
from torch.utils._pytree import LeafSpec

__all__ = [
    "fold_bn_weights_into_conv_node",
    "get_aten_graph_module",
    "move_exported_model_to_eval",
    "remove_tensor_overload_for_qdq_ops",
]


def _get_tensor_constant_from_node(node, m):
    if node is None:
        return None
    assert node.op == "get_attr"
    return getattr(m, node.target)


def _get_all_arguments(orig_args, orig_kwargs, args_schema):
    all_args = []
    for i, schema in enumerate(args_schema):
        if schema.name in orig_kwargs:
            all_args.append(orig_kwargs[schema.name])
        elif not schema.kwarg_only and i < len(orig_args):
            all_args.append(orig_args[i])
        else:
            all_args.append(schema.default_value)
    return all_args


def fold_bn_weights_into_conv_node(
    conv_node: Node,
    conv_weight_node: Node,
    conv_bias_node: Optional[Node],
    bn_node: Node,
    m: GraphModule,
) -> None:
    # conv args: input, weight, bias, stride, padding, dilation, ...
    conv_w = _get_tensor_constant_from_node(conv_weight_node, m)
    conv_b = _get_tensor_constant_from_node(conv_bias_node, m)
    transpose = not (conv_node.target == torch.ops.aten.conv2d.default)
    if conv_node.target == torch.ops.aten.convolution.default:
        # the seventh argument of aten.convolution is the `transposed` flag
        assert type(conv_node.args[6]) == bool
        transpose = conv_node.args[6]

    # eval bn args: input, weight, bias, running_mean, running_var, momentum, eps
    # train bn args: input, weight, bias, running_mean, running_var, training, momentum, eps
    bn_args_schema = bn_node.target._schema.arguments
    bn_args = _get_all_arguments(bn_node.args, bn_node.kwargs, bn_args_schema)
    bn_w = _get_tensor_constant_from_node(bn_args[1], m)
    bn_b = _get_tensor_constant_from_node(bn_args[2], m)
    bn_rm = _get_tensor_constant_from_node(bn_args[3], m)
    bn_rv = _get_tensor_constant_from_node(bn_args[4], m)
    if bn_node.target == torch.ops.aten._native_batch_norm_legit_no_training.default:
        eps_arg_index = 6
    elif bn_node.target == torch.ops.aten._native_batch_norm_legit.default:
        eps_arg_index = 7
    else:
        raise ValueError("BN node target is unexpected ", bn_node.target)
    bn_eps = bn_args[eps_arg_index]

    fused_weight, fused_bias = fuse_conv_bn_weights(
        conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b, transpose=transpose
    )

    # update the weight and bias for conv
    conv_args = list(conv_node.args)
    # fill in the default bias argument if it was omitted
    if len(conv_args) == 2 and conv_node.target == torch.ops.aten.conv2d.default:
        conv_args.append(None)

    weight_attr_name = conv_weight_node.target
    assert isinstance(weight_attr_name, str)
    setattr(m, weight_attr_name, fused_weight)
    if conv_bias_node is not None:
        bias_attr_name = conv_bias_node.target
    else:
        bias_attr_name = weight_attr_name + "_bias"
        with m.graph.inserting_before(conv_node):
            get_bias_node = m.graph.get_attr(bias_attr_name)
        # NOTE: here we assume the bias of conv is not quantized!
        conv_args[2] = get_bias_node
    setattr(m, bias_attr_name, fused_bias)
    conv_node.args = tuple(conv_args)

    # native_batch_norm has three outputs; we expect getitem calls on the output,
    # and we want to replace the uses of getitem 0 with the output of conv
    for user in bn_node.users:
        if (
            user.op != "call_function"
            or user.target != operator.getitem
            or user.args[1] != 0
        ):
            continue
        user.replace_all_uses_with(conv_node)


def _fuse_conv_bn_(m: GraphModule) -> None:
    for n in m.graph.nodes:
        if (
            n.op != "call_function"
            or n.target != torch.ops.aten._native_batch_norm_legit_no_training.default
        ):
            continue
        bn_node = n
        n = bn_node.args[0]
        if n.op != "call_function" or (
            n.target != torch.ops.aten.conv2d.default
            and n.target != torch.ops.aten.convolution.default
        ):
            continue
        conv_node = n
        conv_weight_node = conv_node.args[1]
        conv_bias_node = conv_node.args[2] if len(conv_node.args) > 2 else None
        fold_bn_weights_into_conv_node(
            conv_node, conv_weight_node, conv_bias_node, bn_node, m
        )

    m.graph.eliminate_dead_code()
    m.recompile()


def _get_node_name_to_scope(model: GraphModule) -> Dict[str, Tuple[str, type]]:
    node_name_to_scope: Dict[str, Tuple[str, type]] = {}
    for n in model.graph.nodes:
        nn_module_stack = n.meta.get("nn_module_stack", None)
        current_scope = ("", type(None))
        if nn_module_stack:
            bt = list(nn_module_stack.values())[-1]
            current_scope = (bt[0].split(".")[-1], bt[1])
        node_name_to_scope[n.name] = current_scope
    return node_name_to_scope


def get_aten_graph_module(
    pattern: Callable,
    example_inputs: Tuple[Any, ...],
    **kwargs,
) -> GraphModule:
    """
    Convert the pattern to an FX graph with decomposed aten ops.
    """
    # Imported here to avoid a circular import at module load time
    from torch._export import capture_pre_autograd_graph

    aten_pattern = capture_pre_autograd_graph(pattern, example_inputs, **kwargs)
    aten_pattern.graph.eliminate_dead_code()
    aten_pattern.recompile()
    return aten_pattern


def remove_tensor_overload_for_qdq_ops(match_pattern: GraphModule) -> None:
    """Remove the .tensor overload for quantize/dequantize ops so that we can
    use the match_pattern that we get from torchdynamo export to match the output of convert_pt2e
    """
    _MAP = {
        torch.ops.quantized_decomposed.quantize_per_tensor.default: torch.ops.quantized_decomposed.quantize_per_tensor,
        torch.ops.quantized_decomposed.dequantize_per_tensor.default: torch.ops.quantized_decomposed.dequantize_per_tensor,
        torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor,
        torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor,
        torch.ops.quantized_decomposed.quantize_per_tensor.tensor2: torch.ops.quantized_decomposed.quantize_per_tensor,
        torch.ops.quantized_decomposed.dequantize_per_tensor.tensor2: torch.ops.quantized_decomposed.dequantize_per_tensor,
        torch.ops.quantized_decomposed.quantize_per_channel.default: torch.ops.quantized_decomposed.quantize_per_channel,
        torch.ops.quantized_decomposed.dequantize_per_channel.default: torch.ops.quantized_decomposed.dequantize_per_channel,
        torch.ops.aten.clamp.Tensor: torch.ops.aten.clamp,
    }
    for n in match_pattern.graph.nodes:
        if n.op != "call_function":
            continue
        if n.target in _MAP:
            n.target = _MAP[n.target]


def _replace_dropout_for_eval(m: GraphModule):
    """
    Replace the aten training dropout pattern with a noop, intended for eval.

    For models with dropout torch ops (nn.Dropout, F.dropout), calling model.eval()
    effectively turns these dropout ops into noops. For exported models, however,
    this is not done automatically, since the aten dropout patterns previously generated
    for training remain in the graph. Here we rewrite these dropout patterns with noops
    to avoid incorrectly applying further dropout during eval.

    See https://github.com/pytorch/pytorch/issues/103681.
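
    A rough usage sketch (illustrative only; `model` and `example_inputs` are
    assumed to exist here, and `capture_pre_autograd_graph` is the same export
    entry point used by `get_aten_graph_module` in this module):

        from torch._export import capture_pre_autograd_graph
        m = capture_pre_autograd_graph(model, example_inputs)
        _replace_dropout_for_eval(m)
        # the aten dropout patterns in `m` now behave as no-ops (eval behavior)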
    """
    # Needed to ensure subgraph matches are self-contained
    m.graph.eliminate_dead_code()
    m.recompile()

    def dropout_train(x):
        return F.dropout(x, p=0.5, training=True)

    def dropout_eval(x):
        return F.dropout(x, p=0.5, training=False)

    example_inputs = (torch.randn(1),)
    match_pattern = get_aten_graph_module(dropout_train, example_inputs)
    replacement_pattern = get_aten_graph_module(dropout_eval, example_inputs)

    replace_pattern_with_filters(
        m,
        match_pattern,
        replacement_pattern,
        match_filters=[],
        ignore_literals=True,
    )
    m.recompile()


def _is_literal(arg):
    if isinstance(arg, (int, float)):
        return True
    if isinstance(arg, (tuple, list)):
        return all(map(_is_literal, arg))
    return False


def _replace_literals_with_new_placeholders(
    gm: torch.fx.GraphModule,
    merge_dup: bool = False,
    exclude_literals: Optional[List[Any]] = None,
):
    """Replace the literals in the graph with placeholder nodes that are created on the fly while we
    traverse the graph, so that the literal arguments in the graph can be matched and replaced

    To use this, the pattern and replacement graph should have the exact same number of literal args
    and they should be used in the exact same order in the pattern and replacement graph.

    If the literal arguments are not used in the same order in pattern and replacement graph, please
    use `_replace_literals_with_existing_placeholders` instead

    Args:
        `gm`: input GraphModule that we'll transform
        `merge_dup`: boolean flag indicating whether repeated occurrences of the same literal
         in the graph should map to a single shared placeholder or to separate ones
        `exclude_literals`: a list of literals that will not be replaced with placeholders

    Example:

    # 1. Original Graph
    def pattern(self, x):
        return x + 3

    def replacement(self, x):
        return x - 3

    example_inputs = (torch.randn(1, 3, 3, 3),)
    pattern_gm = get_aten_graph_module(pattern, example_inputs)
    replacement_gm = get_aten_graph_module(replacement, example_inputs)

    # 2. Before calling replace literals we'll see the following graph:
    def pattern(self, x):
        return x + 3

    def replacement(self, x):
        return x - 3

    pattern_gm = _replace_literals_with_new_placeholders(pattern_gm)
    replacement_gm = _replace_literals_with_new_placeholders(replacement_gm)

    # 3. After replacing literals with new placeholder nodes

    def pattern(self, x, new_ph):
        return x + new_ph

    def replacement(self, x, new_ph):
        return x - new_ph

    """
    last_ph = None
    cnt = 0
    literal_to_ph: Dict[Union[float, bool, int, torch.dtype], Node] = {}
    if exclude_literals is None:
        exclude_literals = []

    for node in gm.graph.nodes:
        if node.op == "placeholder":
            last_ph = node
            cnt += 1
            continue
        with gm.graph.inserting_after(last_ph):
            new_args = []
            for arg in node.args:
                if _is_literal(arg) and arg not in exclude_literals:
                    if merge_dup and arg in literal_to_ph:
                        new_args.append(literal_to_ph[arg])
                    else:
                        ph_node = gm.graph.placeholder("arg" + str(cnt))
                        new_args.append(ph_node)
                        gm._in_spec.children_specs[0].children_specs.append(LeafSpec())
                        cnt += 1
                        if merge_dup:
                            literal_to_ph[arg] = ph_node
                else:
                    new_args.append(arg)
            new_args = tuple(new_args)

        node.args = new_args
    return gm


def _replace_literals_with_existing_placeholders(
    gm: torch.fx.GraphModule,
    exclude_literals: Optional[List[Any]] = None,
    literal_to_ph_idx: Optional[Dict[Union[float, int, bool, torch.dtype], int]] = None,
):
    """Replace the literals in the graph with **existing** placeholder nodes, so that the literal arguments
    in the graph can be matched and replaced

    To use this, all literal args in the graph should be unique and each of them should correspond
    to exactly one placeholder node

    # 1. Original Graph
    def pattern(self, x_i8, scale, zero_point, quant_min, quant_max):
        return torch.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max)

    def replacement(x_i8, scale, zero_point, quant_min, quant_max):
        x_i8 = torch.clamp(x_i8, quant_min, quant_max)
        return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32)

    example_inputs = (
        torch.randn(1, 3, 3, 3),
        1.0,
        0,
        -128,
        127,
    )
    pattern_gm = get_aten_graph_module(pattern, example_inputs)
    replacement_gm = get_aten_graph_module(replacement, example_inputs)

    # 2. Before calling replace literals we'll see the following graph:
    def pattern(self, x_i8, scale, zero_point, quant_min, quant_max):
        # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
        return torch.dequantize_per_tensor(x_i8, 1.0, 0, -128, 127)

    def replacement(x_i8, scale, zero_point, quant_min, quant_max):
        # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
        x_i8 = torch.clamp(x_i8, -128, 127)
        return ((x_i8.to(torch.float32) - 0) * 1.0).to(dtype=torch.float32)

    # Note that literal args appear in different order in pattern and replacement graph, so
    # we can't use _replace_literals_with_new_placeholders

    literal_to_ph_idx = {1.0: 1, 0: 2, -128: 3, 127: 4}
    pattern_gm = _replace_literals_with_existing_placeholders(pattern_gm, literal_to_ph_idx=literal_to_ph_idx)
    replacement_gm = _replace_literals_with_existing_placeholders(replacement_gm, literal_to_ph_idx=literal_to_ph_idx)

    # 3. After replacing literals with existing placeholder nodes

    def pattern(self, x_i8, scale, zero_point, quant_min, quant_max):
        # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
        return torch.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max)

    def replacement(x_i8, scale, zero_point, quant_min, quant_max):
        # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values
        x_i8 = torch.clamp(x_i8, quant_min, quant_max)
        return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32)
    """
    if exclude_literals is None:
        exclude_literals = []

    if literal_to_ph_idx is None:
        literal_to_ph_idx = {}

    phs = [node for node in gm.graph.nodes if node.op == "placeholder"]

    for node in gm.graph.nodes:
        if node.op != "call_function":
            continue
        new_args = []
        for arg in node.args:
            if (
                _is_literal(arg)
                and arg not in exclude_literals
                and arg in literal_to_ph_idx
            ):
                ph_idx = literal_to_ph_idx[arg]
                ph_node = phs[ph_idx]
                new_args.append(ph_node)
            else:
                new_args.append(arg)
        new_args = tuple(new_args)
        node.args = new_args
    return gm


def move_exported_model_to_eval(model: GraphModule):
    """
    Move an exported GraphModule to eval mode.

    This is equivalent to model.eval() but only for certain special ops like dropout.
    QAT users should call this before performing inference on the model.
    """
    _replace_dropout_for_eval(model)
    return model


def _disallow_eval_train(model: GraphModule):
    """
    Disallow calling `model.train()` or `model.eval()` on the given GraphModule.
    This is useful for exported models, where these methods don't actually behave as expected.
    """

    def _train(self, mode: bool = True):
        raise NotImplementedError("Calling train() is not supported yet.")

    def _eval(self, mode: bool = True):
        raise NotImplementedError("Calling eval() is not supported yet.")

    model.train = types.MethodType(_train, model)
    model.eval = types.MethodType(_eval, model)
    return model
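

# A rough end-to-end sketch of how the helpers above are typically combined
# (illustrative only: `model` and `example_inputs` are assumed to exist, and the
# elided prepare/convert steps refer to the PT2E flow mentioned in the docstrings):
#
#   from torch._export import capture_pre_autograd_graph
#   m = capture_pre_autograd_graph(model, example_inputs)
#   ...  # prepare and convert with the PT2E quantization flow (convert_pt2e)
#   m = move_exported_model_to_eval(m)   # rewrite aten dropout patterns as no-ops
#   m = _disallow_eval_train(m)          # guard against .train()/.eval() calls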