from __future__ import annotations

import functools
from typing import Callable, Dict, List, Sequence, Tuple, Union

import torch
from functorch._C import dim as _C

from ._parsing import (
    _ellipsis,
    AnonymousAxis,
    comma_separate,
    parse_pattern,
    validate_rearrange_expressions,
)

__all__ = ["rearrange"]

dims = _C.dims


@functools.lru_cache(256)
def _create_rearrange_callable(
    tensor_ndim: int, pattern: str, **axes_lengths: int
) -> Callable[[torch.Tensor], torch.Tensor]:
    r"""Translate an `einops`-style pattern into a callable that performs the rearrange using first-class dimensions.

    Since an equivalent result is computed for tensors with the same number of dimensions, with the same pattern and
    specified axes lengths, this function can be memoized.
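
    For example, the memoization can be observed directly (an illustrative
    check that relies only on `functools.lru_cache` semantics):

        >>> (_create_rearrange_callable(3, 'a b c -> c a b')
        ...  is _create_rearrange_callable(3, 'a b c -> c a b'))
        True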

    Args:
        tensor_ndim (int): the number of dimensions in the tensor to rearrange
        pattern (str): the `einops`-style rearrangement pattern
        axes_lengths (int): any additional length specifications for dimensions

    Returns:
        Callable[[torch.Tensor], torch.Tensor]: a callable that performs the rearrangement
    """
    left, right = parse_pattern(pattern, axes_lengths)
    validate_rearrange_expressions(left, right, axes_lengths)

    n_anon_dims = sum(not dim for dim in left.composition)
    if left.has_ellipsis:
        n_ellipsis_dims = tensor_ndim - (len(left.composition) - 1)
        n_named_dims = len(left.identifiers) - 1

        if (pattern_ndim := n_anon_dims + n_named_dims) > tensor_ndim:
            raise ValueError(
                f"Number of dimensions in pattern ({pattern_ndim}) must be less than or equal to the number of "
                f"dimensions in the tensor ({tensor_ndim})"
            )
    else:
        n_ellipsis_dims = 0
        n_named_dims = len(left.identifiers)

        if (pattern_ndim := len(left.composition)) != tensor_ndim:
            raise ValueError(
                f"Number of dimensions in pattern ({pattern_ndim}) must be equal to the number of dimensions in "
                f"the tensor ({tensor_ndim})"
            )
    n_dims = n_named_dims + n_ellipsis_dims + n_anon_dims

    if n_dims == 0:
        # an identity rearrangement on a 0-dimensional tensor
        return lambda tensor: tensor

    first_class_dims: Tuple[str, ...] = tuple(f"d{i}" for i in range(n_dims))
    identifier_dim_map: Dict[Union[str, AnonymousAxis], Tuple[str, ...]] = {}
    anon_axes: List[AnonymousAxis] = []

    # map the left-hand side identifiers to strings representing first class dims
    dims_i = 0
    for dimension in left.composition:
        if isinstance(dimension, list):
            for identifier in dimension:
                # non-unitary anon axes are not allowed in rearrange & unitary anon axes are
                # represented as empty lists
                assert isinstance(identifier, str)
                identifier_dim_map[identifier] = (first_class_dims[dims_i],)
                dims_i += 1
            if not dimension:
                # unitary anonymous axis
                anon_axis = AnonymousAxis("1")
                identifier_dim_map[anon_axis] = (first_class_dims[dims_i],)
                anon_axes.append(anon_axis)
                dimension.append(anon_axis)
                dims_i += 1
        elif dimension == _ellipsis:
            identifier = _ellipsis
            identifier_dim_map[identifier] = tuple(
                first_class_dims[dims_i + j] for j in range(n_ellipsis_dims)
            )
            dims_i += n_ellipsis_dims
        else:
            raise ValueError(f"Unexpected dimension: {dimension}")

    def composition_to_dims(
        composition: Sequence[Union[List[Union[str, AnonymousAxis]], str]]
    ) -> List[Union[str, Tuple[str, ...]]]:
        """Convert a `ParsedExpression.composition` into a `Tensor.__getitem__` index of strings representing first
        class dims."""
        dim_composition: List[Union[str, Tuple[str, ...]]] = []
        for dimension in composition:
            if isinstance(dimension, list):
                dim_composition.append(
                    tuple(
                        dim
                        for identifier in dimension
                        for dim in identifier_dim_map[identifier]
                    )
                )
            elif dimension == _ellipsis:
                dim_composition.extend(identifier_dim_map[_ellipsis])
            else:
                raise ValueError(f"Unexpected dimension: {dimension}")
        return dim_composition

    left_dims = composition_to_dims(left.composition)
    right_dims = composition_to_dims(right.composition)
    anon_dims = tuple(identifier_dim_map[axis][0] for axis in anon_axes)
    specified_lengths = tuple(
        (identifier_dim_map[axis][0], length) for axis, length in axes_lengths.items()
    )

    custom_rearrange_callable_name = "do_rearrange"
    custom_rearrange_callable_code = (
        (
            f"def {custom_rearrange_callable_name}(tensor):\n"
            f"    {comma_separate(first_class_dims)} = dims({n_dims})\n"
        )
        + (
            "".join(
                f"    {dim}.size = {length}\n" for (dim, length) in specified_lengths
            )
            if specified_lengths
            else ""
        )
        + f"    tensor = tensor[{comma_separate(left_dims)}].order({comma_separate(right_dims)})\n"
        + (
            f"    return tensor.sum({comma_separate([anon_dims])}, keepdim=False)\n"
            if anon_dims
            else "    return tensor\n"
        )
    )

    exec(custom_rearrange_callable_code)
    return locals()[custom_rearrange_callable_name]
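
# Illustrative sketch (a hypothetical input, not part of the module): for the
# pattern "b (h w) -> b h w" with h=2, the callable generated above would look
# roughly like
#
#     def do_rearrange(tensor):
#         d0, d1, d2 = dims(3)
#         d1.size = 2
#         tensor = tensor[d0, (d1, d2)].order(d0, d1, d2)
#         return tensor
#
# that is, first-class dims are bound, given any specified sizes, used to
# index the tensor, and then reordered via `.order`.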


def rearrange(
    tensor: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor, ...]],
    pattern: str,
    **axes_lengths: int,
) -> torch.Tensor:
    r"""A native implementation of `einops.rearrange`, a reader-friendly smart element reordering for multidimensional
    tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
    stack, concatenate and other operations.

    See: https://einops.rocks/api/rearrange/

    Args:
        tensor (Tensor or sequence of Tensor): the tensor(s) to rearrange
        pattern (str): the rearrangement pattern
        axes_lengths (int): any additional length specifications for dimensions

    Returns:
        Tensor: the rearranged tensor

    Examples:
        >>> # suppose we have a set of 32 images in "h w c" format (height-width-channel)
        >>> images = torch.randn((32, 30, 40, 3))

        >>> # stack along first (batch) axis, output is a single array
        >>> rearrange(images, 'b h w c -> b h w c').shape
        torch.Size([32, 30, 40, 3])

        >>> # concatenate images along height (vertical axis), 960 = 32 * 30
        >>> rearrange(images, 'b h w c -> (b h) w c').shape
        torch.Size([960, 40, 3])

        >>> # concatenated images along horizontal axis, 1280 = 32 * 40
        >>> rearrange(images, 'b h w c -> h (b w) c').shape
        torch.Size([30, 1280, 3])

        >>> # reordered axes to "b c h w" format for deep learning
        >>> rearrange(images, 'b h w c -> b c h w').shape
        torch.Size([32, 3, 30, 40])

        >>> # flattened each image into a vector, 3600 = 30 * 40 * 3
        >>> rearrange(images, 'b h w c -> b (c h w)').shape
        torch.Size([32, 3600])

        >>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
        >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
        torch.Size([128, 15, 20, 3])

        >>> # space-to-depth operation
        >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
        torch.Size([32, 15, 20, 12])
    """
    if not isinstance(tensor, torch.Tensor):
        tensor = torch.stack(tensor)

    rearrange_callable = _create_rearrange_callable(
        tensor.ndim, pattern, **axes_lengths
    )

    return rearrange_callable(tensor)
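

# A minimal smoke test, run only when this file is executed directly (added
# here for illustration; it is not part of the functorch API). It re-checks a
# few of the docstring examples above.
if __name__ == "__main__":
    images = torch.randn((32, 30, 40, 3))
    assert rearrange(images, "b h w c -> (b h) w c").shape == (960, 40, 3)
    assert rearrange(images, "b h w c -> b c h w").shape == (32, 3, 30, 40)
    assert rearrange(
        images, "b (h h1) (w w1) c -> b h w (c h1 w1)", h1=2, w1=2
    ).shape == (32, 15, 20, 12)
    print("rearrange: docstring examples verified")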