from typing import Optional, Tuple, List, Union, Any

import torch
from torch._C import _add_docstr, _sparse  # type: ignore[attr-defined]
from torch import Tensor

from .semi_structured import SparseSemiStructuredTensor, to_sparse_semi_structured

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from torch.types import _dtype as DType
    DimOrDims = Optional[Union[int, Tuple[int], List[int]]]
else:
    # The JIT doesn't understand Union, nor torch.dtype here
    DType = int
    DimOrDims = Optional[Tuple[int]]


__all__ = [
    'addmm',
    'check_sparse_tensor_invariants',
    'mm',
    'sum',
    'softmax',
    'log_softmax',
    'SparseSemiStructuredTensor',
    'to_sparse_semi_structured',
    'as_sparse_gradcheck',
]

addmm = _add_docstr(_sparse._sparse_addmm, r"""
sparse.addmm(mat, mat1, mat2, *, beta=1., alpha=1.) -> Tensor

This function does the exact same thing as :func:`torch.addmm` in the forward,
except that it supports backward for sparse COO matrix :attr:`mat1`.
When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
When inputs are COO tensors, this function also supports backward for both inputs.

Supports both CSR and COO storage formats.

.. note::
    This function doesn't support computing derivatives with respect to CSR matrices.

Args:
    mat (Tensor): a dense matrix to be added
    mat1 (Tensor): a sparse matrix to be multiplied
    mat2 (Tensor): a dense matrix to be multiplied
    beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
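
A minimal usage sketch (illustrative only; the shapes and random values below
are arbitrary, chosen so that :math:`mat1 @ mat2` conforms with :attr:`mat`)::

    >>> # illustrative shapes; any conforming (n, m) x (m, p) pair works
    >>> mat = torch.randn(2, 3)
    >>> mat1 = torch.randn(2, 4).to_sparse().requires_grad_(True)
    >>> mat2 = torch.randn(4, 3, requires_grad=True)
    >>> out = torch.sparse.addmm(mat, mat1, mat2, beta=0.5, alpha=2.0)
    >>> out.shape
    torch.Size([2, 3])
    >>> out.sum().backward()  # gradients reach both mat1 and mat2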
""")


mm = _add_docstr(_sparse._sparse_mm, r"""
    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
    and the (sparse or strided) matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
    :math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a
    :math:`(n \times p)` tensor.
    When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
    When inputs are COO tensors, this function also supports backward for both inputs.

    Supports both CSR and COO storage formats.

.. note::
    This function doesn't support computing derivatives with respect to CSR matrices.

    This function additionally accepts an optional :attr:`reduce` argument that allows
    specification of a reduction operation; mathematically, it performs the following operation:

.. math::

    z_{ij} = \bigoplus_{k = 0}^{K - 1} x_{ik} y_{kj}

where :math:`\bigoplus` defines the reduce operator. :attr:`reduce` is implemented only for
CSR storage format on CPU device.

Args:
    mat1 (Tensor): the first sparse matrix to be multiplied
    mat2 (Tensor): the second matrix to be multiplied, which could be sparse or dense
    reduce (str, optional): the reduction operation to apply for non-unique indices
        (:obj:`"sum"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`). Default :obj:`"sum"`.

Shape:
    The format of the output tensor of this function follows:
    - sparse x sparse -> sparse
    - sparse x dense -> dense

Example::

    >>> a = torch.tensor([[1., 0, 2], [0, 3, 0]]).to_sparse().requires_grad_()
    >>> a
    tensor(indices=tensor([[0, 0, 1],
                           [0, 2, 1]]),
           values=tensor([1., 2., 3.]),
           size=(2, 3), nnz=3, layout=torch.sparse_coo, requires_grad=True)
    >>> b = torch.tensor([[0, 1.], [2, 0], [0, 0]], requires_grad=True)
    >>> b
    tensor([[0., 1.],
            [2., 0.],
            [0., 0.]], requires_grad=True)
    >>> y = torch.sparse.mm(a, b)
    >>> y
    tensor([[0., 1.],
            [6., 0.]], grad_fn=<SparseAddmmBackward0>)
    >>> y.sum().backward()
    >>> a.grad
    tensor(indices=tensor([[0, 0, 1],
                           [0, 2, 1]]),
           values=tensor([1., 0., 2.]),
           size=(2, 3), nnz=3, layout=torch.sparse_coo)
    >>> c = a.detach().to_sparse_csr()
    >>> c
    tensor(crow_indices=tensor([0, 2, 3]),
           col_indices=tensor([0, 2, 1]),
           values=tensor([1., 2., 3.]), size=(2, 3), nnz=3,
           layout=torch.sparse_csr)
    >>> y1 = torch.sparse.mm(c, b, 'sum')
    >>> y1
    tensor([[0., 1.],
            [6., 0.]], grad_fn=<SparseMmReduceImplBackward0>)
    >>> y2 = torch.sparse.mm(c, b, 'amax')
    >>> y2
    tensor([[0., 1.],
            [6., 0.]], grad_fn=<SparseMmReduceImplBackward0>)
""")


sampled_addmm = _add_docstr(_sparse.sparse_sampled_addmm, r"""
sparse.sampled_addmm(input, mat1, mat2, *, beta=1., alpha=1., out=None) -> Tensor

Performs a matrix multiplication of the dense matrices :attr:`mat1` and :attr:`mat2` at the locations
specified by the sparsity pattern of :attr:`input`. The matrix :attr:`input` is added to the final result.

Mathematically this performs the following operation:

.. math::

    \text{out} = \alpha\ (\text{mat1} \mathbin{@} \text{mat2})*\text{spy}(\text{input}) + \beta\ \text{input}

where :math:`\text{spy}(\text{input})` is the sparsity pattern matrix of :attr:`input`, :attr:`alpha`
and :attr:`beta` are the scaling factors.
:math:`\text{spy}(\text{input})` has value 1 at the positions where :attr:`input` has non-zero values, and 0 elsewhere.

.. note::
    :attr:`input` must be a sparse CSR tensor. :attr:`mat1` and :attr:`mat2` must be dense tensors.

Args:
    input (Tensor): a sparse CSR matrix of shape `(m, n)` to be added and used to compute
        the sampled matrix multiplication
    mat1 (Tensor): a dense matrix of shape `(m, k)` to be multiplied
    mat2 (Tensor): a dense matrix of shape `(k, n)` to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.

Examples::

    >>> input = torch.eye(3, device='cuda').to_sparse_csr()
    >>> mat1 = torch.randn(3, 5, device='cuda')
    >>> mat2 = torch.randn(5, 3, device='cuda')
    >>> torch.sparse.sampled_addmm(input, mat1, mat2)
    tensor(crow_indices=tensor([0, 1, 2, 3]),
        col_indices=tensor([0, 1, 2]),
        values=tensor([ 0.2847, -0.7805, -0.1900]), device='cuda:0',
        size=(3, 3), nnz=3, layout=torch.sparse_csr)
    >>> torch.sparse.sampled_addmm(input, mat1, mat2).to_dense()
    tensor([[ 0.2847,  0.0000,  0.0000],
        [ 0.0000, -0.7805,  0.0000],
        [ 0.0000,  0.0000, -0.1900]], device='cuda:0')
    >>> torch.sparse.sampled_addmm(input, mat1, mat2, beta=0.5, alpha=0.5)
    tensor(crow_indices=tensor([0, 1, 2, 3]),
        col_indices=tensor([0, 1, 2]),
        values=tensor([ 0.1423, -0.3903, -0.0950]), device='cuda:0',
        size=(3, 3), nnz=3, layout=torch.sparse_csr)
""")


def sum(input: Tensor, dim: DimOrDims = None,
        dtype: Optional[DType] = None) -> Tensor:
    r"""
    Returns the sum of each row of the sparse tensor :attr:`input` in the given
    dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them. When summing over all sparse dimensions, this method
    returns a dense tensor instead of a sparse tensor.

    All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting in an output
    tensor having :attr:`dim` fewer dimensions than :attr:`input`.

    During backward, only gradients at ``nnz`` locations of :attr:`input`
    will propagate back. Note that the gradients of :attr:`input` are coalesced.

    Args:
        input (Tensor): the input sparse tensor
        dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce
            over all dims.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: dtype of :attr:`input`.

    Example::

        >>> nnz = 3
        >>> dims = [5, 5, 2, 3]
        >>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
                           torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
        >>> V = torch.randn(nnz, dims[2], dims[3])
        >>> size = torch.Size(dims)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> S = torch.sparse_coo_tensor(I, V, size)
        >>> S
        tensor(indices=tensor([[2, 0, 3],
                               [2, 4, 1]]),
               values=tensor([[[-0.6438, -1.6467,  1.4004],
                               [ 0.3411,  0.0918, -0.2312]],

                              [[ 0.5348,  0.0634, -2.0494],
                               [-0.7125, -1.0646,  2.1844]],

                              [[ 0.1276,  0.1874, -0.6334],
                               [-1.9682, -0.5340,  0.7483]]]),
               size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo)

        # when summing over only part of the sparse dims, a sparse tensor is returned
        >>> torch.sparse.sum(S, [1, 3])
        tensor(indices=tensor([[0, 2, 3]]),
               values=tensor([[-1.4512,  0.4073],
                              [-0.8901,  0.2017],
                              [-0.3183, -1.7539]]),
               size=(5, 2), nnz=3, layout=torch.sparse_coo)

        # when summing over all sparse dims, a dense tensor is returned
        # with the summed dims squeezed
        >>> torch.sparse.sum(S, [0, 1, 3])
        tensor([-2.6596, -1.1450])
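
    A short backward sketch (illustrative; ``S2`` is a fresh leaf copy of the
    ``S`` above, and the gradient values depend on its random data)::

        >>> S2 = S.detach().clone().requires_grad_(True)
        >>> # summing over all sparse dims gives a dense result; backward
        >>> # produces a sparse COO gradient with entries only at nnz locations
        >>> torch.sparse.sum(S2, [0, 1, 3]).sum().backward()
        >>> S2.grad.layout
        torch.sparse_coo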
    """
    if dtype is None:
        if dim is not None:
            return torch._sparse_sum(input, dim)
        else:
            return torch._sparse_sum(input)
    else:
        if dim is not None:
            return torch._sparse_sum(input, dim, dtype=dtype)
        else:
            return torch._sparse_sum(input, dtype=dtype)


softmax = _add_docstr(_sparse._sparse_softmax, r"""
sparse.softmax(input, dim, *, dtype=None) -> Tensor

Applies a softmax function.

Softmax is defined as:

:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`

where :math:`i, j` run over sparse tensor indices and unspecified
entries are ignored. This is equivalent to defining unspecified
entries as negative infinity, so that :math:`\exp(x_k) = 0` when the
entry with index :math:`k` is not specified.

It is applied to all slices along `dim`, and will re-scale them so
that the elements lie in the range `[0, 1]` and sum to 1.

Args:
    input (Tensor): input
    dim (int): A dimension along which softmax will be computed.
    dtype (:class:`torch.dtype`, optional): the desired data type
        of the returned tensor. If specified, the input tensor is
        cast to :attr:`dtype` before the operation is
        performed. This is useful for preventing data type
        overflows. Default: None
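
Example (an illustrative sketch; any fully specified row of the result agrees
with the dense :func:`torch.softmax` over that row)::

    >>> x = torch.tensor([[1., 0.], [2., 3.]]).to_sparse()
    >>> y = torch.sparse.softmax(x, dim=1)
    >>> # row 1 is fully specified, so it matches the dense softmax
    >>> torch.allclose(y.to_dense()[1], torch.softmax(torch.tensor([2., 3.]), 0))
    True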
""")


log_softmax = _add_docstr(_sparse._sparse_log_softmax, r"""
sparse.log_softmax(input, dim, *, dtype=None) -> Tensor

Applies a softmax function followed by logarithm.

See :class:`~torch.sparse.softmax` for more details.

Args:
    input (Tensor): input
    dim (int): A dimension along which softmax will be computed.
    dtype (:class:`torch.dtype`, optional): the desired data type
        of the returned tensor. If specified, the input tensor is
        cast to :attr:`dtype` before the operation is
        performed. This is useful for preventing data type
        overflows. Default: None
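
Example (an illustrative sketch, mirroring the :func:`torch.sparse.softmax`
example above)::

    >>> x = torch.tensor([[1., 0.], [2., 3.]]).to_sparse()
    >>> y = torch.sparse.log_softmax(x, dim=1)
    >>> # row 1 is fully specified, so it matches the dense log_softmax
    >>> torch.allclose(y.to_dense()[1], torch.log_softmax(torch.tensor([2., 3.]), 0))
    True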
""")


spdiags = _add_docstr(_sparse._spdiags, r"""
sparse.spdiags(diagonals, offsets, shape, layout=None) -> Tensor

Creates a sparse 2D tensor by placing the values from rows of
:attr:`diagonals` along specified diagonals of the output

The :attr:`offsets` tensor controls which diagonals are set.

- If :attr:`offsets[i]` = 0, it is the main diagonal
- If :attr:`offsets[i]` < 0, it is below the main diagonal
- If :attr:`offsets[i]` > 0, it is above the main diagonal

The number of rows in :attr:`diagonals` must match the length of :attr:`offsets`,
and an offset may not be repeated.

Args:
    diagonals (Tensor): Matrix storing diagonals row-wise
    offsets (Tensor): The diagonals to be set, stored as a vector
    shape (2-tuple of ints): The desired shape of the result
Keyword args:
    layout (:class:`torch.layout`, optional): The desired layout of the
        returned tensor. ``torch.sparse_coo``, ``torch.sparse_csc`` and ``torch.sparse_csr``
        are supported. Default: ``torch.sparse_coo``

Examples:

Set the main and first two lower diagonals of a matrix::

    >>> diags = torch.arange(9).reshape(3, 3)
    >>> diags
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3))
    >>> s
    tensor(indices=tensor([[0, 1, 2, 1, 2, 2],
                           [0, 1, 2, 0, 1, 0]]),
           values=tensor([0, 1, 2, 3, 4, 6]),
           size=(3, 3), nnz=6, layout=torch.sparse_coo)
    >>> s.to_dense()
    tensor([[0, 0, 0],
            [3, 1, 0],
            [6, 4, 2]])


Change the output layout::

    >>> diags = torch.arange(9).reshape(3, 3)
    >>> diags
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3), layout=torch.sparse_csr)
    >>> s
    tensor(crow_indices=tensor([0, 1, 3, 6]),
           col_indices=tensor([0, 0, 1, 0, 1, 2]),
           values=tensor([0, 3, 1, 6, 4, 2]), size=(3, 3), nnz=6,
           layout=torch.sparse_csr)
    >>> s.to_dense()
    tensor([[0, 0, 0],
            [3, 1, 0],
            [6, 4, 2]])

Set partial diagonals of a large output::

    >>> diags = torch.tensor([[1, 2], [3, 4]])
    >>> offsets = torch.tensor([0, -1])
    >>> torch.sparse.spdiags(diags, offsets, (5, 5)).to_dense()
    tensor([[1, 0, 0, 0, 0],
            [3, 2, 0, 0, 0],
            [0, 4, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]])

.. note::

    When setting the values along a given diagonal, the index into the diagonal
    and the index into the row of :attr:`diagonals` are taken as the
    column index in the output. This has the effect that when setting a diagonal
    with a positive offset `k`, the first value along that diagonal will be
    the value in position `k` of the row of :attr:`diagonals`.

Specifying a positive offset::

    >>> diags = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    >>> torch.sparse.spdiags(diags, torch.tensor([0, 1, 2]), (5, 5)).to_dense()
    tensor([[1, 2, 3, 0, 0],
            [0, 2, 3, 0, 0],
            [0, 0, 3, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]])
""")


class check_sparse_tensor_invariants:
    r"""A tool to control checking sparse tensor invariants.

The following options exist to manage sparse tensor invariants
checking in sparse tensor construction:

1. Using a context manager:

   .. code:: python

       with torch.sparse.check_sparse_tensor_invariants():
           run_my_model()

2. Using a procedural approach:

   .. code:: python

       prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled()
       torch.sparse.check_sparse_tensor_invariants.enable()

       run_my_model()

       if not prev_checks_enabled:
           torch.sparse.check_sparse_tensor_invariants.disable()

3. Using function decoration:

   .. code:: python

       @torch.sparse.check_sparse_tensor_invariants()
       def run_my_model():
           ...

       run_my_model()

4. Using ``check_invariants`` keyword argument in sparse tensor constructor call.
   For example:

   >>> torch.sparse_csr_tensor([0, 1, 3], [0, 1], [1, 2], check_invariants=True)
   Traceback (most recent call last):
     File "<stdin>", line 1, in <module>
   RuntimeError: `crow_indices[..., -1] == nnz` is not satisfied.
    """

    @staticmethod
    def is_enabled():
        r"""Returns True if the sparse tensor invariants checking is enabled.

.. note::

    Use :func:`torch.sparse.check_sparse_tensor_invariants.enable` or
    :func:`torch.sparse.check_sparse_tensor_invariants.disable` to
    manage the state of the sparse tensor invariants checks.
        )r   _CZ_check_sparse_tensor_invariantsr   r   r   r   
is_enabled  s    
z)check_sparse_tensor_invariants.is_enabledc                   C   s   t jd dS )a8  Enable sparse tensor invariants checking in sparse tensor constructors.

.. note::

    By default, the sparse tensor invariants checks are disabled. Use
    :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled` to
    retrieve the current state of sparse tensor invariants checking.

.. note::

    The sparse tensor invariants check flag is effective for all sparse
    tensor constructors, both in Python and ATen.

    The flag can be locally overridden by the ``check_invariants``
    optional argument of the sparse tensor constructor functions.
        TNr   r   #_set_check_sparse_tensor_invariantsr   r   r   r   enable  s    z%check_sparse_tensor_invariants.enablec                   C   s   t jd dS )zDisable sparse tensor invariants checking in sparse tensor constructors.

See :func:`torch.sparse.check_sparse_tensor_invariants.enable` for more information.
        """
        torch._C._set_check_sparse_tensor_invariants(False)

    # context manager support
    def __init__(self, enable=True):
        self.state = enable
        self.saved_state = None

    def __enter__(self):
        if self.saved_state is not None:
            raise RuntimeError('This context manager instance is already activated.'
                               ' Use a different context manager instance for context nesting.')
        self.saved_state = self.is_enabled()
        torch._C._set_check_sparse_tensor_invariants(self.state)

    def __exit__(self, type, value, traceback):
        assert self.saved_state is not None
        torch._C._set_check_sparse_tensor_invariants(self.saved_state)
        self.saved_state = None

    # decorator support
    def __call__(self, mth):
        def test_mth(*args, **kwargs):
            with torch.sparse.check_sparse_tensor_invariants(self.state):
                return mth(*args, **kwargs)
        return test_mth


def as_sparse_gradcheck(gradcheck):
    """Decorator for torch.autograd.gradcheck or its functools.partial
    variants that extends the gradcheck function with support for input
    functions that operate on and/or return sparse tensors.

    The specified gradcheck function itself is guaranteed to operate
    on strided tensors only.

    For example:

    >>> gradcheck = torch.sparse.as_sparse_gradcheck(torch.autograd.gradcheck)
    >>> x = torch.tensor([[0, 1], [2, 3]], dtype=torch.float64).to_sparse_coo().requires_grad_(True)
    >>> gradcheck(lambda x: x.to_sparse_csr(), x)
    True
    """

    def gradcheck_with_sparse_support(func, inputs, **kwargs):
        """Same as :func:`torch.autograd.gradcheck` but with sparse tensor
        inputs and outputs support.
        """
        masked = kwargs.pop('masked', False)
        sparse_layouts = {torch.sparse_coo, torch.sparse_csr, torch.sparse_csc,
                          torch.sparse_bsr, torch.sparse_bsc}
        sparse_compressed_layouts = {torch.sparse_csr, torch.sparse_csc,
                                     torch.sparse_bsr, torch.sparse_bsc}
        sparse_block_layouts = {torch.sparse_bsr, torch.sparse_bsc}
        STRIDED_REPRESENTATION = '__STRIDED_REPRESENTATION__'

        def convert_to_strided_representation(args):
            """Convert differentiable non-strided tensors to a representation
            containing differentiable strided tensors.
            """
            if not isinstance(args, (list, tuple)):
                args = (args,)
            new_args = []
            for obj in args:
                if isinstance(obj, torch.Tensor) and obj.requires_grad and obj.layout in sparse_layouts:
                    d = dict(layout=obj.layout, shape=obj.shape)
                    if not masked:
                        # materialize unspecified elements with zero values
                        batch_dim = obj.ndim - obj.dense_dim() - obj.sparse_dim()
                        blocksize = (obj.values().shape[batch_dim + 1:batch_dim + 3]
                                     if obj.layout in sparse_block_layouts else None)
                        full_mask = torch.ones(obj.shape, device=obj.device, dtype=torch.bool).to_sparse(
                            layout=obj.layout, blocksize=blocksize, dense_dim=obj.dense_dim())
                        obj = obj.to_dense().sparse_mask(full_mask)
                    if obj.layout is torch.sparse_coo:
                        d.update(indices=obj._indices(), is_coalesced=obj.is_coalesced())
                        values = obj._values()
                    elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}:
                        d.update(compressed_indices=obj.crow_indices(), plain_indices=obj.col_indices())
                        values = obj.values()
                    else:
                        d.update(compressed_indices=obj.ccol_indices(), plain_indices=obj.row_indices())
                        values = obj.values()
                    new_args.extend((STRIDED_REPRESENTATION, d, values.requires_grad_(True)))
                else:
                    new_args.append(obj)
            return tuple(new_args)

        def restore_from_strided_representation(args):
            """Restore non-strided differentiable tensors from their strided
            representations.
            """
            new_args = []
            args = list(args)
            while args:
                a = args.pop(0)
                if a == STRIDED_REPRESENTATION:
                    d, values = args.pop(0), args.pop(0)
                    if d['layout'] is torch.sparse_coo:
                        a = torch.sparse_coo_tensor(d['indices'], values, size=d['shape'],
                                                    is_coalesced=d['is_coalesced'])
                    elif d['layout'] in sparse_compressed_layouts:
                        a = torch.sparse_compressed_tensor(d['compressed_indices'], d['plain_indices'],
                                                           values, size=d['shape'], layout=d['layout'])
                    else:
                        raise NotImplementedError(
                            f'conversion of {d["layout"]} strided representation to tensor')
                new_args.append(a)
            return tuple(new_args)

        def func_wrapper(*args, **kwargs):
            restored_args = restore_from_strided_representation(args)
            outputs = func(*restored_args, **kwargs)
            # convert differentiable sparse outputs to strided tensors:
            strided_outputs = tuple(outputs) if isinstance(outputs, (list, tuple)) else (outputs,)
            strided_outputs = tuple(
                (o.to_dense(masked_grad=masked)
                 if isinstance(o, torch.Tensor) and o.requires_grad and o.layout in sparse_layouts else o)
                for o in strided_outputs)
            return strided_outputs if isinstance(outputs, (list, tuple)) else strided_outputs[0]

        args = (func_wrapper, convert_to_strided_representation(inputs))
        return gradcheck(*args, **kwargs)

    return gradcheck_with_sparse_support