import bisect
import warnings
import math
from typing import (
    Generic,
    Iterable,
    Iterator,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
    Dict,
)

from torch import default_generator, randperm
from torch._utils import _accumulate

from ... import Generator, Tensor

__all__ = [
    "Dataset",
    "IterableDataset",
    "TensorDataset",
    "StackDataset",
    "ConcatDataset",
    "ChainDataset",
    "Subset",
    "random_split",
]

T_co = TypeVar("T_co", covariant=True)
T = TypeVar("T")
T_dict = Dict[str, T_co]
T_tuple = Tuple[T_co, ...]
T_stack = TypeVar("T_stack", T_tuple, T_dict)


class Dataset(Generic[T_co]):
    r"""An abstract class representing a :class:`Dataset`.

    All datasets that represent a map from keys to data samples should subclass
    it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
    data sample for a given key. Subclasses could also optionally overwrite
    :meth:`__len__`, which is expected to return the size of the dataset by many
    :class:`~torch.utils.data.Sampler` implementations and the default options
    of :class:`~torch.utils.data.DataLoader`. Subclasses could also
    optionally implement :meth:`__getitems__` to speed up batched sample
    loading. This method accepts a list of sample indices for a batch and
    returns the list of samples.

    .. note::
      :class:`~torch.utils.data.DataLoader` by default constructs an index
      sampler that yields integral indices.  To make it work with a map-style
      dataset with non-integral indices/keys, a custom sampler must be provided.
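
    Example:
        >>> # xdoctest: +SKIP
        >>> # A minimal map-style subclass sketch; the class and data here are
        >>> # illustrative, not part of the library.
        >>> class SquaresDataset(torch.utils.data.Dataset):
        ...     def __init__(self, n):
        ...         self.n = n
        ...     def __len__(self):
        ...         return self.n
        ...     def __getitem__(self, index):
        ...         return index ** 2
        >>> ds = SquaresDataset(5)
        >>> ds[3]
        9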
    """

    def __getitem__(self, index) -> T_co:
        raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")

    def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]":
        return ConcatDataset([self, other])


class IterableDataset(Dataset[T_co]):
    r"""An iterable Dataset.

    All datasets that represent an iterable of data samples should subclass it.
    This form of dataset is particularly useful when data come from a stream.

    All subclasses should overwrite :meth:`__iter__`, which would return an
    iterator of samples in this dataset.

    When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
    item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
    iterator. When :attr:`num_workers > 0`, each worker process will have a
    different copy of the dataset object, so it is often desired to configure
    each copy independently to avoid having duplicate data returned from the
    workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
    process, returns information about the worker. It can be used in either the
    dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
    :attr:`worker_init_fn` option to modify each copy's behavior.

    Example 1: splitting workload across all workers in :meth:`__iter__`::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
        >>> # xdoctest: +SKIP("Fails on MacOS12")
        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         worker_info = torch.utils.data.get_worker_info()
        ...         if worker_info is None:  # single-process data loading, return the full iterator
        ...             iter_start = self.start
        ...             iter_end = self.end
        ...         else:  # in a worker process
        ...             # split workload
        ...             per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
        ...             worker_id = worker_info.id
        ...             iter_start = self.start + worker_id * per_worker
        ...             iter_end = min(iter_start + per_worker, self.end)
        ...         return iter(range(iter_start, iter_end))
        ...
        >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [tensor([3]), tensor([4]), tensor([5]), tensor([6])]

        >>> # xdoctest: +REQUIRES(POSIX)
        >>> # Multi-process loading with two worker processes
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> # xdoctest: +IGNORE_WANT("non deterministic")
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [tensor([3]), tensor([5]), tensor([4]), tensor([6])]

        >>> # With even more workers
        >>> # xdoctest: +IGNORE_WANT("non deterministic")
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12)))
        [tensor([3]), tensor([5]), tensor([4]), tensor([6])]

    Example 2: splitting workload across all workers using :attr:`worker_init_fn`::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         return iter(range(self.start, self.end))
        ...
        >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [3, 4, 5, 6]
        >>>
        >>> # Directly doing multi-process loading yields duplicate data
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [3, 3, 4, 4, 5, 5, 6, 6]

        >>> # Define a `worker_init_fn` that configures each dataset copy differently
        >>> def worker_init_fn(worker_id):
        ...     worker_info = torch.utils.data.get_worker_info()
        ...     dataset = worker_info.dataset  # the dataset copy in this worker process
        ...     overall_start = dataset.start
        ...     overall_end = dataset.end
        ...     # configure the dataset to only process the split workload
        ...     per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
        ...     worker_id = worker_info.id
        ...     dataset.start = overall_start + worker_id * per_worker
        ...     dataset.end = min(dataset.start + per_worker, overall_end)
        ...

        >>> # Multi-process loading with the custom `worker_init_fn`
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
        [3, 5, 4, 6]

        >>> # With even more workers
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn)))
        [3, 4, 5, 6]
    """

    def __iter__(self) -> Iterator[T_co]:
        raise NotImplementedError("Subclasses of IterableDataset should implement __iter__.")

    def __add__(self, other: Dataset[T_co]):
        return ChainDataset([self, other])


class TensorDataset(Dataset[Tuple[Tensor, ...]]):
    r"""Dataset wrapping tensors.

    Each sample will be retrieved by indexing tensors along the first dimension.

    Args:
        *tensors (Tensor): tensors that have the same size of the first dimension.
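
    Example:
        >>> # xdoctest: +SKIP
        >>> # A minimal usage sketch; the tensor shapes below are illustrative.
        >>> features = torch.randn(100, 5)
        >>> labels = torch.randint(0, 2, (100,))
        >>> ds = TensorDataset(features, labels)
        >>> sample = ds[0]  # (features[0], labels[0])
        >>> len(ds)
        100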
    """

    tensors: Tuple[Tensor, ...]

    def __init__(self, *tensors: Tensor) -> None:
        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors), "Size mismatch between tensors"
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)


class StackDataset(Dataset[T_stack]):
    r"""Dataset as a stacking of multiple datasets.

    This class is useful to assemble different parts of complex input data, given as datasets.

    Example:
        >>> # xdoctest: +SKIP
        >>> images = ImageDataset()
        >>> texts = TextDataset()
        >>> tuple_stack = StackDataset(images, texts)
        >>> tuple_stack[0] == (images[0], texts[0])
        >>> dict_stack = StackDataset(image=images, text=texts)
        >>> dict_stack[0] == {'image': images[0], 'text': texts[0]}

    Args:
        *args (Dataset): Datasets for stacking returned as tuple.
        **kwargs (Dataset): Datasets for stacking returned as dict.
    """

    datasets: Union[tuple, dict]

    def __init__(self, *args: Dataset[T_co], **kwargs: Dataset[T_co]) -> None:
        if args:
            if kwargs:
                raise ValueError(
                    "Supported either ``tuple``- (via ``args``) or "
                    "``dict``- (via ``kwargs``) like input/output, but both types are given."
                )
            self._length = len(args[0])  # type: ignore[arg-type]
            if any(self._length != len(dataset) for dataset in args):  # type: ignore[arg-type]
                raise ValueError("Size mismatch between datasets")
            self.datasets = args
        elif kwargs:
            tmp = list(kwargs.values())
            self._length = len(tmp[0])  # type: ignore[arg-type]
            if any(self._length != len(dataset) for dataset in tmp):  # type: ignore[arg-type]
                raise ValueError("Size mismatch between datasets")
            self.datasets = kwargs
        else:
            raise ValueError("At least one dataset should be passed")

    def __getitem__(self, index):
        if isinstance(self.datasets, dict):
            return {k: dataset[index] for k, dataset in self.datasets.items()}
        return tuple(dataset[index] for dataset in self.datasets)

    def __len__(self):
        return self._length


class ConcatDataset(Dataset[T_co]):
    r"""Dataset as a concatenation of multiple datasets.

    This class is useful to assemble different existing datasets.

    Args:
        datasets (sequence): List of datasets to be concatenated
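
    Example:
        >>> # xdoctest: +SKIP
        >>> # A minimal usage sketch; the member datasets below are illustrative.
        >>> ds_a = TensorDataset(torch.arange(3))
        >>> ds_b = TensorDataset(torch.arange(3, 5))
        >>> ds = ConcatDataset([ds_a, ds_b])
        >>> len(ds)  # 3 + 2
        5
        >>> sample = ds[3]  # first sample of ds_b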
    """

    datasets: List[Dataset[T_co]]
    cumulative_sizes: List[int]

    @staticmethod
    def cumsum(sequence):
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = list(datasets)
        assert len(self.datasets) > 0, "datasets should not be an empty iterable"  # type: ignore[arg-type]
        for d in self.datasets:
            assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    @property
    def cummulative_sizes(self):
        warnings.warn("cummulative_sizes attribute is renamed to cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes


class ChainDataset(IterableDataset):
    r"""Dataset for chaining multiple :class:`IterableDataset` s.

    This class is useful to assemble different existing dataset streams. The
    chaining operation is done on-the-fly, so concatenating large-scale
    datasets with this class will be efficient.

    Args:
        datasets (iterable of IterableDataset): datasets to be chained together
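
    Example:
        >>> # xdoctest: +SKIP
        >>> # A minimal usage sketch; ``MyIterableDataset`` is the illustrative
        >>> # class from the :class:`IterableDataset` docstring above.
        >>> ds = ChainDataset([MyIterableDataset(3, 5), MyIterableDataset(5, 7)])
        >>> list(ds)
        [3, 4, 5, 6]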
    """

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = datasets

    def __iter__(self):
        for d in self.datasets:
            assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
            yield from d

    def __len__(self):
        total = 0
        for d in self.datasets:
            assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
            total += len(d)  # type: ignore[arg-type]
        return total


class Subset(Dataset[T_co]):
    r"""
    Subset of a dataset at specified indices.

    Args:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
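
    Example:
        >>> # xdoctest: +SKIP
        >>> # A minimal usage sketch; the dataset and indices are illustrative.
        >>> ds = TensorDataset(torch.arange(10))
        >>> evens = Subset(ds, [0, 2, 4, 6, 8])
        >>> len(evens)
        5
        >>> sample = evens[1]  # same as ds[2]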
    """

    dataset: Dataset[T_co]
    indices: Sequence[int]

    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        if isinstance(idx, list):
            return self.dataset[[self.indices[i] for i in idx]]
        return self.dataset[self.indices[idx]]

    def __getitems__(self, indices: List[int]) -> List[T_co]:
        # Add batched sampling support when the parent dataset supports it.
        if callable(getattr(self.dataset, "__getitems__", None)):
            return self.dataset.__getitems__([self.indices[idx] for idx in indices])  # type: ignore[attr-defined]
        else:
            return [self.dataset[self.indices[idx]] for idx in indices]

    def __len__(self):
        return len(self.indices)


def random_split(dataset: Dataset[T], lengths: Sequence[Union[int, float]],
                 generator: Optional[Generator] = default_generator) -> List[Subset[T]]:
    r"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    If a list of fractions that sum up to 1 is given,
    the lengths will be computed automatically as
    floor(frac * len(dataset)) for each fraction provided.

    After computing the lengths, if there are any remainders, 1 count will be
    distributed in round-robin fashion to the lengths
    until there are no remainders left.

    Optionally fix the generator for reproducible results, e.g.:

    Example:
        >>> # xdoctest: +SKIP
        >>> generator1 = torch.Generator().manual_seed(42)
        >>> generator2 = torch.Generator().manual_seed(42)
        >>> random_split(range(10), [3, 7], generator=generator1)
        >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2)

    Args:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths or fractions of splits to be produced
        generator (Generator): Generator used for the random permutation.
    """
    if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
        subset_lengths: List[int] = []
        for i, frac in enumerate(lengths):
            if frac < 0 or frac > 1:
                raise ValueError(f"Fraction at index {i} is not between 0 and 1")
            n_items_in_split = int(
                math.floor(len(dataset) * frac)  # type: ignore[arg-type]
            )
            subset_lengths.append(n_items_in_split)
        remainder = len(dataset) - sum(subset_lengths)  # type: ignore[arg-type]
        # Add 1 to all the lengths in round-robin fashion until the remainder is 0.
        for i in range(remainder):
            idx_to_add_at = i % len(subset_lengths)
            subset_lengths[idx_to_add_at] += 1
        lengths = subset_lengths
        for i, length in enumerate(lengths):
            if length == 0:
                warnings.warn(f"Length of split at index {i} is 0. "
                              f"This might result in an empty dataset.")

    # Cannot verify that dataset is Sized
    if sum(lengths) != len(dataset):  # type: ignore[arg-type]
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")

    indices = randperm(sum(lengths), generator=generator).tolist()  # type: ignore[call-overload]
    return [Subset(dataset, indices[offset - length : offset])
            for offset, length in zip(_accumulate(lengths), lengths)]