U
    ,-e2                     @   s   d dl Z d dlmZ d dlZd dlmZ d dlZejddddZejddd	Z	ej
jdd
dZedddZddddZedddZdae jdedddZdS )    N)	Generator)default_generator)	new_statereturnc                 C   s   t |  dS )zSets the random number generator state.

    .. note: This function only works for CPU. For CUDA, please use
             torch.manual_seed(seed), which works for both CPU and CUDA.

    Args:
        new_state (torch.ByteTensor): The desired state
    N)r   Z	set_state)r    r   M/var/www/html/Darija-Ai-Train/env/lib/python3.8/site-packages/torch/random.pyset_rng_state	   s    	r   )r   c                   C   s   t  S )zBReturns the random number generator state as a `torch.ByteTensor`.)r   Z	get_stater   r   r   r   get_rng_state   s    r	   c                 C   sV   t | } ddl}|j s&|j|  ddl}|j sD|j|  t|  t	| S )a  Sets the seed for generating random numbers. Returns a
    `torch.Generator` object.

    Args:
        seed (int): The desired seed. Value must be within the inclusive range
            `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
            is raised. Negative inputs are remapped to positive values with the formula
            `0xffff_ffff_ffff_ffff + seed`.
    r   N)
int
torch.cudacuda_is_in_bad_forkmanual_seed_all	torch.mpsmpsmanual_seed_seed_custom_devicer   seedtorchr   r   r   r      s    


r   c                  C   sP   t  } ddl}|j s&|j|  ddl}|j sD|j|  t	|  | S )zSets the seed for generating random numbers to a non-deterministic
    random number. Returns a 64 bit number used to seed the RNG.
    r   N)
r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   3   s    

r   c                 C   s   t | } tj }tt|rtt|}d}d}t||r^t||r^t|| st|||  n8d| d}|d| d| d| d7 }tj|td	d
 dS )zSets the seed to generate random numbers for custom device.

    Args:
        seed (int): The desired seed.

    See [Note: support the custom device with privateuse1]
    r   r   zSet seed for `z0` device does not take effect, please add API's `z` and `z` to `z` device module.   )
stacklevelN)	r
   r   _CZ_get_privateuse1_backend_namehasattrgetattrwarningswarnUserWarning)r   Zcustom_backend_nameZcustom_device_modZ_bad_fork_nameZ_seed_all_namemessager   r   r   r   F   s    


r   c                   C   s   t  S )zSReturns the initial seed for generating random numbers as a
    Python `long`.
    )r   initial_seedr   r   r   r   r    ]   s    r    FTfork_rngdevicesr   c                 c   sJ  t |j}tt |d}|dkr4td| dd |sBdV  dS | dkr| }|dkrts|  d| d| d|  d	|  d
|  d|  d| d| d|  d| d| d}t	| dat
t|} nt
| } t  }g }	| D ]}
|	||
 qz
dV  W 5 t | t| |	D ]\}
}|||
 q,X dS )a  
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Args:
        devices (iterable of Device IDs): devices for which to fork
            the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed
        enabled (bool): if ``False``, the RNG is not forked.  This is a convenience
            argument for easily disabling the context manager without having
            to delete it and unindent your Python code under it.
        deivce_type (str): device type str, default is `cuda`. As for custom device,
            see details in [Note: support the custom device with privateuse1]
    Nztorch has no module of `z`, you should register z,a module by `torch._register_device_module`.   z reports that you have z& available devices, and you have used z_ without explicitly specifying which devices are being used. For safety, we initialize *every* zA device by default, which can be quite slow if you have a lot of z5s. If you know that you are only making use of a few z' devices, set the environment variable z_VISIBLE_DEVICES or the 'z' keyword argument of z with the set of devices you are actually using. For example, if you are using CPU only, set device.upper()_VISIBLE_DEVICES= or devices=[]; if you are using device 0 only, set zb_VISIBLE_DEVICES=0 or devices=[0].  To initialize all devices and suppress this warning, set the 'z#' keyword argument to `range(torch.z.device_count())`.T)r   devicetyper   RuntimeErrorZdevice_count_fork_rng_warned_alreadyupperr   r   listranger	   appendr   zip)r"   enabledZ_callerZ_devices_kwZdevice_typeZ
device_modZnum_devicesr   Zcpu_rng_stateZdevice_rng_statesr$   Zdevice_rng_stater   r   r   r!   g   s4    d


)NTr!   r"   r   )
contextlibtypingr   r   Ztorch._Cr   r   ZTensorr   r	   r   r   r
   r   r   r    r'   contextmanagerr!   r   r   r   r   <module>   s   