import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version


try:
    import torch_xla.core.xla_model as xm  # noqa: F401

    _tpu_available = True
except ImportError:
    _tpu_available = False


# Queried once at import time; torch caches the answer anyway and it never changes per-process.
_torch_distributed_available = torch.distributed.is_available()


def _is_package_available(pkg_name):
    # Check that an actual distribution is installed (not just a same-named
    # directory on the path) by looking up its metadata.
    package_exists = importlib.util.find_spec(pkg_name) is not None
    if package_exists:
        try:
            _ = importlib.metadata.metadata(pkg_name)
            return True
        except importlib.metadata.PackageNotFoundError:
            return False


def is_torch_distributed_available() -> bool:
    return _torch_distributed_available
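
# Usage sketch (illustrative caller code, not part of this module): guard an
# optional import behind the metadata check above.
#
#     if _is_package_available("datasets"):
#         import datasets
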
def is_ccl_available():
    # NOTE: the try body is empty in this build, so the except branch is
    # vestigial; it is kept as compiled.
    try:
        pass
    except ImportError:
        print(
            "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not"
            " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
            " Bindings for PyTorch*."
        )
    return (
        importlib.util.find_spec("torch_ccl") is not None
        or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
    )


def get_ccl_version():
    return importlib.metadata.version("oneccl_bind_pt")


def is_fp8_available():
    return _is_package_available("transformer_engine")
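
# Usage sketch (illustrative): a launcher would pick the `ccl` DDP backend
# only when the oneCCL bindings are importable.
#
#     import torch.distributed as dist
#     if is_ccl_available():
#         dist.init_process_group(backend="ccl")
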
def is_cuda_available():
    """
    Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
    uninitialized.
    """
    try:
        os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
        available = torch.cuda.is_available()
    finally:
        os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)

    return available
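
# Setting PYTORCH_NVML_BASED_CUDA_CHECK above makes torch.cuda.is_available()
# answer via an NVML driver query instead of initializing a CUDA context, so
# the check stays side-effect free; the flag is popped in the `finally` block
# so it does not leak into user code.
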
dS | r8tr8zt }W dS  tk
r6   Y dS X tS )zQChecks if `torch_xla` is installed and potentially if a TPU is in the environmentFT)r&   _tpu_availablexmZ
xla_deviceRuntimeError)check_devicer   r   r   r   is_tpu_available\   s    r+   c                   C   s   t dS )NZ	deepspeedr   r   r   r   r   is_deepspeed_availablem   s    r,   c                 C   s$   t  r|  S tj r tj S dS )z8Checks if bf16 is supported, optionally ignoring the TPUT)r+   r"   r#   r$   Zis_bf16_supported)Z
ignore_tpur   r   r   is_bf16_availableq   s
    

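
# Sketch of dtype selection built on the check above (illustrative only):
#
#     mixed_dtype = torch.bfloat16 if is_bf16_available() else torch.float16
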
def is_4bit_bnb_available():
    package_exists = _is_package_available("bitsandbytes")
    if package_exists:
        bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
        return compare_versions(bnb_version, ">=", "0.39.0")
    return False


def is_8bit_bnb_available():
    package_exists = _is_package_available("bitsandbytes")
    if package_exists:
        bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
        return compare_versions(bnb_version, ">=", "0.37.2")
    return False


def is_bnb_available():
    return _is_package_available("bitsandbytes")
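
# Illustrative gating on the bitsandbytes checks above; `load_in_4bit` is a
# hypothetical caller-side flag, not something defined in this module.
#
#     if load_in_4bit and not is_4bit_bnb_available():
#         raise ImportError("4-bit quantization requires bitsandbytes>=0.39.0")
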
def is_megatron_lm_available():
    if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
        package_exists = importlib.util.find_spec("megatron") is not None
        if package_exists:
            try:
                megatron_version = parse(importlib.metadata.version("megatron-lm"))
                return compare_versions(megatron_version, ">=", "2.2.0")
            except Exception as e:
                warnings.warn(f"Parse Megatron version failed. Exception:{e}")
                return False


def is_safetensors_available():
    return _is_package_available("safetensors")


def is_transformers_available():
    return _is_package_available("transformers")


def is_datasets_available():
    return _is_package_available("datasets")


def is_timm_available():
    return _is_package_available("timm")


def is_aim_available():
    package_exists = _is_package_available("aim")
    if package_exists:
        aim_version = version.parse(importlib.metadata.version("aim"))
        return compare_versions(aim_version, "<", "4.0.0")
    return False
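
# Note: is_megatron_lm_available() is doubly opt-in. Both conditions must
# hold, e.g. (illustrative shell invocations):
#
#     pip install "megatron-lm>=2.2.0"                # compatible version installed
#     ACCELERATE_USE_MEGATRON_LM=true python train.py # env flag enabled
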
def is_tensorboard_available():
    return _is_package_available("tensorboard") or _is_package_available("tensorboardX")


def is_wandb_available():
    return _is_package_available("wandb")


def is_comet_ml_available():
    return _is_package_available("comet_ml")


def is_boto3_available():
    return _is_package_available("boto3")


def is_rich_available():
    if _is_package_available("rich"):
        if "ACCELERATE_DISABLE_RICH" in os.environ:
            warnings.warn(
                "`ACCELERATE_DISABLE_RICH` is deprecated and will be removed in v0.22.0 and deactivated by default."
                " Please use `ACCELERATE_ENABLE_RICH` if you wish to use `rich`."
            )
            return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
        return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
    return False
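
# Illustrative opt-in for rich output (the flag is read via parse_flag_from_env):
#
#     ACCELERATE_ENABLE_RICH=1 python train.py
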
def is_sagemaker_available():
    return _is_package_available("sagemaker")


def is_tqdm_available():
    return _is_package_available("tqdm")


def is_mlflow_available():
    if _is_package_available("mlflow"):
        return True

    if importlib.util.find_spec("mlflow") is not None:
        try:
            _ = importlib.metadata.metadata("mlflow-skinny")
            return True
        except importlib.metadata.PackageNotFoundError:
            return False
    return False


def is_mps_available():
    return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
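
# The fallback branch in is_mlflow_available() exists because "mlflow-skinny"
# is a separate distribution providing the same importable `mlflow` package,
# so find_spec("mlflow") can succeed while the "mlflow" metadata lookup fails.
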
def is_ipex_available():
    def get_major_and_minor_from_version(full_version):
        return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)

    _torch_version = importlib.metadata.version("torch")
    if importlib.util.find_spec("intel_extension_for_pytorch") is None:
        return False
    _ipex_version = "N/A"
    try:
        _ipex_version = importlib.metadata.version("intel_extension_for_pytorch")
    except importlib.metadata.PackageNotFoundError:
        return False
    torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
    ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
    if torch_major_and_minor != ipex_major_and_minor:
        warnings.warn(
            f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
            f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
        )
        return False
    return True
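
# IPEX releases track a specific PyTorch minor release, hence the major.minor
# comparison above; e.g. intel_extension_for_pytorch 1.13.x expects torch
# 1.13.* (illustrative version pair).
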
@lru_cache
def is_npu_available(check_device=False):
    "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
    if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
        return False

    import torch
    import torch_npu  # noqa: F401

    if check_device:
        try:
            # Will raise a RuntimeError if no NPU is found
            _ = torch.npu.device_count()
            return torch.npu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "npu") and torch.npu.is_available()
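
# Usage sketch for Ascend NPUs (illustrative; importing torch_npu registers
# the `torch.npu` namespace used above):
#
#     if is_npu_available():
#         device = torch.device("npu:0")
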
@lru_cache
def is_xpu_available(check_device=False):
    "check if user disables it explicitly"
    if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True):
        return False
    # XPU support is only usable through IPEX on sufficiently recent torch.
    if is_ipex_available():
        import torch

        if is_torch_version("<=", "1.12"):
            return False
    else:
        return False

    import intel_extension_for_pytorch  # noqa: F401

    if check_device:
        try:
            # Will raise a RuntimeError if no XPU is found
            _ = torch.xpu.device_count()
            return torch.xpu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "xpu") and torch.xpu.is_available()
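
# Analogous usage sketch for Intel XPUs (illustrative caller code):
#
#     if is_xpu_available():
#         device = torch.device("xpu")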