from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ exists for legacy code. Once the deprecated args are removed completely, the class can
        simply be deleted.
        """
        # Map each deprecated "no_*" flag onto its positive counterpart and warn the caller.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at"
                " https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # Note: only a single GPU is currently supported here.
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
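
# A minimal usage sketch (an illustration, not part of the original module): it
# assumes `transformers` is installed with PyTorch available, and that the
# inherited `BenchmarkArguments` fields shown here (`models`, `batch_sizes`,
# `sequence_lengths`) accept these values; the model identifier is a
# hypothetical choice. The __main__ guard keeps the example from running on
# import.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["bert-base-uncased"],  # model identifiers to benchmark
        batch_sizes=[8],
        sequence_lengths=[128],
        torchscript=False,  # set True to trace the models with torchscript
    )
    # Device resolution is lazy: the first access triggers _setup_devices.
    print(args.device, args.n_gpu, args.is_gpu)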