U
    9%e.                     @   s  U d Z ddlZddlZddlZddlZddlZddlmZ ddlm	Z	 ddl
mZmZmZmZ ddlZddlmZ ddlmZ ddlmZmZ dd	lmZ d
dlmZ d
dlmZ eeZdZdZ G dd deZ!ej"dddZ#eg ej"f Z$e#a%e$e&d< e#fe$ddddZ'ej"dddZ(ee)e)ej"dddZ*dd
deefe	j+dee,e)e-e-eee. eee. d f f ee)ee)d f f ed!d"d#Z/dS )$z>Contains utilities to handle HTTP requests in Huggingface Hub.    N)	lru_cache)
HTTPStatus)CallableTupleTypeUnion)Response)HTTPAdapter)
ProxyErrorTimeout)PreparedRequest   )logging)HTTP_METHOD_TX-Amzn-Trace-Idzx-request-idc                       s4   e Zd ZdZ fddZeed fddZ  ZS )UniqueRequestIdAdapterr   c                    s   t  j|f| t|jkr8|jtp0tt |jt< t|jdd	d}t
d|jt  d|j d|j d| d	 d S )	Nauthorization z
Bearer hf_zRequest z:  z (authenticated: ))superadd_headersX_AMZN_TRACE_IDheadersgetX_REQUEST_IDstruuiduuid4
startswithloggerdebugmethodurl)selfrequestkwargsZ	has_token	__class__ Z/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/huggingface_hub/utils/_http.pyr   /   s    
&z"UniqueRequestIdAdapter.add_headers)r%   returnc              
      sl   zt  j|f||W S  tjk
rf } z0|jt}|dk	rT|jd| df|_ W 5 d}~X Y nX dS )zSCatch any RequestException to append request id to the error message for debugging.Nz(Request ID: r   )r   sendrequestsRequestExceptionr   r   r   args)r$   r%   r/   r&   eZ
request_idr'   r)   r*   r,   <   s    zUniqueRequestIdAdapter.send)	__name__
__module____qualname__r   r   r   r   r,   __classcell__r)   r)   r'   r*   r   ,   s   r   )r+   c                  C   s(   t  } | dt  | dt  | S )Nzhttp://zhttps://)r-   Sessionmountr   )sessionr)   r)   r*   _default_backend_factoryH   s    r8   _GLOBAL_BACKEND_FACTORY)backend_factoryr+   c                 C   s   | a t  dS )a  
    Configure the HTTP backend by providing a `backend_factory`. Any HTTP calls made by `huggingface_hub` will use a
    Session object instantiated by this factory. This can be useful if you are running your scripts in a specific
    environment requiring custom configuration (e.g. custom proxy or certifications).

    Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,
    `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`
    set in [`configure_http_backend`]. A LRU cache is used to cache the created sessions (and connections) between
    calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.

    See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.

    Example:
    ```py
    import requests
    from huggingface_hub import configure_http_backend, get_session

    # Create a factory function that returns a Session with configured proxies
    def backend_factory() -> requests.Session:
        session = requests.Session()
        session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}
        return session

    # Set it as the default session factory
    configure_http_backend(backend_factory=backend_factory)

    # In practice, this is mostly done internally in `huggingface_hub`
    session = get_session()
    ```
    N)r9   _get_session_from_cachecache_clear)r:   r)   r)   r*   configure_http_backendS   s     r=   c                   C   s   t t t dS )a  
    Get a `requests.Session` object, using the session factory from the user.

    Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,
    `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`
    set in [`configure_http_backend`]. A LRU cache is used to cache the created sessions (and connections) between
    calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.

    See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.

    Example:
    ```py
    import requests
    from huggingface_hub import configure_http_backend, get_session

    # Create a factory function that returns a Session with configured proxies
    def backend_factory() -> requests.Session:
        session = requests.Session()
        session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}
        return session

    # Set it as the default session factory
    configure_http_backend(backend_factory=backend_factory)

    # In practice, this is mostly done internally in `huggingface_hub`
    session = get_session()
    ```
    
process_id	thread_id)r;   osgetpid	threading	get_identr)   r)   r)   r*   get_sessionw   s    rE   )r?   r@   r+   c                 C   s   t  S )z
    Create a new session per thread using global factory. Using LRU cache (maxsize 128) to avoid memory leaks when
    using thousands of threads. Cache is cleared when `configure_http_backend` is called.
    )r9   r>   r)   r)   r*   r;      s    r;         )max_retriesbase_wait_timemax_wait_timeretry_on_exceptionsretry_on_status_codes.)r"   r#   rH   rI   rJ   rK   rL   r+   c                K   s^  t |tr|f}t |tr |f}d}|}	d}
d|krPt |d tjrP|d  }
t }|d7 }zt|
dk	rv|d |
 |jf | |d|}|j	|kr|W S t
d|j	 d|  d|  ||kr|  |W S W nN |k
r  } z.t
d	| d
|  d|  ||kr|W 5 d}~X Y nX t
d|	 d| d| d t|	 t||	d }	qVdS )a>  Wrapper around requests to retry calls on an endpoint, with exponential backoff.

    Endpoint call is retried on exceptions (ex: connection timeout, proxy error,...)
    and/or on specific status codes (ex: service unavailable). If the call failed more
    than `max_retries`, the exception is thrown or `raise_for_status` is called on the
    response object.

    Re-implement mechanisms from the `backoff` library to avoid adding an external
    dependencies to `hugging_face_hub`. See https://github.com/litl/backoff.

    Args:
        method (`Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]`):
            HTTP method to perform.
        url (`str`):
            The URL of the resource to fetch.
        max_retries (`int`, *optional*, defaults to `5`):
            Maximum number of retries, defaults to 5 (no retries).
        base_wait_time (`float`, *optional*, defaults to `1`):
            Duration (in seconds) to wait before retrying the first time.
            Wait time between retries then grows exponentially, capped by
            `max_wait_time`.
        max_wait_time (`float`, *optional*, defaults to `8`):
            Maximum duration (in seconds) to wait before retrying.
        retry_on_exceptions (`Type[Exception]` or `Tuple[Type[Exception]]`, *optional*, defaults to `(Timeout, ProxyError,)`):
            Define which exceptions must be caught to retry the request. Can be a single
            type or a tuple of types.
            By default, retry on `Timeout` and `ProxyError`.
        retry_on_status_codes (`int` or `Tuple[int]`, *optional*, defaults to `503`):
            Define on which status codes the request must be retried. By default, only
            HTTP 503 Service Unavailable is retried.
        **kwargs (`dict`, *optional*):
            kwargs to pass to `requests.request`.

    Example:
    ```
    >>> from huggingface_hub.utils import http_backoff

    # Same usage as "requests.request".
    >>> response = http_backoff("GET", "https://www.google.com")
    >>> response.raise_for_status()

    # If you expect a Gateway Timeout from time to time
    >>> http_backoff("PUT", upload_url, data=data, retry_on_status_codes=504)
    >>> response.raise_for_status()
    ```

    <Tip warning={true}>

    When using `requests` it is possible to stream data by passing an iterator to the
    `data` argument. On http backoff this is a problem as the iterator is not reset
    after a failed call. This issue is mitigated for file objects or any IO streams
    by saving the initial position of the cursor (with `data.tell()`) and resetting the
    cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff
    will fail. If this is a hard constraint for you, please let us know by opening an
    issue on [Github](https://github.com/huggingface/huggingface_hub).

    </Tip>
    r   Ndatar   )r"   r#   zHTTP Error z thrown while requesting r   'z' thrown while requesting zRetrying in z	s [Retry /z].   )
isinstancetypeintioIOBasetellrE   seekr%   status_coder    warningraise_for_statustimesleepmin)r"   r#   rH   rI   rJ   rK   rL   r&   Znb_triesZ
sleep_timeZio_obj_initial_posr7   responseerrr)   r)   r*   http_backoff   s8    H





r`   )0__doc__rT   rA   rC   r[   r   	functoolsr   httpr   typingr   r   r   r   r-   r   Zrequests.adaptersr	   Zrequests.exceptionsr
   r   Zrequests.modelsr   r   r   Z_typingr   Z
get_loggerr1   r    r   r   r   r5   r8   ZBACKEND_FACTORY_Tr9   __annotations__r=   rE   rS   r;   SERVICE_UNAVAILABLEr   float	Exceptionr`   r)   r)   r)   r*   <module>   sV   
$ 