U
    9%e|                     @   sL   d Z ddlmZ ddlZddlmZmZ ddlm	Z	 dddZ
dd
dZdS )z
Common code for all metrics.

    )combinationsN   )check_arraycheck_consistent_length)type_of_targetc                 C   s  d}||krt d|t|}|dkr8t d||dkrN| |||dS t||| t|}t|}d}|}d}	|d	kr|dk	rt||jd }| }| }nj|d
kr|dk	rtj	t
|t|ddd}	ntj	|dd}	t|		 drdS n|dkr|}	d}d}|jdkr*|d}|jdkr@|d}|j| }
t|
f}t|
D ]@}|j|g|d }|j|g|d }| |||d||< q^|dk	r|	dk	rt|	}	d||	dk< tj||	dS |S dS )aM  Average a binary metric for multilabel classification.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

        Will be ignored when ``y_true`` is binary.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.

    Returns
    -------
    score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        classes.

    )Nmicromacroweightedsampleszaverage has to be one of {0})binaryzmultilabel-indicatorz{0} format is not supportedr   )sample_weight   Nr   r	   )r   r   )Zaxisg        r
   weights)
ValueErrorformatr   r   r   nprepeatshapeZravelsummultiplyZreshapeisclosendimZzerosrangeZtakeZasarrayaverage)binary_metricy_truey_scorer   r   Zaverage_optionsZy_typeZnot_average_axisZscore_weightZaverage_weight	n_classesZscorecZy_true_cZ	y_score_c r!   T/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/sklearn/metrics/_base.py_average_binary_score   s`    +
 






r#   r   c                 C   s   t || t|}|jd }||d  d }t|}|dk}|rNt|nd}	tt|dD ]~\}
\}}||k}||k}t||}|rt||	|
< || }|| }| ||||f }| ||||f }|| d ||
< q`tj||	dS )aL  Average one-versus-one scores for multiclass classification.

    Uses the binary metric for one-vs-one multiclass classification,
    where the score is computed according to the Hand & Till (2001) algorithm.

    Parameters
    ----------
    binary_metric : callable
        The binary metric function to use that accepts the following as input:
            y_true_target : array, shape = [n_samples_target]
                Some sub-array of y_true for a pair of classes designated
                positive and negative in the one-vs-one scheme.
            y_score_target : array, shape = [n_samples_target]
                Scores corresponding to the probability estimates
                of a sample belonging to the designated positive class label

    y_true : array-like of shape (n_samples,)
        True multiclass labels.

    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class.

    average : {'macro', 'weighted'}, default='macro'
        Determines the type of averaging performed on the pairwise binary
        metric scores:
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.

    Returns
    -------
    score : float
        Average of the pairwise binary metric scores.
    r   r   r   r	   Nr   )	r   r   uniquer   empty	enumerater   
logical_orr   )r   r   r   r   Zy_true_uniquer   Zn_pairsZpair_scoresZis_weightedZ
prevalenceZixabZa_maskZb_maskZab_maskZa_trueZb_trueZa_true_scoreZb_true_scorer!   r!   r"   _average_multiclass_ovo_score   s&    (



r*   )N)r   )__doc__	itertoolsr   numpyr   utilsr   r   Zutils.multiclassr   r#   r*   r!   r!   r!   r"   <module>   s   
m