"""Losses and corresponding default initial estimators for gradient boosting
decision trees.
"""

from abc import ABCMeta, abstractmethod

import numpy as np
from scipy.special import expit, logsumexp

from ..dummy import DummyClassifier, DummyRegressor
from ..tree._tree import TREE_LEAF
from ..utils.stats import _weighted_percentile


class LossFunction(metaclass=ABCMeta):
    """Abstract base class for various loss functions.

    Parameters
    ----------
    n_classes : int
        Number of classes.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """

    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    @abstractmethod
    def init_estimator(self):
        """Default ``init`` estimator for loss function."""

    @abstractmethod
    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves).

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """

    @abstractmethod
    def negative_gradient(self, y, raw_predictions, **kargs):
        """Compute the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        """

    def update_terminal_regions(
        self,
        tree,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
        sample_mask,
        learning_rate=0.1,
        k=0,
    ):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray of shape (n_samples, n_features)
            The data array.
        y : ndarray of shape (n_samples,)
            The target labels.
        residual : ndarray of shape (n_samples,)
            The residuals (usually the negative gradient).
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        sample_weight : ndarray of shape (n_samples,)
            The weight of each sample.
        sample_mask : ndarray of shape (n_samples,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            Learning rate shrinks the contribution of each tree by
             ``learning_rate``.
        k : int, default=0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)

        # mask all which are not in sample mask.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1

        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(
                tree,
                masked_terminal_regions,
                leaf,
                X,
                y,
                residual,
                raw_predictions[:, k],
                sample_weight,
            )

        # update predictions (both in-bag and out-of-bag)
        raw_predictions[:, k] += learning_rate * tree.value[:, 0, 0].take(
            terminal_regions, axis=0
        )

    @abstractmethod
    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        """Template method for updating terminal regions (i.e., leaves)."""

    @abstractmethod
    def get_init_raw_predictions(self, X, estimator):
        """Return the initial raw predictions.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The data array.
        estimator : object
            The estimator to use to compute the predictions.

        Returns
        -------
        raw_predictions : ndarray of shape (n_samples, K)
            The initial raw predictions. K is equal to 1 for binary
            classification and regression, and equal to the number of classes
            for multiclass classification. ``raw_predictions`` is cast
            into float64.
        """
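

# --- Editor's illustration (not part of scikit-learn) ------------------------
# A minimal sketch, under assumed names, of how the gradient boosting fit loop
# (in ``_gb.py``) consumes a ``LossFunction`` at each stage: pseudo-residuals
# come from ``negative_gradient`` and the fitted tree's leaves are then
# refined by ``update_terminal_regions``. ``_sketch_fit_stage`` is a
# hypothetical helper added for exposition; it is defined but never called.
def _sketch_fit_stage(loss, fitted_tree, X, y, raw_predictions, sample_weight, sample_mask):
    # regression target for the next tree: the (half) negative gradient of
    # the loss at the current raw predictions
    residual = loss.negative_gradient(y, raw_predictions, k=0, sample_weight=sample_weight)
    # ``fitted_tree`` is assumed to be a DecisionTreeRegressor already fit to
    # ``residual``; its leaf values are re-estimated (line search) and the
    # shrunken tree predictions are folded into ``raw_predictions`` in place
    loss.update_terminal_regions(
        fitted_tree.tree_,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
        sample_mask,
        learning_rate=0.1,
        k=0,
    )
    return raw_predictions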
r   )	metaclassc                       s0   e Zd ZdZ fddZdd Zdd Z  ZS )RegressionLossFunctionz)Base class for regression loss functions.c                    s   t  jdd d S )N   r   )superr   r   	__class__r   r   r      s    zRegressionLossFunction.__init__c                 C   s    t |drt |dstddS )zMake sure estimator has the required fit and predict methods.

        Parameters
        ----------
        estimator : object
            The init estimator to check.
        """
        if not (hasattr(estimator, "fit") and hasattr(estimator, "predict")):
            raise ValueError(
                "The init parameter must be a valid estimator and support "
                "both fit and predict."
            )

    def get_init_raw_predictions(self, X, estimator):
        predictions = estimator.predict(X)
        return predictions.reshape(-1, 1).astype(np.float64)


class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    """

    def init_estimator(self):
        return DummyRegressor(strategy="mean")

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the least squares loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves).

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        if sample_weight is None:
            return np.mean((y - raw_predictions.ravel()) ** 2)
        else:
            return (
                1
                / sample_weight.sum()
                * np.sum(sample_weight * ((y - raw_predictions.ravel()) ** 2))
            )

    def negative_gradient(self, y, raw_predictions, **kargs):
        """Compute half of the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples,)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        """
        return y - raw_predictions.ravel()

    def update_terminal_regions(
        self,
        tree,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
        sample_mask,
        learning_rate=0.1,
        k=0,
    ):
        """Least squares does not need to update terminal regions.

        But it has to update the predictions.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray of shape (n_samples, n_features)
            The data array.
        y : ndarray of shape (n_samples,)
            The target labels.
        residual : ndarray of shape (n_samples,)
            The residuals (usually the negative gradient).
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        sample_weight : ndarray of shape (n,)
            The weight of each sample.
        sample_mask : ndarray of shape (n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            Learning rate shrinks the contribution of each tree by
             ``learning_rate``.
        k : int, default=0
            The index of the estimator being updated.
        """
        # update predictions
        raw_predictions[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        pass
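

# --- Editor's illustration (not part of scikit-learn) ------------------------
# For squared error the pseudo-residual is simply ``y - prediction``, so each
# new tree is fit directly to the current errors. ``_sketch_least_squares`` is
# a hypothetical helper added for exposition; defined but never called.
def _sketch_least_squares():
    rng = np.random.RandomState(0)
    y = rng.normal(size=10)
    raw_predictions = np.zeros((10, 1))
    loss = LeastSquaresError()
    value = loss(y, raw_predictions)  # mean squared error against zeros
    residual = loss.negative_gradient(y, raw_predictions)  # equals y here
    return value, residual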
 ZdS )LeastAbsoluteErrorzLoss function for least absolute deviation (LAD) regression.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    """

    def init_estimator(self):
        return DummyRegressor(strategy="quantile", quantile=0.5)

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the least absolute error.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves).

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        if sample_weight is None:
            return np.abs(y - raw_predictions.ravel()).mean()
        else:
            return (
                1
                / sample_weight.sum()
                * np.sum(sample_weight * np.abs(y - raw_predictions.ravel()))
            )

    def negative_gradient(self, y, raw_predictions, **kargs):
        """Compute the negative gradient.

        1.0 if y - raw_predictions > 0.0 else -1.0

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        """
        raw_predictions = raw_predictions.ravel()
        return 2 * (y - raw_predictions > 0) - 1

    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        """LAD updates terminal regions to median estimates."""
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        diff = y.take(terminal_region, axis=0) - raw_predictions.take(
            terminal_region, axis=0
        )
        tree.value[leaf, 0, 0] = _weighted_percentile(
            diff, sample_weight, percentile=50
        )
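

# --- Editor's illustration (not part of scikit-learn) ------------------------
# The LAD gradient is only the sign of the residual: every sample pulls the
# prediction with the same force no matter how large its error is, which is
# what makes the fit robust. ``_sketch_lad_gradient`` is a hypothetical helper
# added for exposition; defined but never called.
def _sketch_lad_gradient():
    y = np.array([0.0, 5.0, -3.0])
    raw_predictions = np.zeros((3, 1))
    loss = LeastAbsoluteError()
    return loss.negative_gradient(y, raw_predictions)  # array([-1, 1, -1])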
dZdd Z  Z	S )HuberLossFunctionah  Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001.

    Parameters
    ----------
    alpha : float, default=0.9
        Percentile at which to extract score.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """

    def __init__(self, alpha=0.9):
        super().__init__()
        self.alpha = alpha
        self.gamma = None

    def init_estimator(self):
        return DummyRegressor(strategy="quantile", quantile=0.5)

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the Huber loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        gamma = self.gamma
        if gamma is None:
            if sample_weight is None:
                gamma = np.percentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(
                    np.abs(diff), sample_weight, self.alpha * 100
                )

        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2)
            lin_loss = np.sum(
                gamma
                * sample_weight[~gamma_mask]
                * (np.abs(diff[~gamma_mask]) - gamma / 2)
            )
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, raw_predictions, sample_weight=None, **kargs):
        """Compute the negative gradient.
|S )a  Compute the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        Nrd   r   Zdtype)rQ   r$   r\   rY   ra   r
   zerosre   rJ   signrb   )	r   r   r   r   r   r^   rb   rf   r+   r   r   r   r     s    z#HuberLossFunction.negative_gradientc	              	   C   s   t ||kd }	|j|	dd}| j}
|j|	dd|j|	dd }t||dd}|| }|t t |t t ||
  |j	|df< d S )Nr   r!   rZ   r[   )
r$   r%   r(   rb   r
   rM   rj   minimumrY   r'   )r   r)   r/   r0   r*   r   r+   r   r   r]   rb   r^   ZmedianZdiff_minus_medianr   r   r   r&     s     z)HuberLossFunction._update_terminal_region)r`   )N)N
r5   r6   r7   r8   r   r   r   r   r&   rK   r   r   r?   r   r_   x  s   
)
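

# --- Editor's illustration (not part of scikit-learn) ------------------------
# Huber residuals are clipped at ``gamma``, the ``alpha`` percentile of the
# absolute errors, so a handful of gross outliers cannot dominate the next
# tree. ``_sketch_huber_clipping`` is a hypothetical helper added for
# exposition; defined but never called.
def _sketch_huber_clipping():
    rng = np.random.RandomState(0)
    y = rng.normal(size=100)
    y[:5] += 100.0  # a few gross outliers
    raw_predictions = np.zeros((100, 1))
    loss = HuberLossFunction(alpha=0.9)
    residual = loss.negative_gradient(y, raw_predictions)
    # every residual magnitude is at most gamma
    return loss.gamma, np.abs(residual).max()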


class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows estimating the percentiles
    of the conditional distribution of the target.

    Parameters
    ----------
    alpha : float, default=0.9
        The percentile.
    """

    def __init__(self, alpha=0.9):
        super().__init__()
        self.alpha = alpha
        self.percentile = alpha * 100

    def init_estimator(self):
        return DummyRegressor(strategy="quantile", quantile=self.alpha)

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the Quantile loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        alpha = self.alpha

        mask = y > raw_predictions
        if sample_weight is None:
            loss = (
                alpha * diff[mask].sum() - (1 - alpha) * diff[~mask].sum()
            ) / y.shape[0]
        else:
            loss = (
                alpha * np.sum(sample_weight[mask] * diff[mask])
                - (1 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])
            ) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, raw_predictions, **kargs):
        """Compute the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        """
        alpha = self.alpha
        raw_predictions = raw_predictions.ravel()
        mask = y > raw_predictions
        return (alpha * mask) - ((1 - alpha) * ~mask)

    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = y.take(terminal_region, axis=0) - raw_predictions.take(
            terminal_region, axis=0
        )
        sample_weight = sample_weight.take(terminal_region, axis=0)

        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
rm   c                   @   s0   e Zd ZdZedd Zedd Zdd ZdS )	ClassificationLossFunctionz-Base class for classification loss functions.c                 C   s   dS )a  Template method to convert raw predictions into probabilities.

        Parameters
        ----------
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        Returns
        -------
        probas : ndarray of shape (n_samples, K)
            The predicted probabilities.
        """

    @abstractmethod
    def _raw_prediction_to_decision(self, raw_predictions):
        """Template method to convert raw predictions to decisions.

        Parameters
        ----------
        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        Returns
        -------
        encoded_predictions : ndarray of shape (n_samples, K)
            The predicted encoded labels.
        """

    def check_init_estimator(self, estimator):
        """Make sure estimator has fit and predict_proba methods.

        Parameters
        ----------
        estimator : object
            The init estimator to check.
        """
        if not (hasattr(estimator, "fit") and hasattr(estimator, "predict_proba")):
            raise ValueError(
                "The init parameter must be a valid estimator and support "
                "both fit and predict_proba."
            )


class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError(
                "{0:s} requires 2 classes; got {1:d} class(es)".format(
                    self.__class__.__name__, n_classes
                )
            )
        # we only need to fit one tree for binary clf.
        super().__init__(n_classes=1)

    def init_estimator(self):
        # return the most common class, taking into account the samples
        # weights
        return DummyClassifier(strategy="prior")

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood).

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        # logaddexp(0, v) == log(1.0 + exp(v))
        raw_predictions = raw_predictions.ravel()
        if sample_weight is None:
            return -2 * np.mean(
                (y * raw_predictions) - np.logaddexp(0, raw_predictions)
            )
        else:
            return (
                -2
                / sample_weight.sum()
                * np.sum(
                    sample_weight
                    * ((y * raw_predictions) - np.logaddexp(0, raw_predictions))
                )
            )

    def negative_gradient(self, y, raw_predictions, **kargs):
        """Compute half of the negative gradient.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        """
        return y - expit(raw_predictions.ravel())

    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        """Make a single Newton-Raphson step.

        our node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        we take advantage that: y - prob = residual
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))

        # prevents overflow and division by zero
        if abs(denominator) < 1e-150:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _raw_prediction_to_proba(self, raw_predictions):
        proba = np.ones((raw_predictions.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(raw_predictions.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _raw_prediction_to_decision(self, raw_predictions):
        proba = self._raw_prediction_to_proba(raw_predictions)
        return np.argmax(proba, axis=1)

    def get_init_raw_predictions(self, X, estimator):
        probas = estimator.predict_proba(X)
        proba_pos_class = probas[:, 1]
        eps = np.finfo(np.float32).eps
        proba_pos_class = np.clip(proba_pos_class, eps, 1 - eps)
        # log(x / (1 - x)) is the inverse of the sigmoid (expit) function
        raw_predictions = np.log(proba_pos_class / (1 - proba_pos_class))
        return raw_predictions.reshape(-1, 1).astype(np.float64)
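

# --- Editor's illustration (not part of scikit-learn) ------------------------
# Raw predictions are log-odds here: ``expit`` turns them into probabilities
# and half the negative gradient is simply ``y - p``.
# ``_sketch_binomial_deviance`` is a hypothetical helper added for exposition;
# defined but never called.
def _sketch_binomial_deviance():
    y = np.array([0.0, 1.0, 1.0])
    raw_predictions = np.array([[-2.0], [0.5], [3.0]])
    loss = BinomialDeviance(n_classes=2)
    proba = loss._raw_prediction_to_proba(raw_predictions)  # column 1 is expit
    residual = loss.negative_gradient(y, raw_predictions)  # y - expit(raw)
    return loss(y, raw_predictions), proba, residual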
dd Zdd Z  ZS )MultinomialDeviancezMultinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    """

    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError(
                "{0:s} requires more than 2 classes.".format(self.__class__.__name__)
            )
        super().__init__(n_classes)

    def init_estimator(self):
        return DummyClassifier(strategy="prior")

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the Multinomial deviance.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k

        return np.average(
            -1 * (Y * raw_predictions).sum(axis=1) + logsumexp(raw_predictions, axis=1),
            weights=sample_weight,
        )

    def negative_gradient(self, y, raw_predictions, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            The target labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.

        k : int, default=0
            The index of the class.
        """
        return y - np.nan_to_num(
            np.exp(raw_predictions[:, k] - logsumexp(raw_predictions, axis=1))
        )

    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        """Make a single Newton-Raphson step."""
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        numerator *= (self.K - 1) / self.K

        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))

        # prevents overflow and division by zero
        if abs(denominator) < 1e-150:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _raw_prediction_to_proba(self, raw_predictions):
        return np.nan_to_num(
            np.exp(raw_predictions - logsumexp(raw_predictions, axis=1)[:, np.newaxis])
        )

    def _raw_prediction_to_decision(self, raw_predictions):
        proba = self._raw_prediction_to_proba(raw_predictions)
        return np.argmax(proba, axis=1)

    def get_init_raw_predictions(self, X, estimator):
        probas = estimator.predict_proba(X)
        eps = np.finfo(np.float32).eps
        probas = np.clip(probas, eps, 1 - eps)
        raw_predictions = np.log(probas).astype(np.float64)
        return raw_predictions
 Zdd Zdd Z	dd Z
dd Z  ZS )ExponentialLossa  Exponential loss function for binary classification.

    Same loss as AdaBoost.

    Parameters
    ----------
    n_classes : int
        Number of classes.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError(
                "{0:s} requires 2 classes; got {1:d} class(es)".format(
                    self.__class__.__name__, n_classes
                )
            )
        # we only need to fit one tree for binary clf.
        super().__init__(n_classes=1)

    def init_estimator(self):
        return DummyClassifier(strategy="prior")

    def __call__(self, y, raw_predictions, sample_weight=None):
        """Compute the exponential loss.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble.

        sample_weight : ndarray of shape (n_samples,), default=None
            Sample weights.
        """
        raw_predictions = raw_predictions.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2.0 * y - 1.0) * raw_predictions))
        else:
            return (
                1.0
                / sample_weight.sum()
                * np.sum(sample_weight * np.exp(-(2 * y - 1) * raw_predictions))
            )

    def negative_gradient(self, y, raw_predictions, **kargs):
        """Compute the residual (= negative gradient).

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            True labels.

        raw_predictions : ndarray of shape (n_samples, K)
            The raw predictions (i.e. values from the tree leaves) of the
            tree ensemble at iteration ``i - 1``.
        """
        y_ = 2.0 * y - 1.0
        return y_ * np.exp(-y_ * raw_predictions.ravel())

    def _update_terminal_region(
        self,
        tree,
        terminal_regions,
        leaf,
        X,
        y,
        residual,
        raw_predictions,
        sample_weight,
    ):
        terminal_region = np.where(terminal_regions == leaf)[0]
        raw_predictions = raw_predictions.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        y_ = 2.0 * y - 1.0

        numerator = np.sum(y_ * sample_weight * np.exp(-y_ * raw_predictions))
        denominator = np.sum(sample_weight * np.exp(-y_ * raw_predictions))

        # prevents overflow and division by zero
        if abs(denominator) < 1e-150:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _raw_prediction_to_proba(self, raw_predictions):
        proba = np.ones((raw_predictions.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(2.0 * raw_predictions.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _raw_prediction_to_decision(self, raw_predictions):
        return (raw_predictions.ravel() >= 0).astype(int)

    def get_init_raw_predictions(self, X, estimator):
        probas = estimator.predict_proba(X)
        proba_pos_class = probas[:, 1]
        eps = np.finfo(np.float32).eps
        proba_pos_class = np.clip(proba_pos_class, eps, 1 - eps)
        # according to The Elements of Statistical Learning sec. 10.5, the
        # minimizer of the exponential loss is .5 * log odds ratio. So this is
        # the equivalent to .5 * binomial_deviance.get_init_raw_predictions()
        raw_predictions = 0.5 * np.log(proba_pos_class / (1 - proba_pos_class))
        return raw_predictions.reshape(-1, 1).astype(np.float64)


LOSS_FUNCTIONS = {
    "squared_error": LeastSquaresError,
    "absolute_error": LeastAbsoluteError,
    "huber": HuberLossFunction,
    "quantile": QuantileLossFunction,
    "log_loss": None,  # for both, multinomial and binomial
    "exponential": ExponentialLoss,
}