import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real

import numpy as np
import scipy.sparse as sp

from ..base import BaseEstimator, ClassifierMixin, _fit_context
from ..exceptions import ConvergenceWarning, NotFittedError
from ..preprocessing import LabelEncoder
from ..utils import (
    check_array,
    check_random_state,
    column_or_1d,
    compute_class_weight,
)
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import safe_sparse_dot
from ..utils.metaestimators import available_if
from ..utils.multiclass import _ovr_decision_function, check_classification_targets
from ..utils.validation import (
    _check_large_sparse,
    _check_sample_weight,
    _num_samples,
    check_consistent_length,
    check_is_fitted,
)
from . import _liblinear as liblinear  # type: ignore
from . import _libsvm as libsvm  # type: ignore
from . import _libsvm_sparse as libsvm_sparse  # type: ignore

LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"]


def _one_vs_one_coef(dual_coef, n_support, support_vectors):
    """Generate primal coefficients from dual coefficients
    for the one-vs-one multi class LibSVM in the case
    of a linear kernel."""
    # get 1vs1 weights for all n*(n-1) classifiers.
    # shape of dual_coef is (n_class - 1, n_SV); the support vectors of each
    # class are stored contiguously, located via the cumulative n_support.
    n_class = dual_coef.shape[0] + 1

    # XXX we could do preallocation of coef but
    # would have to take care in the sparse case
    coef = []
    sv_locs = np.cumsum(np.hstack([[0], n_support]))
    for class1 in range(n_class):
        # SVs for class1:
        sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
        for class2 in range(class1 + 1, n_class):
            # SVs for class2:
            sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]

            # dual coef for class1 SVs:
            alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
            # dual coef for class2 SVs:
            alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
            # build weight for class1 vs class2
            coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
    return coef
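
# A minimal usage sketch (illustrative; the toy data below is an assumption,
# not part of this module). For a fitted linear one-vs-one SVC, the helper
# above rebuilds one primal weight vector per class pair from the dual
# coefficients, which is exactly how the public `coef_` is derived:
#
#     from sklearn.svm import SVC
#     import numpy as np
#
#     X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0], [0.0, 2.0]])
#     y = np.array([0, 0, 1, 2])
#     clf = SVC(kernel="linear").fit(X, y)
#     pair_coef = _one_vs_one_coef(
#         clf.dual_coef_, clf.n_support_, clf.support_vectors_
#     )
#     # one weight vector per pair: (0, 1), (0, 2), (1, 2)
#     assert np.allclose(np.vstack(pair_coef), clf.coef_)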
geddheeddd	d
geedddd
geedddd
geedddd
geedddd
geeddd	d
gdgdgeedddd
gedhe	dgdgeeddd	d
gdgdZ
e	ed< dddddgZedd Zdd Zeddd>ddZd d! Zd"d# Zd$d% Zd&d' Zd(d) Zd*d+ Zd,d- Zd.d/ Zd0d1 Zd2d3 Zd4d5 Zd6d7 Zed8d9 Zd:d; Z ed<d= Z!dS )?
BaseLibSVMzBase class for estimators that use libsvm as backing library.

    This implements support vector machine classification and regression.

    Parameter documentation is in the derived `SVC` class.
    linearZpolyZrbfZsigmoidprecomputedr   Nleft)closedscaleauto        Zneither      ?rightbooleanZbalancedverboserandom_statekerneldegreegammacoef0tolCnuepsilon	shrinkingprobability
cache_sizeclass_weightr7   max_iterr9   _parameter_constraintsc                 C   sz   | j tkrtdt| j f || _|| _|| _|| _|| _|| _|| _	|| _
|	| _|
| _|| _|| _|| _|| _|| _d S )Nz&impl should be one of %s, %s was given)_implLIBSVM_IMPL
ValueErrorr;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   r7   rG   r9   )selfr;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   r7   rG   r9   r)   r)   r*   __init__f   s&    
zBaseLibSVM.__init__c                 C   s   d| j dkiS )Npairwiser.   )r;   rL   r)   r)   r*   
    def _more_tags(self):
        # Used by cross_val_score.
        return {"pairwise": self.kernel == "precomputed"}

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) \
                or (n_samples, n_samples)
            Training vectors, where `n_samples` is the number of samples
            and `n_features` is the number of features.
            For kernel="precomputed", the expected shape of X is
            (n_samples, n_samples).

        y : array-like of shape (n_samples,)
            Target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Fitted estimator.

        Notes
        -----
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        """
        rnd = check_random_state(self.random_state)

        sparse = sp.issparse(X)
        if sparse and self.kernel == "precomputed":
            raise TypeError("Sparse precomputed kernels are not supported.")
        self._sparse = sparse and not callable(self.kernel)

        if callable(self.kernel):
            check_consistent_length(X, y)
        else:
            X, y = self._validate_data(
                X,
                y,
                dtype=np.float64,
                order="C",
                accept_sparse="csr",
                accept_large_sparse=False,
            )

        y = self._validate_targets(y)

        sample_weight = np.asarray(
            [] if sample_weight is None else sample_weight, dtype=np.float64
        )
        solver_type = LIBSVM_IMPL.index(self._impl)

        # input validation
        n_samples = _num_samples(X)
        if solver_type != 2 and n_samples != y.shape[0]:
            raise ValueError(
                "X and y have incompatible shapes.\n"
                + "X has %s samples, but y has %s." % (n_samples, y.shape[0])
            )

        if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
            raise ValueError(
                "Precomputed matrix must be a square matrix."
                " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
            )

        if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
            raise ValueError(
                "sample_weight and X have incompatible shapes: "
                "%r vs %r\n"
                "Note: Sparse matrices cannot be indexed w/"
                "boolean masks (use `indices=True` in CV)."
                % (sample_weight.shape, X.shape)
            )

        kernel = "precomputed" if callable(self.kernel) else self.kernel

        if kernel == "precomputed":
            # unused but needs to be a float for cython code that ignores
            # it anyway
            self._gamma = 0.0
        elif isinstance(self.gamma, str):
            if self.gamma == "scale":
                # var = E[X^2] - E[X]^2 if sparse
                X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
                self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
            elif self.gamma == "auto":
                self._gamma = 1.0 / X.shape[1]
        elif isinstance(self.gamma, Real):
            self._gamma = self.gamma

        fit = self._sparse_fit if self._sparse else self._dense_fit
        if self.verbose:
            print("[LibSVM]", end="")

        # seed for the C library's RNG; bounded by INT_MAX
        seed = rnd.randint(np.iinfo("i").max)
        fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)

        self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,)

        # In binary case, we need to flip the sign of coef, intercept and
        # decision function. Use self._intercept_ and self._dual_coef_
        # internally.
        self._intercept_ = self.intercept_.copy()
        self._dual_coef_ = self.dual_coef_
        if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
            self.intercept_ *= -1
            self.dual_coef_ = -self.dual_coef_

        dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_
        intercept_finiteness = np.isfinite(self._intercept_).all()
        dual_coef_finiteness = np.isfinite(dual_coef).all()
        if not (intercept_finiteness and dual_coef_finiteness):
            raise ValueError(
                "The dual coefficients or intercepts are not finite."
                " The input data may contain large values and need to be"
                " preprocessed."
            )

        # For SVC and NuSVC, libsvm may optimize several models (one per class
        # pair), so `n_iter_` is an ndarray; otherwise it is a single integer.
        if self._impl in ["c_svc", "nu_svc"]:
            self.n_iter_ = self._num_iter
        else:
            self.n_iter_ = self._num_iter.item()

        return self
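    # A minimal `fit` sketch (illustrative; the data and weights below are
    # assumptions, not part of this module). Per-sample weights rescale C for
    # each point, as described in the docstring above:
    #
    #     from sklearn.svm import SVC
    #     import numpy as np
    #
    #     X = np.array([[0.0], [1.0], [2.0], [3.0]])
    #     y = np.array([0, 0, 1, 1])
    #     w = np.array([1.0, 1.0, 1.0, 10.0])  # emphasize the last sample
    #     clf = SVC(kernel="linear", C=1.0).fit(X, y, sample_weight=w)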
zBaseLibSVM.fitc                 C   s   t |ddjtjddS )zxValidation of y and class_weight.

        Default implementation for SVR and one-class; overridden in BaseSVC.
        TwarnF)rv   )r   Zastyper#   rc   )rL   r   r)   r)   r*   rd     s    zBaseLibSVM._validate_targetsc                 C   s.   | j dkst| j dkr*td| j t d S )Nr   r   r   znSolver terminated early (max_iter=%i).  Consider pre-processing your data with StandardScaler or MinMaxScaler.)fit_status_AssertionErrorwarningsr   rG   r
   rO   r)   r)   r*   _warn_from_fit_status&  s    
    def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
        if callable(self.kernel):
            # you must store a reference to X to compute the kernel in predict
            self.__Xfit = X
            X = self._compute_kernel(X)
            if X.shape[0] != X.shape[1]:
                raise ValueError("X.shape[0] should be equal to X.shape[1]")

        libsvm.set_verbosity_wrap(self.verbose)

        # we don't pass **self.get_params() to allow subclasses to add
        # other parameters to __init__
        (
            self.support_, self.support_vectors_, self._n_support,
            self.dual_coef_, self.intercept_, self._probA, self._probB,
            self.fit_status_, self._num_iter,
        ) = libsvm.fit(
            X, y,
            svm_type=solver_type, sample_weight=sample_weight,
            class_weight=getattr(self, "class_weight_", np.empty(0)),
            kernel=kernel, C=self.C, nu=self.nu, probability=self.probability,
            degree=self.degree, shrinking=self.shrinking, tol=self.tol,
            cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma,
            epsilon=self.epsilon, max_iter=self.max_iter,
            random_seed=random_seed,
        )

        self._warn_from_fit_status()

    def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")
        X.sort_indices()

        kernel_type = self._sparse_kernels.index(kernel)

        libsvm_sparse.set_verbosity_wrap(self.verbose)

        (
            self.support_, self.support_vectors_, dual_coef_data,
            self.intercept_, self._n_support, self._probA, self._probB,
            self.fit_status_, self._num_iter,
        ) = libsvm_sparse.libsvm_sparse_train(
            X.shape[1], X.data, X.indices, X.indptr, y,
            solver_type, kernel_type, self.degree, self._gamma, self.coef0,
            self.tol, self.C,
            getattr(self, "class_weight_", np.empty(0)), sample_weight,
            self.nu, self.cache_size, self.epsilon,
            int(self.shrinking), int(self.probability), self.max_iter,
            random_seed,
        )

        self._warn_from_fit_status()

        if hasattr(self, "classes_"):
            n_class = len(self.classes_) - 1
        else:  # regression
            n_class = 1
        n_SV = self.support_vectors_.shape[0]

        dual_coef_indices = np.tile(np.arange(n_SV), n_class)
        if not n_SV:
            self.dual_coef_ = sp.csr_matrix([])
        else:
            dual_coef_indptr = np.arange(
                0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class
            )
            self.dual_coef_ = sp.csr_matrix(
                (dual_coef_data, dual_coef_indices, dual_coef_indptr),
                (n_class, n_SV),
            )
    def predict(self, X):
        """Perform regression on samples in X.

        For a one-class model, +1 (inlier) or -1 (outlier) is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            The predicted values.
        """
        X = self._validate_for_predict(X)
        predict = self._sparse_predict if self._sparse else self._dense_predict
        return predict(X)
zBaseLibSVM.predictc                 C   s   |  |}|jdkr"t|ddd}| j}t| jrfd}|jd | jd krftd|jd | jd f t	| j
}tj|| j| j| j| j| j| j| j||| j| j| j| jdS )	Nr   r@   F)rS   rU   r.   r   MX.shape[1] = %d should be equal to %d, the number of samples at training time)r   r;   r<   r>   r=   rE   )r   ndimr   r;   r`   r"   rt   rK   rJ   rf   rI   r   r   r   r   r   ry   rw   r   r   r<   r>   rh   rE   )rL   r   r;   r   r)   r)   r*   r     s:    


zBaseLibSVM._dense_predictc                 C   s   | j }t|rd}| j|}d}t|j|j|j| j	j| j	j| j	j| j
j| jt| j|| j| j| j| j|t| dtd| j| j| j| j| j| j| jS )Nr.   r3   r   r   )r;   r`   r   rf   r   Zlibsvm_sparse_predictr|   r   r   r   ry   rw   rJ   rI   r<   rh   r>   r?   r   r#   r   rA   rB   rC   rD   r   r   r   )rL   r   r;   r   r@   r)   r)   r*   r     s<    
zBaseLibSVM._sparse_predictc                 C   s@   t | jr<| || j}t|r*| }tj|tjdd}|S )z0Return the data transformed by a callable kernelr@   r   )	r`   r;   r   r]   r^   Ztoarrayr#   re   rc   rL   r   r;   r)   r)   r*   r     s    

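    # A minimal callable-kernel sketch (illustrative; the kernel function and
    # data are assumptions). A callable kernel is evaluated against the
    # training set stored at fit time, like a precomputed Gram matrix:
    #
    #     from sklearn.svm import SVC
    #     import numpy as np
    #
    #     def my_linear_kernel(A, B):
    #         return A @ B.T
    #
    #     X = np.array([[0.0, 1.0], [1.0, 0.0], [2.0, 2.0]])
    #     y = np.array([0, 1, 1])
    #     clf = SVC(kernel=my_linear_kernel).fit(X, y)
    #     clf.predict(X)  # the Gram matrix X @ X.T is computed internally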
zBaseLibSVM._compute_kernelc                 C   sV   |  |}| |}| jr&| |}n
| |}| jdkrRt| jdkrR|  S |S )af  Evaluates the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        r\   r   )	r   r   ra   _sparse_decision_function_dense_decision_functionrI   rz   r{   ravel)rL   r   Zdec_funcr)   r)   r*   _decision_function  s    



    def _dense_decision_function(self, X):
        X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False)

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        return libsvm.decision_function(
            X,
            self.support_, self.support_vectors_, self._n_support,
            self._dual_coef_, self._intercept_, self._probA, self._probB,
            svm_type=LIBSVM_IMPL.index(self._impl),
            kernel=kernel, degree=self.degree,
            cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma,
        )

    def _sparse_decision_function(self, X):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")

        kernel = self.kernel
        if hasattr(kernel, "__call__"):
            kernel = "precomputed"

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_decision_function(
            X.data, X.indices, X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data, self._intercept_,
            LIBSVM_IMPL.index(self._impl), kernel_type,
            self.degree, self._gamma, self.coef0, self.tol, self.C,
            getattr(self, "class_weight_", np.empty(0)),
            self.nu, self.epsilon, self.shrinking, self.probability,
            self._n_support, self._probA, self._probB,
        )

    def _validate_for_predict(self, X):
        check_is_fitted(self)

        if not callable(self.kernel):
            X = self._validate_data(
                X,
                accept_sparse="csr",
                dtype=np.float64,
                order="C",
                accept_large_sparse=False,
                reset=False,
            )

        if self._sparse and not sp.issparse(X):
            X = sp.csr_matrix(X)
        if self._sparse:
            X.sort_indices()

        if sp.issparse(X) and not self._sparse and not callable(self.kernel):
            raise ValueError(
                "cannot use sparse input in %r trained on dense data"
                % type(self).__name__
            )

        if self.kernel == "precomputed":
            if X.shape[1] != self.shape_fit_[0]:
                raise ValueError(
                    "X.shape[1] = %d should be equal to %d, "
                    "the number of samples at training time"
                    % (X.shape[1], self.shape_fit_[0])
                )
        # Check that _n_support is consistent with support_vectors
        sv = self.support_vectors_
        if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]:
            raise ValueError(
                f"The internal representation of {self.__class__.__name__} was altered"
            )

        return X
    @property
    def coef_(self):
        """Weights assigned to the features when `kernel="linear"`.

        Returns
        -------
        ndarray of shape (n_features, n_classes)
        """
        if self.kernel != "linear":
            raise AttributeError("coef_ is only available when using a linear kernel")

        coef = self._get_coef()

        # coef_ being a read-only property, it's better to mark the value as
        # immutable to avoid hiding potential bugs for the unsuspecting user.
        if sp.issparse(coef):
            # sparse matrices do not have global flags
            coef.data.flags.writeable = False
        else:
            # regular dense array
            coef.flags.writeable = False
        return coef

    def _get_coef(self):
        return safe_sparse_dot(self._dual_coef_, self.support_vectors_)

    @property
    def n_support_(self):
        """Number of support vectors for each class."""
        try:
            check_is_fitted(self)
        except NotFittedError:
            raise AttributeError

        svm_type = LIBSVM_IMPL.index(self._impl)
        if svm_type in (0, 1):
            return self._n_support
        else:
            # SVR and OneClass: _n_support has size 2, we make it size 1
            return np.array([self._n_support[0]])
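
# A minimal primal/dual sketch (illustrative; the toy data is an assumption).
# For a binary linear SVC, the public `coef_` is exactly
# `dual_coef_ @ support_vectors_`:
#
#     from sklearn.svm import SVC
#     import numpy as np
#
#     X = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
#     y = np.array([0, 1, 0, 1])
#     clf = SVC(kernel="linear").fit(X, y)
#     w = clf.dual_coef_ @ clf.support_vectors_
#     assert np.allclose(w, clf.coef_)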

class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta):
    """ABC for LibSVM-based classifiers."""

    _parameter_constraints: dict = {
        **BaseLibSVM._parameter_constraints,
        "decision_function_shape": [StrOptions({"ovr", "ovo"})],
        "break_ties": ["boolean"],
    }
    for unused_param in ["epsilon", "nu"]:
        _parameter_constraints.pop(unused_param)

    @abstractmethod
    def __init__(
        self, kernel, degree, gamma, coef0, tol, C, nu, shrinking, probability,
        cache_size, class_weight, verbose, max_iter, decision_function_shape,
        random_state, break_ties,
    ):
        self.decision_function_shape = decision_function_shape
        self.break_ties = break_ties
        super().__init__(
            kernel=kernel,
            degree=degree,
            gamma=gamma,
            coef0=coef0,
            tol=tol,
            C=C,
            nu=nu,
            epsilon=0.0,
            shrinking=shrinking,
            probability=probability,
            cache_size=cache_size,
            class_weight=class_weight,
            verbose=verbose,
            max_iter=max_iter,
            random_state=random_state,
        )

    def _validate_targets(self, y):
        y_ = column_or_1d(y, warn=True)
        check_classification_targets(y_)
        cls, y = np.unique(y_, return_inverse=True)
        self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_)
        if len(cls) < 2:
            raise ValueError(
                "The number of classes has to be greater than one; got %d class"
                % len(cls)
            )

        self.classes_ = cls

        return np.asarray(y, dtype=np.float64, order="C")
    def decision_function(self, X):
        """Evaluate the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
            If decision_function_shape='ovr', the shape is (n_samples,
            n_classes).

        Notes
        -----
        If decision_function_shape='ovo', the function values are proportional
        to the distance of the samples X to the separating hyperplane. If the
        exact distances are required, divide the function values by the norm of
        the weight vector (``coef_``). See also `this question
        <https://stats.stackexchange.com/questions/14876/
        interpreting-distance-from-hyperplane-in-svm>`_ for further details.
        If decision_function_shape='ovr', the decision function is a monotonic
        transformation of ovo decision function.
        """
        dec = self._decision_function(X)
        if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
            return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
        return dec
|}| jtj|tjdS )a  Perform classification on samples in X.

        For an one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or                 (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            Class labels for samples in X.
        r   z>break_ties must be False when decision_function_shape is 'ovo'r   r   r   )ZaxisrV   )r   r   r   rK   rz   r{   r#   Zargmaxr   r   r   Ztakere   Zintp)rL   r   r   r   r)   r*   r     s    zBaseSVC.predictc                 C   s$   | j std| jdkr tddS )Nz5predict_proba is not available when probability=Falser\   z0predict_proba only implemented for SVC and NuSVCT)rD   r   rI   rO   r)   r)   r*   _check_proba9  s    
zBaseSVC._check_probac                 C   sD   |  |}| jjdks"| jjdkr*td| jr6| jn| j}||S )a  Compute probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        r   zApredict_proba is not available when fitted with probability=False)r   probA_r   probB_r   ra   _sparse_predict_proba_dense_predict_proba)rL   r   Z
pred_probar)   r)   r*   predict_probaB  s    
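    # A minimal `predict_proba` sketch (illustrative; the toy data is an
    # assumption). Platt-scaled probabilities come from an internal cross
    # validation, so their argmax may occasionally disagree with `predict`:
    #
    #     from sklearn.svm import SVC
    #     import numpy as np
    #
    #     X = np.array([[0.0], [0.5], [1.0], [1.5], [2.5], [3.0], [3.5], [4.0]])
    #     y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    #     clf = SVC(probability=True, random_state=0).fit(X, y)
    #     proba = clf.predict_proba(X)       # shape (8, 2), rows sum to 1
    #     log_proba = clf.predict_log_proba(X)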
    @available_if(_check_proba)
    def predict_log_proba(self, X):
        """Compute log probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the log-probabilities of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        """
        return np.log(self.predict_proba(X))

    def _dense_predict_proba(self, X):
        X = self._compute_kernel(X)

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        svm_type = LIBSVM_IMPL.index(self._impl)
        pprob = libsvm.predict_proba(
            X,
            self.support_, self.support_vectors_, self._n_support,
            self._dual_coef_, self._intercept_, self._probA, self._probB,
            svm_type=svm_type, kernel=kernel, degree=self.degree,
            cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma,
        )

        return pprob

    def _sparse_predict_proba(self, X):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_predict_proba(
            X.data, X.indices, X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data, self._intercept_,
            LIBSVM_IMPL.index(self._impl), kernel_type,
            self.degree, self._gamma, self.coef0, self.tol, self.C,
            getattr(self, "class_weight_", np.empty(0)),
            self.nu, self.epsilon, self.shrinking, self.probability,
            self._n_support, self._probA, self._probB,
        )

    def _get_coef(self):
        if self.dual_coef_.shape[0] == 1:
            # binary classifier
            coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
        else:
            # 1vs1 classifier
            coef = _one_vs_one_coef(
                self.dual_coef_, self._n_support, self.support_vectors_
            )
            if sp.issparse(coef[0]):
                coef = sp.vstack(coef).tocsr()
            else:
                coef = np.vstack(coef)

        return coef
    @property
    def probA_(self):
        """Parameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        """
        return self._probA

    @property
    def probB_(self):
        """Parameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        """
        return self._probB

    @property
    def _class_weight(self):
        """Weights per class"""
        return self.class_weight_


def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
    """Find the liblinear magic number for the solver.

    This number depends on the values of the following attributes:
      - multi_class
      - penalty
      - loss
      - dual

    The same number is also internally used by LibLinear to determine
    which solver to use.
    """
    # nested dicts containing: level 1: available loss functions,
    # level 2: available penalties for the given loss function,
    # level 3: whether the dual solver is available for the specified
    # combination of loss function and penalty
    _solver_type_dict = {
        "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}},
        "hinge": {"l2": {True: 3}},
        "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}},
        "epsilon_insensitive": {"l2": {True: 13}},
        "squared_epsilon_insensitive": {"l2": {False: 11, True: 12}},
        "crammer_singer": 4,
    }

    if multi_class == "crammer_singer":
        return _solver_type_dict[multi_class]
    elif multi_class != "ovr":
        raise ValueError(
            "`multi_class` must be one of `ovr`, `crammer_singer`, got %r"
            % multi_class
        )

    _solver_pen = _solver_type_dict.get(loss, None)
    if _solver_pen is None:
        error_string = "loss='%s' is not supported" % loss
    else:
        _solver_dual = _solver_pen.get(penalty, None)
        if _solver_dual is None:
            error_string = (
                "The combination of penalty='%s' and loss='%s' is not supported"
                % (penalty, loss)
            )
        else:
            solver_num = _solver_dual.get(dual, None)
            if solver_num is None:
                error_string = (
                    "The combination of penalty='%s' and "
                    "loss='%s' are not supported when dual=%s" % (penalty, loss, dual)
                )
            else:
                return solver_num
    raise ValueError(
        "Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r"
        % (error_string, penalty, loss, dual)
    )


def _fit_liblinear(
    X,
    y,
    C,
    fit_intercept,
    intercept_scaling,
    class_weight,
    penalty,
    dual,
    verbose,
    max_iter,
    tol,
    random_state=None,
    multi_class="ovr",
    loss="logistic_regression",
    epsilon=0.1,
    sample_weight=None,
):
    """Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.

    Preprocessing is done in this function before supplying it to liblinear.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    C : float
        Inverse of cross-validation parameter. The lower the C, the higher
        the penalization.

    fit_intercept : bool
        Whether or not to fit an intercept. If set to True, the feature vector
        is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
        1 corresponds to the intercept. If set to False, no intercept will be
        used in calculations (i.e. data is expected to be already centered).

    intercept_scaling : float
        Liblinear internally penalizes the intercept, treating it like any
        other term in the feature vector. To reduce the impact of the
        regularization on the intercept, the `intercept_scaling` parameter can
        be set to a value greater than 1; the higher the value of
        `intercept_scaling`, the lower the impact of regularization on it.
        Then, the weights become `[w_x_1, ..., w_x_n,
        w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
        the feature weights and the intercept weight is scaled by
        `intercept_scaling`. This scaling allows the intercept term to have a
        different regularization behavior compared to the other features.

    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

    penalty : {'l1', 'l2'}
        The norm of the penalty used in regularization.

    dual : bool
        Dual or primal formulation.

    verbose : int
        Set verbose to any positive number for verbosity.

    max_iter : int
        Number of iterations.

    tol : float
        Stopping condition.

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo random number generation for shuffling the data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'ovr', 'crammer_singer'}, default='ovr'
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    loss : {'logistic_regression', 'hinge', 'squared_hinge', \
            'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
            default='logistic_regression'
        The loss function used to fit the model.

    epsilon : float, default=0.1
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    sample_weight : array-like of shape (n_samples,), default=None
        Weights assigned to each sample.

    Returns
    -------
    coef_ : ndarray of shape (n_features, n_features + 1)
        The coefficient vector obtained by minimizing the objective function.

    intercept_ : float
        The intercept term added to the vector.

    n_iter_ : array of int
        Number of iterations run across for each class.
    """
    if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]:
        enc = LabelEncoder()
        y_ind = enc.fit_transform(y)
        classes_ = enc.classes_
        if len(classes_) < 2:
            raise ValueError(
                "This solver needs samples of at least 2 classes"
                " in the data, but the data contains only one"
                " class: %r" % classes_[0]
            )

        class_weight_ = compute_class_weight(class_weight, classes=classes_, y=y)
    else:
        class_weight_ = np.empty(0, dtype=np.float64)
        y_ind = y
    liblinear.set_verbosity_wrap(verbose)
    rnd = check_random_state(random_state)
    if verbose:
        print("[LibLinear]", end="")

    # LinearSVC breaks when intercept_scaling is <= 0
    bias = -1.0
    if fit_intercept:
        if intercept_scaling <= 0:
            raise ValueError(
                "Intercept scaling is %r but needs to be greater "
                "than 0. To disable fitting an intercept,"
                " set fit_intercept=False." % intercept_scaling
            )
        else:
            bias = intercept_scaling

    libsvm.set_verbosity_wrap(verbose)
    libsvm_sparse.set_verbosity_wrap(verbose)
    liblinear.set_verbosity_wrap(verbose)

    # Liblinear doesn't support 64bit sparse matrix indices yet
    if sp.issparse(X):
        _check_large_sparse(X)

    # LibLinear wants targets as doubles, even for classification
    y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
    y_ind = np.require(y_ind, requirements="W")

    sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)

    solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
    raw_coef_, n_iter_ = liblinear.train_wrap(
        X,
        y_ind,
        sp.issparse(X),
        solver_type,
        tol,
        bias,
        C,
        class_weight_,
        max_iter,
        rnd.randint(np.iinfo("i").max),
        epsilon,
        sample_weight,
    )
    # Regarding rnd.randint(..) in the above signature: seed for srand in
    # range [0..INT_MAX); due to limitations in Numpy on 32-bit platforms,
    # we can't get to the UINT_MAX limit that srand supports.
    n_iter_max = max(n_iter_)
    if n_iter_max >= max_iter:
        warnings.warn(
            "Liblinear failed to converge, increase the number of iterations.",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_ = raw_coef_[:, :-1]
        intercept_ = intercept_scaling * raw_coef_[:, -1]
    else:
        coef_ = raw_coef_
        intercept_ = 0.0

    return coef_, intercept_, n_iter_
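
# A minimal sketch of the liblinear plumbing (illustrative; the data and
# parameter values are assumptions). `_get_liblinear_solver_type` maps the
# (multi_class, penalty, loss, dual) combination to LibLinear's solver id,
# and `_fit_liblinear` is the shared training entry point behind LinearSVC,
# LinearSVR and LogisticRegression:
#
#     import numpy as np
#
#     # l2-penalized squared hinge in the dual is LibLinear solver 1
#     assert _get_liblinear_solver_type("ovr", "l2", "squared_hinge", True) == 1
#
#     X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
#     y = np.array([0, 1, 1, 0])
#     coef, intercept, n_iter = _fit_liblinear(
#         X, y, C=1.0, fit_intercept=True, intercept_scaling=1.0,
#         class_weight=None, penalty="l2", dual=True, verbose=0,
#         max_iter=1000, tol=1e-4, loss="squared_hinge",
#     )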
exceptionsr
   r   Zpreprocessingr   utilsr   r   r   r   Zutils._param_validationr   r   Zutils.extmathr   Zutils.metaestimatorsr   Zutils.multiclassr   r   Zutils.validationr   r   r   r   r   rW   r   r   r   r   r   r   rJ   r+   r,   r   r   r   r)   r)   r)   r*   <module>   sB        u  AE     