"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""

import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real

import numpy as np
from scipy.linalg import svd

from ..base import (
    BaseEstimator,
    ClassNamePrefixFeaturesOutMixin,
    MultiOutputMixin,
    RegressorMixin,
    TransformerMixin,
    _fit_context,
)
from ..exceptions import ConvergenceWarning
from ..utils import check_array, check_consistent_length
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import svd_flip
from ..utils.fixes import parse_version, sp_version
from ..utils.validation import FLOAT_DTYPES, check_is_fitted

__all__ = ["PLSCanonical", "PLSRegression", "PLSSVD"]


if sp_version >= parse_version("1.7"):
    # Starting from scipy 1.7, pinv2 is deprecated in favor of pinv with the
    # same behavior.
    from scipy.linalg import pinv as pinv2
else:
    from scipy.linalg import pinv2


def _pinv2_old(a):
    # Reproduces the scipy pinv2 behavior from before
    # https://github.com/scipy/scipy/pull/10067: singular values below a
    # dtype-dependent cutoff are treated as zero.
    u, s, vh = svd(a, full_matrices=False, check_finite=False)

    t = u.dtype.char.lower()
    factor = {"f": 1e3, "d": 1e6}
    cond = np.max(s) * factor[t] * np.finfo(t).eps
    rank = np.sum(s > cond)

    u = u[:, :rank]
    u /= s[:rank]
    return np.transpose(np.conjugate(np.dot(u, vh[:rank])))
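
# Illustrative, not part of the library: on a full-rank, well-conditioned
# matrix, the old pinv2 behavior coincides with scipy's current `pinv` (the
# two only differ in the small-singular-value cutoff). A minimal sketch,
# assuming NumPy's `default_rng` is available:
#
#   >>> import numpy as np
#   >>> from scipy.linalg import pinv
#   >>> rng = np.random.default_rng(0)
#   >>> a = rng.normal(size=(5, 3))
#   >>> bool(np.allclose(_pinv2_old(a), pinv(a)))
#   True
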
def _get_first_singular_vectors_power_method(
    X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False
):
    """Return the first left and right singular vectors of X'Y.

    Provides an alternative to the svd(X'Y) and uses the power method instead.
    With norm_y_weights to True and in mode A, this corresponds to the
    algorithm section 11.3 of the Wegelin's review, except this starts at the
    "update saliences" part.
    """
    eps = np.finfo(X.dtype).eps
    try:
        y_score = next(col for col in Y.T if np.any(np.abs(col) > eps))
    except StopIteration as e:
        raise StopIteration("Y residual is constant") from e

    x_weights_old = 100  # init to big value for first convergence check

    if mode == "B":
        # Precompute pseudo inverse matrices. Basically: X_pinv = (X.T X)^-1 X.T
        # which requires inverting a (n_features, n_features) matrix. As
        # detailed in Wegelin's review, CCA (i.e. mode B) will be unstable if
        # n_features > n_samples or n_targets > n_samples.
        X_pinv, Y_pinv = _pinv2_old(X), _pinv2_old(Y)

    for i in range(max_iter):
        if mode == "B":
            x_weights = np.dot(X_pinv, y_score)
        else:
            x_weights = np.dot(X.T, y_score) / np.dot(y_score, y_score)

        x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
        x_score = np.dot(X, x_weights)

        if mode == "B":
            y_weights = np.dot(Y_pinv, x_score)
        else:
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)

        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps

        y_score = np.dot(Y, y_weights) / (np.dot(y_weights, y_weights) + eps)

        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        x_weights_old = x_weights

    n_iter = i + 1
    if n_iter == max_iter:
        warnings.warn("Maximum number of iterations reached", ConvergenceWarning)

    return x_weights, y_weights, n_iter
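
# Illustrative, not part of the library: the power method should agree with a
# full SVD of X'Y up to the sign of each vector, assuming the toy data below
# converges within the default tolerance:
#
#   >>> import numpy as np
#   >>> rng = np.random.default_rng(0)
#   >>> X, Y = rng.normal(size=(20, 5)), rng.normal(size=(20, 3))
#   >>> u, v, _ = _get_first_singular_vectors_power_method(X, Y)
#   >>> U, _, Vt = svd(np.dot(X.T, Y), full_matrices=False)
#   >>> bool(np.allclose(np.abs(u), np.abs(U[:, 0]), atol=1e-4))
#   True
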
def _get_first_singular_vectors_svd(X, Y):
    """Return the first left and right singular vectors of X'Y.

    Here the whole SVD is computed.
    """
    C = np.dot(X.T, Y)
    U, _, Vt = svd(C, full_matrices=False)
    return U[:, 0], Vt[0, :]


def _center_scale_xy(X, Y, scale=True):
    """Center X, Y and scale if the scale parameter==True

    Returns
    -------
        X, Y, x_mean, y_mean, x_std, y_std
    """
    # center
    x_mean = X.mean(axis=0)
    X -= x_mean
    y_mean = Y.mean(axis=0)
    Y -= y_mean
    # scale
    if scale:
        x_std = X.std(axis=0, ddof=1)
        x_std[x_std == 0.0] = 1.0
        X /= x_std
        y_std = Y.std(axis=0, ddof=1)
        y_std[y_std == 0.0] = 1.0
        Y /= y_std
    else:
        x_std = np.ones(X.shape[1])
        y_std = np.ones(Y.shape[1])
    return X, Y, x_mean, y_mean, x_std, y_std


def _svd_flip_1d(u, v):
    """Same as svd_flip but works on 1d arrays, and is inplace"""
    # svd_flip would force us to convert to 2d arrays and would also return 2d
    # arrays. We don't want that.
    biggest_abs_val_idx = np.argmax(np.abs(u))
    sign = np.sign(u[biggest_abs_val_idx])
    u *= sign
    v *= sign


class _PLS(
    ClassNamePrefixFeaturesOutMixin,
    TransformerMixin,
    RegressorMixin,
    MultiOutputMixin,
    BaseEstimator,
    metaclass=ABCMeta,
):
    """Partial Least Squares (PLS)

    This class implements the generic PLS algorithm.

    Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
    with emphasis on the two-block case
    https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "scale": ["boolean"],
        "deflation_mode": [StrOptions({"regression", "canonical"})],
        "mode": [StrOptions({"A", "B"})],
        "algorithm": [StrOptions({"svd", "nipals"})],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "copy": ["boolean"],
    }

    @abstractmethod
    def __init__(
        self,
        n_components=2,
        *,
        scale=True,
        deflation_mode="regression",
        mode="A",
        algorithm="nipals",
        max_iter=500,
        tol=1e-06,
        copy=True,
    ):
        self.n_components = n_components
        self.deflation_mode = deflation_mode
        self.mode = mode
        self.scale = scale
        self.algorithm = algorithm
        self.max_iter = max_iter
        self.tol = tol
        self.copy = copy

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        """
        check_consistent_length(X, Y)
        X = self._validate_data(
            X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
        )
        Y = check_array(
            Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
        )
        if Y.ndim == 1:
            self._predict_1d = True
            Y = Y.reshape(-1, 1)
        else:
            self._predict_1d = False

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        n_components = self.n_components
        # With PLSRegression n_components is bounded by the rank of (X.T X),
        # see Wegelin page 25. With CCA and PLSCanonical, n_components is
        # bounded by the rank of X and the rank of Y: see Wegelin page 12.
        rank_upper_bound = (
            min(n, p) if self.deflation_mode == "regression" else min(n, p, q)
        )
        if n_components > rank_upper_bound:
            raise ValueError(
                f"`n_components` upper bound is {rank_upper_bound}. "
                f"Got {n_components} instead. Reduce `n_components`."
            )

        self._norm_y_weights = self.deflation_mode == "canonical"
        norm_y_weights = self._norm_y_weights

        # Scale (in place)
        Xk, Yk, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
            X, Y, self.scale
        )

        self.x_weights_ = np.zeros((p, n_components))  # U
        self.y_weights_ = np.zeros((q, n_components))  # V
        self._x_scores = np.zeros((n, n_components))  # Xi
        self._y_scores = np.zeros((n, n_components))  # Omega
        self.x_loadings_ = np.zeros((p, n_components))  # Gamma
        self.y_loadings_ = np.zeros((q, n_components))  # Delta
        self.n_iter_ = []

        # This whole thing corresponds to the algorithm in section 4.1 of the
        # review from Wegelin. See above for a notation mapping from code to
        # paper.
        Y_eps = np.finfo(Yk.dtype).eps
        for k in range(n_components):
            # Find first left and right singular vectors of the X.T.dot(Y)
            # cross-covariance matrix.
            if self.algorithm == "nipals":
                # Replace columns that are all close to zero with zeros
                Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0)
                Yk[:, Yk_mask] = 0.0

                try:
                    (
                        x_weights,
                        y_weights,
                        n_iter_,
                    ) = _get_first_singular_vectors_power_method(
                        Xk,
                        Yk,
                        mode=self.mode,
                        max_iter=self.max_iter,
                        tol=self.tol,
                        norm_y_weights=norm_y_weights,
                    )
                except StopIteration as e:
                    if str(e) != "Y residual is constant":
                        raise
                    warnings.warn(f"Y residual is constant at iteration {k}")
                    break

                self.n_iter_.append(n_iter_)

            elif self.algorithm == "svd":
                x_weights, y_weights = _get_first_singular_vectors_svd(Xk, Yk)

            # inplace sign flip for consistency across solvers and archs
            _svd_flip_1d(x_weights, y_weights)

            # compute scores, i.e. the projections of X and Y
            x_scores = np.dot(Xk, x_weights)
            if norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss

            # Deflation: subtract rank-one approximations to obtain remainder
            # matrices.
            x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores)
            Xk -= np.outer(x_scores, x_loadings)

            if self.deflation_mode == "canonical":
                # regress Yk on y_scores
                y_loadings = np.dot(y_scores, Yk) / np.dot(y_scores, y_scores)
                Yk -= np.outer(y_scores, y_loadings)
            if self.deflation_mode == "regression":
                # regress Yk on x_scores
                y_loadings = np.dot(x_scores, Yk) / np.dot(x_scores, x_scores)
                Yk -= np.outer(x_scores, y_loadings)

            self.x_weights_[:, k] = x_weights
            self.y_weights_[:, k] = y_weights
            self._x_scores[:, k] = x_scores
            self._y_scores[:, k] = y_scores
            self.x_loadings_[:, k] = x_loadings
            self.y_loadings_[:, k] = y_loadings

        # X was approximated as Xi . Gamma.T + X_(R), where Xi . Gamma.T is a
        # sum of n_components rank-1 matrices and X_(R) is whatever is left to
        # fully reconstruct X (0 if X is of rank n_components). Similarly, Y
        # was approximated as Omega . Delta.T + Y_(R).

        # Compute transformation matrices (rotations_). See User Guide.
        self.x_rotations_ = np.dot(
            self.x_weights_,
            pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False),
        )
        self.y_rotations_ = np.dot(
            self.y_weights_,
            pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False),
        )
        self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
        self.coef_ = (self.coef_ * self._y_std).T
        self.intercept_ = self._y_mean
        self._n_features_out = self.x_rotations_.shape[1]
        return self
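
    # Illustrative, not part of the library: after `fit`, predictions are an
    # affine map of the centered/scaled input. A minimal sketch relying on
    # private attributes, with PLSRegression (defined further below):
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    #   >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    #   >>> pls = PLSRegression(n_components=2).fit(X, Y)
    #   >>> Xc = (X - pls._x_mean) / pls._x_std
    #   >>> bool(np.allclose(pls.predict(X), Xc @ pls.coef_.T + pls.intercept_))
    #   True
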
	z_PLS.fitc                 C   s   t |  | j||tdd}|| j8 }|| j }t|| j}|dk	rt|dd|td}|j	dkrl|
dd}|| j8 }|| j }t|| j}||fS |S )a.  Apply the dimension reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to transform.

        Y : array-like of shape (n_samples, n_targets), default=None
            Target vectors.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        x_scores, y_scores : array-like or tuple of array-like
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        """
        check_is_fitted(self)
        X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
        # Normalize
        X -= self._x_mean
        X /= self._x_std
        # Apply rotation
        x_scores = np.dot(X, self.x_rotations_)
        if Y is not None:
            Y = check_array(
                Y, input_name="Y", ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES
            )
            if Y.ndim == 1:
                Y = Y.reshape(-1, 1)
            Y -= self._y_mean
            Y /= self._y_std
            y_scores = np.dot(Y, self.y_rotations_)
            return x_scores, y_scores

        return x_scores
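
    # Illustrative, not part of the library: `transform` is equivalent to
    # centering/scaling X and projecting it onto `x_rotations_` (sketch using
    # private attributes and PLSCanonical, defined below):
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    #   >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    #   >>> pls = PLSCanonical(n_components=2).fit(X, Y)
    #   >>> T = pls.transform(X)
    #   >>> bool(np.allclose(T, ((X - pls._x_mean) / pls._x_std) @ pls.x_rotations_))
    #   True
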
z_PLS.transformc                 C   s   t |  t|dtd}t|| jj}|| j9 }|| j7 }|dk	r|t|dtd}t|| j	j}|| j
9 }|| j7 }||fS |S )ae  Transform data back to its original space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Y : array-like of shape (n_samples, n_components)
            New target, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Returns
        -------
        X_reconstructed : ndarray of shape (n_samples, n_features)
            Return the reconstructed `X` data.

        Y_reconstructed : ndarray of shape (n_samples, n_targets)
            Return the reconstructed `X` target. Only returned when `Y` is given.

        Notes
        -----
        This transformation will only be exact if `n_components=n_features`.
        """
        check_is_fitted(self)
        X = check_array(X, input_name="X", dtype=FLOAT_DTYPES)
        # From pls space to original space
        X_reconstructed = np.matmul(X, self.x_loadings_.T)
        # Denormalize
        X_reconstructed *= self._x_std
        X_reconstructed += self._x_mean

        if Y is not None:
            Y = check_array(Y, input_name="Y", dtype=FLOAT_DTYPES)
            # From pls space to original space
            Y_reconstructed = np.matmul(Y, self.y_loadings_.T)
            # Denormalize
            Y_reconstructed *= self._y_std
            Y_reconstructed += self._y_mean
            return X_reconstructed, Y_reconstructed

        return X_reconstructed
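
    # Illustrative, not part of the library: as stated in the Notes above, the
    # round trip is exact only when `n_components == n_features` (toy data
    # chosen so the rank bound allows it):
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[0., 1.], [1., 0.], [2., 2.], [2., 5.]])
    #   >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    #   >>> pls = PLSCanonical(n_components=2).fit(X, Y)
    #   >>> bool(np.allclose(pls.inverse_transform(pls.transform(X)), X))
    #   True
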
z_PLS.inverse_transformc                 C   sR   t |  | j||tdd}|| j8 }|| j }|| jj | j }| jrN|	 S |S )aU  Predict targets of given samples.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.

        Notes
        -----
        This call requires the estimation of a matrix of shape
        `(n_features, n_targets)`, which may be an issue in high dimensional
        space.
        """
        check_is_fitted(self)
        X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
        # Normalize
        X -= self._x_mean
        X /= self._x_std
        Ypred = X @ self.coef_.T + self.intercept_
        return Ypred.ravel() if self._predict_1d else Ypred
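
    # Illustrative, not part of the library: when `fit` received a 1d target,
    # `predict` returns a 1d array as well (see `_predict_1d` above):
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    #   >>> y = np.array([0.1, 0.9, 6.2, 11.9])
    #   >>> PLSRegression(n_components=2).fit(X, y).predict(X).shape
    #   (4,)
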
z_PLS.predictc                 C   s   |  ||||S )a  Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : ndarray of shape (n_samples, n_components)
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        """
        return self.fit(X, y).transform(X, y)

    def _more_tags(self):
        return {"poor_score": True, "requires_y": False}


class PLSRegression(_PLS):
    """PLS regression.
    PLSRegression is also known as PLS2 or PLS1, depending on the number of
    targets.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in :term:`fit` before applying centering,
        and potentially scaling. If `False`, these operations will be done
        inplace, modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training samples.

    y_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training targets.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_target, n_features)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    PLSRegression()
    >>> Y_pred = pls2.predict(X)
    """

    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode", "algorithm"):
        _parameter_constraints.pop(param)

    def __init__(
        self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="regression",
            mode="A",
            algorithm="nipals",
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )

    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        """
        super().fit(X, Y)
        # expose the fitted attributes `x_scores_` and `y_scores_`
        self.x_scores_ = self._x_scores
        self.y_scores_ = self._y_scores
        return self


class PLSCanonical(_PLS):
    """Partial Least Squares transformer and regressor.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    algorithm : {'nipals', 'svd'}, default='nipals'
        The algorithm used to estimate the first singular vectors of the
        cross-covariance matrix. 'nipals' uses the power method while 'svd'
        will compute the whole SVD.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_targets, n_features)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component. Empty if `algorithm='svd'`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    CCA : Canonical Correlation Analysis.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    PLSCanonical()
    >>> X_c, Y_c = plsca.transform(X, Y)
    """

    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode"):
        _parameter_constraints.pop(param)

    def __init__(
        self,
        n_components=2,
        *,
        scale=True,
        algorithm="nipals",
        max_iter=500,
        tol=1e-06,
        copy=True,
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="canonical",
            mode="A",
            algorithm=algorithm,
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )


class CCA(_PLS):
    """Canonical Correlation Analysis, also known as "Mode B" PLS.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_targets, n_features)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    CCA(n_components=1)
    >>> X_c, Y_c = cca.transform(X, Y)
    """

    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode", "algorithm"):
        _parameter_constraints.pop(param)

    def __init__(
        self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="canonical",
            mode="B",
            algorithm="nipals",
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )


class PLSSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Partial Least Square SVD.

    This transformer simply performs a SVD on the cross-covariance matrix
    `X'Y`. It is able to project both the training data `X` and the targets
    `Y`. The training data `X` is projected on the left singular vectors, while
    the targets are projected on the right singular vectors.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        The number of components to keep. Should be in `[1,
        min(n_samples, n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If `False`, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    y_weights_ : ndarray of (n_targets, n_components)
        The right singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    CCA : Canonical Correlation Analysis.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.],
    ...               [1., 0., 0.],
    ...               [2., 2., 2.],
    ...               [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2],
    ...               [0.9, 1.1],
    ...               [6.2, 5.9],
    ...               [11.9, 12.3]])
    >>> pls = PLSSVD(n_components=2).fit(X, Y)
    >>> X_c, Y_c = pls.transform(X, Y)
    >>> X_c.shape, Y_c.shape
    ((4, 2), (4, 2))
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "scale": ["boolean"],
        "copy": ["boolean"],
    }

    def __init__(self, n_components=2, *, scale=True, copy=True):
        self.n_components = n_components
        self.scale = scale
        self.copy = copy

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Targets.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        check_consistent_length(X, Y)
        X = self._validate_data(
            X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
        )
        Y = check_array(
            Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
        )
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)

        # we'll compute the SVD of the cross-covariance matrix = X.T.dot(Y).
        # This matrix rank is at most min(n_samples, n_features, n_targets),
        # so n_components cannot be bigger than that.
        n_components = self.n_components
        rank_upper_bound = min(X.shape[0], X.shape[1], Y.shape[1])
        if n_components > rank_upper_bound:
            raise ValueError(
                f"`n_components` upper bound is {rank_upper_bound}. "
                f"Got {n_components} instead. Reduce `n_components`."
            )

        X, Y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
            X, Y, self.scale
        )

        # Compute SVD of cross-covariance matrix
        C = np.dot(X.T, Y)
        U, s, Vt = svd(C, full_matrices=False)
        U = U[:, :n_components]
        Vt = Vt[:n_components]
        U, Vt = svd_flip(U, Vt)
        V = Vt.T

        self.x_weights_ = U
        self.y_weights_ = V
        self._n_features_out = self.x_weights_.shape[1]
        return self

    def transform(self, X, Y=None):
        """
        Apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to be transformed.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        x_scores : array-like or tuple of array-like
            The transformed data `X_transformed` if `Y is not None`,
            `(X_transformed, Y_transformed)` otherwise.
        """
        check_is_fitted(self)
        X = self._validate_data(X, dtype=np.float64, reset=False)
        Xr = (X - self._x_mean) / self._x_std
        x_scores = np.dot(Xr, self.x_weights_)
        if Y is not None:
            Y = check_array(Y, input_name="Y", ensure_2d=False, dtype=np.float64)
            if Y.ndim == 1:
                Y = Y.reshape(-1, 1)
            Yr = (Y - self._y_mean) / self._y_std
            y_scores = np.dot(Yr, self.y_weights_)
            return x_scores, y_scores

        return x_scores
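
    # Illustrative, not part of the library: PLSSVD scores are simply the
    # centered/scaled data projected on the singular vectors (sketch using
    # private attributes):
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    #   >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    #   >>> pls = PLSSVD(n_components=2).fit(X, Y)
    #   >>> Xc = (X - pls._x_mean) / pls._x_std
    #   >>> bool(np.allclose(pls.transform(X), Xc @ pls.x_weights_))
    #   True
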
zPLSSVD.transformc                 C   s   |  ||||S )a  Learn and apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        out : array-like or tuple of array-like
            The transformed data `X_transformed` if `Y is not None`,
            `(X_transformed, Y_transformed)` otherwise.
        """
        return self.fit(X, y).transform(X, y)
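
# Illustrative, not part of the library: since PLSSVD and the NIPALS-based
# estimators share the sign convention of `svd_flip`/`_svd_flip_1d`, the first
# component weights of PLSCanonical(algorithm="svd") should match PLSSVD's:
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
#   >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
#   >>> pc = PLSCanonical(n_components=1, algorithm="svd").fit(X, Y)
#   >>> ps = PLSSVD(n_components=1).fit(X, Y)
#   >>> bool(np.allclose(pc.x_weights_, ps.x_weights_))
#   True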