U
    3d                  
   @   s  d Z ddlmZmZ ddlZddlmZmZ ddlZ	ddl
mZ ddlmZmZmZ ddlmZ dd	lmZ dd
lmZmZ ddlmZ ddlmZ ddlmZ ddlmZmZ ddlmZmZ ddl m!Z! dddgZ"eedkrddl
m#Z$ nddl
m$Z$ dd Z%d.ddZ&dd  Z'd/d"d#Z(d$d% Z)G d&d' d'eeeeeed(Z*G d)d de*Z+G d*d de*Z,G d+d, d,e*Z-G d-d deeeZ.dS )0zG
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
    )IntegralRealN)ABCMetaabstractmethod)svd   )BaseEstimatorRegressorMixinTransformerMixin)MultiOutputMixin)ClassNamePrefixFeaturesOutMixin)check_arraycheck_consistent_length)
sp_version)parse_version)svd_flip)check_is_fittedFLOAT_DTYPES)Interval
StrOptions)ConvergenceWarningPLSCanonicalPLSRegressionPLSSVDz1.7)pinv)pinv2c              
   C   s   t | ddd\}}}|jj }ddd}t|||  t|j }t||k}|d d d |f }||d |  }t	t
t||d | S )NF)full_matricescheck_finiteg     @@g    .A)fd)r   dtypecharlowernpmaxfinfoepssumZ	transpose	conjugatedot)ausZvhtZfactorZcondZrank r.   D/tmp/pip-unpacked-wheel-zrfo1fqw/sklearn/cross_decomposition/_pls.py
_pinv2_old&   s    
r0   A  ư>Fc              
      s  t | jj zt fdd|jD }W n, tk
rV } ztd|W 5 d}~X Y nX d}|dkrvt| t| }	}
t|D ]}|dkrt 	|	|}nt 	| j|t 	|| }|t 
t 	||   }t 	| |}|dkrt 	|
|}nt 	|j|t 	|j| }|r*|t 
t 	||   }t 	||t 	||   }|| }t 	|||k sp|jd dkrv q||}q~|d }||krtdt |||fS )	a?  Return the first left and right singular vectors of X'Y.

    Provides an alternative to the svd(X'Y) and uses the power method instead.
    With norm_y_weights to True and in mode A, this corresponds to the
    algorithm section 11.3 of the Wegelin's review, except this starts at the
    "update saliences" part.
    c                 3   s&   | ]}t t | kr|V  qd S N)r#   anyabs).0colr&   r.   r/   	<genexpr>E   s      z;_get_first_singular_vectors_power_method.<locals>.<genexpr>Y residual is constantNd   B   z$Maximum number of iterations reached)r#   r%   r    r&   nextTStopIterationr0   ranger)   sqrtshapewarningswarnr   )XYmodemax_itertolnorm_y_weightsZy_scoreeZx_weights_oldZX_pinvZY_pinvi	x_weightsZx_score	y_weightsZx_weights_diffZn_iterr.   r9   r/   (_get_first_singular_vectors_power_method8   s8    "
rQ   c                 C   s@   t | j|}t|dd\}}}|dddf |dddf fS )zbReturn the first left and right singular vectors of X'Y.

    Here the whole SVD is computed.
    Fr   Nr   )r#   r)   r@   r   )rG   rH   CU_Vtr.   r.   r/   _get_first_singular_vectors_svds   s    rW   Tc                 C   s   | j dd}| |8 } |j dd}||8 }|rr| jddd}d||dk< | | } |jddd}d||dk< || }n t| jd }t|jd }| |||||fS )z{Center X, Y and scale if the scale parameter==True

    Returns
    -------
        X, Y, x_mean, y_mean, x_std, y_std
    r   axisr>   )rY   Zddofg      ?        )ZmeanZstdr#   ZonesrD   )rG   rH   scaleZx_meanZy_meanZx_stdZy_stdr.   r.   r/   _center_scale_xy}   s    
r\   c                 C   s2   t t | }t | | }| |9 } ||9 }dS )z7Same as svd_flip but works on 1d arrays, and is inplaceN)r#   Zargmaxr6   sign)r+   vZbiggest_abs_val_idxr]   r.   r.   r/   _svd_flip_1d   s    r_   c                   @   s   e Zd ZU dZeeddddgdgeddhged	d
hgeddhgeeddddgeeddddgdgdZe	e
d< ed%ddd	dddddddZdd Zd&ddZd'ddZd(ddZd)dd Zed!d" Zd#d$ ZdS )*_PLSa  Partial Least Squares (PLS)

    This class implements the generic PLS algorithm.

    Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
    with emphasis on the two-block case
    https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf
    r>   Nleftclosedboolean
regression	canonicalr1   r=   r   nipalsr   n_componentsr[   deflation_moderI   	algorithmrJ   rK   copy_parameter_constraintsr   Tr2   r3   )r[   rj   rI   rk   rJ   rK   rl   c          	      C   s4   || _ || _|| _|| _|| _|| _|| _|| _d S r4   )ri   rj   rI   r[   rk   rJ   rK   rl   )	selfri   r[   rj   rI   rk   rJ   rK   rl   r.   r.   r/   __init__   s    z_PLS.__init__c                 C   s  |    t|| | j|tj| jdd}t|dtj| jdd}|jdkrT|dd}|j	d }|j	d }|j	d }| j
}| jd	kr|n
t|||}||krtd
| d| d| jdk| _| j}t||| j\}	}
| _| _| _| _t||f| _t||f| _t||f| _t||f| _t||f| _t||f| _g | _t|
jj}t |D ]}| j!dkr"tj"t#|
d| k dd}d|
dd|f< z$t$|	|
| j%| j&| j'|d\}}}W nP t(k
r } z0t)|dkr t*+d|  W Y 
 qzW 5 d}~X Y nX | j,| n| j!dkr<t-|	|
\}}t.|| t/|	|}|r^d}nt/||}t/|
|| }t/||	t/|| }|	t0||8 }	| jdkrt/||
t/|| }|
t0||8 }
| jd	kr
t/||
t/|| }|
t0||8 }
|| jdd|f< || jdd|f< || jdd|f< || jdd|f< || jdd|f< || jdd|f< q`t/| jt1t/| jj2| jdd| _3t/| jt1t/| jj2| jdd| _4t/| j3| jj2| _5| j5| j j2| _5| j| _6| j3j	d | _7| S )  Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        r   r    rl   Zensure_min_samplesrH   F
input_namer    rl   	ensure_2dr>   r   re   `n_components` upper bound is . Got   instead. Reduce `n_components`.rf   rg   
   rX   rZ   N)rI   rJ   rK   rL   r;   z$Y residual is constant at iteration r   )r   )8_validate_paramsr   _validate_datar#   float64rl   r   ndimreshaperD   ri   rj   min
ValueErrorZ_norm_y_weightsr\   r[   _x_mean_y_mean_x_std_y_stdzeros
x_weights_
y_weights_	_x_scores	_y_scoresx_loadings_y_loadings_n_iter_r%   r    r&   rB   rk   allr6   rQ   rI   rJ   rK   rA   strrE   rF   appendrW   r_   r)   outerr   r@   x_rotations_y_rotations__coef_
intercept__n_features_out)rn   rG   rH   npqri   rank_upper_boundrL   ZXkZYkZY_epskZYk_maskrO   rP   r   rM   x_scoresZy_ssy_scoresZ
x_loadingsZ
y_loadingsr.   r.   r/   fit   s    
       



  
	z_PLS.fitc                 C   s   t |  | j||tdd}|| j8 }|| j }t|| j}|dk	rt|dd|td}|j	dkrl|
dd}|| j8 }|| j }t|| j}||fS |S )a.  Apply the dimension reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to transform.

        Y : array-like of shape (n_samples, n_targets), default=None
            Target vectors.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        x_scores, y_scores : array-like or tuple of array-like
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        Frl   r    resetNrH   )rs   rt   rl   r    r>   ru   )r   r{   r   r   r   r#   r)   r   r   r}   r~   r   r   r   )rn   rG   rH   rl   r   r   r.   r.   r/   	transformi  s(    

    


z_PLS.transformc                 C   s   t |  t|dtd}t|| jj}|| j9 }|| j7 }|dk	r|t|dtd}t|| j	j}|| j
9 }|| j7 }||fS |S )ae  Transform data back to its original space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Y : array-like of shape (n_samples, n_components)
            New target, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Returns
        -------
        X_reconstructed : ndarray of shape (n_samples, n_features)
            Return the reconstructed `X` data.

        Y_reconstructed : ndarray of shape (n_samples, n_targets)
            Return the reconstructed `X` target. Only returned when `Y` is given.

        Notes
        -----
        This transformation will only be exact if `n_components=n_features`.
        rG   )rs   r    NrH   )r   r   r   r#   matmulr   r@   r   r   r   r   r   )rn   rG   rH   ZX_reconstructedZY_reconstructedr.   r.   r/   inverse_transform  s    



z_PLS.inverse_transformc                 C   sD   t |  | j||tdd}|| j8 }|| j }|| jj }|| j S )aU  Predict targets of given samples.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.

        Notes
        -----
        This call requires the estimation of a matrix of shape
        `(n_features, n_targets)`, which may be an issue in high dimensional
        space.
        Fr   )r   r{   r   r   r   r   r@   r   )rn   rG   rl   ZYpredr.   r.   r/   predict  s    

z_PLS.predictc                 C   s   |  ||||S )a  Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : ndarray of shape (n_samples, n_components)
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        r   r   rn   rG   yr.   r.   r/   fit_transform  s    z_PLS.fit_transformc                 C   s0   t | dr(t| ddr(tdt d| _| jjS )z%The coefficients of the linear model.r   _coef_warningTzThe attribute `coef_` will be transposed in version 1.3 to be consistent with other linear models in scikit-learn. Currently, `coef_` has a shape of (n_features, n_targets) and in the future it will have a shape of (n_targets, n_features).F)hasattrgetattrrE   rF   FutureWarningr   r   r@   rn   r.   r.   r/   coef_  s    z
_PLS.coef_c                 C   s
   dddS )NTF)Z
poor_scoreZ
requires_yr.   r   r.   r.   r/   
_more_tags  s    z_PLS._more_tags)r   )NT)N)T)N)__name__
__module____qualname____doc__r   r   r   r   rm   dict__annotations__r   ro   r   r   r   r   r   propertyr   r   r.   r.   r.   r/   r`      s<   

  
'
,


r`   )	metaclassc                       s`   e Zd ZU dZejZeed< dD ]Ze	e q"dddddd fd	d
Z
 fddZ  ZS )r   a  PLS regression.

    PLSRegression is also known as PLS2 or PLS1, depending on the number of
    targets.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in :term:`fit` before applying centering,
        and potentially scaling. If `False`, these operations will be done
        inplace, modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training samples.

    y_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training targets.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    PLSRegression()
    >>> Y_pred = pls2.predict(X)
    rm   rj   rI   rk   r   Tr2   r3   r[   rJ   rK   rl   c             
      s    t  j||ddd|||d d S )Nre   r1   rg   rh   superro   rn   ri   r[   rJ   rK   rl   	__class__r.   r/   ro   t  s    zPLSRegression.__init__c                    s"   t  || | j| _| j| _| S )rp   )r   r   r   Z	x_scores_r   Z	y_scores_)rn   rG   rH   r   r.   r/   r     s    zPLSRegression.fit)r   )r   r   r   r   r`   rm   r   r   parampopro   r   __classcell__r.   r.   r   r/   r     s   
b	    c                       sV   e Zd ZU dZejZeed< dD ]Ze	e q"ddddddd	 fd
dZ
  ZS )r   a  Partial Least Squares transformer and regressor.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    algorithm : {'nipals', 'svd'}, default='nipals'
        The algorithm used to estimate the first singular vectors of the
        cross-covariance matrix. 'nipals' uses the power method while 'svd'
        will compute the whole SVD.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component. Empty if `algorithm='svd'`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    CCA : Canonical Correlation Analysis.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    PLSCanonical()
    >>> X_c, Y_c = plsca.transform(X, Y)
    rm   )rj   rI   r   Trg   r2   r3   )r[   rk   rJ   rK   rl   c             
      s    t  j||dd||||d d S )Nrf   r1   rh   r   )rn   ri   r[   rk   rJ   rK   rl   r   r.   r/   ro     s    
zPLSCanonical.__init__)r   r   r   r   r   r`   rm   r   r   r   r   ro   r   r.   r.   r   r/   r     s   
_ c                       sT   e Zd ZU dZejZeed< dD ]Ze	e q"dddddd fd	d
Z
  ZS )CCAap  Canonical Correlation Analysis, also known as "Mode B" PLS.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    CCA(n_components=1)
    >>> X_c, Y_c = cca.transform(X, Y)
    rm   r   r   Tr2   r3   r   c             
      s    t  j||ddd|||d d S )Nrf   r=   rg   rh   r   r   r   r.   r/   ro   y  s    zCCA.__init__)r   r   r.   r.   r   r/   r     s   
W    r   c                   @   sf   e Zd ZU dZeeddddgdgdgdZeed< dd
d
dddZ	dd Z
dddZdddZdS )r   a  Partial Least Square SVD.

    This transformer simply performs a SVD on the cross-covariance matrix
    `X'Y`. It is able to project both the training data `X` and the targets
    `Y`. The training data `X` is projected on the left singular vectors, while
    the targets are projected on the right singular vectors.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        The number of components to keep. Should be in `[1,
        min(n_samples, n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If `False`, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    y_weights_ : ndarray of (n_targets, n_components)
        The right singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    CCA : Canonical Correlation Analysis.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.],
    ...               [1., 0., 0.],
    ...               [2., 2., 2.],
    ...               [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2],
    ...               [0.9, 1.1],
    ...               [6.2, 5.9],
    ...               [11.9, 12.3]])
    >>> pls = PLSSVD(n_components=2).fit(X, Y)
    >>> X_c, Y_c = pls.transform(X, Y)
    >>> X_c.shape, Y_c.shape
    ((4, 2), (4, 2))
    r>   Nra   rb   rd   ri   r[   rl   rm   r   T)r[   rl   c                C   s   || _ || _|| _d S r4   r   )rn   ri   r[   rl   r.   r.   r/   ro     s    zPLSSVD.__init__c           
      C   s*  |    t|| | j|tj| jdd}t|dtj| jdd}|jdkrT|dd}| j	}t
|jd |jd |jd }||krtd	| d
| dt||| j\}}| _| _| _| _t|j|}t|dd\}}}|ddd|f }|d| }t||\}}|j}	|| _|	| _| jjd | _| S )aJ  Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Targets.

        Returns
        -------
        self : object
            Fitted estimator.
        r   rq   rH   Frr   r>   ru   r   rv   rw   rx   rR   N)rz   r   r{   r#   r|   rl   r   r}   r~   ri   r   rD   r   r\   r[   r   r   r   r   r)   r@   r   r   r   r   r   )
rn   rG   rH   ri   r   rS   rT   r,   rV   Vr.   r.   r/   r     sL    
       
  z
PLSSVD.fitc                 C   s   t |  | j|tjdd}|| j | j }t|| j}|dk	rt|ddtjd}|j	dkrh|
dd}|| j | j }t|| j}||fS |S )a	  
        Apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to be transformed.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        x_scores : array-like or tuple of array-like
            The transformed data `X_transformed` if `Y is not None`,
            `(X_transformed, Y_transformed)` otherwise.
        F)r    r   NrH   )rs   rt   r    r>   ru   )r   r{   r#   r|   r   r   r)   r   r   r}   r~   r   r   r   )rn   rG   rH   ZXrr   ZYrr   r.   r.   r/   r     s    
zPLSSVD.transformc                 C   s   |  ||||S )a  Learn and apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        out : array-like or tuple of array-like
            The transformed data `X_transformed` if `Y is not None`,
            `(X_transformed, Y_transformed)` otherwise.
        r   r   r.   r.   r/   r   /  s    zPLSSVD.fit_transform)r   )N)N)r   r   r   r   r   r   rm   r   r   ro   r   r   r   r.   r.   r.   r/   r     s   
D8
 )r1   r2   r3   F)T)/r   Znumbersr   r   rE   abcr   r   Znumpyr#   Zscipy.linalgr   baser   r	   r
   r   r   utilsr   r   Zutils.fixesr   r   Zutils.extmathr   Zutils.validationr   r   Zutils._param_validationr   r   
exceptionsr   __all__r   r   r0   rQ   rW   r\   r_   r`   r   r   r   r   r.   r.   r.   r/   <module>   sX   
       
;



  h  k