U
    3‰d1  ã                   @   sh   d Z ddlZddlZddlmZmZ ddlmZ G dd„ de	ƒZ
d	d
„ Zdd„ Zddd„Zddd„ZdS )a’  
Our own implementation of the Newton algorithm

Unlike the scipy.optimize version, this version of the Newton conjugate
gradient solver uses only one function call to retrieve the
func value, the gradient value and a callable for the Hessian matvec
product. If the function call is very expensive (e.g. for logistic
regression with large design matrix), this approach gives very
significant speedups.
é    Né   )Úline_search_wolfe1Úline_search_wolfe2é   )ÚConvergenceWarningc                   @   s   e Zd ZdS )Ú_LineSearchErrorN)Ú__name__Ú
__module__Ú__qualname__© r   r   ú:/tmp/pip-unpacked-wheel-zrfo1fqw/sklearn/utils/optimize.pyr      s   r   c           	      K   sR   t | ||||||f|Ž}|d dkr<t| ||||||f|Ž}|d dkrNtƒ ‚|S )a  
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found.

    r   N)r   r   r   )	ÚfZfprimeÚxkÚpkZgfkÚold_fvalÚold_old_fvalÚkwargsÚretr   r   r   Ú_line_search_wolfe12   s           ÿÿr   c                 C   s  t jt|ƒ|jd}|}| }d}t  ||¡}||kr
t  t  |¡¡|krPq
| |ƒ}	t  ||	¡}
d|
  kr†dt  t j¡j	 krn nq
n*|
dk rº|dkr¦q
n|||
 | 7 }q
||
 }||| 7 }|||	  }t  ||¡}|| }| ||  }|d }|}q.|S )aP  
    Solve iteratively the linear system 'fhess_p . xsupi = fgrad'
    with a conjugate gradient descent.

    Parameters
    ----------
    fhess_p : callable
        Function that takes the gradient as a parameter and returns the
        matrix product of the Hessian and gradient.

    fgrad : ndarray of shape (n_features,) or (n_features + 1,)
        Gradient vector.

    maxiter : int
        Number of CG iterations.

    tol : float
        Stopping criterion.

    Returns
    -------
    xsupi : ndarray of shape (n_features,) or (n_features + 1,)
        Estimated solution.
    )Údtyper   é   r   )
ÚnpÚzerosÚlenr   ÚdotÚsumÚabsZfinfoZfloat64Zeps)Úfhess_pÚfgradÚmaxiterÚtolÚxsupiÚriZpsupiÚiZdri0ZApZcurvZalphaiZdri1Zbetair   r   r   Ú_cg5   s4    
&r$   r   ç-Cëâ6?éd   éÈ   Tc
              
   C   s"  t  |¡ ¡ }|}
d}|r,||f|žŽ }d}||k rþ| |
f|žŽ \}}t  |¡}t  |¡|kr`qþt  |¡}tdt  |¡gƒ}|| }t||||d}d}|rèz(t	|||
|||||d\}}}}}}W n" t
k
ræ   t d¡ Y qþY nX |
||  }
|d7 }q,|	r||krt d	t¡ |
|fS )
aŒ  
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm.

    Parameters
    ----------
    grad_hess : callable
        Should return the gradient and a callable returning the matvec product
        of the Hessian.

    func : callable
        Should return the value of the function.

    grad : callable
        Should return the function value and the gradient. This is used
        by the linesearch functions.

    x0 : array of float
        Initial guess.

    args : tuple, default=()
        Arguments passed to func_grad_hess, func and grad.

    tol : float, default=1e-4
        Stopping criterion. The iteration will stop when
        ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    maxiter : int, default=100
        Number of Newton iterations.

    maxinner : int, default=200
        Number of CG iterations.

    line_search : bool, default=True
        Whether to use a line search or not.

    warn : bool, default=True
        Whether to warn when didn't converge.

    Returns
    -------
    xk : ndarray of float
        Estimated minimum.
    r   Ng      à?)r   r    g      ð?)ÚargszLine Search failedr   z@newton-cg failed to converge. Increase the number of iterations.)r   ZasarrayÚflattenr   Úmaxr   ÚminÚsqrtr$   r   r   ÚwarningsÚwarnr   )Z	grad_hessÚfuncZgradZx0r(   r    r   ZmaxinnerZline_searchr.   r   Úkr   r   r   r   ZabsgradZmaggradÚetaZtermcondr!   ZalphakZfcÚgcZgfkp1r   r   r   Ú
_newton_cgp   sN    9

       ÿ


þr3   c                 C   s˜   | dkr|j dkrrz|j d¡}W n tk
r<   |j}Y nX d | |j |¡}|dk	rb|d| 7 }tj|tdd |dk	rˆt|j	|ƒ}q”|j	}nt
‚|S )	aÖ  Check the OptimizeResult for successful convergence

    Parameters
    ----------
    solver : str
       Solver name. Currently only `lbfgs` is supported.

    result : OptimizeResult
       Result of the scipy.optimize.minimize function.

    max_iter : int, default=None
       Expected maximum number of iterations.

    extra_warning_msg : str, default=None
        Extra warning message.

    Returns
    -------
    n_iter : int
       Number of iterations.
    Zlbfgsr   Úlatin1z²{} failed to converge (status={}):
{}.

Increase the number of iterations (max_iter) or scale the data as shown in:
    https://scikit-learn.org/stable/modules/preprocessing.htmlNÚ
r   )Ú
stacklevel)ÚstatusÚmessageÚdecodeÚAttributeErrorÚformatr-   r.   r   r+   ZnitÚNotImplementedError)ZsolverÚresultZmax_iterZextra_warning_msgZresult_messageZwarning_msgZn_iter_ir   r   r   Ú_check_optimize_resultÙ   s(    
ÿ  úr>   )r   r%   r&   r'   TT)NN)Ú__doc__Znumpyr   r-   Úfixesr   r   Ú
exceptionsr   ÚRuntimeErrorr   r   r$   r3   r>   r   r   r   r   Ú<module>   s   @      ö
i