"""Basic linear factorizations needed by the solver."""

from scipy.sparse import bmat, csc_matrix, eye, issparse
from scipy.sparse.linalg import LinearOperator
import scipy.linalg
import scipy.sparse.linalg
try:
    from sksparse.cholmod import cholesky_AAt
    sksparse_available = True
except ImportError:
    import warnings
    sksparse_available = False
import numpy as np
from warnings import warn

__all__ = ['orthogonality', 'projections']


def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_.
    ``orth =  norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
            programming problems arising in optimization."
            SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
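
    Examples
    --------
    As a small illustration, for the 1 x 2 matrix ``[[1, 0]]`` a vector
    lying in its null space gives 0 and a vector lying in its row space
    gives 1:

    >>> import numpy as np
    >>> A = np.array([[1.0, 0.0]])
    >>> float(orthogonality(A, np.array([0.0, 1.0])))
    0.0
    >>> float(orthogonality(A, np.array([1.0, 0.0])))
    1.0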
    Zfro)ordr   )nplinalgnormr   scipysparsedot)AgZnorm_gZnorm_AZnorm_A_gZorth r   R/tmp/pip-unpacked-wheel-9gxwnfpp/scipy/optimize/_trustregion_constr/projections.pyr
      s    c           	         s@   t   fdd} fdd} fdd}|||fS )zLReturn linear operators for matrix A using ``NormalEquation`` approach.
    c                    sf     | }|  j | }d}t |krb|kr:qb  |}| j | }|d7 }q"|S Nr      r   Tr
   )xvzkr   factor	max_refinorth_tolr   r   
null_space@   s    
z/normal_equation_projections.<locals>.null_spacec                    s     | S Nr   r   r   r    r   r   least_squaresR   s    z2normal_equation_projections.<locals>.least_squaresc                    s    j | S r$   r   r   r&   r'   r   r   	row_spaceV   s    z.normal_equation_projections.<locals>.row_spacer   	r   mnr"   r!   tolr#   r(   r*   r   r   r   normal_equation_projections9   s
    r/   c           	   
      s   t tt jg dggztjjW n2 tk
rb   t	d t
  | Y S X  fdd}fdd}fdd}|||fS )	z;Return linear operators for matrix A - ``AugmentedSystem``.NzVSingular Jacobian matrix. Using dense SVD decomposition to perform the factorizations.c                    s|   t | t g}|}|d  }d}t |krx|krDqx|| }|}||7 }|d  }|d7 }q,|S r   )r   hstackzerosr
   r   )r   r   lu_solr   r   Znew_vZ	lu_updater   Kr,   r!   r-   r"   solver   r   r#   q   s    
z0augmented_system_projections.<locals>.null_spacec                    s,   t | t  g}|}|   S r$   r   r0   r1   r   r   r2   )r,   r-   r5   r   r   r(      s    z3augmented_system_projections.<locals>.least_squaresc                    s(   t t  | g}|}|d   S r$   r6   r7   )r-   r5   r   r   r*      s    z/augmented_system_projections.<locals>.row_space)r   r   r   r   r   r   r   Z
factorizedRuntimeErrorr	   svd_factorization_projectionsZtoarrayr+   r   r3   r   augmented_system_projections\   s        
"
def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.
    """
    # QR factorization (with column pivoting) of A.T.
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')

    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to '
             'perform the factorizations.')
        return svd_factorization_projections(A, m, n,
                                             orth_tol, max_refin, tol)

    # null_space(x) = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        v = np.zeros(m)
        v[P] = aux2
        z = x - A.T.dot(v)

        # Iterative refinement to reduce large roundoff errors.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            aux1 = Q.T.dot(z)
            aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
            v[P] = aux2
            z = z - A.T.dot(v)
            k += 1
        return z

    # least_squares(x) = inv(A A.T) A x = P inv(R) Q.T x
    def least_squares(x):
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        z = np.zeros(m)
        z[P] = aux2
        return z

    # row_space(x) = A.T inv(A A.T) x = Q inv(R.T) P.T x
    def row_space(x):
        aux1 = x[P]
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False, trans='T')
        return Q.dot(aux2)

    return null_space, least_squares, row_space
def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.
    """
    # SVD factorization, dropping directions associated with
    # singular values below the tolerance ``tol``.
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)
    U = U[:, s > tol]
    Vt = Vt[s > tol, :]
    s = s[s > tol]

    # null_space(x) = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = U 1/s V.T x = inv(A A.T) A x
        aux1 = Vt.dot(x)
        aux2 = 1/s*aux1
        v = U.dot(aux2)
        z = x - A.T.dot(v)

        # Iterative refinement to reduce large roundoff errors.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            aux1 = Vt.dot(z)
            aux2 = 1/s*aux1
            v = U.dot(aux2)
            z = z - A.T.dot(v)
            k += 1
        return z

    # least_squares(x) = inv(A A.T) A x = U 1/s V.T x
    def least_squares(x):
        aux1 = Vt.dot(x)
        aux2 = 1/s*aux1
        return U.dot(aux2)

    # row_space(x) = A.T inv(A A.T) x = V 1/s U.T x
    def row_space(x):
        aux1 = U.T.dot(x)
        aux2 = 1/s*aux1
        return Vt.T.dot(aux2)

    return null_space, least_squares, row_space


def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related to a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used to compute the given linear
        operators. Should be one of:

            - 'NormalEquation': The operators
               will be computed using the
               so-called normal equation approach
               explained in [1]_. In order to do
               so the Cholesky factorization of
               ``(A A.T)`` is computed. Exclusive
               for sparse matrices.
            - 'AugmentedSystem': The operators
               will be computed using the
               so-called augmented system approach
               explained in [1]_. Exclusive
               for sparse matrices.
            - 'QRFactorization': Compute projections
               using QR factorization. Exclusive for
               dense matrices.
            - 'SVDFactorization': Compute projections
               using SVD factorization. Exclusive for
               dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. For a given vector ``x``,
        the null-space operator is equivalent to applying
        a projection matrix ``P = I - A.T inv(A A.T) A``
        to the vector. It can be shown that this is
        equivalent to projecting ``x`` onto the null space
        of ``A``.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. For a given vector ``x``,
        the least-squares operator is equivalent to applying a
        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
        to the vector. It can be shown that this vector
        ``pinv(A.T) x`` is the least-squares solution to
        ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. For a given vector ``x``,
        the row-space operator is equivalent to applying a
        projection matrix ``Q = A.T inv(A A.T)``
        to the vector. It can be shown that this
        vector ``y = Q x`` is the minimum-norm solution
        of ``A y = x``.

    Notes
    -----
    Uses iterative refinements described in [1]_
    during the computation of ``Z`` in order to
    cope with the possibility of large roundoff errors.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
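
    Examples
    --------
    A small illustration on a dense matrix (so the default
    'QRFactorization' method is used): ``Z`` maps any vector into the
    null space of ``A``, and ``Y`` produces a particular solution of
    ``A y = x``.

    >>> import numpy as np
    >>> A = np.array([[1.0, 0.0, 1.0]])
    >>> Z, LS, Y = projections(A)
    >>> x = np.array([1.0, 2.0, 3.0])
    >>> np.allclose(A.dot(Z.matvec(x)), 0.0)
    True
    >>> np.allclose(A.dot(Y.matvec(np.array([2.0]))), 2.0)
    True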
    """
    m, n = np.shape(A)

    # Check arguments and pick a default method.
    if issparse(A):
        if method is None:
            method = "AugmentedSystem"
        if method not in ("NormalEquation", "AugmentedSystem"):
            raise ValueError("Method not allowed for sparse matrix.")
        if method == "NormalEquation" and not sksparse_available:
            warnings.warn("Only accepts 'NormalEquation' option when "
                          "scikit-sparse is available. Using "
                          "'AugmentedSystem' option instead.",
                          ImportWarning)
            method = 'AugmentedSystem'
    else:
        if method is None:
            method = "QRFactorization"
        if method not in ("QRFactorization", "SVDFactorization"):
            raise ValueError("Method not allowed for dense array.")

    if method == 'NormalEquation':
        null_space, least_squares, row_space \
            = normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == 'AugmentedSystem':
        null_space, least_squares, row_space \
            = augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == 'QRFactorization':
        null_space, least_squares, row_space \
            = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == 'SVDFactorization':
        null_space, least_squares, row_space \
            = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)

    Z = LinearOperator((n, n), null_space)
    LS = LinearOperator((m, n), least_squares)
    Y = LinearOperator((n, m), row_space)

    return Z, LS, Y