"""Routines for numerical differentiation."""
import functools

import numpy as np
from numpy.linalg import norm

from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse


def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
    """Adjust final difference scheme to the presence of bounds.

    Parameters
    ----------
    x0 : ndarray, shape (n,)
        Point at which we wish to estimate derivative.
    h : ndarray, shape (n,)
        Desired absolute finite difference steps.
    num_steps : int
        Number of `h` steps in one direction required to implement finite
        difference scheme. For example, 2 means that we need to evaluate
        f(x0 + 2 * h) or f(x0 - 2 * h)
    scheme : {'1-sided', '2-sided'}
        Whether steps in one or both directions are required. In other
        words '1-sided' applies to forward and backward schemes, '2-sided'
        applies to center schemes.
    lb : ndarray, shape (n,)
        Lower bounds on independent variables.
    ub : ndarray, shape (n,)
        Upper bounds on independent variables.

    Returns
    -------
    h_adjusted : ndarray, shape (n,)
        Adjusted absolute step sizes. Step size decreases only if a sign flip
        or switching to a one-sided scheme does not allow a full step to be taken.
    use_one_sided : ndarray of bool, shape (n,)
        Whether to switch to one-sided scheme. Informative only for
        ``scheme='2-sided'``.
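
    Notes
    -----
    For example (illustrative values), with ``x0 = 0.9``, bounds ``(0, 1)``,
    ``h = 0.2``, ``num_steps = 1`` and ``scheme='1-sided'``, the forward point
    1.1 violates the upper bound while the full step still fits on the other
    side of ``x0``, so the step is flipped to ``-0.2`` rather than shrunk.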
    """
    if scheme == '1-sided':
        use_one_sided = np.ones_like(h, dtype=bool)
    elif scheme == '2-sided':
        h = np.abs(h)
        use_one_sided = np.zeros_like(h, dtype=bool)
    else:
        raise ValueError("`scheme` must be '1-sided' or '2-sided'.")

    if np.all((lb == -np.inf) & (ub == np.inf)):
        return h, use_one_sided

    h_total = h * num_steps
    h_adjusted = h.copy()

    lower_dist = x0 - lb
    upper_dist = ub - x0

    if scheme == '1-sided':
        x = x0 + h_total
        violated = (x < lb) | (x > ub)
        fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
        # Flip the sign where the full step fits on the other side of x0.
        h_adjusted[violated & fitting] *= -1

        forward = (upper_dist >= lower_dist) & ~fitting
        h_adjusted[forward] = upper_dist[forward] / num_steps
        backward = (upper_dist < lower_dist) & ~fitting
        h_adjusted[backward] = -lower_dist[backward] / num_steps
    elif scheme == '2-sided':
        central = (lower_dist >= h_total) & (upper_dist >= h_total)

        forward = (upper_dist >= lower_dist) & ~central
        h_adjusted[forward] = np.minimum(
            h[forward], 0.5 * upper_dist[forward] / num_steps)
        use_one_sided[forward] = True

        backward = (upper_dist < lower_dist) & ~central
        h_adjusted[backward] = -np.minimum(
            h[backward], 0.5 * lower_dist[backward] / num_steps)
        use_one_sided[backward] = True

        min_dist = np.minimum(upper_dist, lower_dist) / num_steps
        adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
        h_adjusted[adjusted_central] = min_dist[adjusted_central]
        use_one_sided[adjusted_central] = False

    return h_adjusted, use_one_sided


@functools.lru_cache
def _eps_for_method(x0_dtype, f0_dtype, method):
    """
    Calculates relative EPS step to use for a given data type
    and numdiff step method.

    Progressively smaller steps are used for larger floating point types.

    Parameters
    ----------
    f0_dtype: np.dtype
        dtype of function evaluation

    x0_dtype: np.dtype
        dtype of parameter vector

    method: {'2-point', '3-point', 'cs'}

    Returns
    -------
    EPS: float
        relative step size. May be np.float16, np.float32, np.float64

    Notes
    -----
    The default relative step will be np.float64. However, if x0 or f0 are
    smaller floating point types (np.float16, np.float32), then the smallest
    floating point type is chosen.
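
    For float64 inputs this gives a relative step of roughly 1.5e-8 for
    '2-point' and 'cs' (``EPS**0.5``) and roughly 6.1e-6 for '3-point'
    (``EPS**(1/3)``); for float32 inputs the step is correspondingly larger
    (about 3.5e-4 for '2-point').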
    FT)2-pointcsr   )3-pointgUUUUUU?zBUnknown step method, should be one of {'2-point', '3-point', 'cs'}N)	r   ZfinfoZfloat64ZepsZ
issubdtypeZinexactr   itemsizeRuntimeError)Zx0_dtypeZf0_dtypemethodZEPSZx0_is_fpZx0_itemsizeZf0_itemsizer%   r%   r&   _eps_for_methodZ   s    r.   c              
   C   s   |dk td d }t|j|j|}| dkrJ|| tdt| }nF| | t| }|| | }t|dk|| tdt| |}|S )az  
    Computes an absolute step from a relative step for finite difference
    calculation.

    Parameters
    ----------
    rel_step: None or array-like
        Relative step for the finite difference calculation
    x0 : np.ndarray
        Parameter vector
    f0 : np.ndarray or scalar
    method : {'2-point', '3-point', 'cs'}

    Returns
    -------
    h : float
        The absolute step size

    Notes
    -----
    `h` will always be np.float64. However, if `x0` or `f0` are
    smaller floating point dtypes (e.g. np.float32), then the absolute
    step size will be calculated from the smallest floating point size.
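
    For example, with ``rel_step=None``, ``method='2-point'`` and float64
    input, ``x0 = np.array([0.0, 100.0])`` gives absolute steps of roughly
    1.5e-8 and 1.5e-6: the step scales with ``max(1, abs(x0))`` and carries
    the sign of ``x0``.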
    """
    # sign_x0 is used instead of np.sign(x0) because it has to be 1
    # when x0 == 0.
    sign_x0 = (x0 >= 0).astype(float) * 2 - 1

    rstep = _eps_for_method(x0.dtype, f0.dtype, method)

    if rel_step is None:
        abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
    else:
        # User has requested specific relative steps.
        # Don't multiply by max(1, abs(x0)) because if x0 < 1 then the
        # requested step would not be used.
        abs_step = rel_step * sign_x0 * np.abs(x0)

        # However, we don't want an abs_step of 0, which can happen if
        # rel_step is 0, or x0 is 0. Instead, substitute a realistic step.
        dx = ((x0 + abs_step) - x0)
        abs_step = np.where(dx == 0,
                            rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
                            abs_step)

    return abs_step


def _prepare_bounds(bounds, x0):
    """
    Prepares new-style bounds from a two-tuple specifying the lower and upper
    limits for values in x0. If a value is not bound then the lower/upper bound
    will be expected to be -np.inf/np.inf.

    Examples
    --------
    >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
    (array([0., 1., 2.]), array([ 1.,  2., inf]))
    """
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)

    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)

    return lb, ub


def group_columns(A, order=0):
    """Group columns of a 2-D matrix for sparse finite differencing [1]_.

    Two columns are in the same group if in each row at least one of them
    has a zero. A greedy sequential algorithm is used to construct groups.

    Parameters
    ----------
    A : array_like or sparse matrix, shape (m, n)
        Matrix of which to group columns.
    order : int, iterable of int with shape (n,) or None
        Permutation array which defines the order of columns enumeration.
        If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is use a random permutation but
        guarantee repeatability.

    Returns
    -------
    groups : ndarray of int, shape (n,)
        Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is an index of a group to
        which the ith column is assigned. The procedure is helpful only if
        n_groups is significantly less than n.

    References
    ----------
    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.
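
    Examples
    --------
    For a banded sparsity pattern the number of groups stays small and does
    not grow with ``n``, so the Jacobian can be estimated with only a few
    extra function evaluations, e.g.::

        structure = np.eye(1000) + np.eye(1000, k=1) + np.eye(1000, k=-1)
        groups = group_columns(structure)
        n_groups = np.max(groups) + 1  # a few groups, however large n is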
    """
    if issparse(A):
        A = csc_matrix(A)
    else:
        A = np.atleast_2d(A)
        A = (A != 0).astype(np.int32)

    if A.ndim != 2:
        raise ValueError("`A` must be 2-dimensional.")

    m, n = A.shape

    if order is None or np.isscalar(order):
        rng = np.random.RandomState(order)
        order = rng.permutation(n)
    else:
        order = np.asarray(order)
        if order.shape != (n,):
            raise ValueError("`order` has incorrect shape.")

    A = A[:, order]

    if issparse(A):
        groups = group_sparse(m, n, A.indices, A.indptr)
    else:
        groups = group_dense(m, n, A)

    # Undo the column permutation in the group labels.
    groups[order] = groups.copy()

    return groups


def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
                      f0=None, bounds=(-np.inf, np.inf), sparsity=None,
                      as_linear_operator=False, args=(), kwargs={}):
    """Compute finite difference approximation of the derivatives of a
    vector-valued function.

    If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
    called the Jacobian, where an element (i, j) is a partial derivative of
    f[i] with respect to x[j].

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is an ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to a 1-D array.
    method : {'3-point', '2-point', 'cs'}, optional
        Finite difference method to use:
            - '2-point' - use the first order accuracy forward or backward
                          difference.
            - '3-point' - use central difference in interior points and the
                          second order accuracy forward or backward difference
                          near the boundary.
            - 'cs' - use a complex-step finite difference scheme. This assumes
                     that the user function is real-valued and can be
                     analytically continued to the complex plane. Otherwise,
                     it produces bogus results.
    rel_step : None or array_like, optional
        Relative step size to use. If None (default) the absolute step size is
        computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
        `rel_step` being selected automatically, see Notes. Otherwise
        ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
        sign of `h` is ignored. The calculated step size is possibly adjusted
        to fit into the bounds.
    abs_step : array_like, optional
        Absolute step size to use, possibly adjusted to fit into the bounds.
        For ``method='3-point'`` the sign of `abs_step` is ignored. By default
        relative steps are used; absolute steps are used only if
        ``abs_step is not None``.
    f0 : None or array_like, optional
        If not None it is assumed to be equal to ``fun(x0)``, in this case
        the ``fun(x0)`` is not called. Default is None.
    bounds : tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation. Bounds checking is not implemented
        when `as_linear_operator` is True.
    sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
        Defines a sparsity structure of the Jacobian matrix. If the Jacobian
        matrix is known to have only a few non-zero elements in each row, then
        it's possible to estimate several of its columns by a single function
        evaluation [3]_. To perform such economic computations two ingredients
        are required:

        * structure : array_like or sparse matrix of shape (m, n). A zero
          element means that a corresponding element of the Jacobian
          is identically zero.
        * groups : array_like of shape (n,). A column grouping for a given
          sparsity structure, use `group_columns` to obtain it.

        A single array or a sparse matrix is interpreted as a sparsity
        structure, and groups are computed inside the function. A tuple is
        interpreted as (structure, groups). If None (default), a standard
        dense differencing will be used.

        Note that sparse differencing makes sense only for large Jacobian
        matrices where each row contains few non-zero elements.
    as_linear_operator : bool, optional
        When True the function returns an `scipy.sparse.linalg.LinearOperator`.
        Otherwise it returns a dense array or a sparse matrix depending on
        `sparsity`. The linear operator provides an efficient way of computing
        ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
        direct access to individual elements of the matrix. By default
        `as_linear_operator` is False.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)``.

    Returns
    -------
    J : {ndarray, sparse matrix, LinearOperator}
        Finite difference approximation of the Jacobian matrix.
        If `as_linear_operator` is True returns a LinearOperator
        with shape (m, n). Otherwise it returns a dense array or sparse
        matrix depending on how `sparsity` is defined. If `sparsity`
        is None then an ndarray with shape (m, n) is returned. If
        `sparsity` is not None a csr_matrix with shape (m, n) is returned.
        For sparse matrices and linear operators the result is always a
        2-D structure; for ndarrays, if m=1 it is returned
        as a 1-D gradient array with shape (n,).

    See Also
    --------
    check_derivative : Check correctness of a function computing derivatives.

    Notes
    -----
    If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS is
    determined from the smallest floating point dtype of `x0` or `fun(x0)`,
    ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and
    s=3 for '3-point' method. Such a relative step approximately minimizes a sum
    of truncation and round-off errors, see [1]_. Relative steps are used by
    default. However, absolute steps are used when ``abs_step is not None``.
    If any of the absolute or relative steps produces an indistinguishable
    difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an
    automatic step size is substituted for that particular entry.

    A finite difference scheme for '3-point' method is selected automatically.
    The well-known central difference scheme is used for points sufficiently
    far from the boundary, and 3-point forward or backward scheme is used for
    points near the boundary. Both schemes have the second-order accuracy in
    terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
    forward and backward difference schemes.

    For dense differencing, when m=1 the Jacobian is returned with shape (n,);
    when n=1 it is returned with shape (m, 1).
    Our motivation is the following: a) It handles a case of gradient
    computation (m=1) in a conventional way. b) It clearly separates these two
    different cases. c) In all cases np.atleast_2d can be called to get 2-D
    Jacobian with correct dimensions.

    References
    ----------
    .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
           Computing. 3rd edition", sec. 5.7.

    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.

    .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
           Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize._numdiff import approx_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> approx_derivative(f, x0, args=(1, 2))
    array([[ 1.,  0.],
           [-1.,  0.]])

    Bounds can be used to limit the region of function evaluation.
    In the example below we compute left and right derivative at point 1.0.

    >>> def g(x):
    ...     return x**2 if x >= 1 else x
    ...
    >>> x0 = 1.0
    >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
    array([ 1.])
    >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
    array([ 2.])
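
    For expensive or large problems two further modes are available (shown
    here as sketches with hypothetical ``fun``, ``x0``, ``p`` and
    ``structure``). A linear operator avoids forming the full matrix and
    spends only one or two function evaluations per product::

        J = approx_derivative(fun, x0, as_linear_operator=True)
        Jp = J.dot(p)

    A known sparsity structure (optionally with a column grouping from
    `group_columns`) lets several columns be estimated per evaluation::

        groups = group_columns(structure)
        J = approx_derivative(fun, x0, sparsity=(structure, groups))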
    """
    if method not in ['2-point', '3-point', 'cs']:
        raise ValueError("Unknown method '%s'. " % method)

    x0 = np.atleast_1d(x0)
    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")

    lb, ub = _prepare_bounds(bounds, x0)

    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")

    if as_linear_operator and not (np.all(np.isinf(lb))
                                   and np.all(np.isinf(ub))):
        raise ValueError("Bounds not supported when "
                         "`as_linear_operator` is True.")

    def fun_wrapped(x):
        f = np.atleast_1d(fun(x, *args, **kwargs))
        if f.ndim > 1:
            raise RuntimeError("`fun` return value has "
                               "more than 1 dimension.")
        return f

    if f0 is None:
        f0 = fun_wrapped(x0)
    else:
        f0 = np.atleast_1d(f0)
        if f0.ndim > 1:
            raise ValueError("`f0` passed has more than 1 dimension.")

    if np.any((x0 < lb) | (x0 > ub)):
        raise ValueError("`x0` violates bound constraints.")

    if as_linear_operator:
        if rel_step is None:
            rel_step = _eps_for_method(x0.dtype, f0.dtype, method)

        return _linear_operator_difference(fun_wrapped, x0,
                                           f0, rel_step, method)
    else:
        # By default we use rel_step.
        if abs_step is None:
            h = _compute_absolute_step(rel_step, x0, f0, method)
        else:
            # User specifies an absolute step.
            sign_x0 = (x0 >= 0).astype(float) * 2 - 1
            h = abs_step

            # Cannot have a zero step. This might happen if x0 is very large
            # or very small. In that case fall back to a relative step.
            dx = ((x0 + h) - x0)
            h = np.where(dx == 0,
                         _eps_for_method(x0.dtype, f0.dtype, method) *
                         sign_x0 * np.maximum(1.0, np.abs(x0)),
                         h)

        if method == '2-point':
            h, use_one_sided = _adjust_scheme_to_bounds(
                x0, h, 1, '1-sided', lb, ub)
        elif method == '3-point':
            h, use_one_sided = _adjust_scheme_to_bounds(
                x0, h, 1, '2-sided', lb, ub)
        elif method == 'cs':
            use_one_sided = False

        if sparsity is None:
            return _dense_difference(fun_wrapped, x0, f0, h,
                                     use_one_sided, method)
        else:
            if not issparse(sparsity) and len(sparsity) == 2:
                structure, groups = sparsity
            else:
                structure = sparsity
                groups = group_columns(sparsity)

            if issparse(structure):
                structure = csc_matrix(structure)
            else:
                structure = np.atleast_2d(structure)

            groups = np.atleast_1d(groups)
            return _sparse_difference(fun_wrapped, x0, f0, h,
                                      use_one_sided, structure,
                                      groups, method)


def _linear_operator_difference(fun, x0, f0, h, method):
    m = f0.size
    n = x0.size

    if method == '2-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p
            df = fun(x) - f0
            return df / dx

    elif method == '3-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = 2*h / norm(p)
            x1 = x0 - (dx/2)*p
            x2 = x0 + (dx/2)*p
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
            return df / dx

    elif method == 'cs':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p*1.j
            f1 = fun(x)
            df = f1.imag
            return df / dx

    else:
        raise RuntimeError("Never be here.")

    return LinearOperator((m, n), matvec)


def _dense_difference(fun, x0, f0, h, use_one_sided, method):
    m = f0.size
    n = x0.size
    J_transposed = np.empty((n, m))
    h_vecs = np.diag(h)

    for i in range(h.size):
        if method == '2-point':
            # Forward difference: (f(x0 + h) - f(x0)) / h.
            x = x0 + h_vecs[i]
            dx = x[i] - x0[i]  # Recompute dx as an exactly representable number.
            df = fun(x) - f0
        elif method == '3-point' and use_one_sided[i]:
            # One-sided second order scheme near a bound:
            # (-3 f(x0) + 4 f(x0 + h) - f(x0 + 2 h)) / (2 h).
            x1 = x0 + h_vecs[i]
            x2 = x0 + 2 * h_vecs[i]
            dx = x2[i] - x0[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = -3.0 * f0 + 4 * f1 - f2
        elif method == '3-point' and not use_one_sided[i]:
            # Central difference: (f(x0 + h) - f(x0 - h)) / (2 h).
            x1 = x0 - h_vecs[i]
            x2 = x0 + h_vecs[i]
            dx = x2[i] - x1[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
        elif method == 'cs':
            # Complex step: Im(f(x0 + i h)) / h.
            f1 = fun(x0 + h_vecs[i]*1.j)
            df = f1.imag
            dx = h_vecs[i, i]
        else:
            raise RuntimeError("Never be here.")

        J_transposed[i] = df / dx

    if m == 1:
        J_transposed = np.ravel(J_transposed)

    return J_transposed.T


def _sparse_difference(fun, x0, f0, h, use_one_sided,
                       structure, groups, method):
    m = f0.size
    n = x0.size
    row_indices = []
    col_indices = []
    fractions = []

    n_groups = np.max(groups) + 1
    for group in range(n_groups):
        # Perturb all variables which belong to the same group simultaneously.
        e = np.equal(group, groups)
        h_vec = h * e
        if method == '2-point':
            x = x0 + h_vec
            dx = x - x0
            df = fun(x) - f0
            # The result is written to columns which correspond to perturbed
            # variables.
            cols, = np.nonzero(e)
            # Find all non-zero elements in the selected columns of the
            # Jacobian structure.
            i, j, _ = find(structure[:, cols])
            # Restore column indices in the full array.
            j = cols[j]
        elif method == '3-point':
            # Conceptually the same, but one-sided and two-sided points are
            # handled separately.
            x1 = x0.copy()
            x2 = x0.copy()

            mask_1 = use_one_sided & e
            x1[mask_1] += h_vec[mask_1]
            x2[mask_1] += 2 * h_vec[mask_1]

            mask_2 = ~use_one_sided & e
            x1[mask_2] -= h_vec[mask_2]
            x2[mask_2] += h_vec[mask_2]

            dx = np.zeros(n)
            dx[mask_1] = x2[mask_1] - x0[mask_1]
            dx[mask_2] = x2[mask_2] - x1[mask_2]

            f1 = fun(x1)
            f2 = fun(x2)

            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]

            mask = use_one_sided[j]
            df = np.empty(m)

            rows = i[mask]
            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]

            rows = i[~mask]
            df[rows] = f2[rows] - f1[rows]
        elif method == 'cs':
            f1 = fun(x0 + h_vec*1.j)
            df = f1.imag
            dx = h_vec
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
        else:
            raise RuntimeError("Never be here.")

        # All that's left is to compute the fractions. i, j and fractions are
        # stored as separate arrays and a coo_matrix is constructed at the end.
        row_indices.append(i)
        col_indices.append(j)
        fractions.append(df[i] / dx[j])

    row_indices = np.hstack(row_indices)
    col_indices = np.hstack(col_indices)
    fractions = np.hstack(fractions)
    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
    return csr_matrix(J)


def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check correctness of a function computing derivatives (Jacobian or
    gradient) by comparison with a finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is an ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
    jac : callable
        Function which computes Jacobian matrix of `fun`. It must work with
        argument x the same way as `fun`. The return value must be array_like
        or sparse matrix with an appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to 1-D array.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same
        for `jac`.

    Returns
    -------
    accuracy : float
        The maximum among all relative errors for elements with absolute values
        higher than 1 and absolute errors for elements with absolute values
        less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
        then it is likely that your `jac` implementation is correct.

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize._numdiff import check_derivative
    >>>
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> def jac(x, c1, c2):
    ...     return np.array([
    ...         [np.sin(c1 * x[1]),  c1 * x[0] * np.cos(c1 * x[1])],
    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
    ...     ])
    ...
    >>>
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> check_derivative(f, jac, x0, args=(1, 2))
    2.4492935982947064e-16
    """
    J_to_test = jac(x0, *args, **kwargs)
    if issparse(J_to_test):
        J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
                                   args=args, kwargs=kwargs)
        J_to_test = csr_matrix(J_to_test)
        abs_err = J_to_test - J_diff
        i, j, abs_err_data = find(abs_err)
        J_diff_data = np.asarray(J_diff[i, j]).ravel()
        return np.max(np.abs(abs_err_data) /
                      np.maximum(1, np.abs(J_diff_data)))
    else:
        J_diff = approx_derivative(fun, x0, bounds=bounds,
                                   args=args, kwargs=kwargs)
        abs_err = np.abs(J_to_test - J_diff)
        return np.max(abs_err / np.maximum(1, np.abs(J_diff)))