"""Functions used by least-squares algorithms."""
from math import copysign

import numpy as np
from numpy.linalg import norm

from scipy.linalg import cho_factor, cho_solve, LinAlgError
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, aslinearoperator

EPS = np.finfo(float).eps


# Functions related to a trust-region problem.


def intersect_trust_region(x, s, Delta):
    """Find the intersection of a line with the boundary of a trust region.

    This function solves the quadratic equation with respect to t
    ||(x + s*t)||**2 = Delta**2.

    Returns
    -------
    t_neg, t_pos : tuple of float
        Negative and positive roots.

    Raises
    ------
    ValueError
        If `s` is zero or `x` is not within the trust region.
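
    Examples
    --------
    A minimal illustrative call (example added for clarity; the values are
    arbitrary):

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import intersect_trust_region
    >>> t_neg, t_pos = intersect_trust_region(np.zeros(2),
    ...                                       np.array([1.0, 0.0]), 1.0)
    >>> float(t_neg), float(t_pos)
    (-1.0, 1.0)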
    """
    a = np.dot(s, s)
    if a == 0:
        raise ValueError("`s` is zero.")

    b = np.dot(x, s)

    c = np.dot(x, x) - Delta**2
    if c > 0:
        raise ValueError("`x` is not within the trust region.")

    d = np.sqrt(b*b - a*c)  # Root from one fourth of the discriminant.

    # Compute both roots in a way that avoids loss of significance.
    q = -(b + copysign(d, b))
    t1 = q / a
    t2 = c / q

    if t1 < t2:
        return t1, t2
    else:
        return t2, t1


def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
                           rtol=0.01, max_iter=10):
    """Solve a trust-region problem arising in least-squares minimization.

    This function implements a method described by J. J. More [1]_ and used
    in MINPACK, but it relies on a single SVD of Jacobian instead of series
    of Cholesky decompositions. Before running this function, compute:
    ``U, s, VT = svd(J, full_matrices=False)``.

    Parameters
    ----------
    n : int
        Number of variables.
    m : int
        Number of residuals.
    uf : ndarray
        Computed as U.T.dot(f).
    s : ndarray
        Singular values of J.
    V : ndarray
        Transpose of VT.
    Delta : float
        Radius of a trust region.
    initial_alpha : float, optional
        Initial guess for alpha, which might be available from a previous
        iteration. If None, determined automatically.
    rtol : float, optional
        Stopping tolerance for the root-finding procedure. Namely, the
        solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
    max_iter : int, optional
        Maximum allowed number of iterations for the root-finding procedure.

    Returns
    -------
    p : ndarray, shape (n,)
        Found solution of a trust-region problem.
    alpha : float
        Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
        Sometimes called Levenberg-Marquardt parameter.
    n_iter : int
        Number of iterations made by root-finding procedure. Zero means
        that Gauss-Newton step was selected as the solution.

    References
    ----------
    .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
           and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
           in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
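
    Examples
    --------
    A small illustrative setup (added sketch; ``J`` and ``f`` are arbitrary).
    When the Gauss-Newton step is too long, the returned step has norm
    `Delta`:

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import solve_lsq_trust_region
    >>> J = np.eye(2)
    >>> f = np.array([3.0, 4.0])
    >>> U, s, VT = np.linalg.svd(J, full_matrices=False)
    >>> p, alpha, n_iter = solve_lsq_trust_region(2, 2, U.T.dot(f), s, VT.T,
    ...                                           Delta=1.0)
    >>> bool(np.allclose(np.linalg.norm(p), 1.0))
    True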
    """
    def phi_and_derivative(alpha, suf, s, Delta):
        """Function of which to find zero.

        It is defined as "norm of regularized (by alpha) least-squares
        solution minus `Delta`". Refer to [1]_.
        """
        denom = s**2 + alpha
        p_norm = norm(suf / denom)
        phi = p_norm - Delta
        phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
        return phi, phi_prime

    suf = s * uf

    # Check if J has full rank and try Gauss-Newton step.
    if m >= n:
        threshold = EPS * m * s[0]
        full_rank = s[-1] > threshold
    else:
        full_rank = False

    if full_rank:
        p = -V.dot(uf / s)
        if norm(p) <= Delta:
            return p, 0.0, 0

    alpha_upper = norm(suf) / Delta

    if full_rank:
        phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
        alpha_lower = -phi / phi_prime
    else:
        alpha_lower = 0.0

    if initial_alpha is None or not full_rank and initial_alpha == 0:
        alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
    else:
        alpha = initial_alpha

    for it in range(max_iter):
        if alpha < alpha_lower or alpha > alpha_upper:
            alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)

        phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)

        if phi < 0:
            alpha_upper = alpha

        ratio = phi / phi_prime
        alpha_lower = max(alpha_lower, alpha - ratio)
        alpha -= (phi + Delta) * ratio / Delta

        if np.abs(phi) < rtol * Delta:
            break

    p = -V.dot(suf / (s**2 + alpha))
    p *= Delta / norm(p)

    return p, alpha, it + 1


def solve_trust_region_2d(B, g, Delta):
    """Solve a general trust-region problem in 2 dimensions.

    The problem is reformulated as a 4th order algebraic equation,
    the solution of which is found by numpy.roots.

    Parameters
    ----------
    B : ndarray, shape (2, 2)
        Symmetric matrix, defines a quadratic term of the function.
    g : ndarray, shape (2,)
        Defines a linear term of the function.
    Delta : float
        Radius of a trust region.

    Returns
    -------
    p : ndarray, shape (2,)
        Found solution.
    newton_step : bool
        Whether the returned solution is the Newton step which lies within
        the trust region.
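
    Examples
    --------
    Illustrative example (added for clarity; values are arbitrary). With an
    identity quadratic term the Newton step fits inside the trust region and
    is returned directly:

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import solve_trust_region_2d
    >>> p, newton_step = solve_trust_region_2d(np.eye(2),
    ...                                        np.array([-1.0, 0.0]), 2.0)
    >>> bool(newton_step), bool(np.allclose(p, [1.0, 0.0]))
    (True, True)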
    """
    try:
        R, lower = cho_factor(B)
        p = -cho_solve((R, lower), g)
        if np.dot(p, p) <= Delta**2:
            return p, True
    except LinAlgError:
        pass

    a = B[0, 0] * Delta**2
    b = B[0, 1] * Delta**2
    c = B[1, 1] * Delta**2

    d = g[0] * Delta
    f = g[1] * Delta

    coeffs = np.array(
        [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
    t = np.roots(coeffs)  # Can handle leading zeros.
    t = np.real(t[np.isreal(t)])

    p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
    value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
    i = np.argmin(value)
    p = p[:, i]

    return p, False


def update_tr_radius(Delta, actual_reduction, predicted_reduction,
                     step_norm, bound_hit):
    """Update the radius of a trust region based on the cost reduction.

    Returns
    -------
    Delta : float
        New radius.
    ratio : float
        Ratio between actual and predicted reductions.
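
    Examples
    --------
    Illustrative example (added sketch): a poor reduction ratio shrinks the
    radius to a quarter of the step norm:

    >>> from scipy.optimize._lsq.common import update_tr_radius
    >>> update_tr_radius(2.0, 0.1, 1.0, 1.0, False)
    (0.25, 0.1)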
    """
    if predicted_reduction > 0:
        ratio = actual_reduction / predicted_reduction
    elif predicted_reduction == actual_reduction == 0:
        ratio = 1
    else:
        ratio = 0

    if ratio < 0.25:
        Delta = 0.25 * step_norm
    elif ratio > 0.75 and bound_hit:
        Delta *= 2.0

    return Delta, ratio


# Construction and minimization of quadratic functions.


def build_quadratic_1d(J, g, s, diag=None, s0=None):
    """Parameterize a multivariate quadratic function along a line.

    The resulting univariate quadratic function is given as follows::

        f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
               g.T * (s0 + s*t)

    Parameters
    ----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Jacobian matrix, affects the quadratic term.
    g : ndarray, shape (n,)
        Gradient, defines the linear term.
    s : ndarray, shape (n,)
        Direction vector of a line.
    diag : None or ndarray with shape (n,), optional
        Additional diagonal part, affects the quadratic term.
        If None, assumed to be 0.
    s0 : None or ndarray with shape (n,), optional
        Initial point. If None, assumed to be 0.

    Returns
    -------
    a : float
        Coefficient for t**2.
    b : float
        Coefficient for t.
    c : float
        Free term. Returned only if `s0` is provided.
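
    Examples
    --------
    Illustrative example (added sketch; `J`, `g` and `s` are arbitrary):

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import build_quadratic_1d
    >>> a, b = build_quadratic_1d(np.eye(2), np.array([1.0, 0.0]),
    ...                           np.array([1.0, 0.0]))
    >>> float(a), float(b)
    (0.5, 1.0)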
    """
    v = J.dot(s)
    a = np.dot(v, v)
    if diag is not None:
        a += np.dot(s * diag, s)
    a *= 0.5

    b = np.dot(g, s)

    if s0 is not None:
        u = J.dot(s0)
        b += np.dot(u, v)
        c = 0.5 * np.dot(u, u) + np.dot(g, s0)
        if diag is not None:
            b += np.dot(s0 * diag, s)
            c += 0.5 * np.dot(s0 * diag, s0)
        return a, b, c
    else:
        return a, b


def minimize_quadratic_1d(a, b, lb, ub, c=0):
    """Minimize a 1-D quadratic function subject to bounds.

    The free term `c` is 0 by default. Bounds must be finite.

    Returns
    -------
    t : float
        Minimum point.
    y : float
        Minimum value.
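
    Examples
    --------
    Illustrative example (added sketch): minimize ``t**2 - 4*t`` over
    ``[0, 10]``:

    >>> from scipy.optimize._lsq.common import minimize_quadratic_1d
    >>> t, y = minimize_quadratic_1d(1.0, -4.0, 0.0, 10.0)
    >>> float(t), float(y)
    (2.0, -4.0)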
    """
    t = [lb, ub]
    if a != 0:
        extremum = -0.5 * b / a
        if lb < extremum < ub:
            t.append(extremum)
    t = np.asarray(t)
    y = t * (a * t + b) + c
    min_index = np.argmin(y)
    return t[min_index], y[min_index]


def evaluate_quadratic(J, g, s, diag=None):
    """Compute values of a quadratic function arising in least squares.

    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.

    Parameters
    ----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Jacobian matrix, affects the quadratic term.
    g : ndarray, shape (n,)
        Gradient, defines the linear term.
    s : ndarray, shape (k, n) or (n,)
        Array containing steps as rows.
    diag : ndarray, shape (n,), optional
        Additional diagonal part, affects the quadratic term.
        If None, assumed to be 0.

    Returns
    -------
    values : ndarray with shape (k,) or float
        Values of the function. If `s` was 2-D, then ndarray is
        returned, otherwise, float is returned.
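
    Examples
    --------
    Illustrative example (added sketch; arrays are arbitrary):

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import evaluate_quadratic
    >>> float(evaluate_quadratic(np.eye(2), np.array([1.0, 0.0]),
    ...                          np.array([1.0, 0.0])))
    1.5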
    """
    if s.ndim == 1:
        Js = J.dot(s)
        q = np.dot(Js, Js)
        if diag is not None:
            q += np.dot(s * diag, s)
    else:
        Js = J.dot(s.T)
        q = np.sum(Js**2, axis=0)
        if diag is not None:
            q += np.sum(diag * s**2, axis=1)

    l = np.dot(s, g)

    return 0.5 * q + l


# Utility functions to work with bound constraints.


def in_bounds(x, lb, ub):
    """Check if a point lies within bounds."""
    return np.all((x >= lb) & (x <= ub))


def step_size_to_bound(x, s, lb, ub):
    """Compute a min_step size required to reach a bound.

    The function computes a positive scalar t, such that x + s * t is on
    the bound.

    Returns
    -------
    step : float
        Computed step. Non-negative value.
    hits : ndarray of int with shape of x
        Each element indicates whether a corresponding variable reaches the
        bound:

             *  0 - the bound was not hit.
             * -1 - the lower bound was hit.
             *  1 - the upper bound was hit.
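
    Examples
    --------
    Illustrative example (added sketch): from the origin, the unit box is hit
    after a step of length 1 in both coordinates:

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import step_size_to_bound
    >>> step, hits = step_size_to_bound(np.zeros(2), np.array([1.0, -1.0]),
    ...                                 -np.ones(2), np.ones(2))
    >>> float(step), hits.tolist()
    (1.0, [1, -1])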
    """
    non_zero = np.nonzero(s)
    s_non_zero = s[non_zero]
    steps = np.empty_like(x)
    steps.fill(np.inf)
    with np.errstate(over='ignore'):
        steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
                                     (ub - x)[non_zero] / s_non_zero)
    min_step = np.min(steps)
    return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)


def find_active_constraints(x, lb, ub, rtol=1e-10):
    """Determine which constraints are active in a given point.

    The threshold is computed using `rtol` and the absolute value of the
    closest bound.

    Returns
    -------
    active : ndarray of int with shape of x
        Each component shows whether the corresponding constraint is active:

             *  0 - a constraint is not active.
             * -1 - a lower bound is active.
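
    Examples
    --------
    Illustrative example (added sketch): the first variable sits on its lower
    bound and the second on its upper bound:

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import find_active_constraints
    >>> find_active_constraints(np.array([0.0, 10.0]), np.zeros(2),
    ...                         np.array([1.0, 10.0])).tolist()
    [-1, 1]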
             *  1 - a upper bound is active.
    Zdtyper   r&   r(   )r   
zeros_likerb   r^   r,   isfiniteminimum)r   rP   rQ   r0   activeZ
lower_distZ
upper_distZlower_thresholdZupper_thresholdZlower_activeZupper_activer   r   r   find_active_constraints  s$    rj   c           	   	   C   s   |   }t| |||}t|d}t|d}|dkrht|| || ||< t|| || ||< nL|| |tdt||   ||< || |tdt||   ||< ||k ||kB }d|| ||   ||< |S )zShift a point to the interior of a feasible region.

    Each element of the returned vector is at least at a relative distance
    `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
    r&   r(   r   r'   )copyrj   r   r`   Z	nextafterr^   r,   )	r   rP   rQ   ZrstepZx_newri   Z
lower_maskZ
upper_maskZtight_boundsr   r   r   make_strictly_feasible  s     rl   c                 C   sx   t | }t | }|dk t |@ }|| | |  ||< d||< |dkt |@ }| | ||  ||< d||< ||fS )a4  Compute Coleman-Li scaling vector and its derivatives.

    Components of a vector v are defined as follows::

               | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
        v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
               | 1,           otherwise

    According to this definition v[i] >= 0 for all i. It differs from the
    definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
    used. Both definitions are equivalent down the line.
    Derivatives of v with respect to x take value 1, -1 or 0 depending on a
    case.

    Returns
    -------
    v : ndarray with shape of x
        Scaling vector.
    dv : ndarray with shape of x
        Derivatives of v[i] with respect to x[i], diagonal elements of v's
        Jacobian.

    References
    ----------
    .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
           and Conjugate Gradient Method for Large-Scale Bound-Constrained
           Minimization Problems," SIAM Journal on Scientific Computing,
           Vol. 21, Number 1, pp 1-23, 1999.
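
    Examples
    --------
    Illustrative example (added sketch): the first variable has a finite lower
    bound and a positive gradient, the second a finite upper bound and a
    negative gradient:

    >>> import numpy as np
    >>> from scipy.optimize._lsq.common import CL_scaling_vector
    >>> x = np.array([1.0, 1.0])
    >>> g = np.array([1.0, -1.0])
    >>> lb = np.array([0.0, -np.inf])
    >>> ub = np.array([np.inf, 2.0])
    >>> v, dv = CL_scaling_vector(x, g, lb, ub)
    >>> v.tolist(), dv.tolist()
    ([1.0, 1.0], [1.0, -1.0])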
    """
    v = np.ones_like(x)
    dv = np.zeros_like(x)

    mask = (g < 0) & np.isfinite(ub)
    v[mask] = ub[mask] - x[mask]
    dv[mask] = -1

    mask = (g > 0) & np.isfinite(lb)
    v[mask] = x[mask] - lb[mask]
    dv[mask] = 1

    return v, dv


def reflective_transformation(y, lb, ub):
    """Compute reflective transformation and its gradient."""
    if in_bounds(y, lb, ub):
        return y, np.ones_like(y)

    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    x = y.copy()
    g_negative = np.zeros_like(y, dtype=bool)

    mask = lb_finite & ~ub_finite
    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
    g_negative[mask] = y[mask] < lb[mask]

    mask = ~lb_finite & ub_finite
    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
    g_negative[mask] = y[mask] > ub[mask]

    mask = lb_finite & ub_finite
    d = ub - lb
    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
    g_negative[mask] = t > d[mask]

    g = np.ones_like(y)
    g[g_negative] = -1

    return x, g


# Functions to display algorithm's progress.


def print_header_nonlinear():
    print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}"
          .format("Iteration", "Total nfev", "Cost", "Cost reduction",
                  "Step norm", "Optimality"))


def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
                              step_norm, optimality):
    if cost_reduction is None:
        cost_reduction = " " * 15
    else:
        cost_reduction = "{0:^15.2e}".format(cost_reduction)

    if step_norm is None:
        step_norm = " " * 15
    else:
        step_norm = "{0:^15.2e}".format(step_norm)

    print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}"
          .format(iteration, nfev, cost, cost_reduction,
                  step_norm, optimality))


def print_header_linear():
    print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}"
          .format("Iteration", "Cost", "Cost reduction", "Step norm",
                  "Optimality"))


def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
                           optimality):
    if cost_reduction is None:
        cost_reduction = " " * 15
    else:
        cost_reduction = "{0:^15.2e}".format(cost_reduction)

    if step_norm is None:
        step_norm = " " * 15
    else:
        step_norm = "{0:^15.2e}".format(step_norm)

    print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format(
        iteration, cost, cost_reduction, step_norm, optimality))


# Simple helper functions.


def compute_grad(J, f):
    """Compute gradient of the least-squares cost function."""
    if isinstance(J, LinearOperator):
        return J.rmatvec(f)
    else:
        return J.T.dot(f)


def compute_jac_scale(J, scale_inv_old=None):
    """Compute variables scale based on the Jacobian matrix."""
    if issparse(J):
        scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
    else:
        scale_inv = np.sum(J**2, axis=0)**0.5

    if scale_inv_old is None:
        scale_inv[scale_inv == 0] = 1
    else:
        scale_inv = np.maximum(scale_inv, scale_inv_old)

    return 1 / scale_inv, scale_inv


def left_multiplied_operator(J, d):
    """Return diag(d) J as LinearOperator."""
    J = aslinearoperator(J)

    def matvec(x):
        return d * J.matvec(x)

    def matmat(X):
        return d[:, np.newaxis] * J.matmat(X)

    def rmatvec(x):
        return J.rmatvec(x.ravel() * d)

    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
                          rmatvec=rmatvec)


def right_multiplied_operator(J, d):
    """Return J diag(d) as LinearOperator."""
    J = aslinearoperator(J)

    def matvec(x):
        return J.matvec(np.ravel(x) * d)

    def matmat(X):
        return J.matmat(X * d[:, np.newaxis])

    def rmatvec(x):
        return d * J.rmatvec(x)

    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
                          rmatvec=rmatvec)


def regularized_lsq_operator(J, diag):
    """Return a matrix arising in regularized least squares as LinearOperator.

    The matrix is
        [ J ]
        [ D ]
    where D is diagonal matrix with elements from `diag`.
    """
    J = aslinearoperator(J)
    m, n = J.shape

    def matvec(x):
        return np.hstack((J.matvec(x), diag * x))

    def rmatvec(x):
        x1 = x[:m]
        x2 = x[m:]
        return J.rmatvec(x1) + diag * x2

    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)


def right_multiply(J, d, copy=True):
    """Compute J diag(d).

    If `copy` is False, `J` is modified in place (unless being LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        # Scale each stored entry by d of its column (CSR/CSC data layout).
        J.data *= d.take(J.indices, mode='clip')
    elif isinstance(J, LinearOperator):
        J = right_multiplied_operator(J, d)
    else:
        J *= d

    return J


def left_multiply(J, d, copy=True):
    """Compute diag(d) J.

    If `copy` is False, `J` is modified in place (unless being LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        # Scale each stored entry by d of its row (CSR data layout).
        J.data *= np.repeat(d, np.diff(J.indptr))
    elif isinstance(J, LinearOperator):
        J = left_multiplied_operator(J, d)
    else:
        J *= d[:, np.newaxis]

    return J


def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
    """Check termination condition for nonlinear least squares."""
    ftol_satisfied = dF < ftol * F and ratio > 0.25
    xtol_satisfied = dx_norm < xtol * (xtol + x_norm)

    if ftol_satisfied and xtol_satisfied:
        return 4
    elif ftol_satisfied:
        return 2
    elif xtol_satisfied:
        return 3
    else:
        return None


def scale_for_robust_loss_function(J, f, rho):
    """Scale Jacobian and residuals for a robust loss function.

    Arrays are modified in place.
    """
    J_scale = rho[1] + 2 * rho[2] * f**2
    J_scale[J_scale < EPS] = EPS
    J_scale **= 0.5

    f *= rho[1] / J_scale

    return left_multiply(J, J_scale, copy=False), f