Class mroot_hybrids_base (o2scl)

class mroot_hybrids_base

Base functions for mroot_hybrids.

Subclassed by o2scl::mroot_hybrids< func_t, vec_t, mat_t, jfunc_t >, o2scl::mroot_hybrids< mm_funct >, o2scl::mroot_hybrids< mm_funct, boost::numeric::ublas::vector< double >, boost::numeric::ublas::matrix< double >, jac_funct >, o2scl::mroot_hybrids< mm_funct, vector< double >, matrix< double >, jac_funct >

Public Types

typedef boost::numeric::ublas::vector<double> ubvector
typedef boost::numeric::ublas::matrix<double> ubmatrix

Public Functions

inline double compute_actual_reduction(double fnorm0, double fnorm1)

Compute the actual reduction.
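
In the MINPACK-style hybrid algorithm this quantity is conventionally the relative decrease of the squared residual norm; a minimal sketch of that formula (the helper name is hypothetical, and the exact implementation here is an assumption):

// Sketch of the MINPACK-style actual reduction: the relative decrease
// of the squared residual norm, or -1 if the step did not improve.
double actual_reduction_sketch(double fnorm0, double fnorm1) {
  if (fnorm1 < fnorm0) {
    double u = fnorm1 / fnorm0;
    return 1.0 - u * u;
  }
  return -1.0;
}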

inline double compute_predicted_reduction(double fnorm0, double fnorm1)

Compute the predicted reduction, \( \phi_{1p} = \| Q^{T} f + R \, dx \| \).
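
A matching sketch of the MINPACK-style predicted reduction, with \( \phi_{1p} \) passed in as fnorm1 (again a hedged sketch, not the literal implementation):

// Sketch: predicted reduction 1 - (phi1p/fnorm0)^2, clamped at zero
// when the linear model predicts no improvement.
double predicted_reduction_sketch(double fnorm0, double fnorm1) {
  double u = fnorm1 / fnorm0;
  return (u < 1.0) ? 1.0 - u * u : 0.0;
}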

template<class vec2_t, class mat_t>
inline void compute_Rg(size_t N, const mat_t &r2, const ubvector &gradient2, vec2_t &Rg)

Compute \( R \cdot g \), where g is the gradient direction.
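
Since R comes from a QR decomposition it is upper triangular, so only the entries with \( j \geq i \) contribute. A sketch under that assumption (the name compute_Rg_sketch is hypothetical):

#include <cstddef>
#include <vector>

// Sketch: Rg_i = sum_{j >= i} R(i,j) g_j, assuming R(i,j) = 0 for j < i.
template<class mat_t>
void compute_Rg_sketch(std::size_t N, const mat_t &R,
                       const std::vector<double> &g,
                       std::vector<double> &Rg) {
  for (std::size_t i = 0; i < N; i++) {
    double sum = 0.0;
    for (std::size_t j = i; j < N; j++) sum += R(i, j) * g[j];
    Rg[i] = sum;
  }
}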

template<class vec2_t>
inline void compute_wv(size_t n, const ubvector &qtdf2, const ubvector &rdx2, const vec2_t &dx2, const ubvector &diag2, double pnorm, ubvector &w2, ubvector &v2)

Compute w and v.
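
In the GSL hybrid solver this class is modeled on, w and v feed the rank-one (Broyden) update of the QR factorization; a sketch of those formulas, offered as an assumption about this implementation:

#include <cstddef>
#include <vector>

// Sketch: w_i = (qtdf_i - rdx_i) / pnorm and v_i = diag_i^2 dx_i / pnorm,
// the vectors used in the rank-one update R -> R + w v^T.
void compute_wv_sketch(std::size_t n, const std::vector<double> &qtdf,
                       const std::vector<double> &rdx,
                       const std::vector<double> &dx,
                       const std::vector<double> &diag, double pnorm,
                       std::vector<double> &w, std::vector<double> &v) {
  for (std::size_t i = 0; i < n; i++) {
    w[i] = (qtdf[i] - rdx[i]) / pnorm;
    v[i] = diag[i] * diag[i] * dx[i] / pnorm;
  }
}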

template<class vec2_t, class mat_t>
inline void compute_rdx(size_t N, const mat_t &r2, const vec2_t &dx2, ubvector &rdx2)

Compute \( R \cdot \mathrm{dx} \).
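
This is the same upper-triangular product sketched above for compute_Rg(), applied to the step dx instead of the gradient.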

template<class vec2_t, class vec3_t>
inline double scaled_enorm(size_t n, const vec2_t &d, const vec3_t &ff)

Compute the norm of the vector \( \vec{v} \) defined by \( v_i = d_i \, \mathrm{ff}_i \).
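
A minimal sketch of this scaled norm (the helper name is hypothetical):

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: Euclidean norm of the elementwise product d_i * ff_i.
double scaled_enorm_sketch(std::size_t n, const std::vector<double> &d,
                           const std::vector<double> &ff) {
  double sum = 0.0;
  for (std::size_t i = 0; i < n; i++) {
    double t = d[i] * ff[i];
    sum += t * t;
  }
  return std::sqrt(sum);
}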

template<class vec2_t>
inline double compute_delta(size_t n, ubvector &diag2, vec2_t &x2)

Compute the trust-region radius delta.
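
In the MINPACK convention the initial radius is a multiple of the scaled norm of the starting point; a sketch assuming the customary factor of 100 (the factor and the fallback are assumptions):

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: delta = factor * ||diag_i x_i||_2, falling back to factor
// itself when the scaled norm vanishes (factor = 100 in MINPACK).
double compute_delta_sketch(std::size_t n, const std::vector<double> &diag,
                            const std::vector<double> &x) {
  const double factor = 100.0;
  double sum = 0.0;
  for (std::size_t i = 0; i < n; i++) {
    double t = diag[i] * x[i];
    sum += t * t;
  }
  double Dx = std::sqrt(sum);
  return (Dx > 0.0) ? factor * Dx : factor;
}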

template<class vec2_t>
inline double enorm(size_t N, const vec2_t &ff)

Compute the Euclidean norm of ff.

Todo

Future: Replace this with dnrm2 from cblas_base.h.
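
A sketch of the plain Euclidean norm, which is exactly the operation the todo above suggests delegating to a BLAS dnrm2 routine:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: ||ff||_2 = sqrt(sum_i ff_i^2).
double enorm_sketch(std::size_t N, const std::vector<double> &ff) {
  double sum = 0.0;
  for (std::size_t i = 0; i < N; i++) sum += ff[i] * ff[i];
  return std::sqrt(sum);
}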

inline double enorm_sum(size_t n, const ubvector &a, const ubvector &b)

Compute the Euclidean norm of the sum of a and b.
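
That is, it returns \( \left[ \sum_i (a_i + b_i)^2 \right]^{1/2} \).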

template<class vec2_t>
inline int compute_trial_step(size_t N, vec2_t &xl, vec2_t &dxl, vec2_t &xx_trial)

Compute a trial step and put the result in xx_trial.

Todo

Future: Replace this function with daxpy?
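
A sketch of the trial step, which is the axpy-style operation the todo above refers to (the helper name is hypothetical):

#include <cstddef>
#include <vector>

// Sketch: x_trial = x + dx, elementwise.
void compute_trial_step_sketch(std::size_t N, const std::vector<double> &x,
                               const std::vector<double> &dx,
                               std::vector<double> &x_trial) {
  for (std::size_t i = 0; i < N; i++) x_trial[i] = x[i] + dx[i];
}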

template<class vec2_t>
inline int compute_df(size_t n, const vec2_t &ff_trial, const vec2_t &fl, ubvector &dfl)

Compute the change in the function value.

Todo

Future: Replace this function with daxpy?
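
Likewise, a sketch of the function-value difference (another daxpy-like loop, as the todo notes):

#include <cstddef>
#include <vector>

// Sketch: df = f_trial - f, elementwise.
void compute_df_sketch(std::size_t n, const std::vector<double> &f_trial,
                       const std::vector<double> &f,
                       std::vector<double> &df) {
  for (std::size_t i = 0; i < n; i++) df[i] = f_trial[i] - f[i];
}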

template<class mat2_t>
inline void compute_diag(size_t n, const mat2_t &J2, ubvector &diag2)

Compute diag, the norm of the columns of the Jacobian.
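
A sketch of the column-norm scaling; replacing zero columns by 1 is a MINPACK detail assumed here:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: diag_j = ||column j of J||_2, with zero columns set to 1.
template<class mat_t>
void compute_diag_sketch(std::size_t n, const mat_t &J,
                         std::vector<double> &diag) {
  for (std::size_t j = 0; j < n; j++) {
    double sum = 0.0;
    for (std::size_t i = 0; i < n; i++) sum += J(i, j) * J(i, j);
    diag[j] = (sum > 0.0) ? std::sqrt(sum) : 1.0;
  }
}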

template<class vec2_t, class vec3_t, class vec4_t>
inline void compute_qtf(size_t N, const vec2_t &q2, const vec3_t &f2, vec4_t &qtf2)

Compute \( Q^{T} f \).

Todo

Future: This function is just right-multiplication, so we could use the O2scl cblas routines instead.
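
A sketch of the product; as the todo notes, \( Q^T f \) is just the right-multiplication \( f^T Q \) read as a column vector (the helper name is hypothetical):

#include <cstddef>
#include <vector>

// Sketch: (Q^T f)_j = sum_i Q(i,j) f_i.
template<class mat_t>
void compute_qtf_sketch(std::size_t N, const mat_t &Q,
                        const std::vector<double> &f,
                        std::vector<double> &qtf) {
  for (std::size_t j = 0; j < N; j++) {
    double sum = 0.0;
    for (std::size_t i = 0; i < N; i++) sum += Q(i, j) * f[i];
    qtf[j] = sum;
  }
}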

template<class mat2_t>
inline void update_diag(size_t n, const mat2_t &J2, ubvector &diag2)

Update diag.
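
A sketch of the update, assuming the usual MINPACK rule of never letting a scale factor shrink:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: diag_j = max(diag_j, ||column j of J||_2).
template<class mat_t>
void update_diag_sketch(std::size_t n, const mat_t &J,
                        std::vector<double> &diag) {
  for (std::size_t j = 0; j < n; j++) {
    double sum = 0.0;
    for (std::size_t i = 0; i < n; i++) sum += J(i, j) * J(i, j);
    double cnorm = std::sqrt(sum);
    if (cnorm > diag[j]) diag[j] = cnorm;
  }
}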

template<class vec2_t>
inline void scaled_addition(size_t N, double alpha, ubvector &newton2, double beta, ubvector &gradient2, vec2_t &pp)

Form the appropriate convex combination of the Gauss-Newton direction and the scaled gradient direction.

Using the Gauss-Newton direction given in newton (a vector of size N) and the gradient direction in gradient (also a vector of size N), this computes

\[ \mathrm{pp}=\alpha \mathrm{newton}+\beta \mathrm{gradient} \]
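
A direct sketch of this combination (the helper name is hypothetical):

#include <cstddef>
#include <vector>

// Sketch: pp_i = alpha * newton_i + beta * gradient_i.
void scaled_addition_sketch(std::size_t N, double alpha,
                            const std::vector<double> &newton,
                            double beta,
                            const std::vector<double> &gradient,
                            std::vector<double> &pp) {
  for (std::size_t i = 0; i < N; i++)
    pp[i] = alpha * newton[i] + beta * gradient[i];
}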

template<class mat_t>
inline int newton_direction(const size_t N, const mat_t &r2, const ubvector &qtf2, ubvector &p)

Compute the Gauss-Newton direction.
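
With R upper triangular, the Newton step solves \( R p = -Q^T f \) by back-substitution; a sketch under that assumption:

#include <cstddef>
#include <vector>

// Sketch: solve R p = qtf by back-substitution (R upper triangular),
// then negate so that p = -R^{-1} Q^T f. Returns nonzero if R has a
// zero on the diagonal.
template<class mat_t>
int newton_direction_sketch(std::size_t N, const mat_t &R,
                            const std::vector<double> &qtf,
                            std::vector<double> &p) {
  for (std::size_t k = N; k-- > 0;) {
    double sum = qtf[k];
    for (std::size_t j = k + 1; j < N; j++) sum -= R(k, j) * p[j];
    if (R(k, k) == 0.0) return 1;
    p[k] = sum / R(k, k);
  }
  for (std::size_t i = 0; i < N; i++) p[i] = -p[i];
  return 0;
}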

template<class mat_t>
inline void gradient_direction(const size_t M, const size_t N, const mat_t &r2, const ubvector &qtf2, const ubvector &diag2, ubvector &g)

Compute the gradient direction.
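
Modeled on the GSL hybrid code, the steepest-descent direction for \( \frac{1}{2}\|A x - b\|^2 \) in the scaled variables reduces to \( -D^{-1} R^T Q^T f \); a square-matrix sketch (an assumption about this implementation):

#include <cstddef>
#include <vector>

// Sketch: g_j = -(sum_{i <= j} R(i,j) qtf_i) / diag_j, i.e. the
// steepest-descent direction -D^{-1} R^T Q^T f in the scaled metric.
template<class mat_t>
void gradient_direction_sketch(std::size_t N, const mat_t &R,
                               const std::vector<double> &qtf,
                               const std::vector<double> &diag,
                               std::vector<double> &g) {
  for (std::size_t j = 0; j < N; j++) {
    double sum = 0.0;
    for (std::size_t i = 0; i <= j; i++) sum += R(i, j) * qtf[i];
    g[j] = -sum / diag[j];
  }
}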

inline void minimum_step(const size_t N, double gnorm, const ubvector &diag2, ubvector &g)

Compute the point at which the gradient is minimized.
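
A sketch modeled on the GSL hybrid code, where the gradient is normalized and the diag scaling undone so the resulting step has unit scaled length:

#include <cstddef>
#include <vector>

// Sketch: g_i = (g_i / gnorm) / diag_i, giving ||diag * g||_2 = 1.
void minimum_step_sketch(std::size_t N, double gnorm,
                         const std::vector<double> &diag,
                         std::vector<double> &g) {
  for (std::size_t i = 0; i < N; i++) g[i] = (g[i] / gnorm) / diag[i];
}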

template<class vec2_t, class mat_t>
inline int dogleg(size_t n, const mat_t &r2, const ubvector &qtf2, const ubvector &diag2, double delta2, ubvector &newton2, ubvector &gradient2, vec2_t &p)

Take a dogleg step.

Given the QR decomposition of an n by n matrix A, a diagonal matrix D (stored in diag), a vector b, and a positive number delta, this function determines the convex combination x of the Gauss-Newton and scaled gradient directions which minimizes \( \| A x - b \| \) in the least-squares sense, subject to the restriction that the Euclidean norm of \( D x \) be at most delta.
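
Schematically, the standard dogleg logic behind this interface has three cases: (i) if the scaled Newton step already satisfies \( \| D \, p_N \| \leq \delta \), it is taken unchanged; (ii) if even the minimizing step along the scaled gradient lies outside the trust region, the step runs along the gradient to the boundary; (iii) otherwise scaled_addition() forms the convex combination \( \alpha \, p_N + \beta \, g \) with the coefficients chosen so that the result lands on the boundary, \( \| D \, p \| = \delta \).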