Example #1
func (rk *ExplicitRungeKutta) Step(from *State, toTime float64, y, yDot []float64) error {
	h := toTime - from.Time

	// Evaluate the Runge-Kutta stages f[i] = f(t + c_i*h, y + h*Σ_j a_ij*f[j]),
	// reusing the derivative stored in from.YDot as the first stage.
	for i := range rk.f {
		if i == 0 {
			copy(rk.f[0], from.YDot)
			continue
		}
		copy(rk.work, from.Y)
		for j, a := range rk.A[i-1] {
			if a != 0 {
				floats.AddScaled(rk.work, h*a, rk.f[j])
			}
		}
		rk.rhs(rk.f[i], from.Time+rk.C[i-1]*h, rk.work)
	}

	// Combine the stages: y = from.Y + h*Σ_i b_i*f[i].
	copy(y, from.Y)
	for i, b := range rk.B {
		if b != 0 {
			floats.AddScaled(y, b*h, rk.f[i])
		}
	}
	rk.rhs(yDot, toTime, y)

	return nil
}
Example #2
func (l *LBFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
	// Uses the two-loop recursion as described in
	// Nocedal, J., Wright, S.: Numerical Optimization (2nd ed.). Springer (2006), chapter 7, page 178.

	if len(loc.X) != l.dim {
		panic("lbfgs: unexpected size mismatch")
	}
	if len(loc.Gradient) != l.dim {
		panic("lbfgs: unexpected size mismatch")
	}
	if len(dir) != l.dim {
		panic("lbfgs: unexpected size mismatch")
	}

	y := l.y[l.oldest]
	floats.SubTo(y, loc.Gradient, l.grad)
	s := l.s[l.oldest]
	floats.SubTo(s, loc.X, l.x)
	sDotY := floats.Dot(s, y)
	l.rho[l.oldest] = 1 / sDotY

	l.oldest = (l.oldest + 1) % l.Store

	copy(l.x, loc.X)
	copy(l.grad, loc.Gradient)
	copy(dir, loc.Gradient)

	// Start with the most recent element and go backward.
	for i := 0; i < l.Store; i++ {
		idx := l.oldest - i - 1
		if idx < 0 {
			idx += l.Store
		}
		l.a[idx] = l.rho[idx] * floats.Dot(l.s[idx], dir)
		floats.AddScaled(dir, -l.a[idx], l.y[idx])
	}

	// Scale the initial Hessian.
	gamma := sDotY / floats.Dot(y, y)
	floats.Scale(gamma, dir)

	// Start with the oldest element and go forward.
	for i := 0; i < l.Store; i++ {
		idx := i + l.oldest
		if idx >= l.Store {
			idx -= l.Store
		}
		beta := l.rho[idx] * floats.Dot(l.y[idx], dir)
		floats.AddScaled(dir, l.a[idx]-beta, l.s[idx])
	}

	// dir contains H^{-1} * g, so flip the direction for minimization.
	floats.Scale(-1, dir)

	return 1
}
Example #3
func (l *LBFGS) NextDirection(loc *Location, dir []float64) (stepSize float64) {
	if len(loc.X) != l.dim {
		panic("lbfgs: unexpected size mismatch")
	}
	if len(loc.Gradient) != l.dim {
		panic("lbfgs: unexpected size mismatch")
	}
	if len(dir) != l.dim {
		panic("lbfgs: unexpected size mismatch")
	}

	// Update direction. Uses the two-loop recursion as described in
	// Nocedal, Wright (2006), Numerical Optimization (2nd ed.). Chapter 7, page 178.
	copy(dir, loc.Gradient)
	floats.SubTo(l.y, loc.Gradient, l.grad)
	floats.SubTo(l.s, loc.X, l.x)
	copy(l.sHist[l.oldest], l.s)
	copy(l.yHist[l.oldest], l.y)
	sDotY := floats.Dot(l.y, l.s)
	l.rhoHist[l.oldest] = 1 / sDotY

	l.oldest = (l.oldest + 1) % l.Store
	copy(l.x, loc.X)
	copy(l.grad, loc.Gradient)

	// Two-loop update. The first loop starts with the most recent element
	// and goes backward; the second starts with the oldest element and goes
	// forward. At the end dir contains H^{-1} * g, so flip the direction for
	// minimization.
	for i := 0; i < l.Store; i++ {
		idx := l.oldest - i - 1
		if idx < 0 {
			idx += l.Store
		}
		l.a[idx] = l.rhoHist[idx] * floats.Dot(l.sHist[idx], dir)
		floats.AddScaled(dir, -l.a[idx], l.yHist[idx])
	}

	// Scale the initial Hessian.
	gamma := sDotY / floats.Dot(l.y, l.y)
	floats.Scale(gamma, dir)

	for i := 0; i < l.Store; i++ {
		idx := i + l.oldest
		if idx >= l.Store {
			idx -= l.Store
		}
		beta := l.rhoHist[idx] * floats.Dot(l.yHist[idx], dir)
		floats.AddScaled(dir, l.a[idx]-beta, l.sHist[idx])
	}
	floats.Scale(-1, dir)

	return 1
}
Example #4
func (rk *EmbeddedRungeKutta) Step(from *State, toTime float64, y, yDot []float64) error {
	h := toTime - from.Time

	for i := range rk.f {
		if i == 0 {
			copy(rk.f[0], from.YDot)
			continue
		}
		copy(rk.work, from.Y)
		for j, a := range rk.A[i-1] {
			if a == 0 {
				continue
			}
			floats.AddScaled(rk.work, h*a, rk.f[j])
		}
		rk.rhs(rk.f[i], from.Time+rk.C[i-1]*h, rk.work)
	}

	// FSAL (first-same-as-last): the final stage was evaluated at the solution,
	// so rk.work already holds y and rk.f[len(rk.f)-1] holds yDot.
	if rk.FSAL {
		copy(y, rk.work)
		copy(yDot, rk.f[len(rk.f)-1])
	} else {
		copy(y, from.Y)
		for i, b := range rk.B {
			if b == 0 {
				continue
			}
			floats.AddScaled(y, b*h, rk.f[i])
		}
		rk.rhs(yDot, toTime, y)
	}

	// Error estimate: e = h*Σ_i E_i*f[i].
	for i := range rk.e {
		rk.e[i] = 0
	}
	for i, e := range rk.E {
		if e != 0 {
			floats.AddScaled(rk.e, h*e, rk.f[i])
		}
	}

	return nil
}
Example #5
// replaceWorst removes the worst location in the simplex and adds the new
// {x, f} pair, maintaining the sorted order of the vertices.
func (n *NelderMead) replaceWorst(x []float64, f float64) {
	dim := len(x)
	if f >= n.values[dim] {
		panic("increase in simplex value")
	}
	copy(n.vertices[dim], x)
	n.values[dim] = f

	// Move the newly added value into its sorted position.
	for i := dim - 1; i >= 0; i-- {
		if n.values[i] < f {
			break
		}
		n.vertices[i], n.vertices[i+1] = n.vertices[i+1], n.vertices[i]
		n.values[i], n.values[i+1] = n.values[i+1], n.values[i]
	}

	// Update the location of the centroid. Only one point has been replaced, so
	// subtract the worst point and add the new one.
	floats.AddScaled(n.centroid, -1/float64(dim), n.vertices[dim])
	floats.AddScaled(n.centroid, 1/float64(dim), x)
}
Example #6
// isLeftEigenvectorOf returns whether the vector yRe+i*yIm, where i is the
// imaginary unit, is the left eigenvector of A corresponding to the eigenvalue
// lambda.
//
// A left eigenvector corresponding to a complex eigenvalue λ is a complex
// non-zero vector y such that
//  y^H A = λ y^H,
// which is equivalent for real A to
//  A^T y = conj(λ) y.
func isLeftEigenvectorOf(a blas64.General, yRe, yIm []float64, lambda complex128, tol float64) bool {
	if a.Rows != a.Cols {
		panic("matrix not square")
	}

	if imag(lambda) != 0 && yIm == nil {
		// Complex eigenvalue of a real matrix cannot have a real
		// eigenvector.
		return false
	}

	n := a.Rows

	// Compute A^T real(y) and store the result into yReAns.
	yReAns := make([]float64, n)
	blas64.Gemv(blas.Trans, 1, a, blas64.Vector{1, yRe}, 0, blas64.Vector{1, yReAns})

	if imag(lambda) == 0 && yIm == nil {
		// Real eigenvalue and eigenvector.

		// Compute λy and store the result into lambday.
		lambday := make([]float64, n)
		floats.AddScaled(lambday, real(lambda), yRe)

		if floats.Distance(yReAns, lambday, math.Inf(1)) > tol {
			return false
		}
		return true
	}

	// Complex eigenvector, and real or complex eigenvalue.

	// Compute A^T imag(y) and store the result into yImAns.
	yImAns := make([]float64, n)
	blas64.Gemv(blas.Trans, 1, a, blas64.Vector{1, yIm}, 0, blas64.Vector{1, yImAns})

	// Compute conj(λ)y and store the result into lambday.
	lambda = cmplx.Conj(lambda)
	lambday := make([]complex128, n)
	for i := range lambday {
		lambday[i] = lambda * complex(yRe[i], yIm[i])
	}

	for i, v := range lambday {
		ay := complex(yReAns[i], yImAns[i])
		if cmplx.Abs(v-ay) > tol {
			return false
		}
	}
	return true
}
Example #7
// isRightEigenvectorOf returns whether the vector xRe+i*xIm, where i is the
// imaginary unit, is the right eigenvector of A corresponding to the eigenvalue
// lambda.
//
// A right eigenvector corresponding to a complex eigenvalue λ is a complex
// non-zero vector x such that
//  A x = λ x.
func isRightEigenvectorOf(a blas64.General, xRe, xIm []float64, lambda complex128, tol float64) bool {
	if a.Rows != a.Cols {
		panic("matrix not square")
	}

	if imag(lambda) != 0 && xIm == nil {
		// Complex eigenvalue of a real matrix cannot have a real
		// eigenvector.
		return false
	}

	n := a.Rows

	// Compute A real(x) and store the result into xReAns.
	xReAns := make([]float64, n)
	blas64.Gemv(blas.NoTrans, 1, a, blas64.Vector{1, xRe}, 0, blas64.Vector{1, xReAns})

	if imag(lambda) == 0 && xIm == nil {
		// Real eigenvalue and eigenvector.

		// Compute λx and store the result into lambdax.
		lambdax := make([]float64, n)
		floats.AddScaled(lambdax, real(lambda), xRe)

		if floats.Distance(xReAns, lambdax, math.Inf(1)) > tol {
			return false
		}
		return true
	}

	// Complex eigenvector, and real or complex eigenvalue.

	// Compute A imag(x) and store the result into xImAns.
	xImAns := make([]float64, n)
	blas64.Gemv(blas.NoTrans, 1, a, blas64.Vector{1, xIm}, 0, blas64.Vector{1, xImAns})

	// Compute λx and store the result into lambdax.
	lambdax := make([]complex128, n)
	for i := range lambdax {
		lambdax[i] = lambda * complex(xRe[i], xIm[i])
	}

	for i, v := range lambdax {
		ax := complex(xReAns[i], xImAns[i])
		if cmplx.Abs(v-ax) > tol {
			return false
		}
	}
	return true
}
Example #8
func (cg *CG) Iterate(ctx *Context) Operation {
	switch cg.resume {
	case 1:
		cg.resume = 2
		return SolvePreconditioner
		// Solve M z = r_{i-1}
	case 2:
		// ρ_i = r_{i-1} · z
		cg.rho = floats.Dot(ctx.Residual, ctx.Z)
		if !cg.first {
			// β = ρ_i / ρ_{i-1}
			beta := cg.rho / cg.rho1
			// z = z + β p_{i-1}
			floats.AddScaled(ctx.Z, beta, ctx.P)
		}
		cg.first = false
		// p_i = z
		copy(ctx.P, ctx.Z)

		cg.resume = 3
		return ComputeAp
		// Compute Ap
	case 3:
		// α = ρ_i / (p_i · Ap_i)
		alpha := cg.rho / floats.Dot(ctx.P, ctx.Ap)
		// x_i = x_{i-1} + α p_i
		floats.AddScaled(ctx.X, alpha, ctx.P)
		// r_i = r_{i-1} - α Ap_i
		floats.AddScaled(ctx.Residual, -alpha, ctx.Ap)

		cg.rho1 = cg.rho

		cg.resume = 1
		return CheckConvergence
	}
	panic("unreachable")
}
Example #9
// Scale computes the product of an image with a scalar.
// It does not modify the receiver.
func (f *Multi) Scale(alpha float64) *Multi {
	dst := NewMulti(f.Width, f.Height, f.Channels)
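	// dst is freshly allocated, so its Elems are presumably all zero and
	// AddScaled stores alpha*f.Elems into dst.Elems, leaving f untouched.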
	floats.AddScaled(dst.Elems, alpha, f.Elems)
	return dst
}
Example #10
// Midpoint computes a fourth-order estimate of the solution at the midpoint of
// the last step and stores it in y. The result, together with the values and
// derivatives at both ends of the step, can be used for local quartic
// interpolation of the solution over the step.
// TODO(vladimir-ch): WIP.
func (rk *DOPRI45) Midpoint(y []float64) {
	copy(y, rk.yStart)
	for i, d := range rk.D {
		floats.AddScaled(y, 0.5*rk.hLast*d, rk.f[i])
	}
}
Example #11
// Scale computes the product of an image with a scalar.
// It does not modify the receiver.
func (f *Image) Scale(alpha float64) *Image {
	dst := New(f.Width, f.Height)
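	// dst is freshly allocated, so its Elems are presumably all zero and
	// AddScaled stores alpha*f.Elems into dst.Elems, leaving f untouched.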
	floats.AddScaled(dst.Elems, alpha, f.Elems)
	return dst
}
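
All of the examples above rely on the same primitive: floats.AddScaled(dst, alpha, s) adds alpha*s[i] to dst[i] element-wise and panics if the two slices differ in length. Below is a minimal self-contained sketch, assuming the current gonum.org/v1/gonum/floats import path (some of the snippets above come from older package layouts):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/floats"
)

func main() {
	// y += 0.5 * x, element-wise.
	y := []float64{1, 2, 3}
	x := []float64{10, 20, 30}
	floats.AddScaled(y, 0.5, x)
	fmt.Println(y) // [6 12 18]
}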