Example #1
func TestMinimalSurface(t *testing.T) {
	for _, size := range [][2]int{
		{20, 30},
		{30, 30},
		{50, 40},
	} {
		f := NewMinimalSurface(size[0], size[1])
		x0 := f.InitX()
		grad := make([]float64, len(x0))
		f.Grad(grad, x0)
		fdGrad := fd.Gradient(nil, f.Func, x0, &fd.Settings{Formula: fd.Central})

		// Test that the numerical and analytical gradients agree.
		dist := floats.Distance(grad, fdGrad, math.Inf(1))
		if dist > 1e-9 {
			t.Errorf("grid %v x %v: numerical and analytical gradient do not match. |fdGrad - grad|_∞ = %v",
				size[0], size[1], dist)
		}

		// Test that the gradient at the minimum is small enough.
		// Strictly speaking this test is not quite right: ExactX returns the
		// exact solution of the continuous problem projected onto the grid,
		// not the exact solution of the discrete problem actually being
		// solved. That is why a relatively loose tolerance of 1e-4 must be
		// used.
		xSol := f.ExactX()
		f.Grad(grad, xSol)
		norm := floats.Norm(grad, math.Inf(1))
		if norm > 1e-4 {
			t.Errorf("grid %v x %v: gradient at the minimum not small enough. |grad|_∞ = %v",
				size[0], size[1], norm)
		}
	}
}
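
The core pattern in Example #1, comparing an analytic gradient against fd.Gradient with a central-difference formula, works for any objective. Below is a minimal, self-contained sketch of the same check, assuming only gonum's fd and floats packages and a hypothetical quadratic objective (not part of the original test suite):

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/diff/fd"
	"gonum.org/v1/gonum/floats"
)

func main() {
	// Hypothetical objective f(x) = sum(x_i^2) with analytic gradient 2x.
	f := func(x []float64) float64 {
		var s float64
		for _, v := range x {
			s += v * v
		}
		return s
	}
	x := []float64{1, -2, 3}

	// Analytic gradient.
	grad := make([]float64, len(x))
	for i, v := range x {
		grad[i] = 2 * v
	}

	// Numerical gradient via central differences, as in the test above.
	fdGrad := fd.Gradient(nil, f, x, &fd.Settings{Formula: fd.Central})

	// Infinity-norm distance between the two; should be close to zero.
	fmt.Println(floats.Distance(grad, fdGrad, math.Inf(1)))
}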
Example #2
func testDerivParam(t *testing.T, d derivParamTester) {
	// Tests that the analytic derivative matches a finite-difference
	// estimate at a number of quantiles along the distribution.
	nTest := 10
	quantiles := make([]float64, nTest)
	floats.Span(quantiles, 0.1, 0.9)

	deriv := make([]float64, d.NumParameters())
	fdDeriv := make([]float64, d.NumParameters())

	initParams := d.parameters(nil)
	init := make([]float64, d.NumParameters())
	for i, v := range initParams {
		init[i] = v.Value
	}
	for _, q := range quantiles {
		d.setParameters(initParams)
		x := d.Quantile(q)
		d.DLogProbDParam(x, deriv)
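		// f maps a parameter vector to the log-probability at the fixed
		// point x, so fd.Gradient below differentiates with respect to the
		// parameters rather than x.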
		f := func(p []float64) float64 {
			params := d.parameters(nil)
			for i, v := range p {
				params[i].Value = v
			}
			d.setParameters(params)
			return d.LogProb(x)
		}
		fd.Gradient(fdDeriv, f, init, nil)
		if !floats.EqualApprox(deriv, fdDeriv, 1e-6) {
			t.Fatalf("Derivative mismatch. Want %v, got %v.", fdDeriv, deriv)
		}
	}
}
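
Example #2 depends on the distuv test harness (the derivParamTester interface), but the essential trick, closing over the parameter vector so that fd.Gradient differentiates the log-probability with respect to the parameters while the evaluation point x stays fixed, applies to any parametric function. Here is a minimal sketch of that trick, using a hand-written Gaussian log-density (an assumption for illustration, not the harness's API):

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/diff/fd"
)

func main() {
	x := 0.7 // fixed evaluation point

	// Log-density of N(mu, sigma) as a function of its parameters p = (mu, sigma).
	logProb := func(p []float64) float64 {
		mu, sigma := p[0], p[1]
		z := (x - mu) / sigma
		return -0.5*z*z - math.Log(sigma) - 0.5*math.Log(2*math.Pi)
	}

	// Analytic derivatives: d/dmu = z/sigma, d/dsigma = (z^2-1)/sigma.
	params := []float64{0.1, 1.3}
	z := (x - params[0]) / params[1]
	analytic := []float64{z / params[1], (z*z - 1) / params[1]}

	// nil Settings selects the package defaults.
	fdDeriv := fd.Gradient(nil, logProb, params, nil)
	fmt.Println("fd:      ", fdDeriv)
	fmt.Println("analytic:", analytic)
}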
Example #3
// testFunction checks that the function can evaluate itself (and its gradient)
// correctly.
func testFunction(f function, ftests []funcTest, t *testing.T) {
	// Make a copy of tests because we may append to the slice.
	tests := make([]funcTest, len(ftests))
	copy(tests, ftests)

	// Get information about the function.
	fMinima, isMinimumer := f.(minimumer)
	fGradient, isGradient := f.(gradient)

	// If the function is a Minimumer, append its minima to the tests.
	if isMinimumer {
		for _, minimum := range fMinima.Minima() {
			// Allocate gradient only if the function can evaluate it.
			var grad []float64
			if isGradient {
				grad = make([]float64, len(minimum.X))
			}
			tests = append(tests, funcTest{
				X:        minimum.X,
				F:        minimum.F,
				Gradient: grad,
			})
		}
	}

	for i, test := range tests {
		F := f.Func(test.X)

		// Check that the function value is as expected.
		if math.Abs(F-test.F) > defaultTol {
			t.Errorf("Test #%d: function value given by Func is incorrect. Want: %v, Got: %v",
				i, test.F, F)
		}

		if test.Gradient == nil {
			continue
		}

		// Evaluate the finite difference gradient.
		fdGrad := fd.Gradient(nil, f.Func, test.X, nil)

		// Check that the finite difference and expected gradients match.
		if !floats.EqualApprox(fdGrad, test.Gradient, defaultFDGradTol) {
			dist := floats.Distance(fdGrad, test.Gradient, math.Inf(1))
			t.Errorf("Test #%d: numerical and expected gradients do not match. |fdGrad - WantGrad|_∞ = %v",
				i, dist)
		}

		// If the function is a Gradient, check that it computes the gradient correctly.
		if isGradient {
			grad := make([]float64, len(test.Gradient))
			fGradient.Grad(grad, test.X)

			if !floats.EqualApprox(grad, test.Gradient, defaultGradTol) {
				dist := floats.Distance(grad, test.Gradient, math.Inf(1))
				t.Errorf("Test #%d: gradient given by Grad is incorrect. |grad - WantGrad|_∞ = %v",
					i, dist)
			}
		}
	}
}
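
Example #3's type assertions (f.(minimumer), f.(gradient)) probe for optional capabilities: every test function must implement Func, while Minima and Grad are opt-in. A stripped-down sketch of that pattern follows, with interface names mirroring the test above but otherwise assumed for illustration:

package main

import "fmt"

// function is the capability every test target must provide.
type function interface {
	Func(x []float64) float64
}

// gradient is optional: targets implementing it also get their analytic
// gradient checked.
type gradient interface {
	Grad(grad, x []float64)
}

// sphere implements both: f(x) = sum(x_i^2), grad f(x) = 2x.
type sphere struct{}

func (sphere) Func(x []float64) float64 {
	var s float64
	for _, v := range x {
		s += v * v
	}
	return s
}

func (sphere) Grad(grad, x []float64) {
	for i, v := range x {
		grad[i] = 2 * v
	}
}

func main() {
	var f function = sphere{}
	if g, ok := f.(gradient); ok { // probe the optional capability
		grad := make([]float64, 2)
		g.Grad(grad, []float64{1, 2})
		fmt.Println("analytic gradient:", grad)
	}
}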