func TestMinimalSurface(t *testing.T) {
	sizes := [][2]int{
		{20, 30},
		{30, 30},
		{50, 40},
	}
	for _, sz := range sizes {
		nx, ny := sz[0], sz[1]
		f := NewMinimalSurface(nx, ny)
		x0 := f.InitX()

		// Test that the numerical and analytical gradients agree at the
		// initial point, using a central finite-difference approximation.
		grad := make([]float64, len(x0))
		f.Grad(grad, x0)
		fdGrad := fd.Gradient(nil, f.Func, x0, &fd.Settings{Formula: fd.Central})
		if dist := floats.Distance(grad, fdGrad, math.Inf(1)); dist > 1e-9 {
			t.Errorf("grid %v x %v: numerical and analytical gradient do not match. |fdGrad - grad|_∞ = %v",
				nx, ny, dist)
		}

		// Test that the gradient at the minimum is small enough.
		// In some sense this test is not completely correct because ExactX
		// returns the exact solution to the continuous problem projected on the
		// grid, not the exact solution to the discrete problem which we are
		// solving. This is the reason why a relatively loose tolerance 1e-4
		// must be used.
		f.Grad(grad, f.ExactX())
		if norm := floats.Norm(grad, math.Inf(1)); norm > 1e-4 {
			t.Errorf("grid %v x %v: gradient at the minimum not small enough. |grad|_∞ = %v",
				nx, ny, norm)
		}
	}
}
Example #2
0
// isLeftEigenvectorOf returns whether the vector yRe+i*yIm, where i is the
// imaginary unit, is the left eigenvector of A corresponding to the eigenvalue
// lambda.
//
// A left eigenvector corresponding to a complex eigenvalue λ is a complex
// non-zero vector y such that
//  y^H A = λ y^H,
// which is equivalent for real A to
//  A^T y = conj(λ) y,
func isLeftEigenvectorOf(a blas64.General, yRe, yIm []float64, lambda complex128, tol float64) bool {
	if a.Rows != a.Cols {
		panic("matrix not square")
	}

	// A real matrix cannot have a real eigenvector for a genuinely complex
	// eigenvalue.
	if yIm == nil && imag(lambda) != 0 {
		return false
	}

	n := a.Rows

	// atyRe holds A^T real(y).
	atyRe := make([]float64, n)
	blas64.Gemv(blas.Trans, 1, a, blas64.Vector{1, yRe}, 0, blas64.Vector{1, atyRe})

	if yIm == nil {
		// Real eigenvector; the eigenvalue is necessarily real here due to
		// the early return above. Check A^T y ≈ λ y.
		want := make([]float64, n)
		floats.AddScaled(want, real(lambda), yRe)
		if floats.Distance(atyRe, want, math.Inf(1)) > tol {
			return false
		}
		return true
	}

	// Complex eigenvector; the eigenvalue may be real or complex.

	// atyIm holds A^T imag(y).
	atyIm := make([]float64, n)
	blas64.Gemv(blas.Trans, 1, a, blas64.Vector{1, yIm}, 0, blas64.Vector{1, atyIm})

	// Check element-wise that A^T y ≈ conj(λ) y.
	conjLambda := cmplx.Conj(lambda)
	for i := 0; i < n; i++ {
		want := conjLambda * complex(yRe[i], yIm[i])
		got := complex(atyRe[i], atyIm[i])
		if cmplx.Abs(want-got) > tol {
			return false
		}
	}
	return true
}
Example #3
0
// isRightEigenvectorOf returns whether the vector xRe+i*xIm, where i is the
// imaginary unit, is the right eigenvector of A corresponding to the eigenvalue
// lambda.
//
// A right eigenvector corresponding to a complex eigenvalue λ is a complex
// non-zero vector x such that
//  A x = λ x.
func isRightEigenvectorOf(a blas64.General, xRe, xIm []float64, lambda complex128, tol float64) bool {
	if a.Rows != a.Cols {
		panic("matrix not square")
	}

	// A real matrix cannot have a real eigenvector for a genuinely complex
	// eigenvalue.
	if xIm == nil && imag(lambda) != 0 {
		return false
	}

	n := a.Rows

	// axRe holds A real(x).
	axRe := make([]float64, n)
	blas64.Gemv(blas.NoTrans, 1, a, blas64.Vector{1, xRe}, 0, blas64.Vector{1, axRe})

	if xIm == nil {
		// Real eigenvector; the eigenvalue is necessarily real here due to
		// the early return above. Check A x ≈ λ x.
		want := make([]float64, n)
		floats.AddScaled(want, real(lambda), xRe)
		if floats.Distance(axRe, want, math.Inf(1)) > tol {
			return false
		}
		return true
	}

	// Complex eigenvector; the eigenvalue may be real or complex.

	// axIm holds A imag(x).
	axIm := make([]float64, n)
	blas64.Gemv(blas.NoTrans, 1, a, blas64.Vector{1, xIm}, 0, blas64.Vector{1, axIm})

	// Check element-wise that A x ≈ λ x.
	for i := 0; i < n; i++ {
		want := lambda * complex(xRe[i], xIm[i])
		got := complex(axRe[i], axIm[i])
		if cmplx.Abs(want-got) > tol {
			return false
		}
	}
	return true
}
Example #4
0
// testStepper integrates every problem in ivpTests with the given stepper and
// compares the result with the reference solution. Adaptive (error-controlled)
// integration is exercised only when the stepper implements ErrorStepper.
func testStepper(stepper Stepper, t *testing.T) {
	errorStepper, isErrorStepper := stepper.(ErrorStepper)

	for _, test := range ivpTests {
		s := DefaultSettings()

		// Tolerance for comparing the computed solution with the reference.
		tol := 1e-5
		if test.tol != 0 {
			tol = test.tol
		}

		// Reference solution at the final time: prefer the analytic solution
		// when the test provides one, otherwise use the stored end value.
		var yEnd []float64
		if test.exact != nil {
			yEnd = make([]float64, len(test.p.InitialValue))
			test.exact(test.p.To, yEnd)
		} else {
			yEnd = test.yEnd
		}

		if isErrorStepper {
			if test.atol != 0 {
				s.Absolute = test.atol
			}
			if test.rtol != 0 {
				s.Relative = test.rtol
			}

			r, err := Integrate(test.p, s, errorStepper)
			if err != nil {
				t.Errorf("adaptive integration of test '%v' failed (%v)\n", test.name, err)
				// r may be nil after a failed integration; skip the
				// remaining checks to avoid a nil dereference.
				continue
			}

			if r.Time != test.p.To {
				t.Errorf("adaptive integration of '%v' did not integrate to time To\n", test.name)
			}

			d := floats.Distance(r.Y, yEnd, math.Inf(1))
			if d > tol {
				t.Errorf("adaptive solution of '%v' too far from the reference at t = %v: dist = %v", test.name, r.Time, d)
			}
		}
	}
}
Example #5
0
// Train initializes the k-means clustering of the input samples: it seeds the
// centroids from randomly chosen data points and computes the distance from
// every sample to every centroid.
//
// NOTE(review): the computed distances matrix is never used or stored, and no
// assignment/update iterations follow — this looks like an incomplete Lloyd's
// algorithm implementation; confirm against the intended design.
// NOTE(review): perm[i] panics if k.Clusters > nSamples — presumably callers
// guarantee Clusters <= nSamples; verify.
func (k *KMeans) Train(inputs *common.RowMatrix) {
	nSamples, inputDim := inputs.Dims()
	centroids := make([][]float64, k.Clusters)

	// Assign the centroids to random data point to start
	perm := rand.Perm(nSamples)
	for i := 0; i < k.Clusters; i++ {
		data := make([]float64, inputDim)
		idx := perm[i]
		inputs.Row(data, idx)
		centroids[i] = data
	}

	// Distance from each sample (row) to each centroid (column).
	distances := mat64.NewDense(nSamples, k.Clusters, nil)
	row := make([]float64, inputDim)
	for i := 0; i < nSamples; i++ {
		inputs.Row(row, i)
		for j := 0; j < k.Clusters; j++ {
			d := floats.Distance(row, centroids[j])
			distances.Set(i, j, d)
		}
	}

}
Example #6
0
// testFunction checks that the function can evaluate itself (and its gradient)
// correctly.
func testFunction(f function, ftests []funcTest, t *testing.T) {
	// Work on a copy because known minima may be appended below.
	tests := append([]funcTest(nil), ftests...)

	// Discover the optional capabilities of the function.
	fMinima, isMinimumer := f.(minimumer)
	fGradient, isGradient := f.(gradient)

	// If the function is a Minimumer, test its known minima as well.
	if isMinimumer {
		for _, m := range fMinima.Minima() {
			// Allocate the expected gradient only if the function can
			// evaluate it.
			var grad []float64
			if isGradient {
				grad = make([]float64, len(m.X))
			}
			tests = append(tests, funcTest{
				X:        m.X,
				F:        m.F,
				Gradient: grad,
			})
		}
	}

	for i, test := range tests {
		got := f.Func(test.X)

		// Check that the function value is as expected.
		if math.Abs(got-test.F) > defaultTol {
			t.Errorf("Test #%d: function value given by Func is incorrect. Want: %v, Got: %v",
				i, test.F, got)
		}

		if test.Gradient == nil {
			continue
		}

		// Compare a finite-difference approximation with the expected
		// gradient.
		fdGrad := fd.Gradient(nil, f.Func, test.X, nil)
		if !floats.EqualApprox(fdGrad, test.Gradient, defaultFDGradTol) {
			dist := floats.Distance(fdGrad, test.Gradient, math.Inf(1))
			t.Errorf("Test #%d: numerical and expected gradients do not match. |fdGrad - WantGrad|_∞ = %v",
				i, dist)
		}

		// If the function is a Gradient, check that it computes the gradient
		// correctly.
		if isGradient {
			grad := make([]float64, len(test.Gradient))
			fGradient.Grad(grad, test.X)

			if !floats.EqualApprox(grad, test.Gradient, defaultGradTol) {
				dist := floats.Distance(grad, test.Gradient, math.Inf(1))
				t.Errorf("Test #%d: gradient given by Grad is incorrect. |grad - WantGrad|_∞ = %v",
					i, dist)
			}
		}
	}
}
Example #7
0
// Dlaqr1Test checks that Dlaqr1 computes, up to an unknown common scale, the
// first column of (H - s1*I)*(H - s2*I) for an n×n matrix H with n ∈ {2, 3},
// where s1 = sr1+i*si1 and s2 = sr2+i*si2 are either a complex-conjugate pair
// (case 1) or two real shifts (case 2).
func Dlaqr1Test(t *testing.T, impl Dlaqr1er) {
	rnd := rand.New(rand.NewSource(1))

	for _, n := range []int{2, 3} {
		for _, ldh := range []int{n, n + 1, n + 10} {
			for _, cas := range []int{1, 2} {
				for k := 0; k < 100; k++ {
					// Prefill v and h with NaN so that any element not
					// written by the routine is detected by the comparison.
					v := make([]float64, n)
					for i := range v {
						v[i] = math.NaN()
					}
					// NOTE(review): n*(n-1)*ldh over-allocates; the accessed
					// indices only need (n-1)*ldh+n elements — harmless.
					h := make([]float64, n*(n-1)*ldh)
					for i := range h {
						h[i] = math.NaN()
					}
					// Fill the leading n×n block of H with random data.
					for i := 0; i < n; i++ {
						for j := 0; j < n; j++ {
							h[i*ldh+j] = rnd.NormFloat64()
						}
					}
					// Generate the shifts: a conjugate pair in case 1, two
					// independent real shifts in case 2.
					var sr1, sr2, si1, si2 float64
					if cas == 1 {
						sr1 = rnd.NormFloat64()
						sr2 = sr1
						si1 = rnd.NormFloat64()
						si2 = -si1
					} else {
						sr1 = rnd.NormFloat64()
						sr2 = rnd.NormFloat64()
						si1 = 0
						si2 = 0
					}
					impl.Dlaqr1(n, h, ldh, sr1, si1, sr2, si2, v)

					// Matrix H - s1*I.
					h1 := make([]complex128, n*n)
					for i := 0; i < n; i++ {
						for j := 0; j < n; j++ {
							h1[i*n+j] = complex(h[i*ldh+j], 0)
							if i == j {
								h1[i*n+j] -= complex(sr1, si1)
							}
						}
					}
					// First column of H - s2*I.
					h2 := make([]complex128, n)
					for i := 0; i < n; i++ {
						h2[i] = complex(h[i*ldh], 0)
					}
					h2[0] -= complex(sr2, si2)

					wantv := make([]float64, n)
					// Multiply (H-s1*I)*(H-s2*I) to get a tentative
					// wantv.
					for i := 0; i < n; i++ {
						for j := 0; j < n; j++ {
							wantv[i] += real(h1[i*n+j] * h2[j])
						}
					}
					// Get the unknown scale.
					scale := v[0] / wantv[0]
					// Compute the actual wantv.
					floats.Scale(scale, wantv)

					// The scale must be the same for all elements.
					if floats.Distance(wantv, v, math.Inf(1)) > 1e-13 {
						t.Errorf("n = %v, ldh = %v, case = %v: Unexpected value of v: got %v, want %v", n, ldh, cas, v, wantv)
					}
				}
			}
		}
	}
}