Beispiel #1
1
// TestUpdate verifies that a single Update step with a fixed learning
// configuration (rate 0.5, one epoch, batch size 1, no decay) applies the
// expected gradient-descent weight changes to both layers of the simple
// network built by CreateSimpleNetwork.
func TestUpdate(t *testing.T) {
	neuralNetwork := CreateSimpleNetwork(t)
	inputs := mat64.NewDense(1, 2, []float64{0.05, 0.10})
	neuralNetwork.Forward(inputs)
	values := mat64.NewDense(1, 2, []float64{0.01, 0.99})
	neuralNetwork.Backward(values)
	learningConfiguration := neural.LearningConfiguration{
		Epochs:    proto.Int32(1),
		Rate:      proto.Float64(0.5),
		Decay:     proto.Float64(0),
		BatchSize: proto.Int32(1),
	}
	neuralNetwork.Update(learningConfiguration)
	// Reference weights for layer 0 after one update step.
	expectedWeights0 := mat64.NewDense(
		3, 2, []float64{0.149780716, 0.24975114, 0.19956143, 0.29950229, 0.35,
			0.35})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[0].Weight, expectedWeights0, 0.0001) {
		t.Errorf("weights 0 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[0].Weight))
	}
	// Reference weights for layer 1 after one update step.
	expectedWeights1 := mat64.NewDense(
		3, 2, []float64{0.35891648, 0.51130127, 0.408666186, 0.561370121, 0.6,
			0.6})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[1].Weight, expectedWeights1, 0.0001) {
		t.Errorf("weights 1 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[1].Weight))
	}
}
Beispiel #2
0
// TestCovarianceMatrix checks that Normal.CovarianceMatrix reproduces the
// covariance the distribution was constructed with, both when the method
// allocates the result itself and when the caller supplies the destination.
func TestCovarianceMatrix(t *testing.T) {
	cases := []struct {
		mu    []float64
		sigma *mat64.SymDense
	}{
		{
			mu:    []float64{2, 3, 4},
			sigma: mat64.NewSymDense(3, []float64{1, 0.5, 3, 0.5, 8, -1, 3, -1, 15}),
		},
	}
	for _, c := range cases {
		normal, ok := NewNormal(c.mu, c.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, covariance matrix not positive definite")
		}
		// nil destination: the method allocates the returned matrix.
		got := normal.CovarianceMatrix(nil)
		if !mat64.EqualApprox(got, c.sigma, 1e-14) {
			t.Errorf("Covariance mismatch with nil input")
		}
		// Caller-supplied destination of the correct dimension.
		n := c.sigma.Symmetric()
		dst := mat64.NewSymDense(n, nil)
		normal.CovarianceMatrix(dst)
		if !mat64.EqualApprox(dst, c.sigma, 1e-14) {
			t.Errorf("Covariance mismatch with supplied input")
		}
	}
}
Beispiel #3
0
// TestNormRand draws a large number of samples from several bivariate
// normal distributions and checks that the empirical mean and covariance
// match the distribution parameters to within sampling tolerance.
func TestNormRand(t *testing.T) {
	for _, test := range []struct {
		mean []float64
		cov  []float64
	}{
		{
			mean: []float64{0, 0},
			cov: []float64{
				1, 0,
				0, 1,
			},
		},
		{
			mean: []float64{0, 0},
			cov: []float64{
				1, 0.9,
				0.9, 1,
			},
		},
		{
			mean: []float64{6, 7},
			cov: []float64{
				5, 0.9,
				0.9, 2,
			},
		},
	} {
		dim := len(test.mean)
		cov := mat64.NewSymDense(dim, test.cov)
		n, ok := NewNormal(test.mean, cov, nil)
		if !ok {
			// Fatalf rather than Errorf: continuing would call n.Rand on an
			// invalid distribution and panic instead of reporting cleanly.
			t.Fatalf("bad covariance matrix")
		}

		nSamples := 1000000
		samps := mat64.NewDense(nSamples, dim, nil)
		for i := 0; i < nSamples; i++ {
			n.Rand(samps.RawRowView(i))
		}
		// Column-wise empirical means.
		estMean := make([]float64, dim)
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
		}
		if !floats.EqualApprox(estMean, test.mean, 1e-2) {
			t.Errorf("Mean mismatch: want: %v, got %v", test.mean, estMean)
		}
		estCov := stat.CovarianceMatrix(nil, samps, nil)
		if !mat64.EqualApprox(estCov, cov, 1e-2) {
			t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
		}
	}
}
Beispiel #4
0
// TestLinearRegression fits ordinary least squares on two small systems and
// checks both the fitted coefficient vector and a prediction on a further
// input row. The test is currently skipped.
func TestLinearRegression(t *testing.T) {
	t.Skip("Skipping for now")
	cases := []struct {
		x          *mat64.Dense
		y          *mat64.Vector
		result     *mat64.Vector
		test       *mat64.Vector
		testResult float64
	}{
		{
			mat64.NewDense(3, 4, []float64{
				1, 3, 5, 6,
				1, 1, 2, 3,
				1, 9, 4, 2}),
			mat64.NewVector(3, []float64{1, 6, 4}),
			mat64.NewVector(4, []float64{8.0918, 0.8920, -3.7990, 1.5379}),
			mat64.NewVector(4, []float64{1, 1, 2, 3}),
			6.0,
		}, {
			mat64.NewDense(10, 4, []float64{
				1, 2, 3, 4,
				1, 3, 4, 5,
				1, 4, 5, 6,
				1, 5, 6, 7,
				1, 6, 7, 8,
				1, 7, 8, 9,
				1, 8, 9, 10,
				1, 9, 10, 11,
				1, 10, 11, 12,
				1, 11, 12, 13}),
			mat64.NewVector(10, []float64{20, 26, 32, 38, 44, 50, 56, 62, 68, 74}),
			mat64.NewVector(4, []float64{0, 1, 2, 3}),
			mat64.NewVector(4, []float64{1, 10, 11, 12}),
			68.0,
		},
	}
	for _, tc := range cases {
		lr := NewLinearRegression(tc.x, tc.y)
		lr.Fit()
		if !mat64.EqualApprox(tc.result, lr.Theta, 0.0001) {
			t.Errorf("LinearRegressions's return theta is expected to be equal to %v, found %v", tc.result, lr.Theta)
		}
		// Prediction on the held-out row must match to the same tolerance.
		if got := lr.Predict(tc.test); math.Abs(tc.testResult-got) > 0.0001 {
			t.Errorf("LinearRegression predict values are expected to be equal to %f, found %f", tc.testResult, got)
		}
	}
}
Beispiel #5
0
// TestBackward runs one forward/backward pass through the simple network
// and checks the backpropagated deltas of both layers against reference
// values.
func TestBackward(t *testing.T) {
	neuralNetwork := CreateSimpleNetwork(t)
	inputs := mat64.NewDense(1, 2, []float64{0.05, 0.10})
	neuralNetwork.Forward(inputs)
	values := mat64.NewDense(1, 2, []float64{0.01, 0.99})
	neuralNetwork.Backward(values)
	// Reference deltas for the output layer.
	expectedGradient1 := mat64.NewDense(2, 1, []float64{0.13849856, -0.03809824})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[1].Deltas, expectedGradient1, 0.0001) {
		t.Errorf("gradient 1 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[1].Deltas))
	}
	// TODO(ariw): Fill in the other value of layer 0's gradient when known.
	if !equalsApprox(0.00877136, neuralNetwork.Layers[0].Deltas.At(0, 0),
		0.0001) {
		t.Errorf("gradient 0 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[0].Deltas))
	}
}
Beispiel #6
0
// TestGradientDescent runs batch gradient descent on two small linear
// systems and checks the fitted parameter vector against known solutions.
func TestGradientDescent(t *testing.T) {
	const (
		alpha     = 0.01
		maxIters  = 15000
		tolerance = 0.0001
	)
	cases := []struct {
		x      *mat64.Dense
		y      *mat64.Vector
		result *mat64.Vector
	}{
		{
			mat64.NewDense(3, 4, []float64{
				1, 3, 5, 6,
				1, 1, 2, 3,
				1, 9, 4, 2}),
			mat64.NewVector(3, []float64{1, 6, 4}),
			mat64.NewVector(4, []float64{8.0918, 0.8920, -3.7990, 1.5379}),
		}, {
			mat64.NewDense(10, 4, []float64{
				1, 2, 3, 4,
				1, 3, 4, 5,
				1, 4, 5, 6,
				1, 5, 6, 7,
				1, 6, 7, 8,
				1, 7, 8, 9,
				1, 8, 9, 10,
				1, 9, 10, 11,
				1, 10, 11, 12,
				1, 11, 12, 13}),
			mat64.NewVector(10, []float64{20, 26, 32, 38, 44, 50, 56, 62, 68, 74}),
			mat64.NewVector(4, []float64{0.6665, 1.3335, 2.0000, 2.6665}),
		},
	}
	for _, tc := range cases {
		theta := GradientDescent(tc.x, tc.y, alpha, tolerance, maxIters)
		if !mat64.EqualApprox(tc.result, theta, 0.0001) {
			t.Error("Expected:", tc.result)
			t.Error("Actual:  ", theta)
		}
	}
}
Beispiel #7
0
// compareNormal asserts that the (optionally weighted) sample batch has an
// empirical mean and covariance close to those of the target distribution
// want. A nil weights slice is treated as uniform unit weights.
func compareNormal(t *testing.T, want *distmv.Normal, batch *mat64.Dense, weights []float64) {
	dim := want.Dim()
	mu := want.Mean(nil)
	sigma := want.CovarianceMatrix(nil)
	rows, _ := batch.Dims()
	if weights == nil {
		weights = make([]float64, rows)
		for i := range weights {
			weights[i] = 1
		}
	}
	// Check each coordinate's sample mean separately.
	for d := 0; d < dim; d++ {
		sampleMean := stat.Mean(mat64.Col(nil, d, batch), weights)
		if math.Abs(sampleMean-mu[d]) > 1e-2 {
			t.Errorf("Mean mismatch: Want %v, got %v", mu[d], sampleMean)
		}
	}

	// The covariance estimate gets a looser tolerance than the means.
	sampleCov := stat.CovarianceMatrix(nil, batch, weights)
	if !mat64.EqualApprox(sampleCov, sigma, 1.5e-1) {
		t.Errorf("Covariance matrix mismatch")
	}
}
Beispiel #8
0
// TestMoveCentroids checks that MoveCentroids recomputes each centroid as
// the mean of the sample rows that AssignCentroid maps to it.
func TestMoveCentroids(t *testing.T) {
	centroids := mat.NewDense(2, 3, []float64{
		4, 9, 2,
		3, 1, 4,
	})
	samples := mat.NewDense(4, 3, []float64{
		2, 6, 1,
		3, 4, 9,
		9, 8, 2,
		6, 4, 0,
	})
	// First expected row is the mean of sample rows 0, 2 and 3
	// ((2+9+6)/3, (6+8+4)/3, (1+2+0)/3); the second is sample row 1.
	want := mat.NewDense(2, 3, []float64{
		17 / float64(3), 6, 1,
		3, 4, 9,
	})

	assignments := AssignCentroid(samples, centroids)
	got := MoveCentroids(assignments, samples, centroids)

	if !mat.EqualApprox(want, got, 1e-7) {
		t.Errorf("Expected \n%v, got\n%v",
			printMatrix(want), printMatrix(got))
	}
}
Beispiel #9
0
// TestConditionNormal exercises Normal.ConditionNormal three ways: against
// hand-computed results for uncorrelated dimensions, against the analytic
// bivariate conditioning formula, and against rejection-sampled empirical
// moments.
func TestConditionNormal(t *testing.T) {
	// Uncorrelated values shouldn't influence the updated values.
	for _, test := range []struct {
		mu       []float64
		sigma    *mat64.SymDense
		observed []int
		values   []float64

		newMu    []float64
		newSigma *mat64.SymDense
	}{
		{
			mu:       []float64{2, 3},
			sigma:    mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
			observed: []int{0},
			values:   []float64{10},

			newMu:    []float64{3},
			newSigma: mat64.NewSymDense(1, []float64{5}),
		},
		{
			mu:       []float64{2, 3},
			sigma:    mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
			observed: []int{1},
			values:   []float64{10},

			newMu:    []float64{2},
			newSigma: mat64.NewSymDense(1, []float64{2}),
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
			observed: []int{1},
			values:   []float64{10},

			newMu:    []float64{2, 4},
			newSigma: mat64.NewSymDense(2, []float64{2, 0, 0, 10}),
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
			observed: []int{0, 1},
			values:   []float64{10, 15},

			newMu:    []float64{4},
			newSigma: mat64.NewSymDense(1, []float64{10}),
		},
		{
			mu:       []float64{2, 3, 4, 5},
			sigma:    mat64.NewSymDense(4, []float64{2, 0.5, 0, 0, 0.5, 5, 0, 0, 0, 0, 10, 2, 0, 0, 2, 3}),
			observed: []int{0, 1},
			values:   []float64{10, 15},

			newMu:    []float64{4, 5},
			newSigma: mat64.NewSymDense(2, []float64{10, 2, 2, 3}),
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, original sigma not positive definite")
		}
		newNormal, ok := normal.ConditionNormal(test.observed, test.values, nil)
		if !ok {
			t.Fatalf("Bad test, update failure")
		}

		if !floats.EqualApprox(test.newMu, newNormal.mu, 1e-12) {
			t.Errorf("Updated mean mismatch. Want %v, got %v.", test.newMu, newNormal.mu)
		}

		var sigma mat64.SymDense
		sigma.FromCholesky(&newNormal.chol)
		if !mat64.EqualApprox(test.newSigma, &sigma, 1e-12) {
			t.Errorf("Updated sigma mismatch\n.Want:\n% v\nGot:\n% v\n", test.newSigma, sigma)
		}
	}

	// Test bivariate case where the update rule is analytic
	for _, test := range []struct {
		mu    []float64
		std   []float64
		rho   float64
		value float64
	}{
		{
			mu:    []float64{2, 3},
			std:   []float64{3, 5},
			rho:   0.9,
			value: 1000,
		},
		{
			mu:    []float64{2, 3},
			std:   []float64{3, 5},
			rho:   -0.9,
			value: 1000,
		},
	} {
		std := test.std
		rho := test.rho
		sigma := mat64.NewSymDense(2, []float64{std[0] * std[0], std[0] * std[1] * rho, std[0] * std[1] * rho, std[1] * std[1]})
		normal, ok := NewNormal(test.mu, sigma, nil)
		if !ok {
			t.Fatalf("Bad test, original sigma not positive definite")
		}
		newNormal, ok := normal.ConditionNormal([]int{1}, []float64{test.value}, nil)
		if !ok {
			t.Fatalf("Bad test, update failed")
		}
		var newSigma mat64.SymDense
		newSigma.FromCholesky(&newNormal.chol)
		trueMean := test.mu[0] + rho*(std[0]/std[1])*(test.value-test.mu[1])
		if math.Abs(trueMean-newNormal.mu[0]) > 1e-14 {
			t.Errorf("Mean mismatch. Want %v, got %v", trueMean, newNormal.mu[0])
		}
		trueVar := (1 - rho*rho) * std[0] * std[0]
		// Fixed copy-paste: this message previously reported the mean values
		// instead of the variance being compared.
		if math.Abs(trueVar-newSigma.At(0, 0)) > 1e-14 {
			t.Errorf("Std mismatch. Want %v, got %v", trueVar, newSigma.At(0, 0))
		}
	}

	// Test via sampling.
	for _, test := range []struct {
		mu         []float64
		sigma      *mat64.SymDense
		observed   []int
		unobserved []int
		value      []float64
	}{
		// The indices in unobserved must be in ascending order for this test.
		{
			mu:    []float64{2, 3, 4},
			sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),

			observed:   []int{0},
			unobserved: []int{1, 2},
			value:      []float64{1.9},
		},
		{
			mu:    []float64{2, 3, 4, 5},
			sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),

			observed:   []int{0, 3},
			unobserved: []int{1, 2},
			value:      []float64{1.9, 2.9},
		},
	} {
		totalSamp := 4000000
		var nSamp int
		samples := mat64.NewDense(totalSamp, len(test.mu), nil)
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			// Fatalf rather than Errorf: continuing would call normal.Rand on
			// an invalid distribution and panic.
			t.Fatalf("bad test")
		}
		// Rejection sampling: keep only draws whose observed coordinates are
		// close to the conditioning values.
		sample := make([]float64, len(test.mu))
		for i := 0; i < totalSamp; i++ {
			normal.Rand(sample)
			isClose := true
			for i, v := range test.observed {
				if math.Abs(sample[v]-test.value[i]) > 1e-1 {
					isClose = false
					break
				}
			}
			if isClose {
				samples.SetRow(nSamp, sample)
				nSamp++
			}
		}

		if nSamp < 100 {
			t.Errorf("bad test, not enough samples")
			continue
		}
		samples = samples.View(0, 0, nSamp, len(test.mu)).(*mat64.Dense)

		// Compute mean and covariance matrix.
		estMean := make([]float64, len(test.mu))
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samples), nil)
		}
		estCov := stat.CovarianceMatrix(nil, samples, nil)

		// Compute update rule.
		newNormal, ok := normal.ConditionNormal(test.observed, test.value, nil)
		if !ok {
			t.Fatalf("Bad test, update failure")
		}

		var subEstMean []float64
		for _, v := range test.unobserved {

			subEstMean = append(subEstMean, estMean[v])
		}
		subEstCov := mat64.NewSymDense(len(test.unobserved), nil)
		for i := 0; i < len(test.unobserved); i++ {
			for j := i; j < len(test.unobserved); j++ {
				subEstCov.SetSym(i, j, estCov.At(test.unobserved[i], test.unobserved[j]))
			}
		}

		for i, v := range subEstMean {
			if math.Abs(newNormal.mu[i]-v) > 5e-2 {
				t.Errorf("Mean mismatch. Want %v, got %v.", newNormal.mu[i], v)
			}
		}
		var sigma mat64.SymDense
		sigma.FromCholesky(&newNormal.chol)
		if !mat64.EqualApprox(&sigma, subEstCov, 1e-1) {
			t.Errorf("Covariance mismatch. Want:\n%0.8v\nGot:\n%0.8v\n", subEstCov, sigma)
		}
	}
}
Beispiel #10
0
// testSimplex solves the LP defined by (c, a, b) with the simplex method,
// checks feasibility of any returned solution, then cross-checks the primal
// result against the solution of the dual LP. It returns the primal error so
// callers can assert on the expected outcome.
func testSimplex(t *testing.T, initialBasic []int, c []float64, a mat64.Matrix, b []float64, convergenceTol float64) error {
	primalOpt, primalX, _, errPrimal := simplex(initialBasic, c, a, b, convergenceTol)
	if errPrimal == nil {
		// No error solving the simplex, check that the solution is feasible.
		var bCheck mat64.Vector
		bCheck.MulVec(a, mat64.NewVector(len(primalX), primalX))
		if !mat64.EqualApprox(&bCheck, mat64.NewVector(len(b), b), 1e-10) {
			t.Errorf("No error in primal but solution infeasible")
		}
	}

	primalInfeasible := errPrimal == ErrInfeasible
	primalUnbounded := errPrimal == ErrUnbounded
	primalBounded := errPrimal == nil
	primalASingular := errPrimal == ErrSingular
	primalZeroRow := errPrimal == ErrZeroRow
	primalZeroCol := errPrimal == ErrZeroColumn

	primalBad := !primalInfeasible && !primalUnbounded && !primalBounded && !primalASingular && !primalZeroRow && !primalZeroCol

	// It's an error if it's not one of the known returned errors. If it's
	// singular the problem is undefined and so the result cannot be compared
	// to the dual.
	if errPrimal == ErrSingular || primalBad {
		if primalBad {
			t.Errorf("non-known error returned: %s", errPrimal)
		}
		return errPrimal
	}

	// Compare the result to the answer found from solving the dual LP.

	// Construct and solve the dual LP.
	// Standard Form:
	//  minimize c^T * x
	//    subject to  A * x = b, x >= 0
	// The dual of this problem is
	//  maximize -b^T * nu
	//   subject to A^T * nu + c >= 0
	// Which is
	//   minimize b^T * nu
	//   subject to -A^T * nu <= c

	negAT := &mat64.Dense{}
	negAT.Clone(a.T())
	negAT.Scale(-1, negAT)
	cNew, aNew, bNew := Convert(b, negAT, c, nil, nil)

	dualOpt, dualX, _, errDual := simplex(nil, cNew, aNew, bNew, convergenceTol)
	if errDual == nil {
		// Check that the dual is feasible
		var bCheck mat64.Vector
		bCheck.MulVec(aNew, mat64.NewVector(len(dualX), dualX))
		if !mat64.EqualApprox(&bCheck, mat64.NewVector(len(bNew), bNew), 1e-10) {
			t.Errorf("No error in dual but solution infeasible")
		}
	}

	// Check about the zero status.
	if errPrimal == ErrZeroRow || errPrimal == ErrZeroColumn {
		return errPrimal
	}

	// If the primal problem is feasible, then the primal and the dual should
	// be the same answer. We have flopped the sign in the dual (minimizing
	// b^T *nu instead of maximizing -b^T*nu), so flip it back.
	if errPrimal == nil {
		if errDual != nil {
			// A leftover debug panic here previously aborted the whole test
			// binary and made the Errorf below unreachable; report and keep
			// going instead. The Println is retained as a diagnostic trace.
			fmt.Println("errDual", errDual)
			t.Errorf("Primal feasible but dual errored: %s", errDual)
		}
		dualOpt *= -1
		if !floats.EqualWithinAbsOrRel(dualOpt, primalOpt, convergenceTol, convergenceTol) {
			t.Errorf("Primal and dual value mismatch. Primal %v, dual %v.", primalOpt, dualOpt)
		}
	}
	// If the primal problem is unbounded, then the dual should be infeasible.
	if errPrimal == ErrUnbounded && errDual != ErrInfeasible {
		t.Errorf("Primal unbounded but dual not infeasible. ErrDual = %s", errDual)
	}

	// If the dual is unbounded, then the primal should be infeasible.
	if errDual == ErrUnbounded && errPrimal != ErrInfeasible {
		t.Errorf("Dual unbounded but primal not infeasible. ErrDual = %s", errPrimal)
	}

	// If the primal is infeasible, then the dual should be either infeasible
	// or unbounded.
	if errPrimal == ErrInfeasible {
		if errDual != ErrUnbounded && errDual != ErrInfeasible && errDual != ErrZeroColumn {
			t.Errorf("Primal infeasible but dual not infeasible or unbounded: %s", errDual)
		}
	}

	return errPrimal
}
Beispiel #11
0
// TestPrincipalComponents checks PCA results (principal component vectors
// and variances) against reference values for several small data sets,
// including weighted and transposed ("fat") inputs.
func TestPrincipalComponents(t *testing.T) {
	for i, test := range []struct {
		data     mat64.Matrix
		weights  []float64
		wantVecs *mat64.Dense
		wantVars []float64
		epsilon  float64
	}{
		// Test results verified using R.
		{
			// Rank-1 data: only the first variance is nonzero.
			data: mat64.NewDense(3, 3, []float64{
				1, 2, 3,
				4, 5, 6,
				7, 8, 9,
			}),
			wantVecs: mat64.NewDense(3, 3, []float64{
				0.5773502691896258, 0.8164965809277261, 0,
				0.577350269189626, -0.4082482904638632, -0.7071067811865476,
				0.5773502691896258, -0.4082482904638631, 0.7071067811865475,
			}),
			wantVars: []float64{27, 0, 0},
			epsilon:  1e-12,
		},
		{ // Truncated iris data.
			data: mat64.NewDense(10, 4, []float64{
				5.1, 3.5, 1.4, 0.2,
				4.9, 3.0, 1.4, 0.2,
				4.7, 3.2, 1.3, 0.2,
				4.6, 3.1, 1.5, 0.2,
				5.0, 3.6, 1.4, 0.2,
				5.4, 3.9, 1.7, 0.4,
				4.6, 3.4, 1.4, 0.3,
				5.0, 3.4, 1.5, 0.2,
				4.4, 2.9, 1.4, 0.2,
				4.9, 3.1, 1.5, 0.1,
			}),
			wantVecs: mat64.NewDense(4, 4, []float64{
				-0.6681110197952722, 0.7064764857539533, -0.14026590216895132, -0.18666578956412125,
				-0.7166344774801547, -0.6427036135482664, -0.135650285905254, 0.23444848208629923,
				-0.164411275166307, 0.11898477441068218, 0.9136367900709548, 0.35224901970831746,
				-0.11415613655453069, -0.2714141920887426, 0.35664028439226514, -0.8866286823515034,
			}),
			wantVars: []float64{0.1665786313282786, 0.02065509475412993, 0.007944620317765855, 0.0019327647109368329},
			epsilon:  1e-12,
		},
		{ // Truncated iris data transposed to check for operation on fat input.
			data: mat64.NewDense(10, 4, []float64{
				5.1, 3.5, 1.4, 0.2,
				4.9, 3.0, 1.4, 0.2,
				4.7, 3.2, 1.3, 0.2,
				4.6, 3.1, 1.5, 0.2,
				5.0, 3.6, 1.4, 0.2,
				5.4, 3.9, 1.7, 0.4,
				4.6, 3.4, 1.4, 0.3,
				5.0, 3.4, 1.5, 0.2,
				4.4, 2.9, 1.4, 0.2,
				4.9, 3.1, 1.5, 0.1,
			}).T(),
			wantVecs: mat64.NewDense(10, 4, []float64{
				-0.3366602459946619, -0.1373634006401213, 0.3465102523547623, -0.10290179303893479,
				-0.31381852053861975, 0.5197145790632827, 0.5567296129086686, -0.15923062170153618,
				-0.30857197637565165, -0.07670930360819002, 0.36159923003337235, 0.3342301027853355,
				-0.29527124351656137, 0.16885455995353074, -0.5056204762881208, 0.32580913261444344,
				-0.3327611073694004, -0.39365834489416474, 0.04900050959307464, 0.46812879383236555,
				-0.34445484362044815, -0.2985206914561878, -0.1009714701361799, -0.16803618186050803,
				-0.2986246350957691, -0.4222037823717799, -0.11838613462182519, -0.580283530375069,
				-0.325911246223126, 0.024366468758217238, -0.12082035131864265, 0.16756027181337868,
				-0.2814284432361538, 0.240812316260054, -0.24061437569068145, -0.365034616264623,
				-0.31906138507685167, 0.4423912824105986, -0.2906412122303604, 0.027551046870337714,
			}),
			wantVars: []float64{41.8851906634233, 0.07762619213464989, 0.010516477775373585, 0},
			epsilon:  1e-12,
		},
		{ // Truncated iris data unitary weights.
			// Must yield exactly the unweighted truncated-iris results.
			data: mat64.NewDense(10, 4, []float64{
				5.1, 3.5, 1.4, 0.2,
				4.9, 3.0, 1.4, 0.2,
				4.7, 3.2, 1.3, 0.2,
				4.6, 3.1, 1.5, 0.2,
				5.0, 3.6, 1.4, 0.2,
				5.4, 3.9, 1.7, 0.4,
				4.6, 3.4, 1.4, 0.3,
				5.0, 3.4, 1.5, 0.2,
				4.4, 2.9, 1.4, 0.2,
				4.9, 3.1, 1.5, 0.1,
			}),
			weights: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
			wantVecs: mat64.NewDense(4, 4, []float64{
				-0.6681110197952722, 0.7064764857539533, -0.14026590216895132, -0.18666578956412125,
				-0.7166344774801547, -0.6427036135482664, -0.135650285905254, 0.23444848208629923,
				-0.164411275166307, 0.11898477441068218, 0.9136367900709548, 0.35224901970831746,
				-0.11415613655453069, -0.2714141920887426, 0.35664028439226514, -0.8866286823515034,
			}),
			wantVars: []float64{0.1665786313282786, 0.02065509475412993, 0.007944620317765855, 0.0019327647109368329},
			epsilon:  1e-12,
		},
		{ // Truncated iris data non-unitary weights.
			data: mat64.NewDense(10, 4, []float64{
				5.1, 3.5, 1.4, 0.2,
				4.9, 3.0, 1.4, 0.2,
				4.7, 3.2, 1.3, 0.2,
				4.6, 3.1, 1.5, 0.2,
				5.0, 3.6, 1.4, 0.2,
				5.4, 3.9, 1.7, 0.4,
				4.6, 3.4, 1.4, 0.3,
				5.0, 3.4, 1.5, 0.2,
				4.4, 2.9, 1.4, 0.2,
				4.9, 3.1, 1.5, 0.1,
			}),
			weights: []float64{2, 3, 1, 1, 1, 1, 1, 1, 1, 2},
			wantVecs: mat64.NewDense(4, 4, []float64{
				-0.618936145422414, 0.763069301531647, 0.124857741232537, 0.138035623677211,
				-0.763958271606519, -0.603881770702898, 0.118267155321333, -0.194184052457746,
				-0.143552119754944, 0.090014599564871, -0.942209377020044, -0.289018426115945,
				-0.112599271966947, -0.212012782487076, -0.287515067921680, 0.927203898682805,
			}),
			wantVars: []float64{0.129621985550623, 0.022417487771598, 0.006454461065715, 0.002495076601075},
			epsilon:  1e-12,
		},
	} {
		vecs, vars, ok := PrincipalComponents(test.data, test.weights)
		if !ok {
			t.Errorf("unexpected SVD failure for test %d", i)
			continue
		}
		if !mat64.EqualApprox(vecs, test.wantVecs, test.epsilon) {
			t.Errorf("%d: unexpected PCA result got:\n%v\nwant:\n%v",
				i, mat64.Formatted(vecs), mat64.Formatted(test.wantVecs))
		}
		if !approxEqual(vars, test.wantVars, test.epsilon) {
			t.Errorf("%d: unexpected variance result got:%v, want:%v", i, vars, test.wantVars)
		}
	}
}
Beispiel #12
0
// TestCorrCov checks that the corrToCov and covToCorr conversions round-trip
// against directly computed correlation and covariance matrices, for both
// unweighted and weighted data.
func TestCorrCov(t *testing.T) {
	// Exercises both corrToCov and covToCorr (the original comment repeated
	// "Cov2Corr" twice by mistake).
	for i, test := range []struct {
		data    *mat64.Dense
		weights []float64
	}{
		{
			data: mat64.NewDense(3, 3, []float64{
				1, 2, 3,
				3, 4, 5,
				5, 6, 7,
			}),
			weights: nil,
		},
		{
			data: mat64.NewDense(5, 2, []float64{
				-2, -4,
				-1, 2,
				0, 0,
				1, -2,
				2, 4,
			}),
			weights: nil,
		}, {
			data: mat64.NewDense(3, 2, []float64{
				1, 1,
				2, 4,
				3, 9,
			}),
			weights: []float64{
				1,
				1.5,
				1,
			},
		},
	} {
		corr := CorrelationMatrix(nil, test.data, test.weights)
		cov := CovarianceMatrix(nil, test.data, test.weights)

		r, _ := cov.Dims()

		// Get the diagonal elements from cov to determine the sigmas.
		sigmas := make([]float64, r)
		for i := range sigmas {
			sigmas[i] = math.Sqrt(cov.At(i, i))
		}

		// Convert each matrix in place on a copy and compare with the
		// directly computed counterpart.
		covFromCorr := mat64.DenseCopyOf(corr)
		corrToCov(covFromCorr, sigmas)
		corrFromCov := mat64.DenseCopyOf(cov)
		covToCorr(corrFromCov)

		if !mat64.EqualApprox(corr, corrFromCov, 1e-14) {
			t.Errorf("%d: corrToCov did not match direct Correlation calculation.  Want: %v, got: %v. ", i, corr, corrFromCov)
		}
		if !mat64.EqualApprox(cov, covFromCorr, 1e-14) {
			t.Errorf("%d: covToCorr did not match direct Covariance calculation.  Want: %v, got: %v. ", i, cov, covFromCorr)
		}

		// NOTE(review): this panic check is loop-invariant and runs once per
		// test case; it could be hoisted out of the loop — confirm intent.
		if !Panics(func() { corrToCov(mat64.NewDense(2, 2, nil), []float64{}) }) {
			t.Errorf("CorrelationMatrix did not panic with sigma size mismatch")
		}
	}
}