func RegularizerTest(t *testing.T, r Regularizer, name string, parameters []float64, trueLoss float64, trueDeriv []float64) {
	// Test that Loss works
	loss := r.Loss(parameters)
	if math.Abs(loss-trueLoss) > 1e-14 {
		t.Errorf("Loss doesn't match for case %v. Expected: %v, Found: %v", name, trueLoss, loss)
	}

	// Test that LossDeriv works
	derivative := make([]float64, len(trueDeriv))
	lossDeriv := r.LossDeriv(parameters, derivative)
	if math.Abs(lossDeriv-trueLoss) > 1e-14 {
		t.Errorf("Loss doesn't match from LossDeriv for case %v. Expected: %v, Found: %v", name, trueLoss, lossDeriv)
	}
	if !floats.EqualApprox(trueDeriv, derivative, 1e-14) {
		t.Errorf("Derivative doesn't match from LossDeriv for case %v", name)
	}

	// Test that LossAddDeriv adds to the existing derivative in place.
	for i := range derivative {
		derivative[i] = float64(i)
	}
	lossAddDeriv := r.LossAddDeriv(parameters, derivative)
	if math.Abs(lossAddDeriv-trueLoss) > 1e-14 {
		t.Errorf("Loss doesn't match from LossAddDeriv for case %v. Expected: %v, Found: %v", name, trueLoss, lossAddDeriv)
	}
	for i := range derivative {
		derivative[i] -= float64(i)
	}
	if !floats.EqualApprox(trueDeriv, derivative, 1e-14) {
		t.Errorf("Derivative doesn't match from LossAddDeriv for case %v", name)
	}
}
func (s *S) TestVectorMul(c *check.C) {
	for i, test := range []struct {
		m int
		n int
	}{
		{
			m: 10,
			n: 5,
		},
		{
			m: 5,
			n: 5,
		},
		{
			m: 5,
			n: 10,
		},
	} {
		vData := make([]float64, test.n)
		for i := range vData {
			vData[i] = rand.Float64()
		}
		vDataCopy := make([]float64, test.n)
		copy(vDataCopy, vData)
		v := NewVector(test.n, vData)
		aData := make([]float64, test.n*test.m)
		for i := range aData {
			aData[i] = rand.Float64()
		}
		a := NewDense(test.m, test.n, aData)
		var v2 Vector
		v2.MulVec(a, false, v)
		var v2M Dense
		v2M.Mul(a, v)
		same := floats.EqualApprox(v2.mat.Data, v2M.mat.Data, 1e-14)
		c.Check(same, check.Equals, true, check.Commentf("Test %d", i))

		var aT Dense
		aT.TCopy(a)
		v2.MulVec(&aT, true, v)
		same = floats.EqualApprox(v2.mat.Data, v2M.mat.Data, 1e-14)
		c.Check(same, check.Equals, true, check.Commentf("Test %d", i))

		/*
			v.MulVec(&aT, true, v)
			same = floats.EqualApprox(v.mat.Data, v2M.mat.Data, 1e-14)
			c.Check(same, check.Equals, true, check.Commentf("Test %d", i))
		*/
	}
}
func TestLogSquared(t *testing.T) {
	prediction := []float64{1, -2, 3}
	truth := []float64{1.1, -2.2, 2.7}
	trueloss := (math.Log(.1*.1+1) + math.Log(.2*.2+1) + math.Log(.3*.3+1)) / 3
	derivative := []float64{0, 0, 0}

	sq := LogSquared{}
	loss := sq.Loss(prediction, truth)
	if math.Abs(loss-trueloss) > TOL {
		t.Errorf("loss doesn't match from Loss(). Expected %v, Found: %v", trueloss, loss)
	}
	loss = sq.LossDeriv(prediction, truth, derivative)
	if math.Abs(loss-trueloss) > TOL {
		t.Errorf("loss doesn't match from LossDeriv()")
	}
	derivative, fdDerivative := finiteDifferenceLosser(sq, prediction, truth)
	if !floats.EqualApprox(derivative, fdDerivative, FDTol) {
		t.Errorf("Derivative doesn't match. \n deriv: %v \n fdDeriv: %v ", derivative, fdDerivative)
	}
	err := common.InterfaceTestMarshalAndUnmarshal(sq)
	if err != nil {
		t.Errorf("Error marshaling and unmarshaling: %v", err)
	}
}
func TestRelativeLog(t *testing.T) {
	tol := 1e-2
	prediction := []float64{1, -2, 3}
	truth := []float64{1.1, -2.2, 2.7}
	trueloss := ((.1/(1.1+tol))*(.1/(1.1+tol)) + (.2/(2.2+tol))*(.2/(2.2+tol)) + (.3/(2.7+tol))*(.3/(2.7+tol))) / 3
	trueloss = math.Log(trueloss + 1)
	derivative := []float64{0, 0, 0}

	sq := RelativeLog(tol)
	loss := sq.Loss(prediction, truth)
	if math.Abs(loss-trueloss) > TOL {
		t.Errorf("loss doesn't match from Loss(). Expected %v, Found: %v", trueloss, loss)
	}
	loss = sq.LossDeriv(prediction, truth, derivative)
	if math.Abs(loss-trueloss) > TOL {
		t.Errorf("loss doesn't match from LossDeriv()")
	}
	derivative, fdDerivative := finiteDifferenceLosser(sq, prediction, truth)
	if !floats.EqualApprox(derivative, fdDerivative, FDTol) {
		t.Errorf("Derivative doesn't match. \n deriv: %v \n fdDeriv: %v ", derivative, fdDerivative)
	}
	err := common.InterfaceTestMarshalAndUnmarshal(sq)
	if err != nil {
		t.Errorf("Error marshaling and unmarshaling: %v", err)
	}
}
func DrsclTest(t *testing.T, impl Drscler) {
	for _, test := range []struct {
		x []float64
		a float64
	}{
		{
			x: []float64{1, 2, 3, 4, 5},
			a: 4,
		},
		{
			x: []float64{1, 2, 3, 4, 5},
			a: math.MaxFloat64,
		},
		{
			x: []float64{1, 2, 3, 4, 5},
			a: 1e-307,
		},
	} {
		xcopy := make([]float64, len(test.x))
		copy(xcopy, test.x)

		// Cannot test the scaling directly because of floating point scaling issues
		// (the purpose of Drscl). Instead, check that scaling and scaling back
		// yields approximately x. If overflow or underflow occurs then the scaling
		// won't match.
		impl.Drscl(len(test.x), test.a, xcopy, 1)
		if floats.Equal(xcopy, test.x) {
			t.Errorf("x unchanged during call to drscl. a = %v, x = %v.", test.a, test.x)
		}
		impl.Drscl(len(test.x), 1/test.a, xcopy, 1)
		if !floats.EqualApprox(xcopy, test.x, 1e-14) {
			t.Errorf("x not equal after scaling and unscaling. a = %v, x = %v.", test.a, test.x)
		}
	}
}
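// demonstrateReciprocalOverflow is a minimal illustrative sketch (hypothetical,
// not part of the original suite) of the hazard Drscl guards against: for a
// subnormal divisor, forming the reciprocal 1/a first overflows to +Inf even
// though the quotient x/a is representable, which is why Drscl scales in
// stages instead of multiplying by a naively computed reciprocal.
func demonstrateReciprocalOverflow() (quotient, viaReciprocal float64) {
	a := 1e-310 // subnormal divisor
	x := 1e-5
	return x / a, x * (1 / a) // 1e+305 (finite) versus +Inf (1/a overflows)
}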
func testDerivParam(t *testing.T, d derivParamTester) {
	// Tests that the derivative matches for a number of different quantiles
	// along the distribution.
	nTest := 10
	quantiles := make([]float64, nTest)
	floats.Span(quantiles, 0.1, 0.9)

	deriv := make([]float64, d.NumParameters())
	fdDeriv := make([]float64, d.NumParameters())

	initParams := d.parameters(nil)
	init := make([]float64, d.NumParameters())
	for i, v := range initParams {
		init[i] = v.Value
	}
	for _, v := range quantiles {
		d.setParameters(initParams)
		x := d.Quantile(v)
		d.DLogProbDParam(x, deriv)
		f := func(p []float64) float64 {
			params := d.parameters(nil)
			for i, v := range p {
				params[i].Value = v
			}
			d.setParameters(params)
			return d.LogProb(x)
		}
		fd.Gradient(fdDeriv, f, init, nil)
		if !floats.EqualApprox(deriv, fdDeriv, 1e-6) {
			t.Fatal("Derivative mismatch. Want", fdDeriv, ", got", deriv, ".")
		}
	}
}
func TestPrivatePredictsMatch(t *testing.T) {
	for i, test := range netIniters {
		for j := 0; j < nRandSamp; j++ {
			n := testNets[i]
			input := make([]float64, test.inputDim)
			floats.Fill(rand.NormFloat64, input)
			outputSimple := make([]float64, test.outputDim)
			floats.Fill(rand.NormFloat64, outputSimple)
			outputCache := make([]float64, test.outputDim)
			floats.Fill(rand.NormFloat64, outputCache)

			// predict using uncached method
			tmp1, tmp2 := newPredictMemory(n.neurons)
			predict(input, n.neurons, n.parameters, tmp1, tmp2, outputSimple)

			// predict using cached method
			combinations := newPerNeuronMemory(n.neurons)
			outputs := newPerNeuronMemory(n.neurons)
			cachePredict(input, n.neurons, n.parameters, combinations, outputs, outputCache)

			if !floats.EqualApprox(outputSimple, outputCache, 1e-14) {
				t.Errorf("test %v: output mismatch between simple and cached predict. Simple: %v, Cached: %v", test.name, outputSimple, outputCache)
				break
			}
		}
	}
}
func DspmvTest(t *testing.T, blasser Dspmver) {
	for i, test := range []struct {
		ul    blas.Uplo
		n     int
		a     [][]float64
		x     []float64
		y     []float64
		alpha float64
		beta  float64
		ans   []float64
	}{
		{
			ul: blas.Upper,
			n:  3,
			a: [][]float64{
				{5, 6, 7},
				{0, 8, 10},
				{0, 0, 13},
			},
			x:     []float64{3, 4, 5},
			y:     []float64{6, 7, 8},
			alpha: 2.1,
			beta:  -3,
			ans:   []float64{137.4, 189, 240.6},
		},
		{
			ul: blas.Lower,
			n:  3,
			a: [][]float64{
				{5, 0, 0},
				{6, 8, 0},
				{7, 10, 13},
			},
			x:     []float64{3, 4, 5},
			y:     []float64{6, 7, 8},
			alpha: 2.1,
			beta:  -3,
			ans:   []float64{137.4, 189, 240.6},
		},
	} {
		incTest := func(incX, incY, extra int) {
			x := makeIncremented(test.x, incX, extra)
			y := makeIncremented(test.y, incY, extra)
			aFlat := flattenTriangular(test.a, test.ul)
			ans := makeIncremented(test.ans, incY, extra)

			blasser.Dspmv(test.ul, test.n, test.alpha, aFlat, x, incX, test.beta, y, incY)
			if !floats.EqualApprox(ans, y, 1e-14) {
				t.Errorf("Case %v, incX=%v, incY=%v: Want %v, got %v.", i, incX, incY, ans, y)
			}
		}
		incTest(1, 1, 0)
		incTest(2, 3, 0)
		incTest(3, 2, 0)
		incTest(-3, 2, 0)
		incTest(-2, 4, 0)
		incTest(2, -1, 0)
		incTest(-3, -4, 3)
	}
}
func Dlasv2Test(t *testing.T, impl Dlasv2er) {
	rnd := rand.New(rand.NewSource(1))
	for i := 0; i < 100; i++ {
		f := rnd.NormFloat64()
		g := rnd.NormFloat64()
		h := rnd.NormFloat64()

		ssmin, ssmax, snr, csr, snl, csl := impl.Dlasv2(f, g, h)

		// tmp =
		// [ csl snl] [f g]
		// [-snl csl] [0 h]
		tmp11 := csl * f
		tmp12 := csl*g + snl*h
		tmp21 := -snl * f
		tmp22 := -snl*g + csl*h
		// lhs =
		// [tmp11 tmp12] [csr -snr]
		// [tmp21 tmp22] [snr  csr]
		ans11 := tmp11*csr + tmp12*snr
		ans12 := tmp11*-snr + tmp12*csr
		ans21 := tmp21*csr + tmp22*snr
		ans22 := tmp21*-snr + tmp22*csr

		lhs := []float64{ans11, ans12, ans21, ans22}
		rhs := []float64{ssmax, 0, 0, ssmin}
		if !floats.EqualApprox(rhs, lhs, 1e-12) {
			t.Errorf("SVD mismatch. f = %v, g = %v, h = %v.\nLHS: %v\nRHS: %v", f, g, h, lhs, rhs)
		}
	}
}
func testDpotf2(t *testing.T, impl Dpotf2er, testPos bool, a, ans [][]float64, stride int, ul blas.Uplo) {
	aFlat := flattenTri(a, stride, ul)
	ansFlat := flattenTri(ans, stride, ul)
	pos := impl.Dpotf2(ul, len(a[0]), aFlat, stride)
	if pos != testPos {
		t.Errorf("Positive definite mismatch: Want %v, Got %v", testPos, pos)
		return
	}
	if testPos && !floats.EqualApprox(ansFlat, aFlat, 1e-14) {
		t.Errorf("Result mismatch: Want %v, Got %v", ansFlat, aFlat)
	}
}
func TestNormRand(t *testing.T) {
	for _, test := range []struct {
		mean []float64
		cov  []float64
	}{
		{
			mean: []float64{0, 0},
			cov: []float64{
				1, 0,
				0, 1,
			},
		},
		{
			mean: []float64{0, 0},
			cov: []float64{
				1, 0.9,
				0.9, 1,
			},
		},
		{
			mean: []float64{6, 7},
			cov: []float64{
				5, 0.9,
				0.9, 2,
			},
		},
	} {
		dim := len(test.mean)
		cov := mat64.NewSymDense(dim, test.cov)
		n, ok := NewNormal(test.mean, cov, nil)
		if !ok {
			t.Errorf("bad covariance matrix")
			// Skip sampling from an invalid distribution.
			continue
		}

		nSamples := 1000000
		samps := mat64.NewDense(nSamples, dim, nil)
		for i := 0; i < nSamples; i++ {
			n.Rand(samps.RawRowView(i))
		}
		estMean := make([]float64, dim)
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
		}
		if !floats.EqualApprox(estMean, test.mean, 1e-2) {
			t.Errorf("Mean mismatch: want: %v, got %v", test.mean, estMean)
		}
		estCov := stat.CovarianceMatrix(nil, samps, nil)
		if !mat64.EqualApprox(estCov, cov, 1e-2) {
			t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
		}
	}
}
func testLinear(t *testing.T, kind linearTest) {
	u := &Linear{}
	data := flatten(kind.data)
	err := u.SetScale(data)
	if err != nil {
		if !kind.eqDim {
			t.Errorf("Error where there shouldn't be for case " + kind.name + ": " + err.Error())
		}
	}
	if !floats.EqualApprox(u.Min, kind.min, 1e-14) {
		t.Errorf("Min doesn't match for case " + kind.name)
	}
	if !floats.EqualApprox(u.Max, kind.max, 1e-14) {
		t.Errorf("Max doesn't match for case " + kind.name)
	}
	scaledData := flatten(kind.scaledData)
	testScaling(t, u, data, scaledData, kind.name)

	u2 := &Linear{}
	testGob(u, u2, t)
}
func denseEqualApprox(a *Dense, acomp matComp, tol float64) bool {
	ar2, ac2 := a.Dims()
	if ar2 != acomp.r {
		return false
	}
	if ac2 != acomp.c {
		return false
	}
	return floats.EqualApprox(a.mat.Data, acomp.data, tol)
}
func testNormal(t *testing.T, kind normalTest) {
	u := &Normal{}
	data := flatten(kind.data)
	err := u.SetScale(data)
	if err != nil {
		if !kind.eqDim {
			t.Errorf("Error where there shouldn't be for case " + kind.name + ": " + err.Error())
		}
	}
	if !floats.EqualApprox(u.Mu, kind.mu, 1e-14) {
		t.Errorf("Mu doesn't match for case "+kind.name+". Expected: %v, Found: %v", kind.mu, u.Mu)
	}
	if !floats.EqualApprox(u.Sigma, kind.sigma, 1e-14) {
		t.Errorf("Sigma doesn't match for case "+kind.name+". Expected: %v, Found: %v", kind.sigma, u.Sigma)
	}
	scaledData := flatten(kind.scaledData)
	testScaling(t, u, data, scaledData, kind.name)

	u2 := &Normal{}
	testGob(u, u2, t)
}
func DorgqlTest(t *testing.T, impl Dorgqler) {
	rnd := rand.New(rand.NewSource(1))
	for _, test := range []struct {
		m, n, k, lda int
	}{
		{5, 4, 3, 0},
		{100, 100, 100, 0},
		{200, 100, 50, 0},
		{200, 200, 50, 0},
	} {
		m := test.m
		n := test.n
		k := test.k
		lda := test.lda
		if lda == 0 {
			lda = n
		}
		a := make([]float64, m*lda)
		for i := range a {
			a[i] = rnd.NormFloat64()
		}
		tau := nanSlice(min(m, n))
		work := nanSlice(max(m, n))
		impl.Dgeql2(m, n, a, lda, tau, work)

		aCopy := make([]float64, len(a))
		copy(aCopy, a)

		// Compute the answer with the unblocked routine.
		impl.Dorg2l(m, n, k, a, lda, tau, work)
		ans := make([]float64, len(a))
		copy(ans, a)

		// Query for the optimal work length, then compare the blocked routine.
		impl.Dorgql(m, n, k, a, lda, tau, work, -1)
		work = make([]float64, int(work[0]))
		copy(a, aCopy)
		impl.Dorgql(m, n, k, a, lda, tau, work, len(work))
		if !floats.EqualApprox(a, ans, 1e-8) {
			t.Errorf("Answer mismatch. m = %v, n = %v, k = %v", m, n, k)
		}
	}
}
// TestDeriv uses finite differences to test that the derivative from Deriv
// is correct, and tests that computing the loss in parallel works properly.
func TestDeriv(t *testing.T, trainable DerivTester, inputs, trueOutputs common.RowMatrix, name string) {
	// Set the parameters to something random
	trainable.RandomizeParameters()

	// Compute the loss and derivative
	losser := loss.SquaredDistance{}
	regularizer := regularize.TwoNorm{}
	batchGrad := train.NewBatchGradBased(trainable, true, inputs, trueOutputs, losser, regularizer)
	derivative := make([]float64, trainable.NumParameters())
	parameters := trainable.Parameters(nil)
	// Don't need to check loss, because if predict is right and losser is right then loss must be correct
	_ = batchGrad.ObjGrad(parameters, derivative)

	// Compute a central finite-difference approximation to the derivative,
	// one parameter per goroutine.
	fdDerivative := make([]float64, trainable.NumParameters())
	wg := &sync.WaitGroup{}
	wg.Add(trainable.NumParameters())
	for i := 0; i < trainable.NumParameters(); i++ {
		go func(i int) {
			newParameters := make([]float64, trainable.NumParameters())
			tmpDerivative := make([]float64, trainable.NumParameters())
			copy(newParameters, parameters)
			newParameters[i] += fdStep
			loss1 := batchGrad.ObjGrad(newParameters, tmpDerivative)
			newParameters[i] -= 2 * fdStep
			loss2 := batchGrad.ObjGrad(newParameters, tmpDerivative)
			newParameters[i] += fdStep
			fdDerivative[i] = (loss1 - loss2) / (2 * fdStep)
			wg.Done()
		}(i)
	}
	wg.Wait()
	if !floats.EqualApprox(derivative, fdDerivative, 1e-6) {
		t.Errorf("%v: deriv doesn't match: Finite Difference: %v, Analytic: %v", name, fdDerivative, derivative)
	}
}
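// centralDifference is a minimal sketch (hypothetical helper, not part of the
// original suite) of the second-order scheme TestDeriv applies above to each
// parameter: df/dx_i ≈ (f(x + h*e_i) - f(x - h*e_i)) / (2h).
func centralDifference(f func([]float64) float64, x []float64, i int, h float64) float64 {
	orig := x[i]
	x[i] = orig + h
	fPlus := f(x)
	x[i] = orig - h
	fMinus := f(x)
	x[i] = orig // restore the input point
	return (fPlus - fMinus) / (2 * h)
}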
func TestPredictFeaturized(t *testing.T) {
	for _, test := range []struct {
		z              []float64
		featureWeights [][]float64
		output         []float64
		Name           string
	}{
		{
			Name: "General",
			z:    []float64{1, 2, 3},
			featureWeights: [][]float64{
				{3, 4},
				{1, 2},
				{0.5, 0.4},
			},
			output: []float64{6.5, 9.2},
		},
	} {
		zCopy := make([]float64, len(test.z))
		copy(zCopy, test.z)
		fwMat := flatten(test.featureWeights)
		fwMatCopy := &mat64.Dense{}
		fwMatCopy.Clone(fwMat)
		output := make([]float64, len(test.output))
		predictFeaturized(zCopy, fwMat, output)

		// Test that z wasn't changed
		if !floats.Equal(test.z, zCopy) {
			t.Errorf("z changed during call")
		}
		if !floats.EqualApprox(output, test.output, 1e-14) {
			t.Errorf("output doesn't match for test %v. Expected %v, found %v", test.Name, test.output, output)
		}
	}
}
func TestManhattanDistance(t *testing.T) {
	prediction := []float64{1, 2, 3}
	truth := []float64{1.1, 2.2, 2.7}
	trueloss := (.1 + .2 + .3) / 3
	derivative := []float64{0, 0, 0}

	sq := ManhattanDistance{}
	loss := sq.Loss(prediction, truth)
	if math.Abs(loss-trueloss) > TOL {
		t.Errorf("loss doesn't match from Loss()")
	}
	loss = sq.LossDeriv(prediction, truth, derivative)
	if math.Abs(loss-trueloss) > TOL {
		t.Errorf("loss doesn't match from LossDeriv()")
	}
	derivative, fdDerivative := finiteDifferenceLosser(sq, prediction, truth)
	if !floats.EqualApprox(derivative, fdDerivative, FDTol) {
		t.Errorf("Derivative doesn't match. \n deriv: %v \n fdDeriv: %v ", derivative, fdDerivative)
	}
	err := common.InterfaceTestMarshalAndUnmarshal(sq)
	if err != nil {
		t.Errorf("Error marshaling and unmarshaling: %v", err)
	}

	// Equal prediction and truth should give zero loss and zero derivative.
	truth = []float64{1, 2, 3}
	loss = sq.LossDeriv(prediction, truth, derivative)
	if loss != 0 {
		t.Errorf("Non-zero loss for equal pred and truth")
	}
	for _, val := range derivative {
		if val != 0 {
			t.Errorf("Non-zero derivative for equal pred and truth")
		}
	}
}
func DormbrTest(t *testing.T, impl Dormbrer) {
	rnd := rand.New(rand.NewSource(1))
	bi := blas64.Implementation()
	for _, vect := range []lapack.DecompUpdate{lapack.ApplyQ, lapack.ApplyP} {
		for _, side := range []blas.Side{blas.Left, blas.Right} {
			for _, trans := range []blas.Transpose{blas.NoTrans, blas.Trans} {
				for _, test := range []struct {
					m, n, k, lda, ldc int
				}{
					{3, 4, 5, 0, 0},
					{3, 5, 4, 0, 0},
					{4, 3, 5, 0, 0},
					{4, 5, 3, 0, 0},
					{5, 3, 4, 0, 0},
					{5, 4, 3, 0, 0},

					{3, 4, 5, 10, 12},
					{3, 5, 4, 10, 12},
					{4, 3, 5, 10, 12},
					{4, 5, 3, 10, 12},
					{5, 3, 4, 10, 12},
					{5, 4, 3, 10, 12},
				} {
					m := test.m
					n := test.n
					k := test.k
					ldc := test.ldc
					if ldc == 0 {
						ldc = n
					}
					nq := n
					if side == blas.Left {
						nq = m
					}

					// Compute a decomposition.
					var ma, na int
					var a []float64
					if vect == lapack.ApplyQ {
						ma = nq
						na = k
					} else {
						ma = k
						na = nq
					}
					lda := test.lda
					if lda == 0 {
						lda = na
					}
					a = make([]float64, ma*lda)
					for i := range a {
						a[i] = rnd.NormFloat64()
					}
					nTau := min(nq, k)
					tauP := make([]float64, nTau)
					tauQ := make([]float64, nTau)
					d := make([]float64, nTau)
					e := make([]float64, nTau)
					lwork := -1
					work := make([]float64, 1)
					impl.Dgebrd(ma, na, a, lda, d, e, tauQ, tauP, work, lwork)
					work = make([]float64, int(work[0]))
					lwork = len(work)
					impl.Dgebrd(ma, na, a, lda, d, e, tauQ, tauP, work, lwork)

					// Apply and compare update.
					c := make([]float64, m*ldc)
					for i := range c {
						c[i] = rnd.NormFloat64()
					}
					cCopy := make([]float64, len(c))
					copy(cCopy, c)
					if vect == lapack.ApplyQ {
						impl.Dormbr(vect, side, trans, m, n, k, a, lda, tauQ, c, ldc, work, lwork)
					} else {
						impl.Dormbr(vect, side, trans, m, n, k, a, lda, tauP, c, ldc, work, lwork)
					}

					// Check that the multiplication was correct.
					cOrig := blas64.General{
						Rows:   m,
						Cols:   n,
						Stride: ldc,
						Data:   make([]float64, len(cCopy)),
					}
					copy(cOrig.Data, cCopy)
					cAns := blas64.General{
						Rows:   m,
						Cols:   n,
						Stride: ldc,
						Data:   make([]float64, len(cCopy)),
					}
					copy(cAns.Data, cCopy)
					nb := min(ma, na)
					var mulMat blas64.General
					if vect == lapack.ApplyQ {
						mulMat = constructQPBidiagonal(lapack.ApplyQ, ma, na, nb, a, lda, tauQ)
					} else {
						mulMat = constructQPBidiagonal(lapack.ApplyP, ma, na, nb, a, lda, tauP)
					}
					mulTrans := trans
					if side == blas.Left {
						bi.Dgemm(mulTrans, blas.NoTrans, m, n, m, 1, mulMat.Data, mulMat.Stride, cOrig.Data, cOrig.Stride, 0, cAns.Data, cAns.Stride)
					} else {
						bi.Dgemm(blas.NoTrans, mulTrans, m, n, n, 1, cOrig.Data, cOrig.Stride, mulMat.Data, mulMat.Stride, 0, cAns.Data, cAns.Stride)
					}
					if !floats.EqualApprox(cAns.Data, c, 1e-8) {
						isApplyQ := vect == lapack.ApplyQ
						isLeft := side == blas.Left
						isTrans := trans == blas.Trans
						t.Errorf("C mismatch. isApplyQ: %v, isLeft: %v, isTrans: %v, m = %v, n = %v, k = %v, lda = %v, ldc = %v",
							isApplyQ, isLeft, isTrans, m, n, k, lda, ldc)
					}
				}
			}
		}
	}
}
// testFunction checks that the function can evaluate itself (and its gradient)
// correctly.
func testFunction(f function, ftests []funcTest, t *testing.T) {
	// Make a copy of tests because we may append to the slice.
	tests := make([]funcTest, len(ftests))
	copy(tests, ftests)

	// Get information about the function.
	fMinima, isMinimumer := f.(minimumer)
	fGradient, isGradient := f.(gradient)

	// If the function is a Minimumer, append its minima to the tests.
	if isMinimumer {
		for _, minimum := range fMinima.Minima() {
			// Allocate gradient only if the function can evaluate it.
			var grad []float64
			if isGradient {
				grad = make([]float64, len(minimum.X))
			}
			tests = append(tests, funcTest{
				X:        minimum.X,
				F:        minimum.F,
				Gradient: grad,
			})
		}
	}

	for i, test := range tests {
		F := f.Func(test.X)

		// Check that the function value is as expected.
		if math.Abs(F-test.F) > defaultTol {
			t.Errorf("Test #%d: function value given by Func is incorrect. Want: %v, Got: %v", i, test.F, F)
		}

		if test.Gradient == nil {
			continue
		}

		// Evaluate the finite difference gradient.
		fdGrad := fd.Gradient(nil, f.Func, test.X, nil)

		// Check that the finite difference and expected gradients match.
		if !floats.EqualApprox(fdGrad, test.Gradient, defaultFDGradTol) {
			dist := floats.Distance(fdGrad, test.Gradient, math.Inf(1))
			t.Errorf("Test #%d: numerical and expected gradients do not match. |fdGrad - WantGrad|_∞ = %v", i, dist)
		}

		// If the function is a Gradient, check that it computes the gradient correctly.
		if isGradient {
			grad := make([]float64, len(test.Gradient))
			fGradient.Grad(grad, test.X)
			if !floats.EqualApprox(grad, test.Gradient, defaultGradTol) {
				dist := floats.Distance(grad, test.Gradient, math.Inf(1))
				t.Errorf("Test #%d: gradient given by Grad is incorrect. |grad - WantGrad|_∞ = %v", i, dist)
			}
		}
	}
}
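// exampleFDGradient is a minimal usage sketch of the finite-difference call in
// testFunction above (assuming gonum's diff/fd package); the quadratic and the
// evaluation point are hypothetical. A nil dst allocates the result slice and
// nil settings selects the package defaults.
func exampleFDGradient() []float64 {
	f := func(x []float64) float64 { return x[0]*x[0] + 3*x[1] }
	return fd.Gradient(nil, f, []float64{1, 2}, nil) // approximately {2, 3}
}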
func DsymmTest(t *testing.T, blasser Dsymmer) {
	for i, test := range []struct {
		m     int
		n     int
		side  blas.Side
		ul    blas.Uplo
		a     [][]float64
		b     [][]float64
		c     [][]float64
		alpha float64
		beta  float64
		ans   [][]float64
	}{
		{
			side: blas.Left,
			ul:   blas.Upper,
			m:    3,
			n:    4,
			a: [][]float64{
				{2, 3, 4},
				{0, 6, 7},
				{0, 0, 10},
			},
			b: [][]float64{
				{2, 3, 4, 8},
				{5, 6, 7, 15},
				{8, 9, 10, 20},
			},
			c: [][]float64{
				{8, 12, 2, 1},
				{9, 12, 9, 9},
				{12, 1, -1, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{126, 156, 144, 285},
				{211, 252, 275, 535},
				{282, 291, 327, 689},
			},
		},
		{
			side: blas.Left,
			ul:   blas.Upper,
			m:    4,
			n:    3,
			a: [][]float64{
				{2, 3, 4, 8},
				{0, 6, 7, 9},
				{0, 0, 10, 10},
				{0, 0, 0, 11},
			},
			b: [][]float64{
				{2, 3, 4},
				{5, 6, 7},
				{8, 9, 10},
				{2, 1, 1},
			},
			c: [][]float64{
				{8, 12, 2},
				{9, 12, 9},
				{12, 1, -1},
				{1, 9, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{158, 172, 160},
				{247, 270, 293},
				{322, 311, 347},
				{329, 385, 427},
			},
		},
		{
			side: blas.Left,
			ul:   blas.Lower,
			m:    3,
			n:    4,
			a: [][]float64{
				{2, 0, 0},
				{3, 6, 0},
				{4, 7, 10},
			},
			b: [][]float64{
				{2, 3, 4, 8},
				{5, 6, 7, 15},
				{8, 9, 10, 20},
			},
			c: [][]float64{
				{8, 12, 2, 1},
				{9, 12, 9, 9},
				{12, 1, -1, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{126, 156, 144, 285},
				{211, 252, 275, 535},
				{282, 291, 327, 689},
			},
		},
		{
			side: blas.Left,
			ul:   blas.Lower,
			m:    4,
			n:    3,
			a: [][]float64{
				{2, 0, 0, 0},
				{3, 6, 0, 0},
				{4, 7, 10, 0},
				{8, 9, 10, 11},
			},
			b: [][]float64{
				{2, 3, 4},
				{5, 6, 7},
				{8, 9, 10},
				{2, 1, 1},
			},
			c: [][]float64{
				{8, 12, 2},
				{9, 12, 9},
				{12, 1, -1},
				{1, 9, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{158, 172, 160},
				{247, 270, 293},
				{322, 311, 347},
				{329, 385, 427},
			},
		},
		{
			side: blas.Right,
			ul:   blas.Upper,
			m:    3,
			n:    4,
			a: [][]float64{
				{2, 0, 0, 0},
				{3, 6, 0, 0},
				{4, 7, 10, 0},
				{3, 4, 5, 6},
			},
			b: [][]float64{
				{2, 3, 4, 9},
				{5, 6, 7, -3},
				{8, 9, 10, -2},
			},
			c: [][]float64{
				{8, 12, 2, 10},
				{9, 12, 9, 10},
				{12, 1, -1, 10},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{32, 72, 86, 138},
				{47, 108, 167, -6},
				{68, 111, 197, 6},
			},
		},
		{
			side: blas.Right,
			ul:   blas.Upper,
			m:    4,
			n:    3,
			a: [][]float64{
				{2, 0, 0},
				{3, 6, 0},
				{4, 7, 10},
			},
			b: [][]float64{
				{2, 3, 4},
				{5, 6, 7},
				{8, 9, 10},
				{2, 1, 1},
			},
			c: [][]float64{
				{8, 12, 2},
				{9, 12, 9},
				{12, 1, -1},
				{1, 9, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{32, 72, 86},
				{47, 108, 167},
				{68, 111, 197},
				{11, 39, 35},
			},
		},
		{
			side: blas.Right,
			ul:   blas.Lower,
			m:    3,
			n:    4,
			a: [][]float64{
				{2, 0, 0, 0},
				{3, 6, 0, 0},
				{4, 7, 10, 0},
				{3, 4, 5, 6},
			},
			b: [][]float64{
				{2, 3, 4, 2},
				{5, 6, 7, 1},
				{8, 9, 10, 1},
			},
			c: [][]float64{
				{8, 12, 2, 1},
				{9, 12, 9, 9},
				{12, 1, -1, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{94, 156, 164, 103},
				{145, 244, 301, 187},
				{208, 307, 397, 247},
			},
		},
		{
			side: blas.Right,
			ul:   blas.Lower,
			m:    4,
			n:    3,
			a: [][]float64{
				{2, 0, 0},
				{3, 6, 0},
				{4, 7, 10},
			},
			b: [][]float64{
				{2, 3, 4},
				{5, 6, 7},
				{8, 9, 10},
				{2, 1, 1},
			},
			c: [][]float64{
				{8, 12, 2},
				{9, 12, 9},
				{12, 1, -1},
				{1, 9, 5},
			},
			alpha: 2,
			beta:  3,
			ans: [][]float64{
				{82, 140, 144},
				{139, 236, 291},
				{202, 299, 387},
				{25, 65, 65},
			},
		},
	} {
		aFlat := flatten(test.a)
		bFlat := flatten(test.b)
		cFlat := flatten(test.c)
		ansFlat := flatten(test.ans)
		blasser.Dsymm(test.side, test.ul, test.m, test.n, test.alpha, aFlat, len(test.a[0]), bFlat, test.n, test.beta, cFlat, test.n)
		if !floats.EqualApprox(cFlat, ansFlat, 1e-14) {
			t.Errorf("Case %v: Want %v, got %v.", i, ansFlat, cFlat)
		}
	}
}
func Dspr2Test(t *testing.T, blasser Dspr2er) {
	for i, test := range []struct {
		n     int
		a     [][]float64
		ul    blas.Uplo
		x     []float64
		y     []float64
		alpha float64
		ans   [][]float64
	}{
		{
			n: 3,
			a: [][]float64{
				{7, 2, 4},
				{0, 3, 5},
				{0, 0, 6},
			},
			x:     []float64{2, 3, 4},
			y:     []float64{5, 6, 7},
			alpha: 2,
			ul:    blas.Upper,
			ans: [][]float64{
				{47, 56, 72},
				{0, 75, 95},
				{0, 0, 118},
			},
		},
		{
			n: 3,
			a: [][]float64{
				{7, 0, 0},
				{2, 3, 0},
				{4, 5, 6},
			},
			x:     []float64{2, 3, 4},
			y:     []float64{5, 6, 7},
			alpha: 2,
			ul:    blas.Lower,
			ans: [][]float64{
				{47, 0, 0},
				{56, 75, 0},
				{72, 95, 118},
			},
		},
	} {
		incTest := func(incX, incY, extra int) {
			aFlat := flattenTriangular(test.a, test.ul)
			x := makeIncremented(test.x, incX, extra)
			y := makeIncremented(test.y, incY, extra)

			blasser.Dspr2(test.ul, test.n, test.alpha, x, incX, y, incY, aFlat)
			ansFlat := flattenTriangular(test.ans, test.ul)
			if !floats.EqualApprox(aFlat, ansFlat, 1e-14) {
				t.Errorf("Case %v, incX = %v, incY = %v. Want %v, got %v.", i, incX, incY, ansFlat, aFlat)
			}
		}
		incTest(1, 1, 0)
		incTest(-2, 1, 0)
		incTest(-2, 3, 0)
		incTest(2, -3, 0)
		incTest(3, -2, 0)
		incTest(-3, -4, 0)
	}
}
func DlarfbTest(t *testing.T, impl Dlarfber) {
	rnd := rand.New(rand.NewSource(1))
	for _, store := range []lapack.StoreV{lapack.ColumnWise, lapack.RowWise} {
		for _, direct := range []lapack.Direct{lapack.Forward, lapack.Backward} {
			for _, side := range []blas.Side{blas.Left, blas.Right} {
				for _, trans := range []blas.Transpose{blas.Trans, blas.NoTrans} {
					for cas, test := range []struct {
						ma, na, cdim, lda, ldt, ldc int
					}{
						{6, 6, 6, 0, 0, 0},
						{6, 8, 10, 0, 0, 0},
						{6, 10, 8, 0, 0, 0},
						{8, 6, 10, 0, 0, 0},
						{8, 10, 6, 0, 0, 0},
						{10, 6, 8, 0, 0, 0},
						{10, 8, 6, 0, 0, 0},
						{6, 6, 6, 12, 15, 30},
						{6, 8, 10, 12, 15, 30},
						{6, 10, 8, 12, 15, 30},
						{8, 6, 10, 12, 15, 30},
						{8, 10, 6, 12, 15, 30},
						{10, 6, 8, 12, 15, 30},
						{10, 8, 6, 12, 15, 30},
						{6, 6, 6, 15, 12, 30},
						{6, 8, 10, 15, 12, 30},
						{6, 10, 8, 15, 12, 30},
						{8, 6, 10, 15, 12, 30},
						{8, 10, 6, 15, 12, 30},
						{10, 6, 8, 15, 12, 30},
						{10, 8, 6, 15, 12, 30},
					} {
						// Generate a matrix for QR.
						ma := test.ma
						na := test.na
						lda := test.lda
						if lda == 0 {
							lda = na
						}
						a := make([]float64, ma*lda)
						for i := 0; i < ma; i++ {
							for j := 0; j < lda; j++ {
								a[i*lda+j] = rnd.Float64()
							}
						}
						k := min(ma, na)

						// H is always ma x ma.
						var m, n, rowsWork int
						switch {
						default:
							panic("not implemented")
						case side == blas.Left:
							m = test.ma
							n = test.cdim
							rowsWork = n
						case side == blas.Right:
							m = test.cdim
							n = test.ma
							rowsWork = m
						}

						// Use dgeqr2 to find the v vectors.
						tau := make([]float64, na)
						work := make([]float64, na)
						impl.Dgeqr2(ma, k, a, lda, tau, work)

						// Correct the v vectors based on the direct and store.
						vMatTmp := extractVMat(ma, na, a, lda, lapack.Forward, lapack.ColumnWise)
						vMat := constructVMat(vMatTmp, store, direct)
						v := vMat.Data
						ldv := vMat.Stride

						// Use dlarft to find the t vector.
						ldt := test.ldt
						if ldt == 0 {
							ldt = k
						}
						tm := make([]float64, k*ldt)
						impl.Dlarft(direct, store, ma, k, v, ldv, tau, tm, ldt)

						// Generate c matrix.
						ldc := test.ldc
						if ldc == 0 {
							ldc = n
						}
						c := make([]float64, m*ldc)
						for i := 0; i < m; i++ {
							for j := 0; j < ldc; j++ {
								c[i*ldc+j] = rnd.Float64()
							}
						}
						cCopy := make([]float64, len(c))
						copy(cCopy, c)

						ldwork := k
						work = make([]float64, rowsWork*k)

						// Call Dlarfb with this information.
						impl.Dlarfb(side, trans, direct, store, m, n, k, v, ldv, tm, ldt, c, ldc, work, ldwork)

						h := constructH(tau, vMat, store, direct)

						cMat := blas64.General{
							Rows:   m,
							Cols:   n,
							Stride: ldc,
							Data:   make([]float64, m*ldc),
						}
						copy(cMat.Data, cCopy)
						ans := blas64.General{
							Rows:   m,
							Cols:   n,
							Stride: ldc,
							Data:   make([]float64, m*ldc),
						}
						copy(ans.Data, cMat.Data)
						switch {
						default:
							panic("not implemented")
						case side == blas.Left && trans == blas.NoTrans:
							blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, h, cMat, 0, ans)
						case side == blas.Left && trans == blas.Trans:
							blas64.Gemm(blas.Trans, blas.NoTrans, 1, h, cMat, 0, ans)
						case side == blas.Right && trans == blas.NoTrans:
							blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, cMat, h, 0, ans)
						case side == blas.Right && trans == blas.Trans:
							blas64.Gemm(blas.NoTrans, blas.Trans, 1, cMat, h, 0, ans)
						}
						if !floats.EqualApprox(ans.Data, c, 1e-14) {
							t.Errorf("Case %v mismatch. Want %v, got %v.", cas, ans.Data, c)
						}
					}
				}
			}
		}
	}
}
func Dgelq2Test(t *testing.T, impl Dgelq2er) {
	for c, test := range []struct {
		m, n, lda int
	}{
		{1, 1, 0},
		{2, 2, 0},
		{3, 2, 0},
		{2, 3, 0},
		{1, 12, 0},
		{2, 6, 0},
		{3, 4, 0},
		{4, 3, 0},
		{6, 2, 0},
		{1, 12, 0},
		{1, 1, 20},
		{2, 2, 20},
		{3, 2, 20},
		{2, 3, 20},
		{1, 12, 20},
		{2, 6, 20},
		{3, 4, 20},
		{4, 3, 20},
		{6, 2, 20},
		{1, 12, 20},
	} {
		n := test.n
		m := test.m
		lda := test.lda
		if lda == 0 {
			lda = test.n
		}
		k := min(m, n)
		tau := make([]float64, k)
		for i := range tau {
			tau[i] = rand.Float64()
		}
		work := make([]float64, m)
		for i := range work {
			work[i] = rand.Float64()
		}
		a := make([]float64, m*lda)
		for i := 0; i < m*lda; i++ {
			a[i] = rand.Float64()
		}
		aCopy := make([]float64, len(a))
		copy(aCopy, a)
		impl.Dgelq2(m, n, a, lda, tau, work)

		Q := constructQ("LQ", m, n, a, lda, tau)

		// Check that Q is orthonormal.
		for i := 0; i < Q.Rows; i++ {
			nrm := blas64.Nrm2(Q.Cols, blas64.Vector{Inc: 1, Data: Q.Data[i*Q.Stride:]})
			if math.Abs(nrm-1) > 1e-14 {
				t.Errorf("Q not normal. Norm is %v", nrm)
			}
			for j := 0; j < i; j++ {
				dot := blas64.Dot(Q.Rows,
					blas64.Vector{Inc: 1, Data: Q.Data[i*Q.Stride:]},
					blas64.Vector{Inc: 1, Data: Q.Data[j*Q.Stride:]},
				)
				if math.Abs(dot) > 1e-14 {
					t.Errorf("Q not orthogonal. Dot is %v", dot)
				}
			}
		}
		L := blas64.General{
			Rows:   m,
			Cols:   n,
			Stride: n,
			Data:   make([]float64, m*n),
		}
		for i := 0; i < m; i++ {
			for j := 0; j <= min(i, n-1); j++ {
				L.Data[i*L.Stride+j] = a[i*lda+j]
			}
		}
		ans := blas64.General{
			Rows:   m,
			Cols:   n,
			Stride: lda,
			Data:   make([]float64, m*lda),
		}
		copy(ans.Data, aCopy)
		blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, L, Q, 0, ans)
		if !floats.EqualApprox(aCopy, ans.Data, 1e-14) {
			t.Errorf("Case %v, LQ mismatch. Want %v, got %v.", c, aCopy, ans.Data)
		}
	}
}
// checkPLU checks that the PLU factorization contained in factorized matches
// the original matrix contained in original.
func checkPLU(t *testing.T, ok bool, m, n, lda int, ipiv []int, factorized, original []float64, tol float64, print bool) {
	var hasZeroDiagonal bool
	for i := 0; i < min(m, n); i++ {
		if factorized[i*lda+i] == 0 {
			hasZeroDiagonal = true
			break
		}
	}
	if hasZeroDiagonal && ok {
		t.Error("Has a zero diagonal but returned ok")
	}
	if !hasZeroDiagonal && !ok {
		t.Error("Non-zero diagonal but returned !ok")
	}

	// Check that the LU decomposition is correct.
	mn := min(m, n)
	l := make([]float64, m*mn)
	ldl := mn
	u := make([]float64, mn*n)
	ldu := n
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			v := factorized[i*lda+j]
			switch {
			case i == j:
				l[i*ldl+i] = 1
				u[i*ldu+i] = v
			case i > j:
				l[i*ldl+j] = v
			case i < j:
				u[i*ldu+j] = v
			}
		}
	}
	LU := blas64.General{
		Rows:   m,
		Cols:   n,
		Stride: n,
		Data:   make([]float64, m*n),
	}
	U := blas64.General{
		Rows:   mn,
		Cols:   n,
		Stride: ldu,
		Data:   u,
	}
	L := blas64.General{
		Rows:   m,
		Cols:   mn,
		Stride: ldl,
		Data:   l,
	}
	blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, L, U, 0, LU)

	p := make([]float64, m*m)
	ldp := m
	for i := 0; i < m; i++ {
		p[i*ldp+i] = 1
	}
	for i := len(ipiv) - 1; i >= 0; i-- {
		v := ipiv[i]
		blas64.Swap(m,
			blas64.Vector{Inc: 1, Data: p[i*ldp:]},
			blas64.Vector{Inc: 1, Data: p[v*ldp:]},
		)
	}
	P := blas64.General{
		Rows:   m,
		Cols:   m,
		Stride: m,
		Data:   p,
	}
	aComp := blas64.General{
		Rows:   m,
		Cols:   n,
		Stride: lda,
		Data:   make([]float64, m*lda),
	}
	copy(aComp.Data, factorized)
	blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, P, LU, 0, aComp)
	if !floats.EqualApprox(aComp.Data, original, tol) {
		if print {
			t.Errorf("PLU multiplication does not match original matrix.\nWant: %v\nGot: %v", original, aComp.Data)
			return
		}
		t.Error("PLU multiplication does not match original matrix.")
	}
}
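// permutationFromIpiv is an illustrative helper (hypothetical, not part of the
// original suite) making explicit how a LAPACK-style pivot vector encodes P in
// checkPLU above: row i was swapped with row ipiv[i] during factorization, so
// applying the swaps to the identity in reverse order rebuilds P such that
// P*L*U reproduces the original matrix.
func permutationFromIpiv(m int, ipiv []int) []float64 {
	p := make([]float64, m*m)
	for i := 0; i < m; i++ {
		p[i*m+i] = 1
	}
	for i := len(ipiv) - 1; i >= 0; i-- {
		for j := 0; j < m; j++ { // swap rows i and ipiv[i]
			p[i*m+j], p[ipiv[i]*m+j] = p[ipiv[i]*m+j], p[i*m+j]
		}
	}
	return p
}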
// TestPredictAndBatch tests that Predict returns the expected value, and that
// calling predict in parallel also works.
func TestPredictAndBatch(t *testing.T, p Predictor, inputs, trueOutputs common.RowMatrix, name string) {
	nSamples, inputDim := inputs.Dims()
	if inputDim != p.InputDim() {
		panic("inputDim doesn't match predictor input dim")
	}
	nOutSamples, outputDim := trueOutputs.Dims()
	if outputDim != p.OutputDim() {
		panic("outputDim doesn't match predictor outputDim")
	}
	if nOutSamples != nSamples {
		panic("inputs and outputs have different number of rows")
	}

	// First, test sequentially.
	for i := 0; i < nSamples; i++ {
		trueOut := make([]float64, outputDim)
		for j := 0; j < outputDim; j++ {
			trueOut[j] = trueOutputs.At(i, j)
		}
		// Predict with nil output.
		input := make([]float64, inputDim)
		inputCpy := make([]float64, inputDim)
		for j := 0; j < inputDim; j++ {
			input[j] = inputs.At(i, j)
			inputCpy[j] = inputs.At(i, j)
		}
		out1, err := p.Predict(input, nil)
		if err != nil {
			t.Errorf(name + ": Error predicting with nil output")
			return
		}
		if !floats.Equal(input, inputCpy) {
			t.Errorf("%v: input changed with nil input for row %v", name, i)
			break
		}
		out2 := make([]float64, outputDim)
		for j := 0; j < outputDim; j++ {
			out2[j] = rand.NormFloat64()
		}
		_, err = p.Predict(input, out2)
		if err != nil {
			t.Errorf("%v: error predicting with non-nil input for row %v", name, i)
			break
		}
		if !floats.Equal(input, inputCpy) {
			t.Errorf("%v: input changed with non-nil input for row %v", name, i)
			break
		}
		if !floats.Equal(out1, out2) {
			t.Errorf(name + ": different answers with nil and non-nil predict")
			break
		}
		if !floats.EqualApprox(out1, trueOut, 1e-14) {
			t.Errorf("%v: predicted output doesn't match for row %v. Expected %v, found %v", name, i, trueOut, out1)
			break
		}
	}

	// Check that Predict errors with badly sized arguments.
	badOutput := make([]float64, outputDim+1)
	input := make([]float64, inputDim)
	for i := 0; i < inputDim; i++ {
		input[i] = inputs.At(0, i)
	}
	output := make([]float64, outputDim)
	for i := 0; i < outputDim; i++ {
		output[i] = trueOutputs.At(0, i)
	}
	_, err := p.Predict(input, badOutput)
	if err == nil {
		t.Errorf("Predict did not err when output is too large")
	}
	if outputDim > 1 {
		badOutput := make([]float64, outputDim-1)
		_, err := p.Predict(input, badOutput)
		if err == nil {
			t.Errorf("Predict did not err when output is too small")
		}
	}
	badInput := make([]float64, inputDim+1)
	_, err = p.Predict(badInput, output)
	if err == nil {
		t.Errorf("Predict did not err when input is too large")
	}
	if inputDim > 1 {
		badInput := make([]float64, inputDim-1)
		_, err = p.Predict(badInput, output)
		if err == nil {
			t.Errorf("Predict did not err when input is too small")
		}
	}

	// Now, test batch.
	// With nil output.
	inputCpy := &mat64.Dense{}
	inputCpy.Clone(inputs)
	predOutput, err := p.PredictBatch(inputs, nil)
	if err != nil {
		t.Errorf("Error batch predicting: %v", err)
	}
	if !inputCpy.Equals(inputs) {
		t.Errorf("Inputs changed during call to PredictBatch")
	}
	predOutputRows, predOutputCols := predOutput.Dims()
	if predOutputRows != nSamples || predOutputCols != outputDim {
		t.Errorf("Dimension mismatch after PredictBatch with nil input")
	}

	// With non-nil output.
	outputs := mat64.NewDense(nSamples, outputDim, nil)
	_, err = p.PredictBatch(inputs, outputs)
	if err != nil {
		t.Errorf("Error batch predicting with non-nil output: %v", err)
	}
	pd := predOutput.(*mat64.Dense)
	if !pd.Equals(outputs) {
		t.Errorf("Different outputs from predict batch with nil and non-nil")
	}

	// Check that PredictBatch errors with badly sized arguments.
	badInputs := mat64.NewDense(nSamples, inputDim+1, nil)
	_, err = p.PredictBatch(badInputs, outputs)
	if err == nil {
		t.Error("PredictBatch did not err when input dim too large")
	}
	badInputs = mat64.NewDense(nSamples+1, inputDim, nil)
	_, err = p.PredictBatch(badInputs, outputs)
	if err == nil {
		t.Errorf("PredictBatch did not err with row mismatch")
	}
	badOutputs := mat64.NewDense(nSamples, outputDim+1, nil)
	_, err = p.PredictBatch(inputs, badOutputs)
	if err == nil {
		t.Errorf("PredictBatch did not err with output dim too large")
	}
}
// TestLinearsolveAndDeriv compares the optimal weights found from
// gradient-based optimization with those found from computing a linear solve.
func TestLinearsolveAndDeriv(t *testing.T, linear train.LinearTrainable, inputs, trueOutputs common.RowMatrix, name string) {
	// Compare with no weights.
	rows, cols := trueOutputs.Dims()
	predOutLinear := mat64.NewDense(rows, cols, nil)
	parametersLinearSolve := train.LinearSolve(linear, nil, inputs, trueOutputs, nil, nil)
	linear.SetParameters(parametersLinearSolve)
	linear.Predictor().PredictBatch(inputs, predOutLinear)

	linear.RandomizeParameters()
	parameters := linear.Parameters(nil)

	batch := train.NewBatchGradBased(linear, true, inputs, trueOutputs, nil, loss.SquaredDistance{}, regularize.None{})
	problem := batch
	settings := multivariate.DefaultSettings()
	settings.GradAbsTol = 1e-11

	result, err := multivariate.OptimizeGrad(problem, parameters, settings, nil)
	if err != nil {
		t.Errorf("Error training: %v", err)
	}

	parametersDeriv := result.Loc
	deriv := make([]float64, linear.NumParameters())
	loss1 := batch.ObjGrad(parametersDeriv, deriv)

	linear.SetParameters(parametersDeriv)
	predOutDeriv := mat64.NewDense(rows, cols, nil)
	linear.Predictor().PredictBatch(inputs, predOutDeriv)

	// Repeat the optimization from a second random starting point.
	linear.RandomizeParameters()
	init2 := linear.Parameters(nil)
	batch2 := train.NewBatchGradBased(linear, true, inputs, trueOutputs, nil, loss.SquaredDistance{}, regularize.None{})
	problem2 := batch2
	result2, err := multivariate.OptimizeGrad(problem2, init2, settings, nil)
	if err != nil {
		t.Errorf("Error training from second start: %v", err)
	}
	parametersDeriv2 := result2.Loc

	deriv2 := make([]float64, linear.NumParameters())
	loss2 := batch2.ObjGrad(parametersDeriv2, deriv2)

	derivlinear := make([]float64, linear.NumParameters())
	lossLin := batch2.ObjGrad(parametersLinearSolve, derivlinear)
	_ = loss1
	_ = loss2
	_ = lossLin

	if !floats.EqualApprox(parametersLinearSolve, parametersDeriv, 1e-8) {
		t.Errorf("Parameters don't match for gradient based and linear solve.")
	}
}
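// Why the two routes should agree: with squared-distance loss and no
// regularization the objective is convex in the weights, and its unique
// minimizer satisfies the normal equations Aᵀ·A·w = Aᵀ·y, which is the system
// the linear solve addresses directly. A gradient-based optimizer run to
// GradAbsTol should therefore converge to the same parameters, up to the 1e-8
// tolerance checked above.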
func DlarfTest(t *testing.T, impl Dlarfer) {
	for i, test := range []struct {
		m, n, ldc    int
		incv, lastv  int
		lastr, lastc int
		tau          float64
	}{
		{
			m:     3,
			n:     2,
			ldc:   2,
			incv:  4,
			lastv: 1,
			lastr: 2,
			lastc: 1,
			tau:   2,
		},
		{
			m:     2,
			n:     3,
			ldc:   3,
			incv:  4,
			lastv: 1,
			lastr: 1,
			lastc: 2,
			tau:   2,
		},
		{
			m:     2,
			n:     3,
			ldc:   3,
			incv:  4,
			lastv: 1,
			lastr: 0,
			lastc: 1,
			tau:   2,
		},
		{
			m:     2,
			n:     3,
			ldc:   3,
			incv:  4,
			lastv: 0,
			lastr: 0,
			lastc: 1,
			tau:   2,
		},
		{
			m:     10,
			n:     10,
			ldc:   10,
			incv:  4,
			lastv: 6,
			lastr: 9,
			lastc: 8,
			tau:   2,
		},
	} {
		// Construct a random matrix.
		c := make([]float64, test.ldc*test.m)
		for i := 0; i <= test.lastr; i++ {
			for j := 0; j <= test.lastc; j++ {
				c[i*test.ldc+j] = rand.Float64()
			}
		}
		cCopy := make([]float64, len(c))
		copy(cCopy, c)
		cCopy2 := make([]float64, len(c))
		copy(cCopy2, c)

		// Test with side right.
		sz := max(test.m, test.n) // so v works for both right and left side.
		v := make([]float64, test.incv*sz+1)
		// Fill with nonzero entries up until lastv.
		for i := 0; i <= test.lastv; i++ {
			v[i*test.incv] = rand.Float64()
		}

		// Construct h explicitly to compare.
		h := make([]float64, test.n*test.n)
		for i := 0; i < test.n; i++ {
			h[i*test.n+i] = 1
		}
		hMat := blas64.General{
			Rows:   test.n,
			Cols:   test.n,
			Stride: test.n,
			Data:   h,
		}
		vVec := blas64.Vector{
			Inc:  test.incv,
			Data: v,
		}
		blas64.Ger(-test.tau, vVec, vVec, hMat)

		// Apply multiplication (2nd copy is to avoid aliasing).
		cMat := blas64.General{
			Rows:   test.m,
			Cols:   test.n,
			Stride: test.ldc,
			Data:   cCopy,
		}
		cMat2 := blas64.General{
			Rows:   test.m,
			Cols:   test.n,
			Stride: test.ldc,
			Data:   cCopy2,
		}
		blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, cMat2, hMat, 0, cMat)

		// cMat now stores the true answer. Compare with the function call.
		work := make([]float64, sz)
		impl.Dlarf(blas.Right, test.m, test.n, v, test.incv, test.tau, c, test.ldc, work)
		if !floats.EqualApprox(c, cMat.Data, 1e-14) {
			t.Errorf("Dlarf mismatch right, case %v. Want %v, got %v", i, cMat.Data, c)
		}

		// Test on the left side.
		copy(c, cCopy2)
		copy(cCopy, c)
		// Construct h.
		h = make([]float64, test.m*test.m)
		for i := 0; i < test.m; i++ {
			h[i*test.m+i] = 1
		}
		hMat = blas64.General{
			Rows:   test.m,
			Cols:   test.m,
			Stride: test.m,
			Data:   h,
		}
		blas64.Ger(-test.tau, vVec, vVec, hMat)
		blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, hMat, cMat2, 0, cMat)

		impl.Dlarf(blas.Left, test.m, test.n, v, test.incv, test.tau, c, test.ldc, work)
		if !floats.EqualApprox(c, cMat.Data, 1e-14) {
			t.Errorf("Dlarf mismatch left, case %v. Want %v, got %v", i, cMat.Data, c)
		}
	}
}
func DgeqrfTest(t *testing.T, impl Dgeqrfer) {
	for c, test := range []struct {
		m, n, lda int
	}{
		{10, 5, 0},
		{5, 10, 0},
		{10, 10, 0},
		{300, 5, 0},
		{3, 500, 0},
		{200, 200, 0},
		{300, 200, 0},
		{204, 300, 0},
		{1, 3000, 0},
		{3000, 1, 0},
		{10, 5, 20},
		{5, 10, 20},
		{10, 10, 20},
		{300, 5, 400},
		{3, 500, 600},
		{200, 200, 300},
		{300, 200, 300},
		{204, 300, 400},
		{1, 3000, 4000},
		{3000, 1, 4000},
	} {
		m := test.m
		n := test.n
		lda := test.lda
		if lda == 0 {
			lda = test.n
		}
		a := make([]float64, m*lda)
		for i := 0; i < m; i++ {
			for j := 0; j < n; j++ {
				a[i*lda+j] = rand.Float64()
			}
		}
		tau := make([]float64, n)
		for i := 0; i < n; i++ {
			tau[i] = rand.Float64()
		}
		aCopy := make([]float64, len(a))
		copy(aCopy, a)
		ans := make([]float64, len(a))
		copy(ans, a)
		work := make([]float64, n)

		// Compute unblocked QR.
		impl.Dgeqr2(m, n, ans, lda, tau, work)

		// Compute blocked QR with small work.
		impl.Dgeqrf(m, n, a, lda, tau, work, len(work))
		if !floats.EqualApprox(ans, a, 1e-14) {
			t.Errorf("Case %v, mismatch small work.", c)
		}

		// Try the full length of work.
		impl.Dgeqrf(m, n, a, lda, tau, work, -1)
		lwork := int(work[0])
		work = make([]float64, lwork)
		copy(a, aCopy)
		impl.Dgeqrf(m, n, a, lda, tau, work, lwork)
		if !floats.EqualApprox(ans, a, 1e-12) {
			t.Errorf("Case %v, mismatch large work.", c)
		}

		// Try a slightly smaller version of work to test blocking.
		work = work[1:]
		lwork--
		copy(a, aCopy)
		impl.Dgeqrf(m, n, a, lda, tau, work, lwork)
		if !floats.EqualApprox(ans, a, 1e-12) {
			t.Errorf("Case %v, mismatch reduced work.", c)
		}
	}
}
func TestGradient(t *testing.T) {
	for i, test := range []struct {
		nDim   int
		tol    float64
		method Method
	}{
		{
			nDim:   2,
			tol:    2e-4,
			method: Forward,
		},
		{
			nDim:   2,
			tol:    1e-6,
			method: Central,
		},
		{
			nDim:   40,
			tol:    2e-4,
			method: Forward,
		},
		{
			nDim:   40,
			tol:    1e-6,
			method: Central,
		},
	} {
		x := make([]float64, test.nDim)
		for i := range x {
			x[i] = rand.Float64()
		}
		xcopy := make([]float64, len(x))
		copy(xcopy, x)

		r := Rosenbrock{len(x)}
		trueGradient := make([]float64, len(x))
		r.FDf(x, trueGradient)

		settings := DefaultSettings()
		settings.Method = test.method

		// Try with gradient nil.
		gradient := Gradient(nil, r.F, x, settings)
		if !floats.EqualApprox(gradient, trueGradient, test.tol) {
			t.Errorf("Case %v: gradient mismatch in serial with nil. Want: %v, Got: %v.", i, trueGradient, gradient)
		}
		if !floats.Equal(x, xcopy) {
			t.Errorf("Case %v: x modified during call to gradient in serial with nil.", i)
		}

		for i := range gradient {
			gradient[i] = rand.Float64()
		}
		Gradient(gradient, r.F, x, settings)
		if !floats.EqualApprox(gradient, trueGradient, test.tol) {
			t.Errorf("Case %v: gradient mismatch in serial. Want: %v, Got: %v.", i, trueGradient, gradient)
		}
		if !floats.Equal(x, xcopy) {
			t.Errorf("Case %v: x modified during call to gradient in serial with non-nil.", i)
		}

		// Try with known value.
		for i := range gradient {
			gradient[i] = rand.Float64()
		}
		settings.OriginKnown = true
		settings.OriginValue = r.F(x)
		Gradient(gradient, r.F, x, settings)
		if !floats.EqualApprox(gradient, trueGradient, test.tol) {
			t.Errorf("Case %v: gradient mismatch with known origin in serial. Want: %v, Got: %v.", i, trueGradient, gradient)
		}

		// Concurrently.
		for i := range gradient {
			gradient[i] = rand.Float64()
		}
		settings.Concurrent = true
		settings.OriginKnown = false
		settings.Workers = 1000
		Gradient(gradient, r.F, x, settings)
		if !floats.EqualApprox(gradient, trueGradient, test.tol) {
			t.Errorf("Case %v: gradient mismatch with unknown origin in parallel. Want: %v, Got: %v.", i, trueGradient, gradient)
		}
		if !floats.Equal(x, xcopy) {
			t.Errorf("Case %v: x modified during call to gradient in parallel", i)
		}

		// Concurrently with origin known.
		for i := range gradient {
			gradient[i] = rand.Float64()
		}
		settings.OriginKnown = true
		Gradient(gradient, r.F, x, settings)
		if !floats.EqualApprox(gradient, trueGradient, test.tol) {
			t.Errorf("Case %v: gradient mismatch with known origin in parallel. Want: %v, Got: %v.", i, trueGradient, gradient)
		}

		// With default settings.
		for i := range gradient {
			gradient[i] = rand.Float64()
		}
		settings = nil
		Gradient(gradient, r.F, x, settings)
		if !floats.EqualApprox(gradient, trueGradient, test.tol) {
			t.Errorf("Case %v: gradient mismatch with default settings. Want: %v, Got: %v.", i, trueGradient, gradient)
		}
	}
}