func TestNormProbs(t *testing.T) {
	dist1, ok := NewNormal([]float64{0, 0}, mat64.NewSymDense(2, []float64{1, 0, 0, 1}), nil)
	if !ok {
		t.Errorf("bad test")
	}
	dist2, ok := NewNormal([]float64{6, 7}, mat64.NewSymDense(2, []float64{8, 2, 0, 4}), nil)
	if !ok {
		t.Errorf("bad test")
	}
	testProbability(t, []probCase{
		{
			dist:    dist1,
			loc:     []float64{0, 0},
			logProb: -1.837877066409345,
		},
		{
			dist:    dist2,
			loc:     []float64{6, 7},
			logProb: -3.503979321496947,
		},
		{
			dist:    dist2,
			loc:     []float64{1, 2},
			logProb: -7.075407892925519,
		},
	})
}
func TestMarginal(t *testing.T) {
	for _, test := range []struct {
		mu       []float64
		sigma    *mat64.SymDense
		marginal []int
	}{
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
			marginal: []int{0},
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
			marginal: []int{0, 2},
		},
		{
			mu:       []float64{2, 3, 4, 5},
			sigma:    mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
			marginal: []int{0, 3},
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, covariance matrix not positive definite")
		}
		marginal, ok := normal.MarginalNormal(test.marginal, nil)
		if !ok {
			t.Fatalf("Bad test, marginal matrix not positive definite")
		}
		dim := normal.Dim()
		nSamples := 1000000
		samps := mat64.NewDense(nSamples, dim, nil)
		for i := 0; i < nSamples; i++ {
			normal.Rand(samps.RawRowView(i))
		}
		estMean := make([]float64, dim)
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
		}
		for i, v := range test.marginal {
			if math.Abs(marginal.mu[i]-estMean[v]) > 1e-2 {
				t.Errorf("Mean mismatch: want: %v, got %v", estMean[v], marginal.mu[i])
			}
		}
		marginalCov := marginal.CovarianceMatrix(nil)
		estCov := stat.CovarianceMatrix(nil, samps, nil)
		for i, v1 := range test.marginal {
			for j, v2 := range test.marginal {
				c := marginalCov.At(i, j)
				ec := estCov.At(v1, v2)
				if math.Abs(c-ec) > 5e-2 {
					t.Errorf("Cov mismatch element i = %d, j = %d: want: %v, got %v", i, j, c, ec)
				}
			}
		}
	}
}
func TestCovarianceMatrix(t *testing.T) {
	for _, test := range []struct {
		mu    []float64
		sigma *mat64.SymDense
	}{
		{
			mu:    []float64{2, 3, 4},
			sigma: mat64.NewSymDense(3, []float64{1, 0.5, 3, 0.5, 8, -1, 3, -1, 15}),
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, covariance matrix not positive definite")
		}
		cov := normal.CovarianceMatrix(nil)
		if !mat64.EqualApprox(cov, test.sigma, 1e-14) {
			t.Errorf("Covariance mismatch with nil input")
		}
		dim := test.sigma.Symmetric()
		cov = mat64.NewSymDense(dim, nil)
		normal.CovarianceMatrix(cov)
		if !mat64.EqualApprox(cov, test.sigma, 1e-14) {
			t.Errorf("Covariance mismatch with supplied input")
		}
	}
}
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	b.first = true

	x := mat64.NewVector(dim, loc.X)
	grad := mat64.NewVector(dim, loc.Gradient)
	b.x.CloneVec(x)
	b.grad.CloneVec(grad)

	b.y.Reset()
	b.s.Reset()
	b.tmp.Reset()

	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the inverse Hessian are initialized in the first call to
	// NextDirection.

	// Initial direction is just negative of the gradient because the Hessian
	// is an identity matrix.
	d := mat64.NewVector(dim, dir)
	d.ScaleVec(-1, grad)
	return 1 / mat64.Norm(d, 2)
}
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim

	b.x = resize(b.x, dim)
	copy(b.x, loc.X)
	b.grad = resize(b.grad, dim)
	copy(b.grad, loc.Gradient)

	b.y = resize(b.y, dim)
	b.s = resize(b.s, dim)
	b.tmp = resize(b.tmp, dim)
	b.yVec = mat64.NewVector(dim, b.y)
	b.sVec = mat64.NewVector(dim, b.s)
	b.tmpVec = mat64.NewVector(dim, b.tmp)

	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the Hessian are initialized in the first call to
	// NextDirection.

	// The initial direction is just the negative of the gradient because the
	// Hessian is initialized to the identity matrix.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	b.first = true

	return 1 / floats.Norm(dir, 2)
}
func newMargLikeMemory(hyper, outputs int) *margLikeMemory {
	m := &margLikeMemory{
		lastX:    make([]float64, hyper),
		k:        mat64.NewSymDense(outputs, nil),
		chol:     &mat64.Cholesky{},
		alpha:    mat64.NewVector(outputs, nil),
		tmp:      mat64.NewVector(1, nil),
		dKdTheta: make([]*mat64.SymDense, hyper),
		kInvDK:   mat64.NewDense(outputs, outputs, nil),
	}
	for i := 0; i < hyper; i++ {
		m.dKdTheta[i] = mat64.NewSymDense(outputs, nil)
	}
	return m
}
// Cov returns the covariance between a set of data points based on the current
// GP fit.
func (g *GP) Cov(m *mat64.SymDense, x mat64.Matrix) *mat64.SymDense {
	if m != nil {
		// TODO(btracey): Make this k**
		panic("reusing m not coded")
	}
	// The joint covariance matrix is
	//  k(x_*, x_*) - k(x_*, x) k(x, x)^-1 k(x, x_*)
	nSamp, nDim := x.Dims()
	if nDim != g.inputDim {
		panic(badInputLength)
	}

	// Compute k(x_*, x) k(x, x)^-1 k(x, x_*).
	kstar := g.formKStar(x)
	var tmp mat64.Dense
	tmp.SolveCholesky(g.cholK, kstar)
	var tmp2 mat64.Dense
	tmp2.Mul(kstar.T(), &tmp)

	// Compute k(x_*, x_*) and perform the subtraction.
	kstarstar := mat64.NewSymDense(nSamp, nil)
	for i := 0; i < nSamp; i++ {
		for j := i; j < nSamp; j++ {
			v := g.kernel.Distance(mat64.Row(nil, i, x), mat64.Row(nil, j, x))
			if i == j {
				v += g.noise
			}
			kstarstar.SetSym(i, j, v-tmp2.At(i, j))
		}
	}
	return kstarstar
}
func TestMetropolisHastings(t *testing.T) {
	// Test by finding the expected value of a normal distribution.
	dim := 3
	target, ok := randomNormal(dim)
	if !ok {
		t.Fatal("bad test, sigma not pos def")
	}
	sigmaImp := mat64.NewSymDense(dim, nil)
	for i := 0; i < dim; i++ {
		sigmaImp.SetSym(i, i, 0.25)
	}
	proposal, ok := NewProposalNormal(sigmaImp, nil)
	if !ok {
		t.Fatal("bad test, sigma not pos def")
	}

	nSamples := 1000000
	burnin := 5000
	batch := mat64.NewDense(nSamples, dim, nil)
	initial := make([]float64, dim)
	MetropolisHastings(batch, initial, target, proposal, nil)
	batch = batch.View(burnin, 0, nSamples-burnin, dim).(*mat64.Dense)
	compareNormal(t, target, batch, nil)
}
func TestImportance(t *testing.T) {
	// Test by finding the expected value of a multi-variate normal.
	dim := 3
	target, ok := randomNormal(dim)
	if !ok {
		t.Fatal("bad test, sigma not pos def")
	}

	muImp := make([]float64, dim)
	sigmaImp := mat64.NewSymDense(dim, nil)
	for i := 0; i < dim; i++ {
		sigmaImp.SetSym(i, i, 3)
	}
	proposal, ok := distmv.NewNormal(muImp, sigmaImp, nil)
	if !ok {
		t.Fatal("bad test, sigma not pos def")
	}

	nSamples := 100000
	batch := mat64.NewDense(nSamples, dim, nil)
	weights := make([]float64, nSamples)
	Importance(batch, weights, target, proposal)
	compareNormal(t, target, batch, weights)
}
// NewNormal creates a new Normal with the given mean and covariance matrix.
// NewNormal panics if len(mu) == 0, or if len(mu) != sigma.Symmetric(). If
// the covariance matrix is not positive-definite, the returned boolean is
// false.
func NewNormal(mu []float64, sigma mat64.Symmetric, src *rand.Rand) (*Normal, bool) {
	if len(mu) == 0 {
		panic(badZeroDimension)
	}
	dim := sigma.Symmetric()
	if dim != len(mu) {
		panic(badSizeMismatch)
	}
	n := &Normal{
		src:   src,
		dim:   dim,
		mu:    make([]float64, dim),
		sigma: mat64.NewSymDense(dim, nil),
		chol:  mat64.NewTriDense(dim, true, nil),
	}
	copy(n.mu, mu)
	n.sigma.CopySym(sigma)
	// TODO(btracey): Change this to the input sigma, in case it is diagonal or
	// banded.
	ok := n.chol.Cholesky(n.sigma, true)
	if !ok {
		return nil, false
	}
	for i := 0; i < dim; i++ {
		n.logSqrtDet += math.Log(n.chol.At(i, i))
	}
	return n, true
}
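// A minimal usage sketch for NewNormal, in the style of the tests above. It
// relies only on methods exercised elsewhere in this file: Rand, which
// allocates when passed nil, and LogProb.
func exampleNewNormalUsage() {
	mu := []float64{1, 2}
	sigma := mat64.NewSymDense(2, []float64{2, 0.5, 0.5, 1})
	norm, ok := NewNormal(mu, sigma, nil)
	if !ok {
		panic("covariance matrix is not positive definite")
	}
	x := norm.Rand(nil) // Draw one sample; nil allocates the output slice.
	fmt.Println(x, norm.LogProb(x))
}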
func benchmarkCovarianceMatrixInPlace(b *testing.B, m mat64.Matrix) {
	_, c := m.Dims()
	res := mat64.NewSymDense(c, nil)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		CovarianceMatrix(res, m, nil)
	}
}
// getStartingLocation allocates and initializes the starting location for the
// minimization.
func getStartingLocation(p *Problem, method Method, initX []float64, stats *Stats, settings *Settings) (*Location, error) {
	dim := len(initX)
	loc := &Location{
		X: make([]float64, dim),
	}
	copy(loc.X, initX)
	if method.Needs().Gradient {
		loc.Gradient = make([]float64, dim)
	}
	if method.Needs().Hessian {
		loc.Hessian = mat64.NewSymDense(dim, nil)
	}

	if settings.UseInitialData {
		loc.F = settings.InitialValue
		if loc.Gradient != nil {
			initG := settings.InitialGradient
			if initG == nil {
				panic("optimize: initial gradient is nil")
			}
			if len(initG) != dim {
				panic("optimize: initial gradient size mismatch")
			}
			copy(loc.Gradient, initG)
		}
		if loc.Hessian != nil {
			initH := settings.InitialHessian
			if initH == nil {
				panic("optimize: initial Hessian is nil")
			}
			if initH.Symmetric() != dim {
				panic("optimize: initial Hessian size mismatch")
			}
			loc.Hessian.CopySym(initH)
		}
	} else {
		eval := FuncEvaluation
		if loc.Gradient != nil {
			eval |= GradEvaluation
		}
		if loc.Hessian != nil {
			eval |= HessEvaluation
		}
		x := make([]float64, len(loc.X))
		evaluate(p, loc, eval, stats, x)
	}

	if math.IsInf(loc.F, 1) || math.IsNaN(loc.F) {
		return loc, ErrFunc(loc.F)
	}
	for i, v := range loc.Gradient {
		if math.IsInf(v, 0) || math.IsNaN(v) {
			return loc, ErrGrad{Grad: v, Index: i}
		}
	}
	return loc, nil
}
// NewUndirectedDenseGraph creates an undirected dense graph with n nodes.
// If passable is true, all pairs of nodes will be connected by an edge with
// unit cost; otherwise every node will start unconnected, with the cost
// specified by absent.
func NewUndirectedDenseGraph(n int, passable bool, absent float64) *UndirectedDenseGraph {
	mat := make([]float64, n*n)
	v := 1.
	if !passable {
		v = absent
	}
	for i := range mat {
		mat[i] = v
	}
	return &UndirectedDenseGraph{mat: mat64.NewSymDense(n, mat), absent: absent}
}
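// A minimal construction sketch for NewUndirectedDenseGraph. Only the
// constructor is exercised; the comments restate the semantics documented
// above rather than anything beyond it.
func exampleNewUndirectedDenseGraph() {
	inf := math.Inf(1)
	fullyConnected := NewUndirectedDenseGraph(5, true, inf) // every edge exists with unit cost
	unconnected := NewUndirectedDenseGraph(5, false, inf)   // no edges; absent edges report +Inf
	_, _ = fullyConnected, unconnected
}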
// CovarianceMatrix returns the covariance matrix of the distribution. Upon
// return, the value at element {i, j} of the covariance matrix is equal to
// the covariance of the i^th and j^th variables.
//  covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
// If the input matrix is nil a new matrix is allocated, otherwise the result
// is stored in-place into the input.
func (n *Normal) CovarianceMatrix(s *mat64.SymDense) *mat64.SymDense {
	if s == nil {
		s = mat64.NewSymDense(n.Dim(), nil)
	}
	sn := s.Symmetric()
	if sn != n.Dim() {
		panic("normal: input matrix size mismatch")
	}
	n.setSigma()
	s.CopySym(n.sigma)
	return s
}
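// A short sketch of the two calling conventions for CovarianceMatrix: a nil
// argument allocates a fresh matrix, while a correctly sized *mat64.SymDense
// is filled in place and can be reused across calls to avoid allocation.
func exampleCovarianceMatrixReuse(n *Normal) {
	fresh := n.CovarianceMatrix(nil) // Allocates a new matrix.
	reuse := mat64.NewSymDense(n.Dim(), nil)
	n.CovarianceMatrix(reuse) // Fills the preallocated matrix in place.
	_ = fresh
}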
func ExampleCholeskySymRankOne() {
	a := mat64.NewSymDense(4, []float64{
		1, 1, 1, 1,
		0, 2, 3, 4,
		0, 0, 6, 10,
		0, 0, 0, 20,
	})
	fmt.Printf("A = %0.4v\n", mat64.Formatted(a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(a); !ok {
		fmt.Println("matrix a is not positive definite.")
	}

	x := mat64.NewVector(4, []float64{0, 0, 0, 1})
	fmt.Printf("\nx = %0.4v\n", mat64.Formatted(x, mat64.Prefix("    ")))

	// Rank-1 update the factorization.
	chol.SymRankOne(&chol, 1, x)
	// Rank-1 update the matrix a.
	a.SymRankOne(a, 1, x)

	var au mat64.SymDense
	au.FromCholesky(&chol)

	// Print the matrix that was updated directly.
	fmt.Printf("\nA' = %0.4v\n", mat64.Formatted(a, mat64.Prefix("     ")))
	// Print the matrix recovered from the factorization.
	fmt.Printf("\nU'^T * U' = %0.4v\n", mat64.Formatted(&au, mat64.Prefix("            ")))

	// Output:
	// A = ⎡ 1   1   1   1⎤
	//     ⎢ 1   2   3   4⎥
	//     ⎢ 1   3   6  10⎥
	//     ⎣ 1   4  10  20⎦
	//
	// x = ⎡0⎤
	//     ⎢0⎥
	//     ⎢0⎥
	//     ⎣1⎦
	//
	// A' = ⎡ 1   1   1   1⎤
	//      ⎢ 1   2   3   4⎥
	//      ⎢ 1   3   6  10⎥
	//      ⎣ 1   4  10  21⎦
	//
	// U'^T * U' = ⎡ 1   1   1   1⎤
	//             ⎢ 1   2   3   4⎥
	//             ⎢ 1   3   6  10⎥
	//             ⎣ 1   4  10  21⎦
}
func TestNormRand(t *testing.T) {
	for _, test := range []struct {
		mean []float64
		cov  []float64
	}{
		{
			mean: []float64{0, 0},
			cov: []float64{
				1, 0,
				0, 1,
			},
		},
		{
			mean: []float64{0, 0},
			cov: []float64{
				1, 0.9,
				0.9, 1,
			},
		},
		{
			mean: []float64{6, 7},
			cov: []float64{
				5, 0.9,
				0.9, 2,
			},
		},
	} {
		dim := len(test.mean)
		cov := mat64.NewSymDense(dim, test.cov)
		n, ok := NewNormal(test.mean, cov, nil)
		if !ok {
			t.Errorf("bad covariance matrix")
		}

		nSamples := 1000000
		samps := mat64.NewDense(nSamples, dim, nil)
		for i := 0; i < nSamples; i++ {
			n.Rand(samps.RawRowView(i))
		}
		estMean := make([]float64, dim)
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
		}
		if !floats.EqualApprox(estMean, test.mean, 1e-2) {
			t.Errorf("Mean mismatch: want: %v, got %v", test.mean, estMean)
		}
		estCov := stat.CovarianceMatrix(nil, samps, nil)
		if !mat64.EqualApprox(estCov, cov, 1e-2) {
			t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov)
		}
	}
}
func BenchmarkCovToCorr(b *testing.B) {
	// Generate a 10x10 covariance matrix.
	m := randMat(small, small)
	c := CovarianceMatrix(nil, m, nil)
	cc := mat64.NewSymDense(c.Symmetric(), nil)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		cc.CopySym(c)
		b.StartTimer()
		covToCorr(cc)
	}
}
// CovarianceMatrix calculates a covariance matrix (also known as a
// variance-covariance matrix) from a matrix of data, using a two-pass
// algorithm.
//
// The weights must have length equal to the number of rows in the input data
// matrix x, and must not be negative. If cov is nil, then a new matrix with
// appropriate size will be constructed. If cov is not nil, it should have the
// same number of columns as the input data matrix x, and it will be used as
// the destination for the covariance data.
func CovarianceMatrix(cov *mat64.SymDense, x mat64.Matrix, weights []float64) *mat64.SymDense {
	// This is the matrix version of the two-pass algorithm. It doesn't use the
	// additional floating point error correction that the Covariance function
	// uses to reduce the impact of rounding during centering.

	r, c := x.Dims()

	if cov == nil {
		cov = mat64.NewSymDense(c, nil)
	} else if n := cov.Symmetric(); n != c {
		panic(matrix.ErrShape)
	}

	var xt mat64.Dense
	xt.Clone(x.T())
	// Subtract the mean of each of the columns.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		// This will panic with ErrShape if len(weights) != len(v), so
		// we don't have to check the size later.
		mean := Mean(v, weights)
		floats.AddConst(-mean, v)
	}

	if weights == nil {
		// Calculate the normalization factor scaled by the sample size.
		cov.SymOuterK(1/(float64(r)-1), &xt)
		return cov
	}

	// Multiply by the sqrt of the weights, so that multiplication is symmetric.
	sqrtwts := make([]float64, r)
	for i, w := range weights {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		sqrtwts[i] = math.Sqrt(w)
	}
	// Weight the rows.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		floats.Mul(v, sqrtwts)
	}

	// Calculate the normalization factor scaled by the weighted sample size.
	cov.SymOuterK(1/(floats.Sum(weights)-1), &xt)
	return cov
}
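// A usage sketch for CovarianceMatrix, including the weighted form. Each row
// of x is an observation and each column a variable; weights, when non-nil,
// carry one entry per row. The data values are illustrative only.
func exampleCovarianceMatrixUsage() {
	x := mat64.NewDense(4, 2, []float64{
		1, 10,
		2, 12,
		3, 11,
		4, 15,
	})
	unweighted := CovarianceMatrix(nil, x, nil)
	weighted := CovarianceMatrix(nil, x, []float64{1, 1, 2, 1}) // Third row counts double.
	fmt.Printf("%0.4v\n\n%0.4v\n", mat64.Formatted(unweighted), mat64.Formatted(weighted))
}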
func copyLocation(dst, src *Location) {
	dst.X = resize(dst.X, len(src.X))
	copy(dst.X, src.X)

	dst.F = src.F

	dst.Gradient = resize(dst.Gradient, len(src.Gradient))
	copy(dst.Gradient, src.Gradient)

	if src.Hessian != nil {
		if dst.Hessian == nil || dst.Hessian.Symmetric() != len(src.X) {
			dst.Hessian = mat64.NewSymDense(len(src.X), nil)
		}
		dst.Hessian.CopySym(src.Hessian)
	}
}
// NewUndirectedMatrix creates an undirected dense graph with n nodes.
// All edges are initialized with the weight given by init. The self parameter
// specifies the cost of self connection, and absent specifies the weight
// returned for absent edges.
func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix {
	mat := make([]float64, n*n)
	if init != 0 {
		for i := range mat {
			mat[i] = init
		}
	}
	for i := 0; i < len(mat); i += n + 1 {
		mat[i] = self
	}
	return &UndirectedMatrix{
		mat:    mat64.NewSymDense(n, mat),
		self:   self,
		absent: absent,
	}
}
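// A constructor sketch for NewUndirectedMatrix mirroring the semantics
// documented above; only the constructor itself is called, and the parameter
// comments restate the documented meanings.
func exampleNewUndirectedMatrix() {
	g := NewUndirectedMatrix(4,
		1,           // init: starting weight for every edge
		0,           // self: cost of a node's connection to itself
		math.Inf(1), // absent: weight reported for missing edges
	)
	_ = g
}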
func BenchmarkCorrToCov(b *testing.B) {
	// Generate a 10x10 correlation matrix.
	m := randMat(small, small)
	c := CorrelationMatrix(nil, m, nil)
	cc := mat64.NewSymDense(c.Symmetric(), nil)
	sigma := make([]float64, small)
	for i := range sigma {
		sigma[i] = 2
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		cc.CopySym(c)
		b.StartTimer()
		corrToCov(cc, sigma)
	}
}
func ExampleSymDense_SubsetSym() {
	n := 5
	s := mat64.NewSymDense(n, nil)
	count := 1.0
	for i := 0; i < n; i++ {
		for j := i; j < n; j++ {
			s.SetSym(i, j, count)
			count++
		}
	}
	fmt.Println("Original matrix:")
	fmt.Printf("%0.4v\n\n", mat64.Formatted(s))

	// Take the subset {0, 2, 4}.
	var sub mat64.SymDense
	sub.SubsetSym(s, []int{0, 2, 4})
	fmt.Println("Subset {0, 2, 4}")
	fmt.Printf("%0.4v\n\n", mat64.Formatted(&sub))

	// Take the subset {0, 0, 4}.
	sub.SubsetSym(s, []int{0, 0, 4})
	fmt.Println("Subset {0, 0, 4}")
	fmt.Printf("%0.4v\n\n", mat64.Formatted(&sub))

	// Output:
	// Original matrix:
	// ⎡ 1   2   3   4   5⎤
	// ⎢ 2   6   7   8   9⎥
	// ⎢ 3   7  10  11  12⎥
	// ⎢ 4   8  11  13  14⎥
	// ⎣ 5   9  12  14  15⎦
	//
	// Subset {0, 2, 4}
	// ⎡ 1   3   5⎤
	// ⎢ 3  10  12⎥
	// ⎣ 5  12  15⎦
	//
	// Subset {0, 0, 4}
	// ⎡ 1   1   5⎤
	// ⎢ 1   1   5⎥
	// ⎣ 5   5  15⎦
}
func TestRejection(t *testing.T) {
	// Test by finding the expected value of a uniform.
	dim := 3
	bounds := make([]distmv.Bound, dim)
	for i := 0; i < dim; i++ {
		min := rand.NormFloat64()
		max := rand.NormFloat64()
		if min > max {
			min, max = max, min
		}
		bounds[i].Min = min
		bounds[i].Max = max
	}
	target := distmv.NewUniform(bounds, nil)
	mu := target.Mean(nil)

	muImp := make([]float64, dim)
	sigmaImp := mat64.NewSymDense(dim, nil)
	for i := 0; i < dim; i++ {
		sigmaImp.SetSym(i, i, 6)
	}
	proposal, ok := distmv.NewNormal(muImp, sigmaImp, nil)
	if !ok {
		t.Fatal("bad test, sigma not pos def")
	}

	nSamples := 1000
	batch := mat64.NewDense(nSamples, dim, nil)
	_, ok = Rejection(batch, target, proposal, 1000, nil)
	if !ok {
		t.Error("Bad test, nan samples")
	}

	// Rejection sampling produces unweighted samples, so the plain mean is
	// compared against the true mean of the target.
	for i := 0; i < dim; i++ {
		col := mat64.Col(nil, i, batch)
		ev := stat.Mean(col, nil)
		if math.Abs(ev-mu[i]) > 1e-2 {
			t.Errorf("Mean mismatch: Want %v, got %v", mu[i], ev)
		}
	}
}
// New creates a new GP with the given input dimension, the given kernel
// function, and the output noise parameter. The output dimension must be one.
func New(inputDim int, kernel Kernel, noise float64) *GP {
	if inputDim <= 0 {
		panic("gp: non-positive inputDim")
	}
	if kernel == nil {
		panic("gp: nil kernel")
	}
	if !(noise >= 0) { // The negated comparison also rejects NaN.
		panic("gp: negative noise")
	}
	return &GP{
		kernel:   kernel,
		noise:    noise,
		inputDim: inputDim,
		mean:     0,
		std:      1,
		inputs:   &mat64.Dense{},
		outputs:  make([]float64, 0),
		k:        mat64.NewSymDense(0, nil),
		sigInvY:  &mat64.Vector{},
		cholK:    &mat64.Cholesky{},
	}
}
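// A construction sketch for New. The concrete kernel type here is
// hypothetical: the only assumption, taken from the calls elsewhere in this
// package, is that a Kernel provides Distance(x, y []float64) float64. The
// real Kernel interface may require additional methods (for example,
// hyperparameter derivatives for the marginal-likelihood code above).
type isoSqExp struct{ scale float64 }

// Distance is a squared-exponential similarity with an isotropic length scale.
func (k isoSqExp) Distance(x, y []float64) float64 {
	var d2 float64
	for i := range x {
		diff := x[i] - y[i]
		d2 += diff * diff
	}
	return math.Exp(-d2 / (2 * k.scale * k.scale))
}

func exampleNewGP() *GP {
	// Two input dimensions, a unit length scale, and a small observation
	// noise to keep the kernel matrix well conditioned.
	return New(2, isoSqExp{scale: 1}, 1e-4)
}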
func TestConditionNormal(t *testing.T) {
	// Uncorrelated values shouldn't influence the updated values.
	for _, test := range []struct {
		mu       []float64
		sigma    *mat64.SymDense
		observed []int
		values   []float64

		newMu    []float64
		newSigma *mat64.SymDense
	}{
		{
			mu:       []float64{2, 3},
			sigma:    mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
			observed: []int{0},
			values:   []float64{10},

			newMu:    []float64{3},
			newSigma: mat64.NewSymDense(1, []float64{5}),
		},
		{
			mu:       []float64{2, 3},
			sigma:    mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
			observed: []int{1},
			values:   []float64{10},

			newMu:    []float64{2},
			newSigma: mat64.NewSymDense(1, []float64{2}),
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
			observed: []int{1},
			values:   []float64{10},

			newMu:    []float64{2, 4},
			newSigma: mat64.NewSymDense(2, []float64{2, 0, 0, 10}),
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
			observed: []int{0, 1},
			values:   []float64{10, 15},

			newMu:    []float64{4},
			newSigma: mat64.NewSymDense(1, []float64{10}),
		},
		{
			mu:       []float64{2, 3, 4, 5},
			sigma:    mat64.NewSymDense(4, []float64{2, 0.5, 0, 0, 0.5, 5, 0, 0, 0, 0, 10, 2, 0, 0, 2, 3}),
			observed: []int{0, 1},
			values:   []float64{10, 15},

			newMu:    []float64{4, 5},
			newSigma: mat64.NewSymDense(2, []float64{10, 2, 2, 3}),
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, original sigma not positive definite")
		}
		newNormal, ok := normal.ConditionNormal(test.observed, test.values, nil)
		if !ok {
			t.Fatalf("Bad test, update failure")
		}

		if !floats.EqualApprox(test.newMu, newNormal.mu, 1e-12) {
			t.Errorf("Updated mean mismatch. Want %v, got %v.", test.newMu, newNormal.mu)
		}

		var sigma mat64.SymDense
		sigma.FromCholesky(&newNormal.chol)
		if !mat64.EqualApprox(test.newSigma, &sigma, 1e-12) {
			t.Errorf("Updated sigma mismatch.\nWant:\n% v\nGot:\n% v\n", test.newSigma, sigma)
		}
	}

	// Test the bivariate case where the update rule is analytic.
	for _, test := range []struct {
		mu    []float64
		std   []float64
		rho   float64
		value float64
	}{
		{
			mu:    []float64{2, 3},
			std:   []float64{3, 5},
			rho:   0.9,
			value: 1000,
		},
		{
			mu:    []float64{2, 3},
			std:   []float64{3, 5},
			rho:   -0.9,
			value: 1000,
		},
	} {
		std := test.std
		rho := test.rho
		sigma := mat64.NewSymDense(2, []float64{std[0] * std[0], std[0] * std[1] * rho, std[0] * std[1] * rho, std[1] * std[1]})
		normal, ok := NewNormal(test.mu, sigma, nil)
		if !ok {
			t.Fatalf("Bad test, original sigma not positive definite")
		}
		newNormal, ok := normal.ConditionNormal([]int{1}, []float64{test.value}, nil)
		if !ok {
			t.Fatalf("Bad test, update failed")
		}
		var newSigma mat64.SymDense
		newSigma.FromCholesky(&newNormal.chol)
		trueMean := test.mu[0] + rho*(std[0]/std[1])*(test.value-test.mu[1])
		if math.Abs(trueMean-newNormal.mu[0]) > 1e-14 {
			t.Errorf("Mean mismatch. Want %v, got %v", trueMean, newNormal.mu[0])
		}
		trueVar := (1 - rho*rho) * std[0] * std[0]
		if math.Abs(trueVar-newSigma.At(0, 0)) > 1e-14 {
			t.Errorf("Variance mismatch. Want %v, got %v", trueVar, newSigma.At(0, 0))
		}
	}

	// Test via sampling.
	for _, test := range []struct {
		mu         []float64
		sigma      *mat64.SymDense
		observed   []int
		unobserved []int
		value      []float64
	}{
		// The indices in unobserved must be in ascending order for this test.
		{
			mu:         []float64{2, 3, 4},
			sigma:      mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
			observed:   []int{0},
			unobserved: []int{1, 2},
			value:      []float64{1.9},
		},
		{
			mu:         []float64{2, 3, 4, 5},
			sigma:      mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
			observed:   []int{0, 3},
			unobserved: []int{1, 2},
			value:      []float64{1.9, 2.9},
		},
	} {
		totalSamp := 4000000
		var nSamp int
		samples := mat64.NewDense(totalSamp, len(test.mu), nil)
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Errorf("bad test")
		}
		sample := make([]float64, len(test.mu))
		for i := 0; i < totalSamp; i++ {
			normal.Rand(sample)
			isClose := true
			for i, v := range test.observed {
				if math.Abs(sample[v]-test.value[i]) > 1e-1 {
					isClose = false
					break
				}
			}
			if isClose {
				samples.SetRow(nSamp, sample)
				nSamp++
			}
		}

		if nSamp < 100 {
			t.Errorf("bad test, not enough samples")
			continue
		}
		samples = samples.View(0, 0, nSamp, len(test.mu)).(*mat64.Dense)

		// Compute the mean and covariance matrix of the conditioned samples.
		estMean := make([]float64, len(test.mu))
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samples), nil)
		}
		estCov := stat.CovarianceMatrix(nil, samples, nil)

		// Compute the update rule and compare.
		newNormal, ok := normal.ConditionNormal(test.observed, test.value, nil)
		if !ok {
			t.Fatalf("Bad test, update failure")
		}
		var subEstMean []float64
		for _, v := range test.unobserved {
			subEstMean = append(subEstMean, estMean[v])
		}
		subEstCov := mat64.NewSymDense(len(test.unobserved), nil)
		for i := 0; i < len(test.unobserved); i++ {
			for j := i; j < len(test.unobserved); j++ {
				subEstCov.SetSym(i, j, estCov.At(test.unobserved[i], test.unobserved[j]))
			}
		}

		for i, v := range subEstMean {
			if math.Abs(newNormal.mu[i]-v) > 5e-2 {
				t.Errorf("Mean mismatch. Want %v, got %v.", newNormal.mu[i], v)
			}
		}
		var sigma mat64.SymDense
		sigma.FromCholesky(&newNormal.chol)
		if !mat64.EqualApprox(&sigma, subEstCov, 1e-1) {
			t.Errorf("Covariance mismatch. Want:\n%0.8v\nGot:\n%0.8v\n", subEstCov, sigma)
		}
	}
}
func resizeSymDense(m *mat64.SymDense, dim int) *mat64.SymDense {
	if m == nil || cap(m.RawSymmetric().Data) < dim*dim {
		return mat64.NewSymDense(dim, nil)
	}
	return mat64.NewSymDense(dim, m.RawSymmetric().Data[:dim*dim])
}
func TestMetropolisHastingser(t *testing.T) {
	for seed, test := range []struct {
		dim, burnin, rate, samples int
	}{
		{3, 10, 1, 1},
		{3, 10, 2, 1},
		{3, 10, 1, 2},
		{3, 10, 3, 2},
		{3, 10, 7, 4},
		{3, 10, 7, 4},
		{3, 11, 51, 103},
		{3, 11, 103, 51},
		{3, 51, 11, 103},
		{3, 51, 103, 11},
		{3, 103, 11, 51},
		{3, 103, 51, 11},
	} {
		dim := test.dim

		initial := make([]float64, dim)
		target, ok := randomNormal(dim)
		if !ok {
			t.Fatal("bad test, sigma not pos def")
		}

		sigmaImp := mat64.NewSymDense(dim, nil)
		for i := 0; i < dim; i++ {
			sigmaImp.SetSym(i, i, 0.25)
		}
		proposal, ok := NewProposalNormal(sigmaImp, nil)
		if !ok {
			t.Fatal("bad test, sigma not pos def")
		}

		// Test the MetropolisHastingser by generating all the samples, then
		// generating the same samples with a burnin and rate.
		rand.Seed(int64(seed))
		mh := MetropolisHastingser{
			Initial:  initial,
			Target:   target,
			Proposal: proposal,
			Src:      nil,
			BurnIn:   0,
			Rate:     0,
		}
		samples := test.samples
		burnin := test.burnin
		rate := test.rate
		fullBatch := mat64.NewDense(1+burnin+rate*(samples-1), dim, nil)
		mh.Sample(fullBatch)

		mh = MetropolisHastingser{
			Initial:  initial,
			Target:   target,
			Proposal: proposal,
			Src:      nil,
			BurnIn:   burnin,
			Rate:     rate,
		}
		rand.Seed(int64(seed))
		batch := mat64.NewDense(samples, dim, nil)
		mh.Sample(batch)

		same := true
		count := burnin
		for i := 0; i < samples; i++ {
			if !floats.Equal(batch.RawRowView(i), fullBatch.RawRowView(count)) {
				fmt.Println("sample ", i, "is different")
				same = false
				break
			}
			count += rate
		}

		if !same {
			fmt.Printf("%v\n", mat64.Formatted(batch))
			fmt.Printf("%v\n", mat64.Formatted(fullBatch))
			t.Errorf("sampling mismatch: dim = %v, burnin = %v, rate = %v, samples = %v", dim, burnin, rate, samples)
		}
	}
}
// AddBatch adds a set of training points to the GP. This call updates the
// internal values needed for prediction, so it is more efficient to add
// samples as a batch.
func (g *GP) AddBatch(x mat64.Matrix, y []float64) error {
	// Note: The outputs are stored scaled to have a mean of zero and a
	// variance of 1.

	// Verify input parameters.
	rx, cx := x.Dims()
	ry := len(y)
	if rx != ry {
		panic(badInOut)
	}
	if cx != g.inputDim {
		panic(badInputLength)
	}
	nSamples := len(g.outputs)

	// Append the new data to the list of stored data.
	inputs := mat64.NewDense(rx+nSamples, g.inputDim, nil)
	inputs.Copy(g.inputs)
	inputs.View(nSamples, 0, rx, g.inputDim).(*mat64.Dense).Copy(x)
	g.inputs = inputs

	// Rescale the output data to its original value, append the new data, and
	// then rescale to have mean 0 and variance of 1.
	for i, v := range g.outputs {
		g.outputs[i] = v*g.std + g.mean
	}
	g.outputs = append(g.outputs, y...)
	g.mean = stat.Mean(g.outputs, nil)
	g.std = stat.StdDev(g.outputs, nil)
	for i, v := range g.outputs {
		g.outputs[i] = (v - g.mean) / g.std
	}

	// Add to the kernel matrix.
	k := mat64.NewSymDense(rx+nSamples, nil)
	k.CopySym(g.k)
	g.k = k

	// Compute the kernel between the new points and the old points.
	for i := 0; i < nSamples; i++ {
		for j := nSamples; j < rx+nSamples; j++ {
			v := g.kernel.Distance(g.inputs.RawRowView(i), g.inputs.RawRowView(j))
			g.k.SetSym(i, j, v)
		}
	}
	// Compute the kernel between the new points and themselves.
	for i := nSamples; i < rx+nSamples; i++ {
		for j := i; j < nSamples+rx; j++ {
			v := g.kernel.Distance(g.inputs.RawRowView(i), g.inputs.RawRowView(j))
			if i == j {
				v += g.noise
			}
			g.k.SetSym(i, j, v)
		}
	}

	// Cache necessary matrix results for computing predictions.
	var chol mat64.Cholesky
	ok := chol.Factorize(g.k)
	if !ok {
		return ErrSingular
	}
	g.cholK = &chol
	g.sigInvY.Reset()
	v := mat64.NewVector(len(g.outputs), g.outputs)
	g.sigInvY.SolveCholeskyVec(g.cholK, v)
	return nil
}
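// A usage sketch tying New and AddBatch together, with made-up observations
// of a hypothetical scalar function. Because AddBatch refactorizes the kernel
// matrix on every call, passing all points in one batch is cheaper than
// adding them one at a time.
func exampleAddBatch(g *GP) error {
	x := mat64.NewDense(3, 2, []float64{
		0, 0,
		0.5, 1,
		1, 0.25,
	})
	y := []float64{1.2, 0.7, 0.9} // One output per input row.
	return g.AddBatch(x, y)
}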
// setSigma computes and stores the covariance matrix of the distribution.
func (n *Normal) setSigma() {
	n.once.Do(func() {
		n.sigma = mat64.NewSymDense(n.Dim(), nil)
		n.sigma.FromCholesky(&n.chol)
	})
}
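// setSigma is a lazy-initialization idiom: sync.Once guarantees the covariance
// matrix is reconstructed from the Cholesky factor at most once, even with
// concurrent callers. A stripped-down sketch of the same pattern (the lazySym
// type is hypothetical, and sync is assumed imported):
type lazySym struct {
	once sync.Once
	m    *mat64.SymDense
}

func (l *lazySym) get(dim int, fill func(*mat64.SymDense)) *mat64.SymDense {
	l.once.Do(func() {
		l.m = mat64.NewSymDense(dim, nil)
		fill(l.m) // Runs exactly once; later calls return the cached matrix.
	})
	return l.m
}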
func TestMarginalSingle(t *testing.T) {
	for _, test := range []struct {
		mu    []float64
		sigma *mat64.SymDense
	}{
		{
			mu:    []float64{2, 3, 4},
			sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
		},
		{
			mu:    []float64{2, 3, 4, 5},
			sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, covariance matrix not positive definite")
		}
		// Verify with nil Sigma.
		normal.sigma = nil
		for i, mean := range test.mu {
			norm := normal.MarginalNormalSingle(i, nil)
			if norm.Mean() != mean {
				t.Errorf("Mean mismatch nil Sigma, idx %v: want %v, got %v.", i, mean, norm.Mean())
			}
			std := math.Sqrt(test.sigma.At(i, i))
			if math.Abs(norm.StdDev()-std) > 1e-14 {
				t.Errorf("StdDev mismatch nil Sigma, idx %v: want %v, got %v.", i, std, norm.StdDev())
			}
		}

		// Verify with non-nil Sigma.
		normal.setSigma()
		for i, mean := range test.mu {
			norm := normal.MarginalNormalSingle(i, nil)
			if norm.Mean() != mean {
				t.Errorf("Mean mismatch non-nil Sigma, idx %v: want %v, got %v.", i, mean, norm.Mean())
			}
			std := math.Sqrt(test.sigma.At(i, i))
			if math.Abs(norm.StdDev()-std) > 1e-14 {
				t.Errorf("StdDev mismatch non-nil Sigma, idx %v: want %v, got %v.", i, std, norm.StdDev())
			}
		}
	}

	// Test matching with TestMarginal.
	rnd := rand.New(rand.NewSource(1))
	for cas := 0; cas < 10; cas++ {
		dim := rnd.Intn(10) + 1
		mu := make([]float64, dim)
		for i := range mu {
			mu[i] = rnd.Float64()
		}
		x := make([]float64, dim*dim)
		for i := range x {
			x[i] = rnd.Float64()
		}
		mat := mat64.NewDense(dim, dim, x)
		var sigma mat64.SymDense
		sigma.SymOuterK(1, mat)

		normal, ok := NewNormal(mu, &sigma, nil)
		if !ok {
			t.Fatal("bad test")
		}
		for i := 0; i < dim; i++ {
			single := normal.MarginalNormalSingle(i, nil)
			mult, ok := normal.MarginalNormal([]int{i}, nil)
			if !ok {
				t.Fatal("bad test")
			}
			if math.Abs(single.Mean()-mult.Mean(nil)[0]) > 1e-14 {
				t.Errorf("Mean mismatch")
			}
			if math.Abs(single.Variance()-mult.CovarianceMatrix(nil).At(0, 0)) > 1e-14 {
				t.Errorf("Variance mismatch")
			}
		}
	}
}