Example 1
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)

	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}

	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())

	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))

	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("          ")))

	// Output:
	// a = ⎡120  114   -4  -16⎤
	//     ⎢114  118   11  -24⎥
	//     ⎢ -4   11   58   17⎥
	//     ⎣-16  -24   17   73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡  -0.239⎤
	//     ⎢  0.2732⎥
	//     ⎢-0.04681⎥
	//     ⎣  0.1031⎦
	//
	// L * L^T = ⎡120  114   -4  -16⎤
	//           ⎢114  118   11  -24⎥
	//           ⎢ -4   11   58   17⎥
	//           ⎣-16  -24   17   73⎦
}
Example 2
// MarginalNormal returns the marginal distribution of the given input variables.
// That is, MarginalNormal returns
//  p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o
// where x_i are the dimensions in the input, and x_o are the remaining dimensions.
// The input src is passed to the call to NewNormal.
func (n *Normal) MarginalNormal(vars []int, src *rand.Rand) (*Normal, bool) {
	newMean := make([]float64, len(vars))
	for i, v := range vars {
		newMean[i] = n.mu[v]
	}
	n.setSigma()
	var s mat64.SymDense
	s.SubsetSym(n.sigma, vars)
	return NewNormal(newMean, &s, src)
}
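A minimal usage sketch for MarginalNormal, assuming the pre-gonum.org import paths github.com/gonum/matrix/mat64 and github.com/gonum/stat/distmv; the mean and covariance values are illustrative only.

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
	"github.com/gonum/stat/distmv"
)

func main() {
	// A 3-dimensional normal with a hand-picked positive definite covariance.
	mu := []float64{1, 2, 3}
	sigma := mat64.NewSymDense(3, []float64{
		2, 0.5, 0.3,
		0.5, 1, 0.1,
		0.3, 0.1, 4,
	})
	normal, ok := distmv.NewNormal(mu, sigma, nil)
	if !ok {
		panic("covariance matrix is not positive definite")
	}

	// Marginalize onto dimensions 0 and 2. The marginal mean and covariance
	// are the corresponding subsets of the original mean and covariance.
	marg, ok := normal.MarginalNormal([]int{0, 2}, nil)
	if !ok {
		panic("marginalization failed")
	}
	fmt.Println(marg.Mean(nil)) // [1 3]
	fmt.Printf("%0.4v\n", mat64.Formatted(marg.CovarianceMatrix(nil)))
}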
Example 3
// NewProposalNormal constructs a new ProposalNormal for use as a proposal
// distribution for Metropolis-Hastings. ProposalNormal is a multivariate normal
// distribution (implemented by distmv.Normal) where the covariance matrix is fixed
// and the mean of the distribution changes.
//
// NewProposalNormal returns {nil, false} if the covariance matrix is not positive-definite.
func NewProposalNormal(sigma *mat64.SymDense, src *rand.Rand) (*ProposalNormal, bool) {
	mu := make([]float64, sigma.Symmetric())
	normal, ok := distmv.NewNormal(mu, sigma, src)
	if !ok {
		return nil, false
	}
	p := &ProposalNormal{
		normal: normal,
	}
	return p, true
}
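A minimal construction sketch; it assumes it runs in the package that defines NewProposalNormal, and the covariance values are arbitrary.

// Fixed 2-dimensional proposal covariance.
sigma := mat64.NewSymDense(2, []float64{
	1, 0.2,
	0.2, 2,
})
prop, ok := NewProposalNormal(sigma, nil)
if !ok {
	panic("proposal covariance is not positive definite")
}
// prop is now usable as a Metropolis-Hastings proposal: the sampler moves its
// mean to the current state while the covariance above stays fixed.
_ = prop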
Example 4
// CovarianceMatrix returns the covariance matrix of the distribution. Upon
// return, the value at element {i, j} of the covariance matrix is equal to
// the covariance of the i^th and j^th variables.
//  covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
// If the input matrix is nil a new matrix is allocated, otherwise the result
// is stored in-place into the input.
func (n *Normal) CovarianceMatrix(s *mat64.SymDense) *mat64.SymDense {
	if s == nil {
		s = mat64.NewSymDense(n.Dim(), nil)
	}
	sn := s.Symmetric()
	if sn != n.Dim() {
		panic("normal: input matrix size mismatch")
	}
	n.setSigma()
	s.CopySym(n.sigma)
	return s
}
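A short sketch of the two calling conventions (allocate versus reuse), assuming n is a *Normal built with NewNormal.

// Allocate a fresh covariance matrix.
cov := n.CovarianceMatrix(nil)

// Reuse an existing SymDense of the correct dimension; the result is written
// in place and also returned.
dst := mat64.NewSymDense(n.Dim(), nil)
n.CovarianceMatrix(dst)
_ = cov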
Example 5
func ExampleCholeskySymRankOne() {
	a := mat64.NewSymDense(4, []float64{
		1, 1, 1, 1,
		0, 2, 3, 4,
		0, 0, 6, 10,
		0, 0, 0, 20,
	})
	fmt.Printf("A = %0.4v\n", mat64.Formatted(a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(a); !ok {
		fmt.Println("matrix a is not positive definite.")
	}

	x := mat64.NewVector(4, []float64{0, 0, 0, 1})
	fmt.Printf("\nx = %0.4v\n", mat64.Formatted(x, mat64.Prefix("    ")))

	// Rank-1 update the factorization.
	chol.SymRankOne(&chol, 1, x)
	// Rank-1 update the matrix a.
	a.SymRankOne(a, 1, x)

	var au mat64.SymDense
	au.FromCholesky(&chol)

	// Print the matrix that was updated directly.
	fmt.Printf("\nA' =        %0.4v\n", mat64.Formatted(a, mat64.Prefix("            ")))
	// Print the matrix recovered from the factorization.
	fmt.Printf("\nU'^T * U' = %0.4v\n", mat64.Formatted(&au, mat64.Prefix("            ")))

	// Output:
	// A = ⎡ 1   1   1   1⎤
	//     ⎢ 1   2   3   4⎥
	//     ⎢ 1   3   6  10⎥
	//     ⎣ 1   4  10  20⎦
	//
	// x = ⎡0⎤
	//     ⎢0⎥
	//     ⎢0⎥
	//     ⎣1⎦
	//
	// A' =        ⎡ 1   1   1   1⎤
	//             ⎢ 1   2   3   4⎥
	//             ⎢ 1   3   6  10⎥
	//             ⎣ 1   4  10  21⎦
	//
	// U'^T * U' = ⎡ 1   1   1   1⎤
	//             ⎢ 1   2   3   4⎥
	//             ⎢ 1   3   6  10⎥
	//             ⎣ 1   4  10  21⎦
}
Example 6
// randomNormal constructs a random Normal distribution.
func randomNormal(dim int) (*distmv.Normal, bool) {
	data := make([]float64, dim*dim)
	for i := range data {
		data[i] = rand.Float64()
	}
	a := mat64.NewDense(dim, dim, data)
	var sigma mat64.SymDense
	sigma.SymOuterK(1, a)
	mu := make([]float64, dim)
	for i := range mu {
		mu[i] = rand.NormFloat64()
	}
	return distmv.NewNormal(mu, &sigma, nil)
}
Example 7
func (gp *GP) setKernelMat(s *mat64.SymDense, noise float64) {
	n := s.Symmetric()
	for i := 0; i < n; i++ {
		for j := i; j < n; j++ {
			v := gp.kernel.Distance(
				gp.inputs.RawRowView(i),
				gp.inputs.RawRowView(j),
			)
			if i == j {
				v += noise
			}
			s.SetSym(i, j, v)
		}
	}
}
Example 8
// covToCorr converts a covariance matrix to a correlation matrix.
func covToCorr(c *mat64.SymDense) {
	r := c.Symmetric()

	s := make([]float64, r)
	for i := 0; i < r; i++ {
		s[i] = 1 / math.Sqrt(c.At(i, i))
	}
	for i, sx := range s {
		// Ensure that the diagonal has exactly ones.
		c.SetSym(i, i, 1)
		for j := i + 1; j < r; j++ {
			v := c.At(i, j)
			c.SetSym(i, j, v*sx*s[j])
		}
	}
}
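A worked example of the conversion; covToCorr is unexported, so this sketch assumes it is called from inside the same package.

// Covariance matrix with variances 4 and 9 and covariance 2.
c := mat64.NewSymDense(2, []float64{
	4, 2,
	2, 9,
})
covToCorr(c)
// c now holds the correlation matrix: ones on the diagonal and
// 2/(sqrt(4)*sqrt(9)) = 0.3333 off the diagonal.
fmt.Printf("%0.4v\n", mat64.Formatted(c))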
Example 9
func randomNormal(sz int, rnd *rand.Rand) *Normal {
	mu := make([]float64, sz)
	for i := range mu {
		mu[i] = rnd.Float64()
	}
	data := make([]float64, sz*sz)
	for i := range data {
		data[i] = rnd.Float64()
	}
	dM := mat64.NewDense(sz, sz, data)
	var sigma mat64.SymDense
	sigma.SymOuterK(1, dM)

	normal, ok := NewNormal(mu, &sigma, nil)
	if !ok {
		log.Fatal("bad test, not pos def")
	}
	return normal
}
Example 10
func ExampleSymDense_SubsetSym() {
	n := 5
	s := mat64.NewSymDense(n, nil)
	count := 1.0
	for i := 0; i < n; i++ {
		for j := i; j < n; j++ {
			s.SetSym(i, j, count)
			count++
		}
	}
	fmt.Println("Original matrix:")
	fmt.Printf("%0.4v\n\n", mat64.Formatted(s))

	// Take the subset {0, 2, 4}
	var sub mat64.SymDense
	sub.SubsetSym(s, []int{0, 2, 4})
	fmt.Println("Subset {0, 2, 4}")
	fmt.Printf("%0.4v\n\n", mat64.Formatted(&sub))

	// Take the subset {0, 0, 4}
	sub.SubsetSym(s, []int{0, 0, 4})
	fmt.Println("Subset {0, 0, 4}")
	fmt.Printf("%0.4v\n\n", mat64.Formatted(&sub))

	// Output:
	// Original matrix:
	// ⎡ 1   2   3   4   5⎤
	// ⎢ 2   6   7   8   9⎥
	// ⎢ 3   7  10  11  12⎥
	// ⎢ 4   8  11  13  14⎥
	// ⎣ 5   9  12  14  15⎦
	//
	// Subset {0, 2, 4}
	// ⎡ 1   3   5⎤
	// ⎢ 3  10  12⎥
	// ⎣ 5  12  15⎦
	//
	// Subset {0, 0, 4}
	// ⎡ 1   1   5⎤
	// ⎢ 1   1   5⎥
	// ⎣ 5   5  15⎦
}
Example 11
// NewNormalPrecision creates a new Normal distribution with the given mean and
// precision matrix (inverse of the covariance matrix). NewNormalPrecision
// panics if len(mu) is not equal to prec.Symmetric(). If the precision matrix
// is not positive-definite, NewNormalPrecision returns nil for norm and false
// for ok.
func NewNormalPrecision(mu []float64, prec *mat64.SymDense, src *rand.Rand) (norm *Normal, ok bool) {
	if len(mu) == 0 {
		panic(badZeroDimension)
	}
	dim := prec.Symmetric()
	if dim != len(mu) {
		panic(badSizeMismatch)
	}
	// TODO(btracey): Computing a matrix inverse is generally numerically unstable.
	// This only has to compute the inverse of a positive definite matrix, which
	// is much better, but this still loses precision. It is worth considering if
	// instead the precision matrix should be stored explicitly and used instead
	// of the Cholesky decomposition of the covariance matrix where appropriate.
	var chol mat64.Cholesky
	ok = chol.Factorize(prec)
	if !ok {
		return nil, false
	}
	var sigma mat64.SymDense
	sigma.InverseCholesky(&chol)
	return NewNormal(mu, &sigma, src)
}
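A minimal usage sketch, assuming the mat64 and distmv packages used throughout this listing; the precision matrix is diagonal so the resulting variances are simply its reciprocals.

mu := []float64{0, 0}
prec := mat64.NewSymDense(2, []float64{
	4, 0,
	0, 0.25,
})
normal, ok := distmv.NewNormalPrecision(mu, prec, nil)
if !ok {
	panic("precision matrix is not positive definite")
}
// The covariance is the inverse of the precision matrix, so the variances
// are 1/4 = 0.25 and 1/0.25 = 4.
fmt.Printf("%0.4v\n", mat64.Formatted(normal.CovarianceMatrix(nil)))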
Example 12
// corrToCov converts a correlation matrix to a covariance matrix.
// The input sigma should be a vector of standard deviations corresponding
// to the covariance. corrToCov panics if len(sigma) is not equal to the
// number of rows in the correlation matrix.
func corrToCov(c *mat64.SymDense, sigma []float64) {
	r, _ := c.Dims()

	if r != len(sigma) {
		panic(matrix.ErrShape)
	}
	for i, sx := range sigma {
		// Ensure that the diagonal has exactly sigma squared.
		c.SetSym(i, i, sx*sx)
		for j := i + 1; j < r; j++ {
			v := c.At(i, j)
			c.SetSym(i, j, v*sx*sigma[j])
		}
	}
}
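The inverse conversion as a worked example; corrToCov is also unexported, so this assumes same-package access.

// Correlation 0.5 between two variables with standard deviations 2 and 3.
c := mat64.NewSymDense(2, []float64{
	1, 0.5,
	0.5, 1,
})
corrToCov(c, []float64{2, 3})
// c now holds the covariance matrix: variances 4 and 9 on the diagonal,
// covariance 0.5*2*3 = 3 off the diagonal.
fmt.Printf("%0.4v\n", mat64.Formatted(c))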
Example 13
// CovarianceMatrix calculates a covariance matrix (also known as a
// variance-covariance matrix) from a matrix of data, using a two-pass
// algorithm.
//
// The weights must have length equal to the number of rows in the
// input data matrix x, and must not be negative. If cov is nil, a new matrix of
// the appropriate size is allocated. If cov is not nil, its dimension must equal
// the number of columns of the input data matrix x, and it is used as the
// destination for the covariance data.
func CovarianceMatrix(cov *mat64.SymDense, x mat64.Matrix, weights []float64) *mat64.SymDense {
	// This is the matrix version of the two-pass algorithm. It doesn't use the
	// additional floating point error correction that the Covariance function uses
	// to reduce the impact of rounding during centering.

	r, c := x.Dims()

	if cov == nil {
		cov = mat64.NewSymDense(c, nil)
	} else if n := cov.Symmetric(); n != c {
		panic(matrix.ErrShape)
	}

	var xt mat64.Dense
	xt.Clone(x.T())
	// Subtract the mean of each of the columns.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		// This will panic with ErrShape if len(weights) != len(v), so
		// we don't have to check the size later.
		mean := Mean(v, weights)
		floats.AddConst(-mean, v)
	}

	if weights == nil {
		// Calculate the normalization factor
		// scaled by the sample size.
		cov.SymOuterK(1/(float64(r)-1), &xt)
		return cov
	}

	// Multiply by the sqrt of the weights, so that multiplication is symmetric.
	sqrtwts := make([]float64, r)
	for i, w := range weights {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		sqrtwts[i] = math.Sqrt(w)
	}
	// Weight the rows.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		floats.Mul(v, sqrtwts)
	}

	// Calculate the normalization factor
	// scaled by the weighted sample size.
	cov.SymOuterK(1/(floats.Sum(weights)-1), &xt)
	return cov
}
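A small usage sketch for stat.CovarianceMatrix, assuming the stat and mat64 packages used throughout this listing; the data values are illustrative.

// Three observations of two variables, stored row-wise.
x := mat64.NewDense(3, 2, []float64{
	1, 2,
	3, 6,
	5, 10,
})

// Unweighted sample covariance: variances 4 and 16, covariance 8.
cov := stat.CovarianceMatrix(nil, x, nil)
fmt.Printf("%0.4v\n", mat64.Formatted(cov))

// Weighted covariance, reusing a preallocated 2x2 destination.
dst := mat64.NewSymDense(2, nil)
stat.CovarianceMatrix(dst, x, []float64{1, 2, 1})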
Example 14
func (BrownBadlyScaled) Hess(x []float64, hess *mat64.SymDense) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != hess.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	h00 := 2 + 2*x[1]*x[1]
	h01 := 4*x[0]*x[1] - 4
	h11 := 2 + 2*x[0]*x[0]
	hess.SetSym(0, 0, h00)
	hess.SetSym(0, 1, h01)
	hess.SetSym(1, 1, h11)
}
Example 15
func (PowellBadlyScaled) Hess(x []float64, hess *mat64.SymDense) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != hess.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	t1 := 1e4*x[0]*x[1] - 1
	s1 := math.Exp(-x[0])
	s2 := math.Exp(-x[1])
	t2 := s1 + s2 - 1.0001

	h00 := 2 * (1e8*x[1]*x[1] + s1*(s1+t2))
	h01 := 2 * (1e4*(1+2*t1) + s1*s2)
	h11 := 2 * (1e8*x[0]*x[0] + s2*(s2+t2))
	hess.SetSym(0, 0, h00)
	hess.SetSym(0, 1, h01)
	hess.SetSym(1, 1, h11)
}
Example 16
func (Beale) Hess(x []float64, hess *mat64.SymDense) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != hess.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	t1 := 1 - x[1]
	t2 := 1 - x[1]*x[1]
	t3 := 1 - x[1]*x[1]*x[1]
	f1 := 1.5 - x[1]*t1
	f2 := 2.25 - x[1]*t2
	f3 := 2.625 - x[1]*t3

	h00 := 2 * (t1*t1 + t2*t2 + t3*t3)
	h01 := 2 * (f1 + x[1]*(2*f2+3*x[1]*f3) - x[0]*(t1+x[1]*(2*t2+3*x[1]*t3)))
	h11 := 2 * x[0] * (x[0] + 2*f2 + x[1]*(6*f3+x[0]*x[1]*(4+9*x[1]*x[1])))
	hess.SetSym(0, 0, h00)
	hess.SetSym(0, 1, h01)
	hess.SetSym(1, 1, h11)
}
Example 17
func (Watson) Hess(x []float64, hess *mat64.SymDense) {
	dim := len(x)
	if dim != hess.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	for j := 0; j < dim; j++ {
		for k := j; k < dim; k++ {
			hess.SetSym(j, k, 0)
		}
	}
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29
		d2 := 1.0
		var s1 float64
		for j := 1; j < dim; j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		s3 := 2 * d1 * s2
		d2 = 2 / d1
		th := 2 * d1 * d1 * t
		for j := 0; j < dim; j++ {
			v := float64(j) - s3
			d3 := 1 / d1
			for k := 0; k <= j; k++ {
				hess.SetSym(k, j, hess.At(k, j)+d2*d3*(v*(float64(k)-s3)-th))
				d3 *= d1
			}
			d2 *= d1
		}
	}
	t1 := x[1] - x[0]*x[0] - 1
	hess.SetSym(0, 0, hess.At(0, 0)+8*x[0]*x[0]+2-4*t1)
	hess.SetSym(0, 1, hess.At(0, 1)-4*x[0])
	hess.SetSym(1, 1, hess.At(1, 1)+2)
}
Example 18
func (Wood) Hess(x []float64, hess *mat64.SymDense) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != hess.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	hess.SetSym(0, 0, 400*(3*x[0]*x[0]-x[1])+2)
	hess.SetSym(0, 1, -400*x[0])
	hess.SetSym(1, 1, 220.2)
	hess.SetSym(0, 2, 0)
	hess.SetSym(1, 2, 0)
	hess.SetSym(2, 2, 360*(3*x[2]*x[2]-x[3])+2)
	hess.SetSym(0, 3, 0)
	hess.SetSym(1, 3, 19.8)
	hess.SetSym(2, 3, -360*x[2])
	hess.SetSym(3, 3, 200.2)
}
Example 19
// ConditionNormal returns the Normal distribution that is the receiver conditioned
// on the input evidence. The returned multivariate normal has dimension
// n - len(observed), where n is the dimension of the original receiver. The updated
// mean and covariance are
//  mu = mu_un + sigma_{ob,un}^T * sigma_{ob,ob}^-1 (v - mu_ob)
//  sigma = sigma_{un,un} - sigma_{ob,un}^T * sigma_{ob,ob}^-1 * sigma_{ob,un}
// where mu_un and mu_ob are the original means of the unobserved and observed
// variables respectively, sigma_{un,un} is the unobserved subset of the covariance
// matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and
// sigma_{ob,un} are the cross terms. The observed variables take the values
// v. The dimension order is preserved during conditioning, so if the value
// of dimension 1 is observed, the returned normal represents dimensions {0, 2, ...}
// of the original Normal distribution.
//
// ConditionNormal returns {nil, false} if there is a failure during the update.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (n *Normal) ConditionNormal(observed []int, values []float64, src *rand.Rand) (*Normal, bool) {
	if len(observed) == 0 {
		panic("normal: no observed value")
	}
	if len(observed) != len(values) {
		panic("normal: input slice length mismatch")
	}
	for _, v := range observed {
		if v < 0 || v >= n.Dim() {
			panic("normal: observed value out of bounds")
		}
	}

	ob := len(observed)
	unob := n.Dim() - ob
	obMap := make(map[int]struct{})
	for _, v := range observed {
		if _, ok := obMap[v]; ok {
			panic("normal: observed dimension occurs twice")
		}
		obMap[v] = struct{}{}
	}
	if len(observed) == n.Dim() {
		panic("normal: all dimensions observed")
	}
	unobserved := make([]int, 0, unob)
	for i := 0; i < n.Dim(); i++ {
		if _, ok := obMap[i]; !ok {
			unobserved = append(unobserved, i)
		}
	}
	mu1 := make([]float64, unob)
	for i, v := range unobserved {
		mu1[i] = n.mu[v]
	}
	mu2 := make([]float64, ob) // really v - mu2
	for i, v := range observed {
		mu2[i] = values[i] - n.mu[v]
	}

	n.setSigma()

	var sigma11, sigma22 mat64.SymDense
	sigma11.SubsetSym(n.sigma, unobserved)
	sigma22.SubsetSym(n.sigma, observed)

	sigma21 := mat64.NewDense(ob, unob, nil)
	for i, r := range observed {
		for j, c := range unobserved {
			v := n.sigma.At(r, c)
			sigma21.Set(i, j, v)
		}
	}

	var chol mat64.Cholesky
	ok := chol.Factorize(&sigma22)
	if !ok {
		return nil, ok
	}

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 (v - mu_2).
	v := mat64.NewVector(ob, mu2)
	var tmp, tmp2 mat64.Vector
	err := tmp.SolveCholeskyVec(&chol, v)
	if err != nil {
		return nil, false
	}
	tmp2.MulVec(sigma21.T(), &tmp)

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
	// TODO(btracey): Should this be a method of SymDense?
	var tmp3, tmp4 mat64.Dense
	err = tmp3.SolveCholesky(&chol, sigma21)
	if err != nil {
		return nil, false
	}
	tmp4.Mul(sigma21.T(), &tmp3)

	for i := range mu1 {
		mu1[i] += tmp2.At(i, 0)
	}

	// TODO(btracey): If tmp2 can constructed with a method, then this can be
	// replaced with SubSym.
	for i := 0; i < len(unobserved); i++ {
		for j := i; j < len(unobserved); j++ {
			v := sigma11.At(i, j)
			sigma11.SetSym(i, j, v-tmp4.At(i, j))
		}
	}
	return NewNormal(mu1, &sigma11, src)
}
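A sketch of conditioning a bivariate normal on its second dimension, assuming distmv.NewNormal and mat64 as used elsewhere in this listing; the numbers mirror the analytic bivariate update, conditional mean mu_0 + rho*(s_0/s_1)*(v - mu_1) and conditional variance (1 - rho^2)*s_0^2.

// Bivariate normal with standard deviations 3 and 5 and correlation 0.9,
// so the covariance matrix is [[9, 13.5], [13.5, 25]].
mu := []float64{2, 3}
sigma := mat64.NewSymDense(2, []float64{
	9, 13.5,
	13.5, 25,
})
normal, ok := distmv.NewNormal(mu, sigma, nil)
if !ok {
	panic("covariance matrix is not positive definite")
}

// Observe dimension 1 with the value 13.
cond, ok := normal.ConditionNormal([]int{1}, []float64{13}, nil)
if !ok {
	panic("conditioning failed")
}
// The result is 1-dimensional with mean 2 + 0.9*(3/5)*(13-3) = 7.4
// and variance (1 - 0.9*0.9)*9 = 1.71.
fmt.Println(cond.Mean(nil))
fmt.Printf("%0.4v\n", mat64.Formatted(cond.CovarianceMatrix(nil)))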
Example 20
func (BrownAndDennis) Hess(x []float64, hess *mat64.SymDense) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != hess.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	for i := 0; i < 4; i++ {
		for j := i; j < 4; j++ {
			hess.SetSym(i, j, 0)
		}
	}
	for i := 1; i <= 20; i++ {
		d1 := float64(i) / 5
		d2 := math.Sin(d1)
		t1 := x[0] + d1*x[1] - math.Exp(d1)
		t2 := x[2] + d2*x[3] - math.Cos(d1)
		t := t1*t1 + t2*t2
		s3 := 2 * t1 * t2
		r1 := t + 2*t1*t1
		r2 := t + 2*t2*t2
		hess.SetSym(0, 0, hess.At(0, 0)+r1)
		hess.SetSym(0, 1, hess.At(0, 1)+d1*r1)
		hess.SetSym(1, 1, hess.At(1, 1)+d1*d1*r1)
		hess.SetSym(0, 2, hess.At(0, 2)+s3)
		hess.SetSym(1, 2, hess.At(1, 2)+d1*s3)
		hess.SetSym(2, 2, hess.At(2, 2)+r2)
		hess.SetSym(0, 3, hess.At(0, 3)+d2*s3)
		hess.SetSym(1, 3, hess.At(1, 3)+d1*d2*s3)
		hess.SetSym(2, 3, hess.At(2, 3)+d2*r2)
		hess.SetSym(3, 3, hess.At(3, 3)+d2*d2*r2)
	}
	for i := 0; i < 4; i++ {
		for j := i; j < 4; j++ {
			hess.SetSym(i, j, 4*hess.At(i, j))
		}
	}
}
Example 21
func TestConditionNormal(t *testing.T) {
	// Uncorrelated values shouldn't influence the updated values.
	for _, test := range []struct {
		mu       []float64
		sigma    *mat64.SymDense
		observed []int
		values   []float64

		newMu    []float64
		newSigma *mat64.SymDense
	}{
		{
			mu:       []float64{2, 3},
			sigma:    mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
			observed: []int{0},
			values:   []float64{10},

			newMu:    []float64{3},
			newSigma: mat64.NewSymDense(1, []float64{5}),
		},
		{
			mu:       []float64{2, 3},
			sigma:    mat64.NewSymDense(2, []float64{2, 0, 0, 5}),
			observed: []int{1},
			values:   []float64{10},

			newMu:    []float64{2},
			newSigma: mat64.NewSymDense(1, []float64{2}),
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
			observed: []int{1},
			values:   []float64{10},

			newMu:    []float64{2, 4},
			newSigma: mat64.NewSymDense(2, []float64{2, 0, 0, 10}),
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}),
			observed: []int{0, 1},
			values:   []float64{10, 15},

			newMu:    []float64{4},
			newSigma: mat64.NewSymDense(1, []float64{10}),
		},
		{
			mu:       []float64{2, 3, 4, 5},
			sigma:    mat64.NewSymDense(4, []float64{2, 0.5, 0, 0, 0.5, 5, 0, 0, 0, 0, 10, 2, 0, 0, 2, 3}),
			observed: []int{0, 1},
			values:   []float64{10, 15},

			newMu:    []float64{4, 5},
			newSigma: mat64.NewSymDense(2, []float64{10, 2, 2, 3}),
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, original sigma not positive definite")
		}
		newNormal, ok := normal.ConditionNormal(test.observed, test.values, nil)
		if !ok {
			t.Fatalf("Bad test, update failure")
		}

		if !floats.EqualApprox(test.newMu, newNormal.mu, 1e-12) {
			t.Errorf("Updated mean mismatch. Want %v, got %v.", test.newMu, newNormal.mu)
		}

		var sigma mat64.SymDense
		sigma.FromCholesky(&newNormal.chol)
		if !mat64.EqualApprox(test.newSigma, &sigma, 1e-12) {
			t.Errorf("Updated sigma mismatch\n.Want:\n% v\nGot:\n% v\n", test.newSigma, sigma)
		}
	}

	// Test bivariate case where the update rule is analytic
	for _, test := range []struct {
		mu    []float64
		std   []float64
		rho   float64
		value float64
	}{
		{
			mu:    []float64{2, 3},
			std:   []float64{3, 5},
			rho:   0.9,
			value: 1000,
		},
		{
			mu:    []float64{2, 3},
			std:   []float64{3, 5},
			rho:   -0.9,
			value: 1000,
		},
	} {
		std := test.std
		rho := test.rho
		sigma := mat64.NewSymDense(2, []float64{std[0] * std[0], std[0] * std[1] * rho, std[0] * std[1] * rho, std[1] * std[1]})
		normal, ok := NewNormal(test.mu, sigma, nil)
		if !ok {
			t.Fatalf("Bad test, original sigma not positive definite")
		}
		newNormal, ok := normal.ConditionNormal([]int{1}, []float64{test.value}, nil)
		if !ok {
			t.Fatalf("Bad test, update failed")
		}
		var newSigma mat64.SymDense
		newSigma.FromCholesky(&newNormal.chol)
		trueMean := test.mu[0] + rho*(std[0]/std[1])*(test.value-test.mu[1])
		if math.Abs(trueMean-newNormal.mu[0]) > 1e-14 {
			t.Errorf("Mean mismatch. Want %v, got %v", trueMean, newNormal.mu[0])
		}
		trueVar := (1 - rho*rho) * std[0] * std[0]
		if math.Abs(trueVar-newSigma.At(0, 0)) > 1e-14 {
			t.Errorf("Std mismatch. Want %v, got %v", trueMean, newNormal.mu[0])
		}
	}

	// Test via sampling.
	for _, test := range []struct {
		mu         []float64
		sigma      *mat64.SymDense
		observed   []int
		unobserved []int
		value      []float64
	}{
		// The indices in unobserved must be in ascending order for this test.
		{
			mu:    []float64{2, 3, 4},
			sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),

			observed:   []int{0},
			unobserved: []int{1, 2},
			value:      []float64{1.9},
		},
		{
			mu:    []float64{2, 3, 4, 5},
			sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),

			observed:   []int{0, 3},
			unobserved: []int{1, 2},
			value:      []float64{1.9, 2.9},
		},
	} {
		totalSamp := 4000000
		var nSamp int
		samples := mat64.NewDense(totalSamp, len(test.mu), nil)
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Errorf("bad test")
		}
		sample := make([]float64, len(test.mu))
		for i := 0; i < totalSamp; i++ {
			normal.Rand(sample)
			isClose := true
			for i, v := range test.observed {
				if math.Abs(sample[v]-test.value[i]) > 1e-1 {
					isClose = false
					break
				}
			}
			if isClose {
				samples.SetRow(nSamp, sample)
				nSamp++
			}
		}

		if nSamp < 100 {
			t.Errorf("bad test, not enough samples")
			continue
		}
		samples = samples.View(0, 0, nSamp, len(test.mu)).(*mat64.Dense)

		// Compute mean and covariance matrix.
		estMean := make([]float64, len(test.mu))
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samples), nil)
		}
		estCov := stat.CovarianceMatrix(nil, samples, nil)

		// Compute update rule.
		newNormal, ok := normal.ConditionNormal(test.observed, test.value, nil)
		if !ok {
			t.Fatalf("Bad test, update failure")
		}

		var subEstMean []float64
		for _, v := range test.unobserved {
			subEstMean = append(subEstMean, estMean[v])
		}
		subEstCov := mat64.NewSymDense(len(test.unobserved), nil)
		for i := 0; i < len(test.unobserved); i++ {
			for j := i; j < len(test.unobserved); j++ {
				subEstCov.SetSym(i, j, estCov.At(test.unobserved[i], test.unobserved[j]))
			}
		}

		for i, v := range subEstMean {
			if math.Abs(newNormal.mu[i]-v) > 5e-2 {
				t.Errorf("Mean mismatch. Want %v, got %v.", newNormal.mu[i], v)
			}
		}
		var sigma mat64.SymDense
		sigma.FromCholesky(&newNormal.chol)
		if !mat64.EqualApprox(&sigma, subEstCov, 1e-1) {
			t.Errorf("Covariance mismatch. Want:\n%0.8v\nGot:\n%0.8v\n", subEstCov, sigma)
		}
	}
}
Example 22
func resizeSymDense(m *mat64.SymDense, dim int) *mat64.SymDense {
	if m == nil || cap(m.RawSymmetric().Data) < dim*dim {
		return mat64.NewSymDense(dim, nil)
	}
	return mat64.NewSymDense(dim, m.RawSymmetric().Data[:dim*dim])
}
Example 23
func TestMarginalSingle(t *testing.T) {
	for _, test := range []struct {
		mu    []float64
		sigma *mat64.SymDense
	}{
		{
			mu:    []float64{2, 3, 4},
			sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
		},
		{
			mu:    []float64{2, 3, 4, 5},
			sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, covariance matrix not positive definite")
		}
		// Verify with nil Sigma.
		normal.sigma = nil
		for i, mean := range test.mu {
			norm := normal.MarginalNormalSingle(i, nil)
			if norm.Mean() != mean {
				t.Errorf("Mean mismatch nil Sigma, idx %v: want %v, got %v.", i, mean, norm.Mean())
			}
			std := math.Sqrt(test.sigma.At(i, i))
			if math.Abs(norm.StdDev()-std) > 1e-14 {
				t.Errorf("StdDev mismatch nil Sigma, idx %v: want %v, got %v.", i, std, norm.StdDev())
			}
		}

		// Verify with non-nil Sigma.
		normal.setSigma()
		for i, mean := range test.mu {
			norm := normal.MarginalNormalSingle(i, nil)
			if norm.Mean() != mean {
				t.Errorf("Mean mismatch non-nil Sigma, idx %v: want %v, got %v.", i, mean, norm.Mean())
			}
			std := math.Sqrt(test.sigma.At(i, i))
			if math.Abs(norm.StdDev()-std) > 1e-14 {
				t.Errorf("StdDev mismatch non-nil Sigma, idx %v: want %v, got %v.", i, std, norm.StdDev())
			}
		}
	}

	// Test matching with TestMarginal.
	rnd := rand.New(rand.NewSource(1))
	for cas := 0; cas < 10; cas++ {
		dim := rnd.Intn(10) + 1
		mu := make([]float64, dim)
		for i := range mu {
			mu[i] = rnd.Float64()
		}
		x := make([]float64, dim*dim)
		for i := range x {
			x[i] = rnd.Float64()
		}
		mat := mat64.NewDense(dim, dim, x)
		var sigma mat64.SymDense
		sigma.SymOuterK(1, mat)

		normal, ok := NewNormal(mu, &sigma, nil)
		if !ok {
			t.Fatal("bad test")
		}
		for i := 0; i < dim; i++ {
			single := normal.MarginalNormalSingle(i, nil)
			mult, ok := normal.MarginalNormal([]int{i}, nil)
			if !ok {
				t.Fatal("bad test")
			}
			if math.Abs(single.Mean()-mult.Mean(nil)[0]) > 1e-14 {
				t.Errorf("Mean mismatch")
			}
			if math.Abs(single.Variance()-mult.CovarianceMatrix(nil).At(0, 0)) > 1e-14 {
				t.Errorf("Variance mismatch")
			}
		}
	}
}