Example #1
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)

	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}

	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())

	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))

	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("          ")))

	// Output:
	// a = ⎡120  114   -4  -16⎤
	//     ⎢114  118   11  -24⎥
	//     ⎢ -4   11   58   17⎥
	//     ⎣-16  -24   17   73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡  -0.239⎤
	//     ⎢  0.2732⎥
	//     ⎢-0.04681⎥
	//     ⎣  0.1031⎦
	//
	// L * L^T = ⎡120  114   -4  -16⎤
	//           ⎢114  118   11  -24⎥
	//           ⎢ -4   11   58   17⎥
	//           ⎣-16  -24   17   73⎦
}
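
The same factorization can also be reused to solve systems with matrix right-hand sides via Dense.SolveCholesky (the Dense counterpart of SolveCholeskyVec used above). A minimal sketch, assuming the same fmt and mat64 imports as the example; the helper name and the values in b are illustrative, not part of the original example.

func solveMatrixRHS(chol *mat64.Cholesky) {
	// Solve a * X = B for a 4x2 right-hand side B, reusing the existing
	// factorization instead of refactorizing for each column.
	b := mat64.NewDense(4, 2, []float64{
		1, 5,
		2, 6,
		3, 7,
		4, 8,
	})
	var x mat64.Dense
	if err := x.SolveCholesky(chol, b); err != nil {
		fmt.Println("matrix a is near singular:", err)
		return
	}
	fmt.Printf("X = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))
}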
Example #2
func ExampleCholesky_SymRankOne() {
	a := mat64.NewSymDense(4, []float64{
		1, 1, 1, 1,
		0, 2, 3, 4,
		0, 0, 6, 10,
		0, 0, 0, 20,
	})
	fmt.Printf("A = %0.4v\n", mat64.Formatted(a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(a); !ok {
		fmt.Println("matrix a is not positive definite.")
	}

	x := mat64.NewVector(4, []float64{0, 0, 0, 1})
	fmt.Printf("\nx = %0.4v\n", mat64.Formatted(x, mat64.Prefix("    ")))

	// Rank-1 update the factorization.
	chol.SymRankOne(&chol, 1, x)
	// Rank-1 update the matrix a.
	a.SymRankOne(a, 1, x)

	var au mat64.SymDense
	au.FromCholesky(&chol)

	// Print the matrix that was updated directly.
	fmt.Printf("\nA' =        %0.4v\n", mat64.Formatted(a, mat64.Prefix("            ")))
	// Print the matrix recovered from the factorization.
	fmt.Printf("\nU'^T * U' = %0.4v\n", mat64.Formatted(&au, mat64.Prefix("            ")))

	// Output:
	// A = ⎡ 1   1   1   1⎤
	//     ⎢ 1   2   3   4⎥
	//     ⎢ 1   3   6  10⎥
	//     ⎣ 1   4  10  20⎦
	//
	// x = ⎡0⎤
	//     ⎢0⎥
	//     ⎢0⎥
	//     ⎣1⎦
	//
	// A' =        ⎡ 1   1   1   1⎤
	//             ⎢ 1   2   3   4⎥
	//             ⎢ 1   3   6  10⎥
	//             ⎣ 1   4  10  21⎦
	//
	// U'^T * U' = ⎡ 1   1   1   1⎤
	//             ⎢ 1   2   3   4⎥
	//             ⎢ 1   3   6  10⎥
	//             ⎣ 1   4  10  21⎦
}
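
SymRankOne also accepts a negative alpha, which performs a rank-1 downdate. Unlike an update of a positive definite matrix with positive alpha, a downdate can destroy positive definiteness, so the ok return value (ignored in the example above) matters there. A minimal sketch under the same mat64 API; the helper name is illustrative.

func downdateSketch(chol *mat64.Cholesky, x *mat64.Vector) {
	// Downdate the factorization by -1 * x * x^T and check that the
	// result is still positive definite before using it.
	var down mat64.Cholesky
	if ok := down.SymRankOne(chol, -1, x); !ok {
		fmt.Println("downdated matrix is not positive definite")
		return
	}
	fmt.Printf("determinant after downdate: %0.4g\n", down.Det())
}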
Example #3
// NewNormalPrecision creates a new Normal distribution with the given mean and
// precision matrix (inverse of the covariance matrix). NewNormalPrecision
// panics if len(mu) is not equal to prec.Symmetric(). If the precision matrix
// is not positive-definite, NewNormalPrecision returns nil for norm and false
// for ok.
func NewNormalPrecision(mu []float64, prec *mat64.SymDense, src *rand.Rand) (norm *Normal, ok bool) {
	if len(mu) == 0 {
		panic(badZeroDimension)
	}
	dim := prec.Symmetric()
	if dim != len(mu) {
		panic(badSizeMismatch)
	}
	// TODO(btracey): Computing a matrix inverse is generally numerically unstable.
	// This only has to compute the inverse of a positive definite matrix, which
	// is much better, but this still loses precision. It is worth considering if
	// instead the precision matrix should be stored explicitly and used instead
	// of the Cholesky decomposition of the covariance matrix where appropriate.
	var chol mat64.Cholesky
	ok = chol.Factorize(prec)
	if !ok {
		return nil, false
	}
	var sigma mat64.SymDense
	sigma.InverseCholesky(&chol)
	return NewNormal(mu, &sigma, src)
}
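
A minimal usage sketch, assuming this constructor lives alongside NewNormal in the same distmv-style package and that a nil src is accepted as in NewNormal above; the helper name and the numbers are illustrative.

func newNormalPrecisionSketch() {
	// A diagonal precision matrix corresponds to independent components;
	// a precision of 4 on the diagonal means a variance of 1/4 each.
	prec := mat64.NewSymDense(2, []float64{
		4, 0,
		0, 4,
	})
	norm, ok := NewNormalPrecision([]float64{1, 2}, prec, nil)
	if !ok {
		fmt.Println("precision matrix is not positive definite")
		return
	}
	_ = norm // Draw samples, evaluate the density, etc.
}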
Example #4
// ConditionNormal returns the Normal distribution that is the receiver conditioned
// on the input evidence. The returned multivariate normal has dimension
// n - len(observed), where n is the dimension of the original receiver. The updated
// mean and covariance are
//  mu = mu_un + sigma_{ob,un}^T * sigma_{ob,ob}^-1 (v - mu_ob)
//  sigma = sigma_{un,un} - sigma_{ob,un}^T * sigma_{ob,ob}^-1 * sigma_{ob,un}
// where mu_un and mu_ob are the original means of the unobserved and observed
// variables respectively, sigma_{un,un} is the unobserved subset of the covariance
// matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and
// sigma_{ob,un} contains the cross terms. The observed variables take the values
// v. The dimension order is preserved during conditioning, so if the value
// of dimension 1 is observed, the returned normal represents dimensions {0, 2, ...}
// of the original Normal distribution.
//
// ConditionNormal returns {nil, false} if there is a failure during the update.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (n *Normal) ConditionNormal(observed []int, values []float64, src *rand.Rand) (*Normal, bool) {
	if len(observed) == 0 {
		panic("normal: no observed value")
	}
	if len(observed) != len(values) {
		panic("normal: input slice length mismatch")
	}
	for _, v := range observed {
		if v < 0 || v >= n.Dim() {
			panic("normal: observed value out of bounds")
		}
	}

	ob := len(observed)
	unob := n.Dim() - ob
	obMap := make(map[int]struct{})
	for _, v := range observed {
		if _, ok := obMap[v]; ok {
			panic("normal: observed dimension occurs twice")
		}
		obMap[v] = struct{}{}
	}
	if len(observed) == n.Dim() {
		panic("normal: all dimensions observed")
	}
	unobserved := make([]int, 0, unob)
	for i := 0; i < n.Dim(); i++ {
		if _, ok := obMap[i]; !ok {
			unobserved = append(unobserved, i)
		}
	}
	mu1 := make([]float64, unob)
	for i, v := range unobserved {
		mu1[i] = n.mu[v]
	}
	mu2 := make([]float64, ob) // really v - mu2
	for i, v := range observed {
		mu2[i] = values[i] - n.mu[v]
	}

	n.setSigma()

	var sigma11, sigma22 mat64.SymDense
	sigma11.SubsetSym(n.sigma, unobserved)
	sigma22.SubsetSym(n.sigma, observed)

	sigma21 := mat64.NewDense(ob, unob, nil)
	for i, r := range observed {
		for j, c := range unobserved {
			v := n.sigma.At(r, c)
			sigma21.Set(i, j, v)
		}
	}

	var chol mat64.Cholesky
	ok := chol.Factorize(&sigma22)
	if !ok {
		return nil, ok
	}

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 (v - mu_2).
	v := mat64.NewVector(ob, mu2)
	var tmp, tmp2 mat64.Vector
	err := tmp.SolveCholeskyVec(&chol, v)
	if err != nil {
		return nil, false
	}
	tmp2.MulVec(sigma21.T(), &tmp)

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
	// TODO(btracey): Should this be a method of SymDense?
	var tmp3, tmp4 mat64.Dense
	err = tmp3.SolveCholesky(&chol, sigma21)
	if err != nil {
		return nil, false
	}
	tmp4.Mul(sigma21.T(), &tmp3)

	for i := range mu1 {
		mu1[i] += tmp2.At(i, 0)
	}

	// TODO(btracey): If tmp2 can be constructed with a method, then this can be
	// replaced with SubSym.
	for i := 0; i < len(unobserved); i++ {
		for j := i; j < len(unobserved); j++ {
			v := sigma11.At(i, j)
			sigma11.SetSym(i, j, v-tmp4.At(i, j))
		}
	}
	return NewNormal(mu1, &sigma11, src)
}
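
A worked sketch of the formulas in the doc comment, conditioning a correlated bivariate normal on its first dimension. It assumes NewNormal with the signature used in this package (returning *Normal and ok, accepting a nil src); the helper name and numbers are illustrative.

func conditionSketch() {
	mu := []float64{0, 0}
	sigma := mat64.NewSymDense(2, []float64{
		1, 0.9,
		0.9, 1,
	})
	norm, ok := NewNormal(mu, sigma, nil)
	if !ok {
		fmt.Println("sigma is not positive definite")
		return
	}
	// Observe dimension 0 at value 1. By the formulas above, the remaining
	// dimension is normal with mean 0 + 0.9*(1-0)/1 = 0.9 and variance
	// 1 - 0.9*0.9/1 = 0.19.
	cond, ok := norm.ConditionNormal([]int{0}, []float64{1}, nil)
	if !ok {
		fmt.Println("conditioning failed")
		return
	}
	_ = cond
}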
Example #5
// AddBatch adds a set of training points to the GP. This call updates internal
// values needed for prediction, so it is more efficient to add samples
// as a batch.
func (g *GP) AddBatch(x mat64.Matrix, y []float64) error {
	// Note: The outputs are stored scaled to have a mean of zero and a variance
	// of 1.

	// Verify input parameters
	rx, cx := x.Dims()
	ry := len(y)
	if rx != ry {
		panic(badInOut)
	}
	if cx != g.inputDim {
		panic(badInputLength)
	}
	nSamples := len(g.outputs)

	// Append the new data to the list of stored data.
	inputs := mat64.NewDense(rx+nSamples, g.inputDim, nil)
	inputs.Copy(g.inputs)
	inputs.View(nSamples, 0, rx, g.inputDim).(*mat64.Dense).Copy(x)
	g.inputs = inputs
	// Rescale the output data to its original value, append the new data, and
	// then rescale to have mean 0 and variance of 1.
	for i, v := range g.outputs {
		g.outputs[i] = v*g.std + g.mean
	}
	g.outputs = append(g.outputs, y...)
	g.mean = stat.Mean(g.outputs, nil)
	g.std = stat.StdDev(g.outputs, nil)
	for i, v := range g.outputs {
		g.outputs[i] = (v - g.mean) / g.std
	}

	// Add to the kernel matrix.
	k := mat64.NewSymDense(rx+nSamples, nil)
	k.CopySym(g.k)
	g.k = k
	// Compute the kernel with the new points and the old points
	for i := 0; i < nSamples; i++ {
		for j := nSamples; j < rx+nSamples; j++ {
			v := g.kernel.Distance(g.inputs.RawRowView(i), g.inputs.RawRowView(j))
			g.k.SetSym(i, j, v)
		}
	}

	// Compute the kernel with the new points and themselves
	for i := nSamples; i < rx+nSamples; i++ {
		for j := i; j < nSamples+rx; j++ {
			v := g.kernel.Distance(g.inputs.RawRowView(i), g.inputs.RawRowView(j))
			if i == j {
				v += g.noise
			}
			g.k.SetSym(i, j, v)
		}
	}
	// Cache necessary matrix results for computing predictions.
	var chol mat64.Cholesky
	ok := chol.Factorize(g.k)
	if !ok {
		return ErrSingular
	}
	g.cholK = &chol
	g.sigInvY.Reset()
	v := mat64.NewVector(len(g.outputs), g.outputs)
	g.sigInvY.SolveCholeskyVec(g.cholK, v)
	return nil
}
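
The cached values are what make prediction cheap: the GP posterior mean at a new point x* is k(x*, X) * K^-1 * y, and K^-1 * y is exactly the cached g.sigInvY. A hypothetical sketch of that use, not a method of the actual package; it assumes the fields referenced in AddBatch above, with sigInvY a mat64.Vector value field (drop the & if it is a pointer field).

// predictMeanSketch computes the GP posterior mean at a single input point,
// reusing the cached solve g.sigInvY = K^-1 * y and undoing the output
// scaling applied in AddBatch.
func (g *GP) predictMeanSketch(x []float64) float64 {
	n := len(g.outputs)
	kStar := mat64.NewVector(n, nil)
	for i := 0; i < n; i++ {
		kStar.SetVec(i, g.kernel.Distance(g.inputs.RawRowView(i), x))
	}
	return mat64.Dot(kStar, &g.sigInvY)*g.std + g.mean
}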