Example 1
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim

	b.x = resize(b.x, dim)
	copy(b.x, loc.X)
	b.grad = resize(b.grad, dim)
	copy(b.grad, loc.Gradient)

	b.y = resize(b.y, dim)
	b.s = resize(b.s, dim)
	b.tmp = resize(b.tmp, dim)
	b.yVec = mat64.NewVector(dim, b.y)
	b.sVec = mat64.NewVector(dim, b.s)
	b.tmpVec = mat64.NewVector(dim, b.tmp)

	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}

	// The values of the inverse Hessian are initialized in the first call to
	// NextDirection.

	// The initial direction is just the negative of the gradient because the
	// Hessian is initialized to the identity matrix.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)

	b.first = true

	return 1 / floats.Norm(dir, 2)
}
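In standard quasi-Newton notation, the initialization above takes the identity as the initial inverse Hessian, so the first direction and step size are (a restatement of the code, not an addition to it):

	p_0 = -\nabla f(x_0), \qquad \alpha_0 = \frac{1}{\lVert p_0 \rVert_2}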
Example 2
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = Number of Training Examples
	// n = Number of Features
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)
	partials := mat64.NewVector(n, nil)
	new_theta := mat64.NewVector(n, nil)

Regression:
	for i := 0; i < maxIters; i++ {
		// Calculate partial derivatives
		h.MulVec(X, new_theta)
		for el := 0; el < m; el++ {
			val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
			h.SetVec(el, val)
		}
		partials.MulVec(X.T(), h)

		// Update theta values
		for el := 0; el < n; el++ {
			new_val := new_theta.At(el, 0) - (alpha * partials.At(el, 0))
			new_theta.SetVec(el, new_val)
		}

		// Check the "distance" to the local minumum
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= tolerance {
			break Regression
		}
	}
	return new_theta
}
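A minimal usage sketch for GradientDescent above, assuming the gonum mat64 and fmt packages are imported; the data and hyperparameters are illustrative, not from the original source:

func ExampleGradientDescent() {
	// Fit y = 1 + 2x on three points; the first column of X is the bias term.
	X := mat64.NewDense(3, 2, []float64{
		1, 1,
		1, 2,
		1, 3,
	})
	y := mat64.NewVector(3, []float64{3, 5, 7})

	theta := GradientDescent(X, y, 0.01, 1e-4, 50000)

	// theta should approach (1, 2).
	fmt.Printf("%0.4v\n", mat64.Formatted(theta))
}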
Example 3
func (bicg *BiCG) Init(ctx *Context) Operation {
	if bicg.BreakdownTolerance == 0 {
		bicg.BreakdownTolerance = 1e-6
	}
	bicg.rho = math.NaN()

	dim := ctx.X.Len()
	if ctx.P == nil || ctx.P.Len() != dim {
		ctx.P = mat64.NewVector(dim, nil)
	}
	if ctx.Ap == nil || ctx.Ap.Len() != dim {
		ctx.Ap = mat64.NewVector(dim, nil)
	}
	if ctx.Q == nil || ctx.Q.Len() != dim {
		ctx.Q = mat64.NewVector(dim, nil)
	}
	if ctx.Aq == nil || ctx.Aq.Len() != dim {
		ctx.Aq = mat64.NewVector(dim, nil)
	}
	if ctx.Z == nil || ctx.Z.Len() != dim {
		ctx.Z = mat64.NewVector(dim, nil)
	}

	bicg.resume = 2
	return SolvePreconditioner
	// Solve M z = r_{i-1}
}
Example 4
func (lr *LinearRegression) Fit() {
	h := *mat64.NewVector(lr.m, nil)
	partials := mat64.NewVector(lr.n, nil)
	alpha_m := lr.alpha / float64(lr.m)

Descent:
	for i := 0; i < lr.maxIters; i++ {
		// Calculate partial derivatives
		h.MulVec(lr.x, lr.Theta)
		for x := 0; x < lr.m; x++ {
			h.SetVec(x, h.At(x, 0)-lr.y.At(x, 0))
		}
		partials.MulVec(lr.x.T(), &h)

		// Update theta values with the precalculated partials
		for x := 0; x < lr.n; x++ {
			theta_j := lr.Theta.At(x, 0) - alpha_m*partials.At(x, 0)
			lr.Theta.SetVec(x, theta_j)
		}

		// Check the "distance" to the local minumum
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= lr.tolerance {
			break Descent
		}
	}
}
Example 5
func TestHypothesis(t *testing.T) {
	for _, test := range []struct {
		theta *mat64.Vector
		x     *mat64.Vector
		y     float64
	}{
		{
			mat64.NewVector(2, []float64{0, 2}),
			mat64.NewVector(2, []float64{0, 1}),
			2.0,
		}, {
			mat64.NewVector(2, []float64{0, 2}),
			mat64.NewVector(2, []float64{0, 2}),
			4.0,
		}, {
			mat64.NewVector(2, []float64{0, 2}),
			mat64.NewVector(2, []float64{0, 10}),
			20.0,
		}, {
			mat64.NewVector(2, []float64{1, 2}),
			mat64.NewVector(2, []float64{1, 10}),
			21.0,
		}, {
			mat64.NewVector(3, []float64{1, 2.5, 5}),
			mat64.NewVector(3, []float64{10, 20, 0}),
			60.0,
		},
	} {
		h := Hypothesis(test.x, test.theta)

		if h != test.y {
			t.Errorf("Hypothesis(%v,%v) is expected to be equal to %v, found %v", test.x, test.theta, test.y, h)
		}
	}
}
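Hypothesis itself is not shown in this example; the cases above are consistent with the plain linear hypothesis h(θ, x) = θᵀx. A hedged reconstruction, not necessarily the original implementation:

// Hypothesis returns θᵀx, the linear hypothesis value for a single example.
// This is a sketch consistent with the test cases above, not the original code.
func Hypothesis(x, theta *mat64.Vector) float64 {
	return mat64.Dot(x, theta)
}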
Example 6
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	b.first = true

	x := mat64.NewVector(dim, loc.X)
	grad := mat64.NewVector(dim, loc.Gradient)
	b.x.CloneVec(x)
	b.grad.CloneVec(grad)

	b.y.Reset()
	b.s.Reset()
	b.tmp.Reset()

	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the inverse Hessian are initialized in the first call to
	// NextDirection.

	// Initial direction is just negative of the gradient because the Hessian
	// is an identity matrix.
	d := mat64.NewVector(dim, dir)
	d.ScaleVec(-1, grad)
	return 1 / mat64.Norm(d, 2)
}
Example 7
// transformNormal performs the same operation as TransformNormal except no
// safety checks are performed and both input slices must be non-nil.
func (n *Normal) transformNormal(dst, normal []float64) []float64 {
	srcVec := mat64.NewVector(n.dim, normal)
	dstVec := mat64.NewVector(n.dim, dst)
	dstVec.MulVec(&n.lower, srcVec)
	floats.Add(dst, n.mu)
	return dst
}
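In matrix form, the helper above applies the usual affine map that turns a standard normal draw z into a draw from N(μ, Σ), with L the lower Cholesky factor held in n.lower:

	x = \mu + Lz, \qquad \Sigma = LL^{\mathsf{T}}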
Example 8
// LinearLeastSquares computes the least squares fit for the function
//
//   f(x) = β₀terms₀(x) + β₁terms₁(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters β₀, β₁, ...
// that minimize the sum of the squares of the residuals of f:
//
//   ∑ (ys[i] - f(xs[i]))²
//
// If weights is non-nil, it is used to weight these residuals:
//
//   ∑ weights[i] × (ys[i] - f(xs[i]))²
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for β̂ in the
	// "normal equations":
	//
	//    (𝐗ᵀ𝐖𝐗)β̂ = 𝐗ᵀ𝐖𝐲
	//
	// where 𝐖 is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).

	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		panic("len(xs) != len(weights")
	}

	// Construct 𝐗ᵀ. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct 𝐗ᵀ𝐖.
	var XTW *mat64.Dense
	if weights == nil {
		// 𝐖 is the identity matrix.
		XTW = XT
	} else {
		// Since 𝐖 is a diagonal matrix, we do this directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct 𝐲.
	y := mat64.NewVector(len(ys), ys)

	// Compute β̂.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)

	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)

	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}
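A usage sketch for LinearLeastSquares above, fitting f(x) = β₀ + β₁x² to data lying exactly on 1 + x²; the data and term functions are illustrative assumptions, and fmt is assumed imported:

func ExampleLinearLeastSquares() {
	xs := []float64{0, 1, 2, 3}
	ys := []float64{1, 2, 5, 10} // exactly 1 + x²

	params := LinearLeastSquares(xs, ys, nil,
		// Constant term: terms₀(x) = 1.
		func(xs, termOut []float64) {
			for i := range termOut {
				termOut[i] = 1
			}
		},
		// Quadratic term: terms₁(x) = x².
		func(xs, termOut []float64) {
			for i, x := range xs {
				termOut[i] = x * x
			}
		})

	fmt.Printf("%0.4v\n", params) // expect approximately [1 1]
}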
Example 9
// Tests vectorDistance, which computes the squared Euclidean distance
// between two vectors.
func TestVectorDistance(t *testing.T) {
	vec1 := mat.NewVector(3, []float64{4, 6, 2})
	vec2 := mat.NewVector(3, []float64{1, 9, 3})
	expectedAns := float64(19)

	if expectedAns != vectorDistance(vec1, vec2) {
		t.Errorf("Expected %f, got %f", expectedAns, vectorDistance(vec1, vec2))
	}
}
Example 10
func (n *Newton) NextDirection(loc *Location, dir []float64) (stepSize float64) {
	// This method implements Algorithm 3.3 (Cholesky with Added Multiple of
	// the Identity) from Nocedal, Wright (2006), 2nd edition.

	dim := len(loc.X)
	n.hess.CopySym(loc.Hessian)

	// Find the smallest diagonal entry of the Hessian.
	minA := n.hess.At(0, 0)
	for i := 1; i < dim; i++ {
		a := n.hess.At(i, i)
		if a < minA {
			minA = a
		}
	}
	// If the smallest diagonal entry is positive, the Hessian may be positive
	// definite, and so first attempt to apply the Cholesky factorization to
	// the un-modified Hessian. If the smallest entry is negative, use the
	// final tau from the last iteration if regularization was needed,
	// otherwise guess an appropriate value for tau.
	if minA > 0 {
		n.tau = 0
	} else if n.tau == 0 {
		n.tau = -minA + 0.001
	}

	for k := 0; k < maxNewtonModifications; k++ {
		if n.tau != 0 {
			// Add a multiple of identity to the Hessian.
			for i := 0; i < dim; i++ {
				n.hess.SetSym(i, i, loc.Hessian.At(i, i)+n.tau)
			}
		}
		// Try to apply the Cholesky factorization.
		pd := n.chol.Factorize(n.hess)
		if pd {
			d := mat64.NewVector(dim, dir)
			// Store the solution in d's backing array, dir.
			d.SolveCholeskyVec(&n.chol, mat64.NewVector(dim, loc.Gradient))
			floats.Scale(-1, dir)
			return 1
		}
		// Modified Hessian is not PD, so increase tau.
		n.tau = math.Max(n.Increase*n.tau, 0.001)
	}

	// Hessian modification failed to get a PD matrix. Return the negative
	// gradient as the descent direction.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	return 1
}
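Each pass of the loop above attempts a Cholesky factorization of the shifted Hessian and, on success, solves for the Newton direction; on failure the shift grows geometrically. In formulas, with ρ standing for n.Increase:

	\left(\nabla^2 f(x_k) + \tau I\right) p_k = -\nabla f(x_k), \qquad \tau \leftarrow \max(\rho \tau,\ 10^{-3})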
Example 11
func newMargLikeMemory(hyper, outputs int) *margLikeMemory {
	m := &margLikeMemory{
		lastX:    make([]float64, hyper),
		k:        mat64.NewSymDense(outputs, nil),
		chol:     &mat64.Cholesky{},
		alpha:    mat64.NewVector(outputs, nil),
		tmp:      mat64.NewVector(1, nil),
		dKdTheta: make([]*mat64.SymDense, hyper),
		kInvDK:   mat64.NewDense(outputs, outputs, nil),
	}
	for i := 0; i < hyper; i++ {
		m.dKdTheta[i] = mat64.NewSymDense(outputs, nil)
	}
	return m
}
Example 12
func Solve(a sparse.Matrix, b, xInit *mat64.Vector, settings *Settings, method Method) (result Result, err error) {
	stats := Stats{
		StartTime: time.Now(),
	}

	dim, c := a.Dims()
	if dim != c {
		panic("iterative: matrix is not square")
	}
	if xInit != nil && dim != xInit.Len() {
		panic("iterative: mismatched size of the initial guess")
	}
	if b.Len() != dim {
		panic("iterative: mismatched size of the right-hand side vector")
	}

	if xInit == nil {
		xInit = mat64.NewVector(dim, nil)
	}
	if settings == nil {
		settings = DefaultSettings(dim)
	}

	ctx := Context{
		X:        mat64.NewVector(dim, nil),
		Residual: mat64.NewVector(dim, nil),
	}
	// X = xInit
	ctx.X.CopyVec(xInit)
	if mat64.Norm(ctx.X, math.Inf(1)) > 0 {
		// Residual = Ax
		sparse.MulMatVec(ctx.Residual, 1, false, a, ctx.X)
		stats.MatVecMultiplies++
	}
	// Residual = Ax - b
	ctx.Residual.SubVec(ctx.Residual, b)

	if mat64.Norm(ctx.Residual, 2) >= settings.Tolerance {
		err = iterate(method, a, b, settings, &ctx, &stats)
	}

	result = Result{
		X:       ctx.X,
		Stats:   stats,
		Runtime: time.Since(stats.StartTime),
	}
	return result, err
}
Example 13
// findLinearlyIndependent finds a set of linearly independent columns of A, and
// returns the column indexes of the linearly independent columns.
func findLinearlyIndependent(A mat64.Matrix) []int {
	m, n := A.Dims()
	idxs := make([]int, 0, m)
	columns := mat64.NewDense(m, m, nil)
	newCol := make([]float64, m)
	// Walk in reverse order because slack variables are typically the last columns
	// of A.
	for i := n - 1; i >= 0; i-- {
		if len(idxs) == m {
			break
		}
		mat64.Col(newCol, i, A)
		if len(idxs) == 0 {
			// A column is linearly independent from the null set.
			// This is what needs to be changed if zero columns are allowed, as
			// a column of all zeros is not linearly independent from itself.
			columns.SetCol(len(idxs), newCol)
			idxs = append(idxs, i)
			continue
		}
		if linearlyDependent(mat64.NewVector(m, newCol), columns.View(0, 0, m, len(idxs))) {
			continue
		}
		columns.SetCol(len(idxs), newCol)
		idxs = append(idxs, i)
	}
	return idxs
}
Example 14
func TestGather(t *testing.T) {
	for i, test := range []struct {
		y       []float64
		indices []int

		want []float64
	}{
		{
			y:       []float64{1, 2, 3, 4},
			indices: []int{0, 2, 3},

			want: []float64{1, 3, 4},
		},
		{
			indices: []int{0, 2, 3, 6},
			y:       []float64{1, 2, 3, 4, 5, 6, 7, 8},

			want: []float64{1, 3, 4, 7},
		},
	} {
		y := mat64.NewVector(len(test.y), test.y)
		var x Vector
		Gather(&x, y, test.indices)

		if x.N != y.Len() {
			t.Errorf("%d: wrong dimension, want = %v, got = %v ", i, y.Len(), x.N)
		}
		if !reflect.DeepEqual(x.Data, test.want) {
			t.Errorf("%d: data not equal, want = %v, got %v\n", i, test.want, x.Data)
		}
		if !reflect.DeepEqual(x.Indices, test.indices) {
			t.Errorf("%d: indices not equal, want = %v, got %v\n", i, test.indices, x.Indices)
		}
	}
}
Example 15
func TestLeastSquares(t *testing.T) {

	matA := mat64.NewDense(5, 3, []float64{
		1, -2, 4,
		1, -1, 1,
		1, 0, 0,
		1, 1, 1,
		1, 2, 4,
	})

	vecb := mat64.NewVector(5, []float64{
		0,
		0,
		1,
		0,
		0,
	})

	x := vec3(linalg.LeastSquares(matA, vecb))

	expected := Vec3{34.0 / 70.0, 0.0, -10.0 / 70.0}
	if x != expected {
		t.Errorf("expected %v, got %v", expected, x)
	}
}
Example 16
func TestScatter(t *testing.T) {
	for i, test := range []struct {
		x, y    []float64
		indices []int

		want []float64
	}{
		{
			x:       []float64{1, 2, 3},
			indices: []int{0, 2, 3},
			y:       []float64{math.NaN(), 0, math.NaN(), math.NaN()},

			want: []float64{1, 0, 2, 3},
		},
		{
			x:       []float64{1, 2, 3},
			indices: []int{0, 4, 6},
			y:       []float64{math.NaN(), 0, 0, 0, math.NaN(), 0, math.NaN(), 0},

			want: []float64{1, 0, 0, 0, 2, 0, 3, 0},
		},
	} {
		y := mat64.NewVector(len(test.y), test.y)
		x := NewVector(len(test.y), test.x, test.indices)

		Scatter(y, x)
		if !reflect.DeepEqual(test.y, test.want) {
			t.Errorf("%d: want = %v, got %v\n", i, test.want, test.y)
		}
	}
}
Example 17
// MeanBatch predicts the mean at the set of locations specified by x, storing
// the result in yPred. If yPred is nil, new memory is allocated.
func (g *GP) MeanBatch(yPred []float64, x mat64.Matrix) []float64 {
	rx, cx := x.Dims()
	if cx != g.inputDim {
		panic(badInputLength)
	}
	if yPred == nil {
		yPred = make([]float64, rx)
	}
	ry := len(yPred)
	if rx != ry {
		panic(badOutputLength)
	}
	nSamples, _ := g.inputs.Dims()

	covariance := mat64.NewDense(nSamples, rx, nil)
	row := make([]float64, g.inputDim)
	for j := 0; j < rx; j++ {
		for k := 0; k < g.inputDim; k++ {
			row[k] = x.At(j, k)
		}
		for i := 0; i < nSamples; i++ {
			v := g.kernel.Distance(g.inputs.RawRowView(i), row)
			covariance.Set(i, j, v)
		}
	}
	yPredVec := mat64.NewVector(len(yPred), yPred)
	yPredVec.MulVec(covariance.T(), g.sigInvY)
	// Rescale the outputs
	for i, v := range yPred {
		yPred[i] = v*g.std + g.mean
	}
	return yPred
}
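Assuming g.sigInvY holds the Cholesky solve precomputed in Train (see Example 19), the multiplication above is the standard Gaussian process posterior mean at the query locations X∗, followed by undoing the output normalization:

	\bar{y}_* = K(X, X_*)^{\mathsf{T}} \left[ K(X, X) + \sigma_n^2 I \right]^{-1} y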
Example 18
func TestDot(t *testing.T) {
	for _, test := range []struct {
		n       int
		x, y    []float64
		indices []int

		want float64
	}{
		{
			n:       5,
			x:       []float64{1, 2, 3},
			indices: []int{0, 2, 4},
			y:       []float64{1, math.NaN(), 3, math.NaN(), 5},

			want: 22,
		},
	} {
		x := NewVector(test.n, test.x, test.indices)
		y := mat64.NewVector(len(test.y), test.y)
		got := Dot(x, y)
		if got != test.want {
			t.Errorf("want = %v, got %v\n", test.want, got)
		}
	}
}
Example 19
// Train sets the parameters of the Gaussian process. If trainNoise is true,
// the noise parameter is adjusted; otherwise it is not.
// TODO(btracey): Need to implement barrier method for parameters. Steps get crazy.
func (g *GP) Train(trainNoise bool) error {
	// TODO(btracey): Implement a memory struct that can be passed around with
	// all of this data.

	initHyper := g.kernel.Hyper(nil)
	nKerHyper := len(initHyper)
	if trainNoise {
		initHyper = append(initHyper, math.Log(g.noise))
	}

	mem := newMargLikeMemory(len(initHyper), len(g.outputs))

	f := func(x []float64) float64 {
		fmt.Println("x =", x)
		obj := g.marginalLikelihood(x, trainNoise, mem)
		fmt.Println("obj =", obj)
		return obj
	}
	df := func(x, grad []float64) {
		g.marginalLikelihoodDerivative(x, grad, trainNoise, mem)
		fmt.Println("x = ", x)
		fmt.Println("grad = ", grad)
	}

	//	grad =  [0.4500442759224154 -3.074041876494095 0.42568788880060204]
	/*
		x := []float64{0.7287793210009457, -0.9371471942974932, -14.017213937483529}
		fofx := f(x)
		fmt.Println("fofx", fofx)

		set := fd.DefaultSettings()
		set.Method.Step = 1e-4
		fdGrad := fd.Gradient(nil, f, x, nil)
		fmt.Println("fd grad = ", fdGrad)
		grad := make([]float64, len(fdGrad))
		df(x, grad)
		fmt.Println("real grad = ", grad)
		os.Exit(1)
	*/

	problem := optimize.Problem{
		Func: f,
		Grad: df,
	}
	settings := optimize.DefaultSettings()
	settings.GradientThreshold = 1e-4
	result, err := optimize.Local(problem, initHyper, settings, nil)
	// set noise
	g.noise = math.Exp(result.X[len(result.X)-1])
	g.kernel.SetHyper(result.X[:nKerHyper])
	g.setKernelMat(g.k, g.noise)
	ok := g.cholK.Factorize(g.k)
	if !ok {
		return errors.New("gp: final kernel matrix is not positive definite")
	}
	v := mat64.NewVector(len(g.outputs), g.outputs)
	g.sigInvY.SolveCholeskyVec(g.cholK, v)
	return err
}
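The objective minimized here is not shown, but for a Gaussian process it is conventionally the negative of the log marginal likelihood; a hedged statement of the usual formula for reference:

	\log p(y \mid X, \theta) = -\tfrac{1}{2} y^{\mathsf{T}} K^{-1} y - \tfrac{1}{2} \log \lvert K \rvert - \tfrac{n}{2} \log 2\pi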
Example 20
// LogProb computes the log of the pdf of the point x.
func (n *Normal) LogProb(x []float64) float64 {
	dim := n.dim
	if len(x) != dim {
		panic(badSizeMismatch)
	}
	// Compute the normalization constant
	c := -0.5*float64(dim)*logTwoPi - n.logSqrtDet

	// Compute (x-mu)'Sigma^-1 (x-mu)
	xMinusMu := make([]float64, dim)
	floats.SubTo(xMinusMu, x, n.mu)
	d := mat64.NewVector(dim, xMinusMu)
	tmp := make([]float64, dim)
	tmpVec := mat64.NewVector(dim, tmp)
	tmpVec.SolveCholeskyVec(n.chol, d)
	return c - 0.5*floats.Dot(tmp, xMinusMu)
}
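This evaluates the multivariate normal log-density; c absorbs the first two terms below, and the Cholesky solve yields Σ⁻¹(x−μ) without ever forming the inverse:

	\log p(x) = -\tfrac{d}{2} \log(2\pi) - \tfrac{1}{2} \log \lvert \Sigma \rvert - \tfrac{1}{2} (x-\mu)^{\mathsf{T}} \Sigma^{-1} (x-\mu)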
Example 21
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)

	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}

	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())

	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))

	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("          ")))

	// Output:
	// a = ⎡120  114   -4  -16⎤
	//     ⎢114  118   11  -24⎥
	//     ⎢ -4   11   58   17⎥
	//     ⎣-16  -24   17   73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡  -0.239⎤
	//     ⎢  0.2732⎥
	//     ⎢-0.04681⎥
	//     ⎣  0.1031⎦
	//
	// L * L^T = ⎡120  114   -4  -16⎤
	//           ⎢114  118   11  -24⎥
	//           ⎢ -4   11   58   17⎥
	//           ⎣-16  -24   17   73⎦
}
Example 22
// Rand generates a random sample according to the distribution.
// If the input slice is nil, new memory is allocated; otherwise the result
// is stored in place.
func (n *Normal) Rand(x []float64) []float64 {
	x = reuseAs(x, n.dim)
	tmp := make([]float64, n.dim)
	if n.src == nil {
		for i := range x {
			tmp[i] = rand.NormFloat64()
		}
	} else {
		for i := range x {
			tmp[i] = n.src.NormFloat64()
		}
	}
	tmpVec := mat64.NewVector(n.dim, tmp)
	xVec := mat64.NewVector(n.dim, x)
	xVec.MulVec(n.chol, true, tmpVec)
	floats.Add(x, n.mu)
	return x
}
Example 23
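// vectorDistance returns the squared Euclidean distance between vec1 and
// vec2: the sum of squared element-wise differences.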
func vectorDistance(vec1, vec2 *mat.Vector) (v float64) {
	result := mat.NewVector(vec1.Len(), nil)

	result.SubVec(vec1, vec2)
	result.MulElemVec(result, result)
	v = mat.Sum(result)

	return
}
Example 24
func TestFindIn(t *testing.T) {
	v := mat.NewVector(4, []float64{
		1,
		0,
		1,
		0,
	})
	x := 1
	expectedVec := mat.NewVector(2, []float64{
		0,
		2,
	})

	result := findIn(float64(x), v)
	if !mat.Equal(result, expectedVec) {
		t.Errorf("Expected \n%v, found \n%v",
			printMatrix(expectedVec), printMatrix(result))
	}
}
Example 25
func TestGradientDescent(t *testing.T) {

	alpha := 0.01
	maxIters := 15000
	tolerance := 0.0001
	for _, test := range []struct {
		x      *mat64.Dense
		y      *mat64.Vector
		result *mat64.Vector
	}{
		{
			mat64.NewDense(3, 4, []float64{
				1, 3, 5, 6,
				1, 1, 2, 3,
				1, 9, 4, 2}),
			mat64.NewVector(3, []float64{1, 6, 4}),
			mat64.NewVector(4, []float64{8.0918, 0.8920, -3.7990, 1.5379}),
		}, {
			mat64.NewDense(10, 4, []float64{
				1, 2, 3, 4,
				1, 3, 4, 5,
				1, 4, 5, 6,
				1, 5, 6, 7,
				1, 6, 7, 8,
				1, 7, 8, 9,
				1, 8, 9, 10,
				1, 9, 10, 11,
				1, 10, 11, 12,
				1, 11, 12, 13}),
			mat64.NewVector(10, []float64{20, 26, 32, 38, 44, 50, 56, 62, 68, 74}),
			mat64.NewVector(4, []float64{0.6665, 1.3335, 2.0000, 2.6665}),
		},
	} {

		theta := GradientDescent(test.x, test.y, alpha, tolerance, maxIters)

		if !mat64.EqualApprox(test.result, theta, 0.0001) {
			t.Error("Expected:", test.result)
			t.Error("Actual:  ", theta)
		}
	}
}
Example 26
func getRowVector(index int, M mat.Matrix) *mat.Vector {
	_, cols := M.Dims()
	var rowData []float64

	if cols == 0 {
		rowData = []float64{}
	} else {
		rowData = mat.Row(nil, index, M)
	}
	return mat.NewVector(cols, rowData)
}
Example 27
func TestMultiHypothesis(t *testing.T) {
	for _, test := range []struct {
		theta *mat64.Vector
		x     *mat64.Dense
		y     *mat64.Vector
	}{
		{
			mat64.NewVector(2, []float64{0, 2}),
			mat64.NewDense(2, 3, []float64{0, 0, 0, 1, 2, 10}),
			mat64.NewVector(3, []float64{2, 4, 20}),
		},
	} {
		h := MultiHypothesis(test.x, test.theta)

		if !mat64.Equal(h, test.y) {
			t.Errorf("MultiHypothesis(%v,%v) is expected to be equal to %v, found %v", test.x, test.theta, test.y, h)
		}
	}

}
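MultiHypothesis itself is not shown; the case above is consistent with computing Xᵀθ, one hypothesis value per training example. A hedged reconstruction, not necessarily the original code:

// MultiHypothesis returns Xᵀθ as a vector of hypothesis values, one per
// column of x. This is a sketch consistent with the test above.
func MultiHypothesis(x *mat64.Dense, theta *mat64.Vector) *mat64.Vector {
	_, n := x.Dims()
	h := mat64.NewVector(n, nil)
	h.MulVec(x.T(), theta)
	return h
}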
Example 28
// findIn returns the indexes of the values in vec that match scalar
func findIn(scalar float64, vec *mat.Vector) *mat.Vector {
	var result []float64

	for i := 0; i < vec.Len(); i++ {
		if scalar == vec.At(i, 0) {
			result = append(result, float64(i))
		}
	}

	return mat.NewVector(len(result), result)
}
Example 29
// AssignCentroid assigns all of the examples in X to one of the groups
// in Mu
// X -> (m*n), Mu -> (K*n)
// returns (m*1)
func AssignCentroid(X, Mu mat.Matrix) *mat.Vector {
	m, _ := X.Dims()
	idx := mat.NewVector(m, nil)

	for i := 0; i < m; i++ {
		x := getRowVector(i, X)
		idx.SetVec(i, float64(NearestCentroid(x, Mu)))
	}

	return idx
}
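A usage sketch for AssignCentroid above, assuming NearestCentroid returns the row index in Mu of the centroid closest to x; the data is illustrative, and fmt is assumed imported:

func ExampleAssignCentroid() {
	// Four 1-D examples and two centroids, at 0 and at 10.
	X := mat.NewDense(4, 1, []float64{1, 2, 9, 11})
	Mu := mat.NewDense(2, 1, []float64{0, 10})

	idx := AssignCentroid(X, Mu)

	// idx should hold (0, 0, 1, 1): the index of the nearest centroid
	// for each example.
	fmt.Println(mat.Formatted(idx))
}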
Example 30
func ExampleExcerpt() {
	// Excerpt allows diagnostic display of very large
	// matrices and vectors.

	// The big matrix is too large to properly print...
	big := mat64.NewDense(100, 100, nil)
	for i := 0; i < 100; i++ {
		big.Set(i, i, 1)
	}

	// so only print corner excerpts of the matrix.
	fmt.Printf("excerpt big identity matrix: %v\n\n",
		mat64.Formatted(big, mat64.Prefix(" "), mat64.Excerpt(3)))

	// The long vector is also too large, ...
	long := mat64.NewVector(100, nil)
	for i := 0; i < 100; i++ {
		long.SetVec(i, float64(i))
	}

	// ... so print end excerpts of the vector,
	fmt.Printf("excerpt long column vector: %v\n\n",
		mat64.Formatted(long, mat64.Prefix(" "), mat64.Excerpt(3)))
	// or its transpose.
	fmt.Printf("excerpt long row vector: %v\n",
		mat64.Formatted(long.T(), mat64.Prefix(" "), mat64.Excerpt(3)))

	// Output:
	// excerpt big identity matrix: Dims(100, 100)
	//  ⎡1  0  0  ...  ...  0  0  0⎤
	//  ⎢0  1  0            0  0  0⎥
	//  ⎢0  0  1            0  0  0⎥
	//  .
	//  .
	//  .
	//  ⎢0  0  0            1  0  0⎥
	//  ⎢0  0  0            0  1  0⎥
	//  ⎣0  0  0  ...  ...  0  0  1⎦
	//
	// excerpt long column vector: Dims(100, 1)
	//  ⎡ 0⎤
	//  ⎢ 1⎥
	//  ⎢ 2⎥
	//  .
	//  .
	//  .
	//  ⎢97⎥
	//  ⎢98⎥
	//  ⎣99⎦
	//
	// excerpt long row vector: Dims(1, 100)
	//  [ 0   1   2  ...  ...  97  98  99]

}