Example #1
// Cov returns the covariance between a set of data points based on the current
// GP fit.
func (g *GP) Cov(m *mat64.SymDense, x mat64.Matrix) *mat64.SymDense {
	if m != nil {
		// TODO(btracey): Make this k**
		panic("resuing m not coded")
	}
	// The posterior covariance matrix is
	// k(x_*, x_*) - k(x_*, x) k(x, x)^-1 k(x, x_*)
	nSamp, nDim := x.Dims()
	if nDim != g.inputDim {
		panic(badInputLength)
	}

	// Compute K(x_*, x) K(x, x)^-1 K(x, x_*)
	kstar := g.formKStar(x)
	var tmp mat64.Dense
	tmp.SolveCholesky(g.cholK, kstar)
	var tmp2 mat64.Dense
	tmp2.Mul(kstar.T(), &tmp)

	// Compute k(x_*, x_*) and perform the subtraction.
	kstarstar := mat64.NewSymDense(nSamp, nil)
	for i := 0; i < nSamp; i++ {
		for j := i; j < nSamp; j++ {
			v := g.kernel.Distance(mat64.Row(nil, i, x), mat64.Row(nil, j, x))
			if i == j {
				v += g.noise
			}
			kstarstar.SetSym(i, j, v-tmp2.At(i, j))
		}
	}
	return kstarstar
}
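Written out, the quantity the comment above describes is the standard Gaussian-process posterior covariance at the prediction points X_*, given training inputs X:

\Sigma_* = K(X_*, X_*) - K(X_*, X)\, K(X, X)^{-1}\, K(X, X_*)

The code computes the subtracted term through a Cholesky solve rather than an explicit inverse.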
Example #2
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)

	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}

	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())

	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))

	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("          ")))

	// Output:
	// a = ⎡120  114   -4  -16⎤
	//     ⎢114  118   11  -24⎥
	//     ⎢ -4   11   58   17⎥
	//     ⎣-16  -24   17   73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡  -0.239⎤
	//     ⎢  0.2732⎥
	//     ⎢-0.04681⎥
	//     ⎣  0.1031⎦
	//
	// L * L^T = ⎡120  114   -4  -16⎤
	//           ⎢114  118   11  -24⎥
	//           ⎢ -4   11   58   17⎥
	//           ⎣-16  -24   17   73⎦
}
Example #3
func forward(x *mat64.Dense) (yHat mat64.Dense) {
	z2 := mat64.Dense{} // 3x3
	a2 := mat64.Dense{} // 3x3
	z3 := mat64.Dense{} // 3x1
	z2.Mul(x, w1)
	a2.Apply(sigmoidApplyable, &z2)
	z3.Mul(&a2, w2)
	// yHat is 3x1
	yHat.Apply(sigmoidApplyable, &z3)
	return
}
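forward assumes package-level weight matrices and an activation helper that are not shown with this example. A minimal sketch of what they might look like, with shapes inferred from the size comments above (these declarations are assumptions, not the original project's code):

// w1 maps the 3x2 input to the 3x3 hidden layer; w2 maps it to the 3x1 output.
var (
	w1 = mat64.NewDense(2, 3, nil)
	w2 = mat64.NewDense(3, 1, nil)
)

// sigmoidApplyable matches the func(r, c int, v float64) float64 signature
// that mat64's Apply expects.
func sigmoidApplyable(r, c int, v float64) float64 {
	return 1 / (1 + math.Exp(-v))
}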
Example #4
// predictFeaturized multiplies the featureWeights by the featurized input and stores the value. It assumes
// that inMat and outMat already have the correct shape, but will replace the data in them.
func predictFeaturized(featurizedInput []float64, output []float64, featureWeights *mat64.Dense, inMat *mat64.Dense, outMat *mat64.Dense) {
	// Point the scratch matrices at the caller's slices so no new backing
	// memory is allocated.
	rm := inMat.RawMatrix()
	rm.Data = featurizedInput
	inMat.SetRawMatrix(rm)

	rm = outMat.RawMatrix()
	rm.Data = output
	outMat.SetRawMatrix(rm)

	// Multiply the featurized input by the feature weights and store.
	outMat.Mul(inMat, featureWeights)
}
Example #5
func (self *Layer) Update(learningConfiguration LearningConfiguration) {
	var deltas mat64.Dense
	deltas.Mul(self.Deltas, self.Input)
	rows, cols := self.Weight.Dims()
	weight := self.Weight.View(0, 0, rows-1, cols).(*mat64.Dense)
	if *learningConfiguration.Decay > 0 {
		var decay mat64.Dense
		decay.Scale(*learningConfiguration.Decay, weight)
		deltas.Sub(&deltas, decay.T())
	}
	deltas.Scale(*learningConfiguration.Rate, &deltas)
	weight.Sub(weight, deltas.T())
}
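Update relies on a Layer type and a LearningConfiguration that are not shown with this example. A minimal sketch consistent with the method body (field names are taken from the code; the exact types are assumptions):

type Layer struct {
	Weight *mat64.Dense // only the first rows-1 rows are updated above
	Deltas *mat64.Dense
	Input  *mat64.Dense
}

type LearningConfiguration struct {
	Rate  *float64
	Decay *float64
}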
Example #6
// mulMulti returns r = a[:,0]*b[0] + a[:,1]*b[1], i.e. the matrix-vector
// product a * b for a two-column a.
func mulMulti(a *mat64.Dense, b []float64, rows int) (r []float64) {
	var m, m2 mat64.Dense

	b1 := mat64.NewDense(1, 1, []float64{b[0]})
	b2 := mat64.NewDense(1, 1, []float64{b[1]})

	m.Mul(a.ColView(0), b1)
	m2.Mul(a.ColView(1), b2)

	for i := 0; i < rows; i++ {
		r = append(r, m.ColView(0).At(i, 0)+m2.ColView(0).At(i, 0))
	}
	return r
}
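The same result can be had with a single matrix-vector multiply; a sketch (mulVec is a hypothetical helper, not part of the original code):

func mulVec(a *mat64.Dense, b []float64) *mat64.Vector {
	var v mat64.Vector
	v.MulVec(a, mat64.NewVector(len(b), b))
	return &v
}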
Example #7
// LinearSolve trains a Linear algorithm.
// It assumes the inputs and outputs are already scaled.
// If features is nil, it will call featurize.
// Returns nil if the regularizer is not a linear regularizer.
// Destructive if any of the weights are zero.
// The losser is always the two-norm.
// Does not set the value of the parameters (in case this is called in
// parallel with a different routine).
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow Tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64

	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}

	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}

	_, nFeatures := features.Dims()

	var weightedFeatures, weightedOutput *mat64.Dense

	if weights != nil {
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}

		diagWeight := diagonal.NewDiagonal(len(scaledWeight), scaledWeight)

		nSamples, outputDim := trueOutputs.Dims()
		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)

		weightedOutput.Mul(diagWeight, trueOutputs)
		weightedFeatures.Mul(diagWeight, features)
	}

	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}
	if weights == nil {
		parameterMat, err := mat64.Solve(features, trueOutputs)
		if err != nil {
			panic(err)
		}
		return parameterMat.RawMatrix().Data
	}
	parameterMat, err := mat64.Solve(weightedFeatures, weightedOutput)
	if err != nil {
		panic(err)
	}
	return parameterMat.RawMatrix().Data
}
Example #8
func main() {
	a := mat64.NewDense(2, 4, []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	})
	b := mat64.NewDense(4, 3, []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
		10, 11, 12,
	})
	var m mat64.Dense
	m.Mul(a, b)
	fmt.Println(mat64.Formatted(&m))
}
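For reference, the 2x4 by 4x3 product above is 2x3, so the program prints (up to column padding):

// ⎡ 70   80   90⎤
// ⎣158  184  210⎦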
Example #9
func (lr *LinearRegression) Fit(inst *base.Instances) error {
	if inst.Rows < inst.GetAttributeCount() {
		return NotEnoughDataError
	}

	// Split into two matrices, observed results (dependent variable y)
	// and the explanatory variables (X) - see http://en.wikipedia.org/wiki/Linear_regression
	observed := mat64.NewDense(inst.Rows, 1, nil)
	explVariables := mat64.NewDense(inst.Rows, inst.GetAttributeCount(), nil)

	for i := 0; i < inst.Rows; i++ {
		observed.Set(i, 0, inst.Get(i, inst.ClassIndex)) // Set observed data

		for j := 0; j < inst.GetAttributeCount(); j++ {
			if j == 0 {
				// Set intercepts to 1.0
				// Could / should be done better: http://www.theanalysisfactor.com/interpret-the-intercept/
				explVariables.Set(i, 0, 1.0)
			} else {
				explVariables.Set(i, j, inst.Get(i, j-1))
			}
		}
	}

	n := inst.GetAttributeCount()
	qr := mat64.QR(explVariables)
	q := qr.Q()
	reg := qr.R()

	var transposed, qty mat64.Dense
	transposed.TCopy(q)
	qty.Mul(&transposed, observed)

	regressionCoefficients := make([]float64, n)
	for i := n - 1; i >= 0; i-- {
		regressionCoefficients[i] = qty.At(i, 0)
		for j := i + 1; j < n; j++ {
			regressionCoefficients[i] -= regressionCoefficients[j] * reg.At(i, j)
		}
		regressionCoefficients[i] /= reg.At(i, i)
	}

	lr.disturbance = regressionCoefficients[0]
	lr.regressionCoefficients = regressionCoefficients[1:]
	lr.fitted = true

	return nil
}
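The coefficient loop above (and its twin in Example #13) is back substitution on the triangular system produced by the QR factorization:

X = QR, \qquad R\beta = Q^{\mathsf T} y, \qquad \beta_i = \frac{(Q^{\mathsf T} y)_i - \sum_{j>i} R_{ij}\,\beta_j}{R_{ii}}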
Example #10
func randmatmul(n int) *mat64.Dense {
	aData := make([]float64, n*n)
	for i := range aData {
		aData[i] = rnd.Float64()
	}
	a := mat64.NewDense(n, n, aData)

	bData := make([]float64, n*n)
	for i := range bData {
		bData[i] = rnd.Float64()
	}
	b := mat64.NewDense(n, n, bData)
	var c mat64.Dense
	c.Mul(a, b)
	return &c
}
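randmatmul reads from a package-level rnd that is not shown with this example; a minimal sketch of the missing declaration (the seed is arbitrary):

var rnd = rand.New(rand.NewSource(1))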
Example #11
func ExamplePrincipalComponents() {
	// iris is a truncated sample of Fisher's Iris dataset.
	n := 10
	d := 4
	iris := mat64.NewDense(n, d, []float64{
		5.1, 3.5, 1.4, 0.2,
		4.9, 3.0, 1.4, 0.2,
		4.7, 3.2, 1.3, 0.2,
		4.6, 3.1, 1.5, 0.2,
		5.0, 3.6, 1.4, 0.2,
		5.4, 3.9, 1.7, 0.4,
		4.6, 3.4, 1.4, 0.3,
		5.0, 3.4, 1.5, 0.2,
		4.4, 2.9, 1.4, 0.2,
		4.9, 3.1, 1.5, 0.1,
	})

	// Calculate the principal component direction vectors
	// and variances.
	vecs, vars, ok := stat.PrincipalComponents(iris, nil)
	if !ok {
		return
	}
	fmt.Printf("variances = %.4f\n\n", vars)

	// Project the data onto the first 2 principal components.
	k := 2
	var proj mat64.Dense
	proj.Mul(iris, vecs.View(0, 0, d, k))

	fmt.Printf("proj = %.4f", mat64.Formatted(&proj, mat64.Prefix("       ")))

	// Output:
	// variances = [0.1666 0.0207 0.0079 0.0019]
	//
	// proj = ⎡-6.1686   1.4659⎤
	//        ⎢-5.6767   1.6459⎥
	//        ⎢-5.6699   1.3642⎥
	//        ⎢-5.5643   1.3816⎥
	//        ⎢-6.1734   1.3309⎥
	//        ⎢-6.7278   1.4021⎥
	//        ⎢-5.7743   1.1498⎥
	//        ⎢-6.0466   1.4714⎥
	//        ⎢-5.2709   1.3570⎥
	//        ⎣-5.7533   1.6207⎦
}
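A small follow-up sketch: the fraction of the total variance captured by the first k components, computed from the vars slice with the floats package (varianceExplained is a hypothetical helper):

func varianceExplained(vars []float64, k int) float64 {
	return floats.Sum(vars[:k]) / floats.Sum(vars)
}

With the variances printed above, the first two components explain about 95% of the variance.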
Example #12
// Activate propagates the given input matrix (with) across the network
// a certain number of times (up to maxIterations).
//
// The with matrix should be size * size elements, with only the values
// of input neurons set (everything else should be zero).
//
// If the network is conceptually organised into layers, maxIterations
// should be set to the number of layers.
//
// This function overwrites whatever's stored in its first argument.
func (n *Network) Activate(with *mat64.Dense, maxIterations int) {

	// Add bias and feed to activation
	biasFunc := func(r, c int, v float64) float64 {
		return v + n.biases[r]
	}
	activFunc := func(r, c int, v float64) float64 {
		return n.funcs[r].Forward(v)
	}

	// Main loop
	for i := 0; i < maxIterations; i++ {
		with.Mul(n.weights, with)
		with.Apply(biasFunc, with)
		with.Apply(activFunc, with)
	}
}
Example #13
func (lr *LinearRegression) Fit(inst base.FixedDataGrid) error {

	// Retrieve row size
	_, rows := inst.Size()

	// Validate class Attribute count
	classAttrs := inst.AllClassAttributes()
	if len(classAttrs) != 1 {
		return fmt.Errorf("Only 1 class variable is permitted")
	}
	classAttrSpecs := base.ResolveAttributes(inst, classAttrs)

	// Retrieve relevant Attributes
	allAttrs := base.NonClassAttributes(inst)
	attrs := make([]base.Attribute, 0)
	for _, a := range allAttrs {
		if _, ok := a.(*base.FloatAttribute); ok {
			attrs = append(attrs, a)
		}
	}

	cols := len(attrs) + 1

	if rows < cols {
		return NotEnoughDataError
	}

	// Retrieve relevant Attribute specifications
	attrSpecs := base.ResolveAttributes(inst, attrs)

	// Split into two matrices, observed results (dependent variable y)
	// and the explanatory variables (X) - see http://en.wikipedia.org/wiki/Linear_regression
	observed := mat64.NewDense(rows, 1, nil)
	explVariables := mat64.NewDense(rows, cols, nil)

	// Build the observed matrix
	inst.MapOverRows(classAttrSpecs, func(row [][]byte, i int) (bool, error) {
		val := base.UnpackBytesToFloat(row[0])
		observed.Set(i, 0, val)
		return true, nil
	})

	// Build the explanatory variables
	inst.MapOverRows(attrSpecs, func(row [][]byte, i int) (bool, error) {
		// Set intercepts to 1.0
		explVariables.Set(i, 0, 1.0)
		for j, r := range row {
			explVariables.Set(i, j+1, base.UnpackBytesToFloat(r))
		}
		return true, nil
	})

	n := cols
	qr := new(mat64.QR)
	qr.Factorize(explVariables)
	var q, reg mat64.Dense
	q.QFromQR(qr)
	reg.RFromQR(qr)

	var transposed, qty mat64.Dense
	transposed.Clone(q.T())
	qty.Mul(&transposed, observed)

	regressionCoefficients := make([]float64, n)
	for i := n - 1; i >= 0; i-- {
		regressionCoefficients[i] = qty.At(i, 0)
		for j := i + 1; j < n; j++ {
			regressionCoefficients[i] -= regressionCoefficients[j] * reg.At(i, j)
		}
		regressionCoefficients[i] /= reg.At(i, i)
	}

	lr.disturbance = regressionCoefficients[0]
	lr.regressionCoefficients = regressionCoefficients[1:]
	lr.fitted = true
	lr.attrs = attrs
	lr.cls = classAttrs[0]
	return nil
}
Example #14
// Factors returns matrices W and H that are non-negative factors of V within the
// specified tolerance and computation limits given initial non-negative solutions Wo
// and Ho.
func Factors(V, Wo, Ho *mat64.Dense, c Config) (W, H *mat64.Dense, ok bool) {
	to := time.Now()

	W = Wo
	H = Ho

	var (
		wr, wc = W.Dims()
		hr, hc = H.Dims()

		tmp mat64.Dense
	)

	var vhT mat64.Dense
	gW := mat64.NewDense(wr, wc, nil)
	tmp.Mul(H, H.T())
	gW.Mul(W, &tmp)
	vhT.Mul(V, H.T())
	gW.Sub(gW, &vhT)

	var wTv mat64.Dense
	gH := mat64.NewDense(hr, hc, nil)
	tmp.Reset()
	tmp.Mul(W.T(), W)
	gH.Mul(&tmp, H)
	wTv.Mul(W.T(), V)
	gH.Sub(gH, &wTv)

	var gHT, gWHT mat64.Dense
	gHT.Clone(gH.T())
	gWHT.Stack(gW, &gHT)

	grad := mat64.Norm(&gWHT, 2)
	tolW := math.Max(0.001, c.Tolerance) * grad
	tolH := tolW

	var (
		_ok  bool
		iter int
	)

	decFiltW := func(r, c int, v float64) float64 {
		// decFiltW is applied to gW, so v = gW.At(r, c).
		if v < 0 || W.At(r, c) > 0 {
			return v
		}
		return 0
	}

	decFiltH := func(r, c int, v float64) float64 {
		// decFiltH is applied to gH, so v = gH.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	var vT, hT, wT mat64.Dense
	for i := 0; i < c.MaxIter; i++ {
		gW.Apply(decFiltW, gW)
		gH.Apply(decFiltH, gH)

		var proj float64
		for _, v := range gW.RawMatrix().Data {
			proj += v * v
		}
		for _, v := range gH.RawMatrix().Data {
			proj += v * v
		}
		proj = math.Sqrt(proj)
		if proj < c.Tolerance*grad || time.Since(to) > c.Limit {
			break
		}

		vT.Clone(V.T())
		hT.Clone(H.T())
		wT.Clone(W.T())
		W, gW, iter, ok = nnlsSubproblem(&vT, &hT, &wT, tolW, c.MaxOuterSub, c.MaxInnerSub)
		if iter == 0 {
			tolW *= 0.1
		}

		wT.Reset()
		wT.Clone(W.T())
		W = &wT

		var gWT mat64.Dense
		gWT.Clone(gW.T())
		*gW = gWT

		H, gH, iter, _ok = nnlsSubproblem(V, W, H, tolH, c.MaxOuterSub, c.MaxInnerSub)
		ok = ok && _ok
		if iter == 0 {
			tolH *= 0.1
		}
	}

	return W, H, ok
}
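A minimal usage sketch, assuming only the Config fields referenced above (values are arbitrary; a rank-1 factorization of a 4x2 matrix):

func exampleFactors() {
	V := mat64.NewDense(4, 2, []float64{1, 2, 3, 4, 5, 6, 7, 8})
	Wo := mat64.NewDense(4, 1, []float64{1, 1, 1, 1})
	Ho := mat64.NewDense(1, 2, []float64{1, 1})
	W, H, ok := Factors(V, Wo, Ho, Config{
		Tolerance:   1e-4,
		MaxIter:     100,
		Limit:       time.Minute,
		MaxOuterSub: 20,
		MaxInnerSub: 10,
	})
	fmt.Println(ok, mat64.Formatted(W), mat64.Formatted(H))
}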
Example #15
func nnlsSubproblem(V, W, Ho *mat64.Dense, tol float64, outer, inner int) (H, G *mat64.Dense, i int, ok bool) {
	H = new(mat64.Dense)
	H.Clone(Ho)

	var WtV, WtW mat64.Dense
	WtV.Mul(W.T(), V)
	WtW.Mul(W.T(), W)

	alpha, beta := 1., 0.1

	decFilt := func(r, c int, v float64) float64 {
		// decFilt is applied to G, so v = G.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	G = new(mat64.Dense)
	for i = 0; i < outer; i++ {
		G.Mul(&WtW, H)
		G.Sub(G, &WtV)
		G.Apply(decFilt, G)

		if mat64.Norm(G, 2) < tol {
			break
		}

		var (
			reduce bool
			Hp     *mat64.Dense
			d, dQ  mat64.Dense
		)
		for j := 0; j < inner; j++ {
			var Hn mat64.Dense
			Hn.Scale(alpha, G)
			Hn.Sub(H, &Hn)
			Hn.Apply(posFilt, &Hn)

			d.Sub(&Hn, H)
			dQ.Mul(&WtW, &d)
			dQ.MulElem(&dQ, &d)
			d.MulElem(G, &d)

			sufficient := 0.99*mat64.Sum(&d)+0.5*mat64.Sum(&dQ) < 0

			if j == 0 {
				reduce = !sufficient
				Hp = H
			}
			if reduce {
				if sufficient {
					H = &Hn
					ok = true
					break
				} else {
					alpha *= beta
				}
			} else {
				if !sufficient || mat64.Equal(Hp, &Hn) {
					H = Hp
					break
				} else {
					alpha /= beta
					Hp = &Hn
				}
			}
		}
	}

	return H, G, i, ok
}
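posFilt is not shown with this example; from its use above it is presumably the projection onto the non-negative orthant. A sketch:

func posFilt(r, c int, v float64) float64 {
	if v > 0 {
		return v
	}
	return 0
}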
Example #16
// ConditionNormal returns the Normal distribution that is the receiver conditioned
// on the input evidence. The returned multivariate normal has dimension
// n - len(observed), where n is the dimension of the original receiver. The updated
// mean and covariance are
//  mu = mu_un + sigma_{ob,un}^T * sigma_{ob,ob}^-1 (v - mu_ob)
//  sigma = sigma_{un,un} - sigma_{ob,un}^T * sigma_{ob,ob}^-1 * sigma_{ob,un}
// where mu_un and mu_ob are the original means of the unobserved and observed
// variables respectively, sigma_{un,un} is the unobserved subset of the covariance
// matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and
// sigma_{ob,un} are the cross terms. The observed dimensions take the
// values v. The dimension order is preserved during conditioning, so if the value
// of dimension 1 is observed, the returned normal represents dimensions {0, 2, ...}
// of the original Normal distribution.
//
// ConditionNormal returns {nil, false} if there is a failure during the update.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (n *Normal) ConditionNormal(observed []int, values []float64, src *rand.Rand) (*Normal, bool) {
	if len(observed) == 0 {
		panic("normal: no observed value")
	}
	if len(observed) != len(values) {
		panic("normal: input slice length mismatch")
	}
	for _, v := range observed {
		if v < 0 || v >= n.Dim() {
			panic("normal: observed value out of bounds")
		}
	}

	ob := len(observed)
	unob := n.Dim() - ob
	obMap := make(map[int]struct{})
	for _, v := range observed {
		if _, ok := obMap[v]; ok {
			panic("normal: observed dimension occurs twice")
		}
		obMap[v] = struct{}{}
	}
	if len(observed) == n.Dim() {
		panic("normal: all dimensions observed")
	}
	unobserved := make([]int, 0, unob)
	for i := 0; i < n.Dim(); i++ {
		if _, ok := obMap[i]; !ok {
			unobserved = append(unobserved, i)
		}
	}
	mu1 := make([]float64, unob)
	for i, v := range unobserved {
		mu1[i] = n.mu[v]
	}
	mu2 := make([]float64, ob) // really v - mu2
	for i, v := range observed {
		mu2[i] = values[i] - n.mu[v]
	}

	n.setSigma()

	var sigma11, sigma22 mat64.SymDense
	sigma11.SubsetSym(n.sigma, unobserved)
	sigma22.SubsetSym(n.sigma, observed)

	sigma21 := mat64.NewDense(ob, unob, nil)
	for i, r := range observed {
		for j, c := range unobserved {
			v := n.sigma.At(r, c)
			sigma21.Set(i, j, v)
		}
	}

	var chol mat64.Cholesky
	ok := chol.Factorize(&sigma22)
	if !ok {
		return nil, ok
	}

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 (v - mu_2).
	v := mat64.NewVector(ob, mu2)
	var tmp, tmp2 mat64.Vector
	err := tmp.SolveCholeskyVec(&chol, v)
	if err != nil {
		return nil, false
	}
	tmp2.MulVec(sigma21.T(), &tmp)

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
	// TODO(btracey): Should this be a method of SymDense?
	var tmp3, tmp4 mat64.Dense
	err = tmp3.SolveCholesky(&chol, sigma21)
	if err != nil {
		return nil, false
	}
	tmp4.Mul(sigma21.T(), &tmp3)

	for i := range mu1 {
		mu1[i] += tmp2.At(i, 0)
	}

	// TODO(btracey): If tmp2 can be constructed with a method, then this can be
	// replaced with SubSym.
	for i := 0; i < len(unobserved); i++ {
		for j := i; j < len(unobserved); j++ {
			v := sigma11.At(i, j)
			sigma11.SetSym(i, j, v-tmp4.At(i, j))
		}
	}
	return NewNormal(mu1, &sigma11, src)
}
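A minimal usage sketch in the same (assumed) package context; NewNormal is the constructor used in the return above:

func exampleCondition() {
	sigma := mat64.NewSymDense(2, []float64{1, 0.8, 0.8, 1})
	norm, ok := NewNormal([]float64{0, 0}, sigma, nil)
	if !ok {
		panic("sigma is not positive definite")
	}
	// Observe dimension 1 at 0.5; cond is the 1-dimensional distribution
	// of dimension 0 given that observation.
	cond, ok := norm.ConditionNormal([]int{1}, []float64{0.5}, nil)
	fmt.Println(cond, ok)
}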
Example #17
// MultiHypothesis computes the hypothesis h = theta^T * x and returns the
// resulting row as a vector.
func MultiHypothesis(x *mat64.Dense, theta *mat64.Vector) *mat64.Vector {
	var res mat64.Dense
	res.Mul(theta.T(), x) // theta^T is 1 x n and x is n x m, so res is 1 x m
	return res.RowView(0)
}
Example #18
// LinearSolve trains a Linear algorithm.
// It assumes the inputs and outputs are already scaled.
// If features is nil, it will call featurize.
// Returns nil if the regularizer is not a linear regularizer.
// Destructive if any of the weights are zero.
// The losser is always the two-norm.
// Does not set the value of the parameters (in case this is called in
// parallel with a different routine).
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow Tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64

	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}

	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}

	_, nFeatures := features.Dims()

	var weightedFeatures, weightedOutput *mat64.Dense

	fmt.Println("In linear solve")

	if weights != nil {
		panic("Need functionality to be better. Either banded special case in matrix or do the mulitplication by hand")
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}
		diagWeight := diagonal.NewDiagonal(len(scaledWeight), scaledWeight)

		nSamples, outputDim := trueOutputs.Dims()
		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)

		weightedOutput.Copy(trueOutputs)
		weightedFeatures.Copy(features)

		// TODO: Replace this with better than mat multiply
		weightedOutput.Mul(diagWeight, weightedOutput)
		weightedFeatures.Mul(diagWeight, weightedFeatures)
	}

	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}

	if weights == nil {
		parameterMat, err := mat64.Solve(features, trueOutputs)
		if err != nil {
			panic(err)
		}
		return parameterMat.RawMatrix().Data

	}
	parameterMat, err := mat64.Solve(weightedFeatures, weightedOutput)
	if err != nil {
		panic(err)
	}

	return parameterMat.RawMatrix().Data
}
Example #19
// CovarianceMatrix calculates a covariance matrix (also known as a
// variance-covariance matrix) from a matrix of data, using a two-pass
// algorithm. The matrix returned will be symmetric and square.
//
// The weights wts should have length equal to the number of rows in the
// input data matrix x. If cov is nil, then a new matrix with appropriate size will
// be constructed. If cov is not nil, it should be a square matrix with the same
// number of columns as the input data matrix x, and it will be used as the receiver
// for the covariance data. Weights cannot be negative.
func CovarianceMatrix(cov *mat64.Dense, x mat64.Matrix, wts []float64) *mat64.Dense {
	// This is the matrix version of the two-pass algorithm. It doesn't use the
	// additional floating point error correction that the Covariance function uses
	// to reduce the impact of rounding during centering.

	// TODO(jonlawlor): indicate that the resulting matrix is symmetric, and change
	// the returned type from a *mat.Dense to a *mat.Symmetric.

	r, c := x.Dims()

	if cov == nil {
		cov = mat64.NewDense(c, c, nil)
	} else if covr, covc := cov.Dims(); covr != covc || covc != c {
		panic(mat64.ErrShape)
	}

	var xt mat64.Dense
	xt.Clone(x.T())
	// Subtract the mean of each of the columns.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		// This will panic with ErrShape if len(wts) != len(v), so
		// we don't have to check the size later.
		mean := Mean(v, wts)
		floats.AddConst(-mean, v)
	}

	var n float64
	if wts == nil {
		n = float64(r)
		cov.Mul(&xt, xt.T())

		// Scale by the sample size.
		cov.Scale(1/(n-1), cov)
		return cov
	}

	// Multiply by the sqrt of the weights, so that multiplication is symmetric.
	sqrtwts := make([]float64, r)
	for i, w := range wts {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		sqrtwts[i] = math.Sqrt(w)
	}
	// Weight the rows.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		floats.Mul(v, sqrtwts)
	}

	// Calculate the normalization factor.
	n = floats.Sum(wts)
	cov.Mul(&xt, xt.T())

	// Scale by the sample size.
	cov.Scale(1/(n-1), cov)
	return cov
}