Example #1
func TestUpdate(t *testing.T) {
	neuralNetwork := CreateSimpleNetwork(t)
	inputs := mat64.NewDense(1, 2, []float64{0.05, 0.10})
	neuralNetwork.Forward(inputs)
	values := mat64.NewDense(1, 2, []float64{0.01, 0.99})
	neuralNetwork.Backward(values)
	learningConfiguration := neural.LearningConfiguration{
		Epochs:    proto.Int32(1),
		Rate:      proto.Float64(0.5),
		Decay:     proto.Float64(0),
		BatchSize: proto.Int32(1),
	}
	neuralNetwork.Update(learningConfiguration)
	expected_weights_0 := mat64.NewDense(
		3, 2, []float64{0.149780716, 0.24975114, 0.19956143, 0.29950229, 0.35,
			0.35})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[0].Weight, expected_weights_0, 0.0001) {
		t.Errorf("weights 0 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[0].Weight))
	}
	expected_weights_1 := mat64.NewDense(
		3, 2, []float64{0.35891648, 0.51130127, 0.408666186, 0.561370121, 0.6,
			0.6})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[1].Weight, expected_weights_1, 0.0001) {
		t.Errorf("weights 1 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[1].Weight))
	}
}
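The expected matrices above appear to be consistent with a single plain gradient-descent step, w ← w − rate·∂E/∂w, applied to each layer's weight matrix (Rate 0.5, Decay 0). Below is a minimal mat64 sketch of that update; the initial weights and gradient values are illustrative assumptions only, since CreateSimpleNetwork and the gradients accumulated by Backward are not shown here.
// Sketch only: the initial weights and gradients below are assumed for illustration.
func updateSketch() *mat64.Dense {
	rate := 0.5
	weights := mat64.NewDense(3, 2, []float64{0.15, 0.25, 0.20, 0.30, 0.35, 0.35})
	gradient := mat64.NewDense(3, 2, []float64{0.000438568, 0.000497713, 0.000877135, 0.000995425, 0, 0})
	var step mat64.Dense
	step.Scale(rate, gradient)  // step = rate * gradient
	weights.Sub(weights, &step) // w <- w - step
	return weights              // matches expected_weights_0 above to within 0.0001
}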
Example #2
func Train(neuralNetwork *Network, datapoints []Datapoint,
	learningConfiguration LearningConfiguration) {
	// Train on some number of iterations of permuted versions of the input.
	batchSize := int(*learningConfiguration.BatchSize)
	// Batch size 0 means do full batch learning.
	if batchSize == 0 {
		batchSize = len(datapoints)
	}
	error_function := NewErrorFunction(*learningConfiguration.ErrorName)
	features := mat64.NewDense(batchSize, len(datapoints[0].Features), nil)
	values := mat64.NewDense(batchSize, len(datapoints[0].Values), nil)
	for i := 0; i < int(*learningConfiguration.Epochs); i++ {
		perm := rand.Perm(len(datapoints))
		// TODO(ariw): This misses the last len(perm) % batchSize examples. Is this
		// okay?
		for j := 0; j <= len(perm)-batchSize; j += batchSize {
			for k := 0; k < batchSize; k++ {
				features.SetRow(k, datapoints[perm[j+k]].Features)
				values.SetRow(k, datapoints[perm[j+k]].Values)
			}
			neuralNetwork.Forward(features)
			neuralNetwork.Backward(values, error_function)
			neuralNetwork.Update(learningConfiguration)
		}
	}
}
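One way to address the TODO above is to run one extra, smaller batch over the leftover examples at the end of each epoch. A sketch of that block, written against the variables Train already has in scope (not part of the original project):
		// After the j loop inside each epoch: train once on the
		// len(perm) % batchSize examples that the fixed-size batches skip.
		if rem := len(perm) % batchSize; rem != 0 {
			lastFeatures := mat64.NewDense(rem, len(datapoints[0].Features), nil)
			lastValues := mat64.NewDense(rem, len(datapoints[0].Values), nil)
			for k := 0; k < rem; k++ {
				p := perm[len(perm)-rem+k]
				lastFeatures.SetRow(k, datapoints[p].Features)
				lastValues.SetRow(k, datapoints[p].Values)
			}
			neuralNetwork.Forward(lastFeatures)
			neuralNetwork.Backward(lastValues, error_function)
			neuralNetwork.Update(learningConfiguration)
		}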
Example #3
// AfterConstr builds and returns matrices representing equality constraints
// with a parameter multiplier matrix A and a target (right-hand side) vector.
// The constraint expresses that each facility can only be built after
// a certain date.
func (s *Scenario) AfterConstr() (A, target *mat64.Dense) {
	nperiods := s.nPeriods()

	// count facilities that have build time constraints
	n := 0
	for _, fac := range s.Facs {
		if fac.BuildAfter != 0 {
			n++
		}
	}

	A = mat64.NewDense(n*nperiods, s.Nvars(), nil)
	target = mat64.NewDense(n*nperiods, 1, nil)

	r := 0
	for f, fac := range s.Facs {
		if fac.BuildAfter == 0 {
			continue
		}
		for t := s.BuildPeriod; t < s.SimDur; t += s.BuildPeriod {
			if !fac.Available(t) {
				c := f*nperiods + t/s.BuildPeriod - 1
				A.Set(r, c, 1)
			}
			r++
		}
	}

	return A, target
}
Example #4
// SupportConstr builds and returns matrices representing linear inequality
// constraints with a parameter multiplier matrix A and upper and lower
// bounds. The constraint expresses that the total number of support
// facilities (i.e. not reactors) at every timestep must never be more
// than twice the number of deployed reactors.
func (s *Scenario) SupportConstr() (low, A, up *mat64.Dense) {
	nperiods := s.nPeriods()

	A = mat64.NewDense(nperiods, s.Nvars(), nil)
	low = mat64.NewDense(nperiods, 1, nil)
	tmp := make([]float64, len(s.MaxPower))
	copy(tmp, s.MaxPower)
	up = mat64.NewDense(nperiods, 1, tmp)
	up.Apply(func(r, c int, v float64) float64 { return 1e200 }, up)

	for t := s.BuildPeriod; t < s.SimDur; t += s.BuildPeriod {
		for f, fac := range s.Facs {
			for n := 0; n < nperiods; n++ {
				if !fac.Alive(n*s.BuildPeriod+1, t) {
					continue
				}

				i := f*nperiods + n
				if fac.Cap == 0 {
					A.Set(t/s.BuildPeriod-1, i, -1)
				} else {
					A.Set(t/s.BuildPeriod-1, i, 2)
				}
			}
		}
	}

	return low, A, up
}
/*
 * Test the Network for a basic XOR gate.
 */
func TestSGD(t *testing.T) {
	var a = []int{2, 3, 1}
	var eta float64 = 3

	net := Network{}
	net.Init(a)
	net.TestFunc = func(output, desiredOutput *mat64.Dense) bool {
		if math.Abs(output.At(0, 0)-desiredOutput.At(0, 0)) < 0.1 {
			return true
		}
		return false
	}

	data := make([][]mat64.Dense, 10000)
	for i := 0; i < len(data); i++ {
		data[i] = make([]mat64.Dense, 2)
		rand.Seed(time.Now().UTC().UnixNano())
		x := rand.Intn(2)
		y := rand.Intn(2)
		data[i][0] = *mat64.NewDense(1, 2, []float64{float64(x), float64(y)})
		data[i][1] = *mat64.NewDense(1, 1, []float64{float64(x ^ y)})
	}

	test := make([][]mat64.Dense, 4)
	for i := 0; i < 4; i++ {
		test[i] = make([]mat64.Dense, 2)
		test[i][0] = *mat64.NewDense(1, 2, []float64{float64(i / 2), float64(i % 2)})
		test[i][1] = *mat64.NewDense(1, 1, []float64{float64((i / 2) ^ (i % 2))})
	}

	net.SGD(data, eta, 3, test)
}
Example #6
func TestEuclidean(t *testing.T) {
	var vectorX, vectorY *mat64.Dense
	euclidean := NewEuclidean()

	Convey("Given two vectors", t, func() {
		vectorX = mat64.NewDense(3, 1, []float64{1, 2, 3})
		vectorY = mat64.NewDense(3, 1, []float64{2, 4, 5})

		Convey("When doing inner product", func() {
			result := euclidean.InnerProduct(vectorX, vectorY)

			Convey("The result should be 25", func() {
				So(result, ShouldEqual, 25)
			})
		})

		Convey("When calculating distance", func() {
			result := euclidean.Distance(vectorX, vectorY)

			Convey("The result should be 3", func() {
				So(result, ShouldEqual, 3)
			})

		})

	})
}
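The expected values in TestEuclidean can be checked directly with plain mat64 operations: ⟨x, y⟩ = 1·2 + 2·4 + 3·5 = 25 and ‖y − x‖ = √(1 + 4 + 4) = 3. A small cross-check, independent of the package under test:
func euclideanCheck() (dot, dist float64) {
	x := mat64.NewDense(3, 1, []float64{1, 2, 3})
	y := mat64.NewDense(3, 1, []float64{2, 4, 5})
	var prod mat64.Dense
	prod.Mul(x.T(), y) // (1x3) * (3x1) gives the 1x1 inner product
	dot = prod.At(0, 0)
	var diff, sq mat64.Dense
	diff.Sub(y, x)
	sq.Mul(diff.T(), &diff)
	dist = math.Sqrt(sq.At(0, 0))
	return dot, dist // 25, 3
}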
func main() {
	// task 1: show qr decomp of wp example
	a := mat64.NewDense(3, 3, []float64{
		12, -51, 4,
		6, 167, -68,
		-4, 24, -41,
	})
	var qr mat64.QR
	qr.Factorize(a)
	var q, r mat64.Dense
	q.QFromQR(&qr)
	r.RFromQR(&qr)
	fmt.Printf("q: %.3f\n\n", mat64.Formatted(&q, mat64.Prefix("   ")))
	fmt.Printf("r: %.3f\n\n", mat64.Formatted(&r, mat64.Prefix("   ")))

	// task 2: use qr decomp for polynomial regression example
	x := []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	y := []float64{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}
	a = Vandermonde(x, 2)
	b := mat64.NewDense(11, 1, y)
	qr.Factorize(a)
	var f mat64.Dense
	f.SolveQR(&qr, false, b)
	fmt.Printf("polyfit: %.3f\n",
		mat64.Formatted(&f, mat64.Prefix("         ")))
}
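Vandermonde is called above but its definition is not shown. Below is a minimal helper compatible with that call, where row i holds 1, x[i], x[i]², …, x[i]^degree; this is a sketch, not necessarily the original example's implementation.
func Vandermonde(a []float64, degree int) *mat64.Dense {
	x := mat64.NewDense(len(a), degree+1, nil)
	for i := range a {
		for j, p := 0, 1.0; j <= degree; j, p = j+1, p*a[i] {
			x.Set(i, j, p) // column j holds a[i]^j
		}
	}
	return x
}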
Example #8
func TestChebyshev(t *testing.T) {
	var vectorX, vectorY *mat64.Dense
	chebyshev := NewChebyshev()

	Convey("Given two vectors", t, func() {
		vectorX = mat64.NewDense(4, 1, []float64{1, 2, 3, 4})
		vectorY = mat64.NewDense(4, 1, []float64{-5, -6, 7, 8})

		Convey("When calculating distance with two vectors", func() {
			result := chebyshev.Distance(vectorX, vectorY)

			Convey("The result should be 8", func() {
				So(result, ShouldEqual, 8)
			})
		})

		Convey("When calculating distance with row vectors", func() {
			vectorX.Copy(vectorX.T())
			vectorY.Copy(vectorY.T())
			result := chebyshev.Distance(vectorX, vectorY)

			Convey("The result should be 8", func() {
				So(result, ShouldEqual, 8)
			})
		})

		Convey("When calculating distance with different dimension matrices", func() {
			vectorX.Clone(vectorX.T())
			So(func() { chebyshev.Distance(vectorX, vectorY) }, ShouldPanic)
		})

	})
}
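The Chebyshev (L∞) distance is the largest per-component absolute difference, so for the vectors above it is max(|1−(−5)|, |2−(−6)|, |3−7|, |4−8|) = max(6, 8, 4, 4) = 8. A direct check, independent of the NewChebyshev implementation:
func chebyshevCheck() float64 {
	x := mat64.NewDense(4, 1, []float64{1, 2, 3, 4})
	y := mat64.NewDense(4, 1, []float64{-5, -6, 7, 8})
	max := 0.0
	for i := 0; i < 4; i++ {
		if d := math.Abs(x.At(i, 0) - y.At(i, 0)); d > max {
			max = d
		}
	}
	return max // 8
}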
Example #9
func (fm *FeatureMatrix) Mat64(header, transpose bool) *mat64.Dense {
	var (
		idx   int
		iter  fmIt
		dense *mat64.Dense
	)

	ncol := len(fm.Data)
	nrow := len(fm.CaseLabels)

	if !transpose {
		iter = rowIter(fm, header)
		dense = mat64.NewDense(nrow, ncol, nil)
	} else {
		iter = colIter(fm, header)
		dense = mat64.NewDense(ncol, nrow+1, nil)
	}

	for row, ok := iter(); ok; idx++ {
		for j, val := range row {
			flt, _ := strconv.ParseFloat(val, 64)
			dense.Set(idx, j, flt)
		}
		row, ok = iter()
	}

	return dense
}
Example #10
func NewTaskGraphStructure() *TaskGraphStructure {
	return &TaskGraphStructure{
		make(map[int]*Task, 0),
		mat64.NewDense(0, 0, nil),
		mat64.NewDense(0, 0, nil),
	}
}
Example #11
// ReadLibsvm reads libsvm-format data from `filepath`. `oneBased` denotes
// whether the feature indices stored in the file start from 1 (`oneBased=true`)
// or from 0 (`oneBased=false`). The returned X and y have dimensions
// (nSamples, nFeatures) and (nSamples, 1) respectively.
func ReadLibsvm(filepath string, oneBased bool) (X, y *mat64.Dense) {
	type Data []string

	file, err := os.Open(filepath)
	if err != nil {
		fmt.Println("Got error when trying to open libsvm file")
		panic(err)
	}
	defer file.Close()

	nFeatures := 0
	nSamples := 0
	dataList := []Data{}

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		row := strings.Split(scanner.Text(), " ")
		dataList = append(dataList, row)

		if idx, _ := parseLibsvmElem(row[len(row)-1]); idx+1 > nFeatures {
			nFeatures = idx + 1
		}

		nSamples++
	}

	// With one-based indices, the highest index seen equals the feature count,
	// so adjust once after scanning rather than on every line.
	if oneBased {
		nFeatures--
	}

	X = mat64.NewDense(nSamples, nFeatures, nil)
	y = mat64.NewDense(nSamples, 1, nil)

	for i, data := range dataList {
		label, err := strconv.Atoi(data[0])
		if err != nil {
			fmt.Println("Got error when trying to set label for %v-th sample", i)
			panic(err)
		}
		y.Set(i, 0, float64(label))

		for k := 1; k < len(data); k++ {
			idx, val := parseLibsvmElem(data[k])

			if oneBased {
				X.Set(i, idx-1, float64(val))
			} else {
				X.Set(i, idx, float64(val))
			}

		}
	}

	if err := scanner.Err(); err != nil {
		fmt.Println("Got error when trying to read libsvm file")
		panic(err)
	}

	return
}
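A hypothetical usage sketch for ReadLibsvm; the file path below is illustrative only, and parseLibsvmElem is assumed to behave as it is used above.
func loadLibsvmExample() {
	// "train.libsvm" is a placeholder path to a file with one-based feature indices.
	X, y := ReadLibsvm("train.libsvm", true)
	nSamples, nFeatures := X.Dims()
	fmt.Printf("loaded %d samples with %d features\n", nSamples, nFeatures)
	fmt.Println("first label:", y.At(0, 0))
}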
Example #12
func TestPolyKernel(t *testing.T) {
	var vectorX, vectorY *mat64.Dense
	polyKernel := NewPolyKernel(3)

	Convey("Given two vectors", t, func() {
		vectorX = mat64.NewDense(3, 1, []float64{1, 2, 3})
		vectorY = mat64.NewDense(3, 1, []float64{2, 4, 5})

		Convey("When doing inner product", func() {
			result := polyKernel.InnerProduct(vectorX, vectorY)

			Convey("The result should be 17576", func() {
				So(result, ShouldEqual, 17576)
			})
		})

		Convey("When calculating distance", func() {
			result := polyKernel.Distance(vectorX, vectorY)

			Convey("The result should alomost equal 31.622776601683793", func() {
				So(result, ShouldAlmostEqual, 31.622776601683793)
			})

		})

	})
}
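The expected 17576 is consistent with an inhomogeneous polynomial kernel K(x, y) = (⟨x, y⟩ + 1)^degree, since ⟨x, y⟩ = 1·2 + 2·4 + 3·5 = 25 and (25 + 1)³ = 17576. The implementation behind NewPolyKernel is not shown here, so this is only a consistency check:
func polyKernelCheck() float64 {
	dot := 1.0*2 + 2*4 + 3*5  // <x, y> = 25
	return math.Pow(dot+1, 3) // (25+1)^3 = 17576, matching the test above
}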
Example #13
// InstancesTrainTestSplit takes a given Instances (src) and a train-test fraction
// (prop) and returns an array of two new Instances, one containing approximately
// that fraction and the other containing what's left.
//
// IMPORTANT: this function is only meaningful when prop is between 0.0 and 1.0.
// Using any other values may result in odd behaviour.
func InstancesTrainTestSplit(src *Instances, prop float64) (*Instances, *Instances) {
	trainingRows := make([]int, 0)
	testingRows := make([]int, 0)
	numAttrs := len(src.attributes)
	src.Shuffle()
	for i := 0; i < src.Rows; i++ {
		trainOrTest := rand.Intn(101)
		if trainOrTest > int(100*prop) {
			trainingRows = append(trainingRows, i)
		} else {
			testingRows = append(testingRows, i)
		}
	}

	rawTrainMatrix := mat64.NewDense(len(trainingRows), numAttrs, make([]float64, len(trainingRows)*numAttrs))
	rawTestMatrix := mat64.NewDense(len(testingRows), numAttrs, make([]float64, len(testingRows)*numAttrs))

	for i, row := range trainingRows {
		rowDat := src.storage.RowView(row)
		rawTrainMatrix.SetRow(i, rowDat)
	}
	for i, row := range testingRows {
		rowDat := src.storage.RowView(row)
		rawTestMatrix.SetRow(i, rowDat)
	}

	trainingRet := NewInstancesFromDense(src.attributes, len(trainingRows), rawTrainMatrix)
	testRet := NewInstancesFromDense(src.attributes, len(testingRows), rawTestMatrix)
	return trainingRet, testRet
}
Example #14
// Project a point on the torus onto the screen.
func (ts TorusScreen) Project(v *mat64.Vector) (uint, uint) {
	xUnit, yUnit := ts.pixelSize()

	reflectComps := []float64{
		1, 0,
		0, -1,
	}
	reflect := mat64.NewDense(2, 2, reflectComps)

	trans := Vec2(float64(ts.t.W)/2.0, float64(ts.t.H)/2.0)

	// Scaling matrix
	scaleComps := []float64{
		xUnit, 0,
		0, yUnit,
	}
	scale := mat64.NewDense(2, 2, scaleComps)

	pr := Vec2(0, 0)
	pr.MulVec(reflect, v)
	pr.AddVec(pr, trans)
	pr.MulVec(scale, pr)

	rx := uint(math.Floor(pr.At(0, 0)))
	ry := uint(math.Floor(pr.At(1, 0)))

	return rx, ry
}
Example #15
// LinearLeastSquares computes the least squares fit for the function
//
//   f(x) = β₀terms₀(x) + β₁terms₁(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters β₀, β₁, ...
// that minimize the sum of the squares of the residuals of f:
//
//   ∑ (ys[i] - f(xs[i]))²
//
// If weights is non-nil, it is used to weight these residuals:
//
//   ∑ weights[i] × (ys[i] - f(xs[i]))²
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for β̂ in the
	// "normal equations":
	//
	//    (𝐗ᵀ𝐖𝐗)β̂ = 𝐗ᵀ𝐖𝐲
	//
	// where 𝐖 is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).

	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		panic("len(xs) != len(weights)")
	}

	// Construct 𝐗ᵀ. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct 𝐗ᵀ𝐖.
	var XTW *mat64.Dense
	if weights == nil {
		// 𝐖 is the identity matrix.
		XTW = XT
	} else {
		// Since 𝐖 is a diagonal matrix, we do this directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct 𝐲.
	y := mat64.NewVector(len(ys), ys)

	// Compute β̂.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)

	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)

	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}
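A small usage example for LinearLeastSquares: fit y = c₀ + c₁x + c₂x² to a handful of points. The data below is made up for illustration; each term function fills termOut with its term evaluated at every x.
func fitQuadratic() []float64 {
	xs := []float64{0, 1, 2, 3, 4}
	ys := []float64{1, 2, 5, 10, 17} // exactly 1 + x^2
	return LinearLeastSquares(xs, ys, nil,
		func(xs, termOut []float64) { // constant term
			for i := range termOut {
				termOut[i] = 1
			}
		},
		func(xs, termOut []float64) { // linear term
			copy(termOut, xs)
		},
		func(xs, termOut []float64) { // quadratic term
			for i, x := range xs {
				termOut[i] = x * x
			}
		})
	// Expected parameters: approximately [1, 0, 1].
}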
Example #16
func (b batchPredictor) NewPredictor() predHelp.Predictor {
	return predictor{
		featureWeights:  b.featureWeights,
		inputMat:        mat64.NewDense(1, b.nFeatures, nil),
		outputMat:       mat64.NewDense(1, b.outputDim, nil),
		featurizedInput: make([]float64, b.nFeatures),
		order:           b.order,
	}
}
// f(x) = 2x + 2y
// Parameters should be really, really close to 2.
func TestSGD(t *testing.T) {
	x := mat64.NewDense(2, 2, []float64{1, 3, 5, 8})
	y := mat64.NewDense(2, 1, []float64{8, 26})
	theta := mat64.NewDense(2, 1, []float64{0, 0})
	results := StochasticGradientDescent(x, y, theta, 0.005, 10000, 30)
	if results.At(0, 0) <= 1.99 || results.At(0, 0) >= 2.01 {
		t.Error("Inaccurate convergence of stochastic gradient descent")
	}
}
Example #18
func TestLayeredXORInline(t *testing.T) {

	Convey("Given an inline XOR dataset...", t, func() {

		data := mat64.NewDense(4, 3, []float64{
			1, 0, 1,
			0, 1, 1,
			0, 0, 0,
			1, 1, 0,
		})

		XORData := base.InstancesFromMat64(4, 3, data)
		classAttr := base.GetAttributeByName(XORData, "2")
		XORData.AddClassAttribute(classAttr)

		net := NewMultiLayerNet([]int{3})
		net.MaxIterations = 20000
		net.Fit(XORData)

		Convey("After running for 20000 iterations, should have some predictive power...", func() {

			Convey("The right nodes should be connected in the network...", func() {
				So(net.network.GetWeight(1, 1), ShouldAlmostEqual, 1.000)
				So(net.network.GetWeight(2, 2), ShouldAlmostEqual, 1.000)

				for i := 1; i <= 6; i++ {
					So(net.network.GetWeight(6, i), ShouldAlmostEqual, 0.000)
				}

			})
			out := mat64.NewDense(6, 1, []float64{1.0, 0.0, 0.0, 0.0, 0.0, 0.0})
			net.network.Activate(out, 2)
			So(out.At(5, 0), ShouldAlmostEqual, 1.0, 0.1)

			Convey("And Predict() should do OK too...", func() {

				pred := net.Predict(XORData)

				for _, a := range pred.AllAttributes() {
					af, ok := a.(*base.FloatAttribute)
					So(ok, ShouldBeTrue)

					af.Precision = 1
				}

				So(base.GetClass(pred, 0), ShouldEqual, "1.0")
				So(base.GetClass(pred, 1), ShouldEqual, "1.0")
				So(base.GetClass(pred, 2), ShouldEqual, "0.0")
				So(base.GetClass(pred, 3), ShouldEqual, "0.0")

			})
		})

	})

}
// Sample generates rows(batch) samples using the Metropolis Hastings sample
// generation method. The initial location is NOT updated during the call to Sample.
//
// The number of columns in batch must equal len(m.Initial), otherwise Sample
// will panic.
func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
	burnIn := m.BurnIn
	rate := m.Rate
	if rate == 0 {
		rate = 1
	}
	r, c := batch.Dims()
	if len(m.Initial) != c {
		panic("metropolishastings: length mismatch")
	}

	// Use the optimal size for the temporary memory to allow the fewest calls
	// to MetropolisHastings. The case where tmp shadows samples must be
	// aligned with the logic after burn-in so that tmp does not shadow samples
	// during the rate portion.
	tmp := batch
	if rate > r {
		tmp = mat64.NewDense(rate, c, nil)
	}
	rTmp, _ := tmp.Dims()

	// Perform burn-in.
	remaining := burnIn
	initial := make([]float64, c)
	copy(initial, m.Initial)
	for remaining != 0 {
		newSamp := min(rTmp, remaining)
		MetropolisHastings(tmp.View(0, 0, newSamp, c).(*mat64.Dense), initial, m.Target, m.Proposal, m.Src)
		copy(initial, tmp.RawRowView(newSamp-1))
		remaining -= newSamp
	}

	if rate == 1 {
		MetropolisHastings(batch, initial, m.Target, m.Proposal, m.Src)
		return
	}

	if rTmp <= r {
		tmp = mat64.NewDense(rate, c, nil)
	}

	// Take a single sample from the chain.
	MetropolisHastings(batch.View(0, 0, 1, c).(*mat64.Dense), initial, m.Target, m.Proposal, m.Src)

	copy(initial, batch.RawRowView(0))
	// For all of the other samples, first generate Rate samples and then actually
	// accept the last one.
	for i := 1; i < r; i++ {
		MetropolisHastings(tmp, initial, m.Target, m.Proposal, m.Src)
		v := tmp.RawRowView(rate - 1)
		batch.SetRow(i, v)
		copy(initial, v)
	}
}
Example #20
File: goem.go Project: 6br/goem
func (em EM) norm(x []float64, j int) float64 {
	xMat := mat64.NewDense(1, len(x), x)
	muMat := mat64.NewDense(1, len(em.mu[j]), em.mu[j])
	first := mat64.NewDense(1, len(em.mu[j]), nil)
	first.Sub(xMat, muMat)
	second := mat64.DenseCopyOf(first.T())
	resultMat := mat64.NewDense(1, 1, nil)
	resultMat.Mul(first, second)
	var jisuu = 0.5 * float64(em.d)
	return math.Exp(resultMat.At(0, 0)/(-2.0)/(em.sigma[j]*em.sigma[j])) / math.Pow(2*math.Pi*em.sigma[j]*em.sigma[j], jisuu)
}
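For reference, norm evaluates an isotropic multivariate Gaussian density: resultMat.At(0, 0) holds the squared distance ‖x − μⱼ‖² (the row difference multiplied by its own transpose), and jisuu = d/2 is the exponent of the normalising constant, so the return value is

	exp(−‖x − μⱼ‖² / (2σⱼ²)) / (2πσⱼ²)^(d/2)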
Example #21
func generateRandomSamples(n, nDim int) (x, y *mat64.Dense) {
	x = mat64.NewDense(n, nDim, nil)
	y = mat64.NewDense(n, nDim, nil)

	for i := 0; i < n; i++ {
		for j := 0; j < nDim; j++ {
			x.Set(i, j, rand.NormFloat64())
			y.Set(i, j, testfunc(x.At(i, j)))
		}
	}
	return
}
Example #22
func main() {

	runtime.GOMAXPROCS(runtime.NumCPU() - 2)

	gopath := os.Getenv("GOPATH")
	path := filepath.Join(gopath, "prof", "github.com", "reggo", "reggo", "nnet")

	nInputs := 10
	nOutputs := 3
	nLayers := 2
	nNeurons := 50
	nSamples := 1000000
	nRuns := 50

	config := &profile.Config{
		CPUProfile:  true,
		ProfilePath: path,
	}

	defer profile.Start(config).Stop()

	net, err := nnet.NewSimpleTrainer(nInputs, nOutputs, nLayers, nNeurons, nnet.Linear{})
	if err != nil {
		log.Fatal(err)
	}

	// Generate some random data
	inputs := mat64.NewDense(nSamples, nInputs, nil)
	outputs := mat64.NewDense(nSamples, nOutputs, nil)
	for i := 0; i < nSamples; i++ {
		for j := 0; j < nInputs; j++ {
			inputs.Set(i, j, rand.Float64())
		}
		for j := 0; j < nOutputs; j++ {
			outputs.Set(i, j, rand.Float64())
		}
	}

	// Create trainer
	prob := train.NewBatchGradBased(net, true, inputs, outputs, nil, nil, nil)
	nParameters := net.NumParameters()

	parameters := make([]float64, nParameters)
	derivative := make([]float64, nParameters)

	for i := 0; i < nRuns; i++ {
		net.RandomizeParameters()
		net.Parameters(parameters)
		prob.ObjGrad(parameters, derivative)
		fmt.Println(floats.Sum(derivative))
	}
}
func init() {
	flatValues = make([]float64, 80)
	flatLabels = make([]float64, 20)

	for i := 0; i < 80; i++ {
		flatValues[i] = float64(i + 1)
		// Replaces labels four times per run but who cares.
		flatLabels[int(i/4)] = float64(rand.Intn(2))
	}

	values = mat.NewDense(20, 4, flatValues)
	labels = mat.NewDense(20, 1, flatLabels)
}
Example #24
// Format the examples.
func Format(examples [][][]float64) (*mat64.Dense, *mat64.Dense) {
	var input, output []float64
	rows := len(examples)
	inCols := len(examples[0][0])
	outCols := len(examples[0][1])

	for _, example := range examples {
		output = append(output, example[1]...)
		input = append(input, example[0]...)
	}

	return mat64.NewDense(rows, inCols, input), mat64.NewDense(rows, outCols, output)
}
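A hypothetical usage sketch for Format: two XOR-style examples, each a pair of (input slice, output slice). The values are illustrative only.
func formatExample() (*mat64.Dense, *mat64.Dense) {
	examples := [][][]float64{
		{{0, 1}, {1}}, // input (0, 1) -> output 1
		{{1, 1}, {0}}, // input (1, 1) -> output 0
	}
	return Format(examples) // 2x2 input matrix and 2x1 output matrix
}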
Example #25
// Stochastic gradient descent updates the parameters of theta on a random row selection from a matrix.
// It is faster as it does not compute the cost function over the entire dataset every time.
// It instead calculates the error parameters over only one row of the dataset at a time.
// In return, there is a trade off for accuracy. This is minimised by running multiple SGD processes
// (the number of goroutines spawned is specified by the procs variable) in parallel and taking an average of the result.
func StochasticGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch, procs int) *mat64.Dense {
	m, _ := y.Dims()
	resultPipe := make(chan *mat64.Dense)
	results := make([]*mat64.Dense, 0)

	for p := 0; p < procs; p++ {
		go func() {
			// Is this just a pointer to theta?
			thetaCopy := mat64.DenseCopyOf(theta)
			for i := 0; i < epoch; i++ {
				for k := 0; k < m; k++ {
					datXtemp := x.RowView(k)
					datYtemp := y.RowView(k)
					datX := mat64.NewDense(1, len(datXtemp), datXtemp)
					datY := mat64.NewDense(1, 1, datYtemp)
					datXFlat := mat64.DenseCopyOf(datX)
					datXFlat.TCopy(datXFlat)
					datX.Mul(datX, thetaCopy)
					datX.Sub(datX, datY)
					datXFlat.Mul(datXFlat, datX)

					// Horrible hack to get around the fact there is no elementwise division in mat64
					xFlatRow, _ := datXFlat.Dims()
					gradient := make([]float64, 0)
					for i := 0; i < xFlatRow; i++ {
						row := datXFlat.RowView(i)
						for i := range row {
							divd := row[i] / float64(m) * alpha
							gradient = append(gradient, divd)
						}
					}
					grows := len(gradient)
					grad := mat64.NewDense(grows, 1, gradient)
					thetaCopy.Sub(thetaCopy, grad)
				}

			}
			resultPipe <- thetaCopy
		}()
	}

	for {
		select {
		case d := <-resultPipe:
			results = append(results, d)
			if len(results) == procs {
				return averageTheta(results)
			}
		}
	}
}
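The element-wise loop flagged as a "horrible hack" above only multiplies every entry of datXFlat by the same scalar, alpha/m, so mat64's Scale can express it directly. A possible replacement for that block, using the same variables as the goroutine body (sketch only):
					var grad mat64.Dense
					grad.Scale(alpha/float64(m), datXFlat) // grad = (alpha/m) * datXFlat
					thetaCopy.Sub(thetaCopy, &grad)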
func TestLinearRegression(t *testing.T) {
	t.Skip("Skipping for now")
	for _, test := range []struct {
		x          *mat64.Dense
		y          *mat64.Vector
		result     *mat64.Vector
		test       *mat64.Vector
		testResult float64
	}{
		{
			mat64.NewDense(3, 4, []float64{
				1, 3, 5, 6,
				1, 1, 2, 3,
				1, 9, 4, 2}),
			mat64.NewVector(3, []float64{1, 6, 4}),
			mat64.NewVector(4, []float64{8.0918, 0.8920, -3.7990, 1.5379}),
			mat64.NewVector(4, []float64{1, 1, 2, 3}),
			6.0,
		}, {
			mat64.NewDense(10, 4, []float64{
				1, 2, 3, 4,
				1, 3, 4, 5,
				1, 4, 5, 6,
				1, 5, 6, 7,
				1, 6, 7, 8,
				1, 7, 8, 9,
				1, 8, 9, 10,
				1, 9, 10, 11,
				1, 10, 11, 12,
				1, 11, 12, 13}),
			mat64.NewVector(10, []float64{20, 26, 32, 38, 44, 50, 56, 62, 68, 74}),
			mat64.NewVector(4, []float64{0, 1, 2, 3}),
			mat64.NewVector(4, []float64{1, 10, 11, 12}),
			68.0,
		},
	} {

		lr := NewLinearRegression(test.x, test.y)
		lr.Fit()
		if !mat64.EqualApprox(test.result, lr.Theta, 0.0001) {
			t.Errorf("LinearRegressions's return theta is expected to be equal to %v, found %v", test.result, lr.Theta)
		}
		predicted := lr.Predict(test.test)

		if math.Abs(test.testResult-predicted) > 0.0001 {
			t.Errorf("LinearRegression predict values are expected to be equal to %f, found %f", test.testResult, predicted)
		}

	}

}
Example #27
// Nearest returns the nearest grid point to p by rounding each dimensional
// position to the nearest grid point.  If the mesh basis is not the identity
// matrix, then p is transformed to the mesh basis before rounding and then
// retransformed back.
func (m *InfMesh) Nearest(p []float64) []float64 {
	if m.StepSize == 0 {
		return append([]float64{}, p...)
	} else if l := len(m.Center); l != 0 && l != len(p) {
		panic(fmt.Sprintf("origin len %v incompatible with point len %v", l, len(p)))
	}

	// set up origin and inverter matrix if necessary
	if len(m.Center) == 0 {
		m.Center = make([]float64, len(p))
	}
	if m.Basis != nil && m.inverter == nil {
		var err error
		m.inverter, err = mat64.Inverse(m.Basis)
		if err != nil {
			panic("basis inversion failed: " + err.Error())
		}
	}

	// translate p based on origin and transform to new vector space
	newp := make([]float64, len(p))
	for i := range newp {
		newp[i] = p[i] - m.Center[i]
	}
	v := mat64.NewDense(len(m.Center), 1, newp)
	rotv := v
	if m.inverter != nil {
		rotv.Mul(m.inverter, v)
	}

	// calculate nearest point
	nearest := mat64.NewDense(len(p), 1, nil)
	for i := range m.Center {
		n, rem := math.Modf(rotv.At(i, 0) / m.StepSize)
		if rem/m.StepSize > 0.5 {
			n++
		}
		nearest.Set(i, 0, float64(n)*m.StepSize)
	}

	// transform back to standard space
	if m.Basis != nil {
		nearest.Mul(m.Basis, nearest)
	}
	nv := nearest.Col(nil, 0)
	for i := range nv {
		nv[i] += m.Center[i]
	}
	return nv
}
Example #28
func mulMulti(a *mat64.Dense, b []float64, rows int) (r []float64) {
	var m, m2 mat64.Dense

	b1 := mat64.NewDense(1, 1, []float64{b[0]})
	b2 := mat64.NewDense(1, 1, []float64{b[1]})

	m.Mul(a.ColView(0), b1)
	m2.Mul(a.ColView(1), b2)

	for i := 0; i < rows; i++ {
		r = append(r, m.ColView(0).At(i, 0)+m2.ColView(0).At(i, 0))
	}
	return r
}
func main() {
	showLU(mat64.NewDense(3, 3, []float64{
		1, 3, 5,
		2, 4, 7,
		1, 1, 0,
	}))
	fmt.Println()
	showLU(mat64.NewDense(4, 4, []float64{
		11, 9, 24, 2,
		1, 5, 2, 6,
		3, 17, 18, 1,
		2, 5, 7, 1,
	}))
}
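showLU is called above but not defined in this excerpt. A minimal sketch of a compatible helper, assuming mat64's LU API (LU.Factorize, TriDense.LFromLU/UFromLU, LU.Pivot); it may differ from the original task's implementation:
func showLU(a *mat64.Dense) {
	var lu mat64.LU
	lu.Factorize(a)
	var l, u mat64.TriDense
	l.LFromLU(&lu)
	u.UFromLU(&lu)
	fmt.Printf("a: %v\n\n", mat64.Formatted(a, mat64.Prefix("   ")))
	fmt.Printf("l: %.5f\n\n", mat64.Formatted(&l, mat64.Prefix("   ")))
	fmt.Printf("u: %.5f\n\n", mat64.Formatted(&u, mat64.Prefix("   ")))
	fmt.Printf("pivot: %v\n", lu.Pivot(nil))
}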
Example #30
// LinearSolve trains a Linear algorithm.
// It assumes the inputs and outputs are already scaled.
// If features is nil, it will call featurize.
// It returns nil if the regularizer is not a linear regularizer.
// It is destructive if any of the weights are zero.
// The Losser is always the two-norm.
// It does not set the value of the parameters (in case this is called in
// parallel with a different routine).
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64

	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}

	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}

	_, nFeatures := features.Dims()

	var weightedFeatures, weightedOutput *mat64.Dense

	if weights != nil {
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}

		diagWeight := diagonal.NewDiagonal(nFeatures, weights)

		nSamples, outputDim := trueOutputs.Dims()
		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)

		weightedOutput.Mul(diagWeight, trueOutputs)
		weightedFeatures.Mul(diagWeight, features)
	}

	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}
	if weights == nil {
		parameterMat := mat64.Solve(features, trueOutputs)
		return parameterMat.RawMatrix().Data

	}
	parameterMat := mat64.Solve(weightedFeatures, weightedOutput)

	return parameterMat.RawMatrix().Data
}