Code example #1
File: scale.go Project: reggo/reggo
func (n *InnerNormal) SetScale(data *mat64.Dense) error {
	rows, dim := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}
	means := make([]float64, dim)
	stds := make([]float64, dim)
	for i := 0; i < dim; i++ {
		// Filter out the extremes
		r := data.Col(nil, i)
		if len(r) != rows {
			panic("bad lengths")
		}
		sort.Float64s(r)

		lowerIdx := int(math.Floor(float64(rows) * n.LowerQuantile))
		upperIdx := int(math.Ceil(float64(rows) * n.UpperQuantile))

		trimmed := r[lowerIdx:upperIdx]

		mean, std := stat.MeanStdDev(trimmed, nil)
		means[i] = mean
		stds[i] = std
	}
	n.Mu = means
	n.Sigma = stds
	n.Dim = dim
	n.Scaled = true
	return nil
}
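A minimal usage sketch (the data values are hypothetical; LowerQuantile, UpperQuantile, Mu and Sigma are the fields used above):

func ExampleInnerNormalSetScale() {
	n := &InnerNormal{LowerQuantile: 0.1, UpperQuantile: 0.9}
	data := mat64.NewDense(10, 1, []float64{-100, 1, 2, 3, 4, 5, 6, 7, 8, 1000})
	if err := n.SetScale(data); err != nil {
		log.Fatal(err)
	}
	// The extremes -100 and 1000 fall outside the 10%-90% window and do
	// not influence n.Mu and n.Sigma.
	fmt.Println(n.Mu, n.Sigma)
}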
Code example #2
func (nb *NaiveBayes) Predict(X *mat64.Dense) []Prediction {
	nSamples, _ := X.Dims()

	prediction := []Prediction{}

	for i := 0; i < nSamples; i++ {
		scores := map[int]float64{}
		for langIdx := range nb.params.LangsCount {
			scores[langIdx] = nb.tokensProba(X.Row(nil, i), langIdx) + nb.langProba(langIdx)
		}

		bestScore := scores[0]
		bestLangIdx := 0

		for langIdx, score := range scores {
			if score > bestScore {
				bestScore = score
				bestLangIdx = langIdx
			}
		}

		prediction = append(prediction, Prediction{
			Label:    bestLangIdx,
			Language: "TODO: PENDING",
			Score:    bestScore,
		})
	}

	return prediction
}
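The selection above seeds bestScore with scores[0], which works only because LangsCount is indexed from zero. A standalone sketch of the same argmax step without that assumption (a hypothetical helper, not part of the original package):

func argmaxScore(scores map[int]float64) (bestIdx int, bestScore float64) {
	first := true
	for idx, score := range scores {
		if first || score > bestScore {
			bestIdx, bestScore = idx, score
			first = false
		}
	}
	return bestIdx, bestScore
}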
Code example #3
File: gocv_calib3d.go Project: shautvast/go-opencv
// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N Matrix as input.
// Each column in the input matrix represents a point in real world (objPts) or
// in image (imgPts).
// Return: the camera matrix.
func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int,
	aspectRatio float64) (camMat *mat64.Dense) {

	objDim, nObjPts := objPts.Dims()
	imgDim, nImgPts := imgPts.Dims()

	if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
		panic("Invalid dimensions for objPts and imgPts")
	}

	objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
	imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))

	for j := 0; j < nObjPts; j++ {
		objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, j, objPts)...))
		imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, j, imgPts)...))
	}

	imgSize := NewGcvSize2i(dims[0], dims[1])

	camMat = GcvMatToMat64(GcvInitCameraMatrix2D_(
		objPtsVec, imgPtsVec, imgSize, aspectRatio))
	return camMat
}
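A hypothetical call with four made-up correspondences between a planar target (z = 0) and its image, laid out one point per column as the function expects:

func ExampleGcvInitCameraMatrix2D() {
	objPts := mat64.NewDense(3, 4, []float64{
		0, 1, 1, 0, // x
		0, 0, 1, 1, // y
		0, 0, 0, 0, // z
	})
	imgPts := mat64.NewDense(2, 4, []float64{
		320, 420, 420, 320, // u
		240, 240, 340, 340, // v
	})
	camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)
	fmt.Println(mat64.Formatted(camMat))
}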
Code example #4
File: optim.go Project: rwcarlsen/cloudlus
func StackConstr(low, A, up *mat64.Dense) (stackA, b *mat64.Dense, ranges []float64) {
	neglow := &mat64.Dense{}
	neglow.Scale(-1, low)
	b = &mat64.Dense{}
	b.Stack(up, neglow)

	negA := &mat64.Dense{}
	negA.Scale(-1, A)
	stackA = &mat64.Dense{}
	stackA.Stack(A, negA)

	// capture the range of each constraint from A because this information is
	// lost when converting from "low <= Ax <= up" via stacking to "Ax <= up".
	m, _ := A.Dims()
	ranges = make([]float64, m, 2*m)
	for i := 0; i < m; i++ {
		ranges[i] = up.At(i, 0) - low.At(i, 0)
		if ranges[i] == 0 {
			if up.At(i, 0) == 0 {
				ranges[i] = 1
			} else {
				ranges[i] = up.At(i, 0)
			}
		}
	}
	ranges = append(ranges, ranges...)

	return stackA, b, ranges
}
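The stacking rewrites the two-sided constraint low <= Ax <= up as the one-sided system [A; -A]x <= [up; -low]. A small sketch with made-up numbers:

func ExampleStackConstr() {
	// -1 <= x0 + x1 <= 2 becomes [1 1; -1 -1] x <= [2; 1].
	A := mat64.NewDense(1, 2, []float64{1, 1})
	low := mat64.NewDense(1, 1, []float64{-1})
	up := mat64.NewDense(1, 1, []float64{2})
	stackA, b, ranges := StackConstr(low, A, up)
	fmt.Println(mat64.Formatted(stackA), mat64.Formatted(b), ranges) // ranges: [3 3]
}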
Code example #5
File: liblinear.go Project: shirayu/liblinear
func toFeatureNodes(X *mat64.Dense) []*C.struct_feature_node {
	featureNodes := []*C.struct_feature_node{}

	nRows, nCols := X.Dims()

	for i := 0; i < nRows; i++ {
		row := []C.struct_feature_node{}
		for j := 0; j < nCols; j++ {
			val := X.At(i, j)
			if val != 0 {
				row = append(row, C.struct_feature_node{
					index: C.int(j + 1),
					value: C.double(val),
				})
			}
		}

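		// Terminate each row with liblinear's sentinel node (index -1).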
		row = append(row, C.struct_feature_node{
			index: C.int(-1),
			value: C.double(0),
		})
		featureNodes = append(featureNodes, &row[0])
	}

	return featureNodes
}
Code example #6
File: featurematrix.go Project: lytics/CloudForest
func (fm *FeatureMatrix) Mat64(header, transpose bool) *mat64.Dense {
	var (
		idx   int
		iter  fmIt
		dense *mat64.Dense
	)

	ncol := len(fm.Data)
	nrow := len(fm.CaseLabels)

	if !transpose {
		iter = rowIter(fm, header)
		dense = mat64.NewDense(nrow, ncol, nil)
	} else {
		iter = colIter(fm, header)
		dense = mat64.NewDense(ncol, nrow+1, nil)
	}

	for row, ok := iter(); ok; row, ok = iter() {
		for j, val := range row {
			flt, _ := strconv.ParseFloat(val, 64)
			dense.Set(idx, j, flt)
		}
		idx++
	}

	return dense
}
Code example #7
File: gradient_descent.go Project: 24hours/golearn
// Batch gradient descent finds the local minimum of a function.
// See http://en.wikipedia.org/wiki/Gradient_descent for more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
	m, _ := y.Dims()
	for i := 0; i < epoch; i++ {
		xFlat := mat64.DenseCopyOf(x)
		xFlat.TCopy(xFlat)
		temp := mat64.DenseCopyOf(x)

		// Calculate our best prediction, given theta
		temp.Mul(temp, theta)

		// Calculate our error from the real values
		temp.Sub(temp, y)
		xFlat.Mul(xFlat, temp)

		// Temporary hack to get around the fact there is no scalar division in mat64
		xFlatRow, _ := xFlat.Dims()
		gradient := make([]float64, 0)
		for k := 0; k < xFlatRow; k++ {
			row := xFlat.RowView(k)
			for v := range row {
				divd := row[v] / float64(m) * alpha
				gradient = append(gradient, divd)
			}
		}
		grows := len(gradient)
		grad := mat64.NewDense(grows, 1, gradient)
		theta.Sub(theta, grad)
	}
	return theta
}
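The loop implements the batch update θ ← θ − (α/m)·Xᵀ(Xθ − y). A hypothetical call fitting y ≈ 2x:

func ExampleBatchGradientDescent() {
	x := mat64.NewDense(3, 1, []float64{1, 2, 3})
	y := mat64.NewDense(3, 1, []float64{2, 4, 6})
	theta := mat64.NewDense(1, 1, []float64{0})
	theta = BatchGradientDescent(x, y, theta, 0.1, 100)
	fmt.Println(mat64.Formatted(theta)) // approaches 2
}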
Code example #8
File: linsolve.go Project: reggo/train
// FeaturizeTrainable creates the features from the inputs. featurizedInputs must be nSamples x nFeatures or nil.
func FeaturizeTrainable(t Trainable, inputs common.RowMatrix, featurizedInputs *mat64.Dense) *mat64.Dense {
	nSamples, nDim := inputs.Dims()
	if featurizedInputs == nil {
		nFeatures := t.NumFeatures()
		featurizedInputs = mat64.NewDense(nSamples, nFeatures, nil)
	}

	rowViewer, isRowViewer := inputs.(mat64.RowViewer)
	var f func(start, end int)
	if isRowViewer {
		f = func(start, end int) {
			featurizer := t.NewFeaturizer()
			for i := start; i < end; i++ {
				featurizer.Featurize(rowViewer.RowView(i), featurizedInputs.RowView(i))
			}
		}
	} else {
		f = func(start, end int) {
			featurizer := t.NewFeaturizer()
			input := make([]float64, nDim)
			for i := start; i < end; i++ {
				inputs.Row(input, i)
				featurizer.Featurize(input, featurizedInputs.RowView(i))
			}
		}
	}

	common.ParallelFor(nSamples, common.GetGrainSize(nSamples, minGrain, maxGrain), f)
	return featurizedInputs
}
Code example #9
File: gradient_descent.go Project: erubboli/mlt
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = Number of Training Examples
	// n = Number of Features
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)
	partials := mat64.NewVector(n, nil)
	new_theta := mat64.NewVector(n, nil)

Regression:
	for i := 0; i < maxIters; i++ {
		// Calculate partial derivatives
		h.MulVec(X, new_theta)
		for el := 0; el < m; el++ {
			val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
			h.SetVec(el, val)
		}
		partials.MulVec(X.T(), h)

		// Update theta values
		for el := 0; el < n; el++ {
			new_val := new_theta.At(el, 0) - (alpha * partials.At(el, 0))
			new_theta.SetVec(el, new_val)
		}

		// Check the "distance" to the local minimum
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= tolerance {
			break Regression
		}
	}
	return new_theta
}
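The same update with mat64.Vector types; a hypothetical call recovering θ ≈ (1, 2) for y = 1 + 2x, with an explicit bias column:

func ExampleGradientDescent() {
	X := mat64.NewDense(3, 2, []float64{
		1, 1,
		1, 2,
		1, 3,
	})
	y := mat64.NewVector(3, []float64{3, 5, 7})
	theta := GradientDescent(X, y, 0.1, 1e-8, 100000)
	fmt.Println(mat64.Formatted(theta))
}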
Code example #10
File: geometric.go Project: rmera/gochem
//MassCenter centers in at the center of mass of oref. mass must be
//a column vector. It returns the centered matrix and the displacement matrix.
func MassCenter(in, oref *v3.Matrix, mass *mat64.Dense) (*v3.Matrix, *v3.Matrix, error) {
	or, _ := oref.Dims()
	ir, _ := in.Dims()
	if mass == nil { //just obtain the geometric center
		tmp := ones(or)
		mass = mat64.NewDense(or, 1, tmp) //gnOnes(or, 1)
	}
	ref := v3.Zeros(or)
	ref.Copy(oref)
	gnOnesvector := gnOnes(1, or)
	f := func() { ref.ScaleByCol(ref, mass) }
	if err := gnMaybe(gnPanicker(f)); err != nil {
		return nil, nil, CError{err.Error(), []string{"v3.Matrix.ScaleByCol", "MassCenter"}}
	}
	ref2 := v3.Zeros(1)
	g := func() { ref2.Mul(gnOnesvector, ref) }
	if err := gnMaybe(gnPanicker(g)); err != nil {
		return nil, nil, CError{err.Error(), []string{"v3.gOnesVector", "MassCenter"}}
	}
	ref2.Scale(1.0/mass.Sum(), ref2)
	returned := v3.Zeros(ir)
	returned.Copy(in)
	returned.SubVec(returned, ref2)
	return returned, ref2, nil
}
Code example #11
func (lr *LogisticRegression) Predict(X *mat64.Dense) []Prediction {
	nSamples, _ := X.Dims()

	prediction := []Prediction{}

	// PredictProba scores every sample at once, so compute it once
	// rather than on each loop iteration.
	scores := liblinear.PredictProba(lr.model, X)
	_, nClasses := scores.Dims()

	for i := 0; i < nSamples; i++ {
		bestScore := scores.At(i, 0)
		bestLangIdx := 0

		for langIdx := 0; langIdx < nClasses; langIdx++ {
			score := scores.At(i, langIdx)
			if score > bestScore {
				bestScore = score
				bestLangIdx = langIdx
			}
		}

		prediction = append(prediction, Prediction{
			Label:    bestLangIdx,
			Language: "TODO: PENDING",
			Score:    bestScore,
		})
	}

	return prediction
}
Code example #12
// MetropolisHastings generates rows(batch) samples using the Metropolis Hastings
// algorithm (http://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm),
// with the given target and proposal distributions, starting at the initial location
// and storing the results in-place into batch. If src != nil, it will be used to generate random
// numbers, otherwise rand.Float64 will be used.
//
// Metropolis-Hastings is a Markov-chain Monte Carlo algorithm that generates
// samples according to the distribution specified by target by using the Markov
// chain implicitly defined by the proposal distribution. At each
// iteration, a proposal point is generated randomly from the current location.
// This proposal point is accepted with probability
//  p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current)))
// If the new location is accepted, it is stored into batch and becomes the
// new current location. If it is rejected, the current location remains and
// is stored into batch again. Thus, a location is stored into batch at every iteration.
//
// The samples in Metropolis Hastings are correlated with one another through the
// Markov chain. As a result, the initial value can have a significant influence
// on the early samples, and so, typically, the first samples generated by the chain
// are ignored. This is known as "burn-in", and can be accomplished with slicing.
// The best choice for burn-in length will depend on the sampling and target
// distributions.
//
// Many choose to have a sampling "rate" where a number of samples
// are ignored in between each kept sample. This helps decorrelate
// the samples from one another, but also reduces the number of available samples.
// A sampling rate can be implemented with successive calls to MetropolisHastings.
func MetropolisHastings(batch *mat64.Dense, initial []float64, target distmv.LogProber, proposal MHProposal, src *rand.Rand) {
	f64 := rand.Float64
	if src != nil {
		f64 = src.Float64
	}
	if len(initial) == 0 {
		panic("metropolishastings: zero length initial")
	}
	r, _ := batch.Dims()
	current := make([]float64, len(initial))
	copy(current, initial)
	proposed := make([]float64, len(initial))
	currentLogProb := target.LogProb(initial)
	for i := 0; i < r; i++ {
		proposal.ConditionalRand(proposed, current)
		proposedLogProb := target.LogProb(proposed)
		probTo := proposal.ConditionalLogProb(proposed, current)
		probBack := proposal.ConditionalLogProb(current, proposed)

		accept := math.Exp(proposedLogProb + probBack - probTo - currentLogProb)
		if accept > f64() {
			copy(current, proposed)
			currentLogProb = proposedLogProb
		}
		batch.SetRow(i, current)
	}
}
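A hedged sketch of the burn-in slicing described above; target and proposal stand for any distmv.LogProber and MHProposal implementations:

func burnInSketch(target distmv.LogProber, proposal MHProposal) mat64.Matrix {
	const total, burnin, dim = 10000, 1000, 2
	batch := mat64.NewDense(total, dim, nil)
	MetropolisHastings(batch, make([]float64, dim), target, proposal, nil)
	// Discard the first burnin rows; View shares the underlying data.
	return batch.View(burnin, 0, total-burnin, dim)
}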
Code example #13
File: poly_kernel.go Project: thedadams/golearn
// InnerProduct computes the inner product through a kernel trick
// K(x, y) = (x^T y + 1)^d
func (p *PolyKernel) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	subVectorX := vectorX.ColView(0)
	subVectorY := vectorY.ColView(0)
	result := mat64.Dot(subVectorX, subVectorY)
	result = math.Pow(result+1, float64(p.degree))

	return result
}
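For example, with x = (1, 2)ᵀ, y = (3, 4)ᵀ and degree d = 2, the kernel evaluates to (1·3 + 2·4 + 1)² = 12² = 144.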
Code example #14
File: df.go Project: timkaye11/glasso
func DfFromMat(mat *mat64.Dense) *DataFrame {
	rows, cols := mat.Dims()
	return &DataFrame{
		data: mat,
		rows: rows,
		cols: cols,
	}
}
Code example #15
File: taskhandler.go Project: jmptrader/gautomator
func rowSum(matrix *mat64.Dense, rowId int) float64 {
	_, col := matrix.Dims()
	sum := float64(0)
	for c := 0; c < col; c++ {
		sum += matrix.At(rowId, c)
	}
	return sum
}
Code example #16
File: taskhandler.go Project: jmptrader/gautomator
func colSum(matrix *mat64.Dense, colId int) float64 {
	row, _ := matrix.Dims()
	sum := float64(0)
	for r := 0; r < row; r++ {
		sum += matrix.At(r, colId)
	}
	return sum
}
Code example #17
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)

	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}

	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())

	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))

	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("          ")))

	// Output:
	// a = ⎡120  114   -4  -16⎤
	//     ⎢114  118   11  -24⎥
	//     ⎢ -4   11   58   17⎥
	//     ⎣-16  -24   17   73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡  -0.239⎤
	//     ⎢  0.2732⎥
	//     ⎢-0.04681⎥
	//     ⎣  0.1031⎦
	//
	// L * L^T = ⎡120  114   -4  -16⎤
	//           ⎢114  118   11  -24⎥
	//           ⎢ -4   11   58   17⎥
	//           ⎣-16  -24   17   73⎦
}
Code example #18
File: layer.go Project: evilrobot69/NeuralGo
// TODO(ariw): Delete any matrix creation in layer operations.
func (self *Layer) Forward(previous *Layer) {
	self.resetForExamples(previous)
	self.Input = previous.Output
	var inputAndBias mat64.Dense
	inputAndBias.Augment(self.Input, self.Ones) // Add bias to input.
	self.Output.Mul(&inputAndBias, self.Weight)
	self.DActivationFunction(self.Output.T(), self.Derivatives)
	self.ActivationFunction(self.Output, self.Output)
}
Code example #19
File: linear.go Project: lazywei/lineargo
// double predict(const struct model *model_, const struct feature_node *x);
func Predict(model *Model, X *mat64.Dense) *mat64.Dense {
	nRows, nCols := X.Dims()
	cX := mapCDouble(X.RawMatrix().Data)
	y := mat64.NewDense(nRows, 1, nil)
	result := doubleToFloats(C.call_predict(
		model.cModel, &cX[0], C.int(nRows), C.int(nCols)), nRows)
	y.SetCol(0, result)
	return y
}
Code example #20
File: lsquares.go Project: rsc/go-misc
// LinearLeastSquares computes the least squares fit for the function
//
//   f(x) = Β₀terms₀(x) + Β₁terms₁(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters Β₀, Β₁, ...
// that minimize the sum of the squares of the residuals of f:
//
//   ∑ (ys[i] - f(xs[i]))²
//
// If weights is non-nil, it is used to weight these residuals:
//
//   ∑ weights[i] × (ys[i] - f(xs[i]))²
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for Β̂ in the
	// "normal equations":
	//
	//    (𝐗ᵀ𝐖𝐗)Β̂ = 𝐗ᵀ𝐖𝐲
	//
	// where 𝐖 is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).

	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		panic("len(xs) != len(weights")
	}

	// Construct 𝐗ᵀ. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct 𝐗ᵀ𝐖.
	var XTW *mat64.Dense
	if weights == nil {
		// 𝐖 is the identity matrix.
		XTW = XT
	} else {
		// Since 𝐖 is a diagonal matrix, we do this directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct 𝐲.
	y := mat64.NewVector(len(ys), ys)

	// Compute Β̂.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)

	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)

	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}
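A usage sketch fitting the straight line f(x) = Β₀ + Β₁x, with one vectorized term function per parameter (data values made up):

func ExampleLinearLeastSquares() {
	xs := []float64{0, 1, 2, 3}
	ys := []float64{1, 3, 5, 7} // exactly 1 + 2x
	params := LinearLeastSquares(xs, ys, nil,
		func(xs, termOut []float64) { // constant term
			for i := range termOut {
				termOut[i] = 1
			}
		},
		func(xs, termOut []float64) { // linear term
			copy(termOut, xs)
		},
	)
	fmt.Println(params) // approximately [1 2]
}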
Code example #21
File: kernel.go Project: reggo/kitchensink
// Generate generates a list of n random features given an input dimension d
func (iso IsoSqExp) Generate(n int, dim int, features *mat64.Dense) {
	scale := math.Exp(iso.LogScale)

	for i := 0; i < n; i++ {
		for j := 0; j < dim; j++ {
			features.Set(i, j, rand.NormFloat64()*scale)
		}
	}
}
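A short hypothetical call; LogScale is the field read above, so LogScale: 0 gives unit scale:

func ExampleIsoSqExpGenerate() {
	iso := IsoSqExp{LogScale: 0} // scale = exp(0) = 1
	features := mat64.NewDense(100, 3, nil)
	iso.Generate(100, 3, features) // 100 random features for 3-dimensional inputs
}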
Code example #22
File: liblinear.go Project: shirayu/liblinear
// double predict(const struct model *model_, const struct feature_node *x);
func Predict(model *Model, X *mat64.Dense) *mat64.Dense {
	nRows, _ := X.Dims()
	cXs := toFeatureNodes(X)
	y := mat64.NewDense(nRows, 1, nil)
	for i, cX := range cXs {
		y.Set(i, 0, float64(C.predict(model.cModel, cX)))
	}
	return y
}
Code example #23
File: scale.go Project: reggo/scale
// SetScale sets a linear scale between 0 and 1. It returns an error
// if there are fewer than two data points. If the minimum and maximum
// values are identical in a dimension, they will be set to that value
// +/- 0.5 and a *UniformDimension error is returned.
func (l *Linear) SetScale(data *mat64.Dense) error {

	rows, dim := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}

	// Generate data for min and max if they don't already exist
	if len(l.Min) < dim {
		l.Min = make([]float64, dim)
	} else {
		l.Min = l.Min[0:dim]
	}
	if len(l.Max) < dim {
		l.Max = make([]float64, dim)
	} else {
		l.Max = l.Max[0:dim]
	}
	for i := range l.Min {
		l.Min[i] = math.Inf(1)
	}
	for i := range l.Max {
		l.Max[i] = math.Inf(-1)
	}
	// Find the minimum and maximum in each dimension
	for i := 0; i < rows; i++ {
		for j := 0; j < dim; j++ {
			val := data.At(i, j)
			if val < l.Min[j] {
				l.Min[j] = val
			}
			if val > l.Max[j] {
				l.Max[j] = val
			}
		}
	}
	l.Scaled = true
	l.Dim = dim

	var unifError *UniformDimension

	// Check that the maximum and minimum values are not identical
	for i := range l.Min {
		if l.Min[i] == l.Max[i] {
			if unifError == nil {
				unifError = &UniformDimension{}
			}
			unifError.Dims = append(unifError.Dims, i)
			l.Min[i] -= 0.5
			l.Max[i] += 0.5
		}
	}
	if unifError != nil {
		return unifError
	}
	return nil
}
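A sketch of the constant-column case with made-up data: the second column collapses to a single value, so it is widened by +/- 0.5 and reported through the *UniformDimension error:

func ExampleLinearSetScale() {
	l := &Linear{}
	data := mat64.NewDense(3, 2, []float64{
		1, 5,
		2, 5,
		3, 5, // the second column is constant
	})
	err := l.SetScale(data)
	if unif, ok := err.(*UniformDimension); ok {
		fmt.Println("uniform dimensions:", unif.Dims) // [1]
	}
	fmt.Println(l.Min, l.Max) // [1 4.5] [3 5.5]
}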
Code example #24
File: samplemv.go Project: sbinet/gonum-stat
// SampleWeighted generates rows(batch) samples from the embedded Sampler type
// and sets all of the weights equal to 1. If rows(batch) and len(weights)
// are not equal, SampleWeighted will panic.
func (w SampleUniformWeighted) SampleWeighted(batch *mat64.Dense, weights []float64) {
	r, _ := batch.Dims()
	if r != len(weights) {
		panic(badLengthMismatch)
	}
	w.Sample(batch)
	for i := range weights {
		weights[i] = 1
	}
}
Code example #25
File: kitchensink.go Project: reggo/kitchensink
func predictFeaturized(featurizedInput []float64, featureWeights *mat64.Dense, output []float64) {
	for i := range output {
		output[i] = 0
	}
	for j, zval := range featurizedInput {
		for i, weight := range featureWeights.RowView(j) {
			output[i] += weight * zval
		}
	}
}
Code example #26
File: dbscan.go Project: CTLife/golearn
func regionQuery(p int, ret *big.Int, dist *mat64.Dense, eps float64) *big.Int {
	rows, _ := dist.Dims()
	// Return any points within the Eps neighbourhood
	for i := 0; i < rows; i++ {
		if dist.At(p, i) <= eps {
			ret = ret.SetBit(ret, i, 1) // Mark as neighbour
		}
	}
	return ret
}
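A sketch with a hypothetical pairwise-distance matrix for three points, reading the big.Int bitset back out with Bit:

func ExampleRegionQuery() {
	dist := mat64.NewDense(3, 3, []float64{
		0.0, 0.4, 2.0,
		0.4, 0.0, 2.2,
		2.0, 2.2, 0.0,
	})
	neighbours := regionQuery(0, big.NewInt(0), dist, 0.5)
	for i := 0; i < 3; i++ {
		fmt.Println(i, neighbours.Bit(i)) // points 0 and 1 are in the neighbourhood of 0
	}
}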
Code example #27
File: network.go Project: nickpoorman/golearn
// UpdateBias computes B = B + l.E and updates the bias weights
// from a size * 1 back-propagated error vector.
func (n *Network) UpdateBias(err *mat64.Dense, learnRate float64) {

	for i, b := range n.biases {
		if i < n.input {
			continue
		}
		n.biases[i] = b + err.At(i, 0)*learnRate
	}

}
Code example #28
File: scale.go Project: reggo/scale
func (n *None) SetScale(data *mat64.Dense) error {
	rows, cols := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}

	n.Dim = cols
	n.Scaled = true
	return nil
}
Code example #29
File: gp.go Project: reggo/reggo
// Add adds a new point to the gaussian process
func (gp *Trainer) Add(newInput []float64, newOutput []float64) {

	gp.nData++
	// See if we need to allocate new memory
	var inputAtCap bool
	if len(gp.inputData) == cap(gp.inputData) {
		inputAtCap = true
	}
	/*
		var outputAtCap bool
		if len(gp.outputData) == cap(gp.outputData) {
			outputAtCap = true
		}
	*/

	gp.inputData = append(gp.inputData, newInput...)
	gp.outputData = append(gp.outputData, newOutput...)

	// If we had to allocate memory, allocate new memory for the kernel matrix
	if gp.Implicit {
		// If it's implicit, just need to update matrix size, because the kernel
		// is computed on the fly
		//gp.kernelMat =
		panic("not coded")
	}
	var newKernelMatrix *mat64.Dense
	if inputAtCap {
		oldKernelMatrix := gp.kernelMatrix
		// If we had to allocate new memory for the inputs, then need to expand
		// the size of the matrix as well
		newKernelData := make([]float64, cap(gp.inputData)*cap(gp.inputData))

		panic("Need to use raw matrix")
		//newKernelMatrix = mat64.NewDense(gp.nData, gp.nData, newKernelData)

		// Copy the old kernel data into the new one. View and newKernelMatrix share
		// the same underlying array
		view := &mat64.Dense{}
		view.View(newKernelMatrix, 0, 0, gp.nData-1, gp.nData-1)
		view.Copy(oldKernelMatrix)

		gp.kernelData = newKernelData
	} else {
		// We aren't at capacity, so just need to increase the size
		newKernelMatrix = mat64.NewDense(gp.nData, gp.nData, gp.kernelData)
	}
	// Set the new values of the kernel matrix
	for i := 0; i < gp.nData-1; i++ {
		oldInput := gp.inputData[i*gp.inputDim : (i+1)*gp.inputDim]
		ker := gp.Kernel(oldInput, newInput)
		newKernelMatrix.Set(i, gp.nData-1, ker)
		newKernelMatrix.Set(gp.nData-1, i, ker)
	}
	}
	gp.kernelMatrix = newKernelMatrix
}
Code example #30
func main() {
	// task 1: show qr decomp of wp example
	a := mat64.NewDense(3, 3, []float64{
		12, -51, 4,
		6, 167, -68,
		-4, 24, -41,
	})
	var qr mat64.QR
	qr.Factorize(a)
	var q, r mat64.Dense
	q.QFromQR(&qr)
	r.RFromQR(&qr)
	fmt.Printf("q: %.3f\n\n", mat64.Formatted(&q, mat64.Prefix("   ")))
	fmt.Printf("r: %.3f\n\n", mat64.Formatted(&r, mat64.Prefix("   ")))

	// task 2: use qr decomp for polynomial regression example
	x := []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	y := []float64{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}
	a = Vandermonde(x, 2)
	b := mat64.NewDense(11, 1, y)
	qr.Factorize(a)
	var f mat64.Dense
	f.SolveQR(&qr, false, b)
	fmt.Printf("polyfit: %.3f\n",
		mat64.Formatted(&f, mat64.Prefix("         ")))
}
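Vandermonde is referenced above but not included in this listing; a minimal reconstruction consistent with the degree-2 call (hypothetical, not the original helper):

func Vandermonde(x []float64, degree int) *mat64.Dense {
	// Row i holds the powers x[i]^0 .. x[i]^degree.
	v := mat64.NewDense(len(x), degree+1, nil)
	for i, xi := range x {
		p := 1.0
		for j := 0; j <= degree; j++ {
			v.Set(i, j, p)
			p *= xi
		}
	}
	return v
}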