Example #1
// BatchGradientDescent iteratively moves theta toward a local minimum of the cost function.
// See http://en.wikipedia.org/wiki/Gradient_descent for more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
	m, _ := y.Dims()
	for i := 0; i < epoch; i++ {
		xFlat := mat64.DenseCopyOf(x)
		xFlat.TCopy(xFlat)
		temp := mat64.DenseCopyOf(x)

		// Calculate our best prediction, given theta
		temp.Mul(temp, theta)

		// Calculate our error from the real values
		temp.Sub(temp, y)
		xFlat.Mul(xFlat, temp)

		// Scale the gradient by alpha/m and update theta. mat64 has no
		// scalar division, but Scale with the factor alpha/m does the same job.
		grad := mat64.DenseCopyOf(xFlat)
		grad.Scale(alpha/float64(m), grad)
		theta.Sub(theta, grad)
	}
	return theta
}
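A minimal usage sketch for BatchGradientDescent (assuming the pre-v1 gonum mat64 API these examples are written against; the data values and the column of ones for the intercept are illustrative only):

x := mat64.NewDense(4, 2, []float64{
	1, 1,
	1, 2,
	1, 3,
	1, 4,
}) // first column is the intercept term
y := mat64.NewDense(4, 1, []float64{6, 5, 7, 10})
theta := mat64.NewDense(2, 1, nil) // start from theta = 0
theta = BatchGradientDescent(x, y, theta, 0.01, 1000)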
Example #2
func (nb *NaiveBayes) Predict(X *mat64.Dense) []Prediction {
	nSamples, _ := X.Dims()

	prediction := []Prediction{}

	for i := 0; i < nSamples; i++ {
		scores := map[int]float64{}
		for langIdx := range nb.params.LangsCount {
			scores[langIdx] = nb.tokensProba(X.Row(nil, i), langIdx) + nb.langProba(langIdx)
		}

		bestScore := scores[0]
		bestLangIdx := 0

		for langIdx, score := range scores {
			if score > bestScore {
				bestScore = score
				bestLangIdx = langIdx
			}
		}

		prediction = append(prediction, Prediction{
			Label:    bestLangIdx,
			Language: "TODO: PENDING",
			Score:    bestScore,
		})
	}

	return prediction
}
Example #3
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = Number of Training Examples
	// n = Number of Features
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)
	partials := mat64.NewVector(n, nil)
	new_theta := mat64.NewVector(n, nil)

Regression:
	for i := 0; i < maxIters; i++ {
		// Calculate partial derivatives
		h.MulVec(X, new_theta)
		for el := 0; el < m; el++ {
			val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
			h.SetVec(el, val)
		}
		partials.MulVec(X.T(), h)

		// Update theta values
		for el := 0; el < n; el++ {
			new_val := new_theta.At(el, 0) - (alpha * partials.At(el, 0))
			new_theta.SetVec(el, new_val)
		}

		// Check the "distance" to the local minumum
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= tolerance {
			break Regression
		}
	}
	return new_theta
}
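A usage sketch under the same mat64 API; X carries a leading column of ones for the intercept, and the tolerance stops the loop once the gradient norm is small:

X := mat64.NewDense(3, 2, []float64{
	1, 0,
	1, 1,
	1, 2,
})
y := mat64.NewVector(3, []float64{1, 2, 3})
theta := GradientDescent(X, y, 0.01, 1e-6, 10000)
fmt.Println(mat64.Formatted(theta))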
Example #4
File: scale.go Project: reggo/reggo
func (n *InnerNormal) SetScale(data *mat64.Dense) error {
	rows, dim := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}
	means := make([]float64, dim)
	stds := make([]float64, dim)
	for i := 0; i < dim; i++ {
		// Filter out the extremes
		r := data.Col(nil, i)
		if len(r) != rows {
			panic("bad lengths")
		}
		sort.Float64s(r)

		lowerIdx := int(math.Floor(float64(rows) * n.LowerQuantile))
		upperIdx := int(math.Ceil(float64(rows) * n.UpperQuantile))

		trimmed := r[lowerIdx:upperIdx]

		mean, std := stat.MeanStdDev(trimmed, nil)
		means[i] = mean
		stds[i] = std
	}
	n.Mu = means
	n.Sigma = stds
	n.Dim = dim
	n.Scaled = true
	return nil
}
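A sketch of how the quantile trimming behaves, using the exported LowerQuantile/UpperQuantile fields referenced above (the data is illustrative):

n := &InnerNormal{LowerQuantile: 0.2, UpperQuantile: 0.8}
data := mat64.NewDense(5, 1, []float64{1, 2, 3, 4, 100}) // 100 is an outlier
if err := n.SetScale(data); err != nil {
	log.Fatal(err)
}
// With rows = 5, lowerIdx = floor(5*0.2) = 1 and upperIdx = ceil(5*0.8) = 4,
// so the trimmed slice is {2, 3, 4} and the outlier never reaches n.Mu or n.Sigma.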
Example #5
func StackConstr(low, A, up *mat64.Dense) (stackA, b *mat64.Dense, ranges []float64) {
	neglow := &mat64.Dense{}
	neglow.Scale(-1, low)
	b = &mat64.Dense{}
	b.Stack(up, neglow)

	negA := &mat64.Dense{}
	negA.Scale(-1, A)
	stackA = &mat64.Dense{}
	stackA.Stack(A, negA)

	// capture the range of each constraint from A because this information is
	// lost when converting from "low <= Ax <= up" via stacking to "stackA*x <= b".
	m, _ := A.Dims()
	ranges = make([]float64, m, 2*m)
	for i := 0; i < m; i++ {
		ranges[i] = up.At(i, 0) - low.At(i, 0)
		if ranges[i] == 0 {
			if up.At(i, 0) == 0 {
				ranges[i] = 1
			} else {
				ranges[i] = up.At(i, 0)
			}
		}
	}
	ranges = append(ranges, ranges...)

	return stackA, b, ranges
}
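A small sketch of what the stacking produces for a two-sided box constraint; values are illustrative:

low := mat64.NewDense(2, 1, []float64{0, -1})
up := mat64.NewDense(2, 1, []float64{4, 3})
A := mat64.NewDense(2, 2, []float64{
	1, 0,
	0, 1,
})
stackA, b, ranges := StackConstr(low, A, up)
// stackA is 4x2 ([A; -A]), b is 4x1 ([up; -low]), and ranges holds up-low
// per original constraint, duplicated to match the stacked rows.
fmt.Println(mat64.Formatted(stackA), mat64.Formatted(b), ranges)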
Example #6
func (lr *LogisticRegression) Predict(X *mat64.Dense) []Prediction {
	nSamples, _ := X.Dims()

	prediction := []Prediction{}

	// Compute all class probabilities once; PredictProba scores every sample in X.
	scores := liblinear.PredictProba(lr.model, X)
	_, nClasses := scores.Dims()

	for i := 0; i < nSamples; i++ {
		bestScore := scores.At(i, 0)
		bestLangIdx := 0

		for langIdx := 0; langIdx < nClasses; langIdx++ {
			score := scores.At(i, langIdx)
			if score > bestScore {
				bestScore = score
				bestLangIdx = langIdx
			}
		}

		prediction = append(prediction, Prediction{
			Label:    bestLangIdx,
			Language: "TODO: PENDING",
			Score:    bestScore,
		})
	}

	return prediction
}
Example #7
// MetropolisHastings generates rows(batch) samples using the Metropolis-Hastings
// algorithm (http://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm),
// with the given target and proposal distributions, starting at the initial location
// and storing the results in-place into batch. If src != nil, it will be used to generate random
// numbers, otherwise rand.Float64 will be used.
//
// Metropolis-Hastings is a Markov-chain Monte Carlo algorithm that generates
// samples according to the distribution specified by target by using the Markov
// chain implicitly defined by the proposal distribution. At each
// iteration, a proposal point is generated randomly from the current location.
// This proposal point is accepted with probability
//  p = min(1, (target(new) * proposal(current|new)) / (target(current) * proposal(new|current)))
// If the new location is accepted, it is stored into batch and becomes the
// new current location. If it is rejected, the current location remains and
// is stored into batch. Thus, a location is stored into batch at every iteration.
//
// The samples in Metropolis Hastings are correlated with one another through the
// Markov chain. As a result, the initial value can have a significant influence
// on the early samples, and so, typically, the first samples generated by the chain
// are ignored. This is known as "burn-in", and can be accomplished with slicing.
// The best choice for burn-in length will depend on the sampling and target
// distributions.
//
// Many choose to have a sampling "rate" where a number of samples
// are ignored in between each kept sample. This helps decorrelate
// the samples from one another, but also reduces the number of available samples.
// A sampling rate can be implemented with successive calls to MetropolisHastings.
func MetropolisHastings(batch *mat64.Dense, initial []float64, target distmv.LogProber, proposal MHProposal, src *rand.Rand) {
	f64 := rand.Float64
	if src != nil {
		f64 = src.Float64
	}
	if len(initial) == 0 {
		panic("metropolishastings: zero length initial")
	}
	r, _ := batch.Dims()
	current := make([]float64, len(initial))
	copy(current, initial)
	proposed := make([]float64, len(initial))
	currentLogProb := target.LogProb(initial)
	for i := 0; i < r; i++ {
		proposal.ConditionalRand(proposed, current)
		proposedLogProb := target.LogProb(proposed)
		probTo := proposal.ConditionalLogProb(proposed, current)
		probBack := proposal.ConditionalLogProb(current, proposed)

		accept := math.Exp(proposedLogProb + probBack - probTo - currentLogProb)
		if accept > f64() {
			copy(current, proposed)
			currentLogProb = proposedLogProb
		}
		batch.SetRow(i, current)
	}
}
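A hedged usage sketch. gaussianWalk is a hypothetical symmetric random-walk proposal whose method set is inferred from the calls above; the distmv.NewNormal(mu, sigma, src) signature is assumed from the stat/distmv package of the same era:

// gaussianWalk proposes y = x + N(0, sigma^2) independently per dimension.
type gaussianWalk struct{ sigma float64 }

func (g gaussianWalk) ConditionalRand(y, x []float64) {
	for i := range y {
		y[i] = x[i] + rand.NormFloat64()*g.sigma
	}
}

func (g gaussianWalk) ConditionalLogProb(y, x []float64) float64 {
	lp := 0.0
	for i := range y {
		d := (y[i] - x[i]) / g.sigma
		lp += -0.5*d*d - math.Log(g.sigma) - 0.5*math.Log(2*math.Pi)
	}
	return lp
}

func sampleSketch() {
	target, ok := distmv.NewNormal([]float64{0, 0}, mat64.NewSymDense(2, []float64{1, 0, 0, 1}), nil)
	if !ok {
		panic("bad covariance")
	}
	batch := mat64.NewDense(10000, 2, nil)
	MetropolisHastings(batch, []float64{0, 0}, target, gaussianWalk{sigma: 0.5}, nil)
	kept := batch.View(1000, 0, 9000, 2) // drop the first 1000 rows as burn-in, via slicing
	_ = kept
}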
Example #8
func (nb *NaiveBayes) Fit(X, y *mat64.Dense) {
	nSamples, nFeatures := X.Dims()

	tokensTotal := 0
	langsTotal, _ := y.Dims()

	langsCount := histogram(y.Col(nil, 0))

	tokensTotalPerLang := map[int]int{}
	tokenCountPerLang := map[int]map[int]int{}

	for i := 0; i < nSamples; i++ {
		langIdx := int(y.At(i, 0))

		for j := 0; j < nFeatures; j++ {
			tokensTotal += int(X.At(i, j))
			tokensTotalPerLang[langIdx] += int(X.At(i, j))

			if _, ok := tokenCountPerLang[langIdx]; !ok {
				tokenCountPerLang[langIdx] = map[int]int{}
			}
			tokenCountPerLang[langIdx][j] += int(X.At(i, j))
		}
	}

	params := nbParams{
		TokensTotal:        tokensTotal,
		LangsTotal:         langsTotal,
		LangsCount:         langsCount,
		TokensTotalPerLang: tokensTotalPerLang,
		TokenCountPerLang:  tokenCountPerLang,
	}

	nb.params = params
}
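A sketch of the Fit/Predict pair from Examples #2 and #8, assuming a zero-value NaiveBayes is usable; the token-count matrix is hypothetical:

// X: one row per document, one column per token; X(i, j) counts token j in document i.
X := mat64.NewDense(4, 3, []float64{
	2, 1, 0,
	3, 0, 0,
	0, 1, 2,
	0, 0, 3,
})
// y: the language index of each document.
y := mat64.NewDense(4, 1, []float64{0, 0, 1, 1})

nb := &NaiveBayes{}
nb.Fit(X, y)
for _, p := range nb.Predict(X) {
	fmt.Println(p.Label, p.Score)
}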
Example #9
// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N Matrix as input.
// Each column in the input matrix represents a point in real world (objPts) or
// in image (imgPts).
// Return: the camera matrix.
func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int,
	aspectRatio float64) (camMat *mat64.Dense) {

	objDim, nObjPts := objPts.Dims()
	imgDim, nImgPts := imgPts.Dims()

	if objDim != 3 || imgDim != 2 || nObjPts != nImgPts {
		panic("Invalid dimensions for objPts and imgPts")
	}

	objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts))
	imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts))

	for j := 0; j < nObjPts; j++ {
		objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, j, objPts)...))
		imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, j, imgPts)...))
	}

	_imgSize := NewGcvSize2i(dims[0], dims[1])

	camMat = GcvMatToMat64(GcvInitCameraMatrix2D_(
		objPtsVec, imgPtsVec, _imgSize, aspectRatio))
	return camMat
}
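A usage sketch; per the doc comment, objPts is 3-by-N and imgPts is 2-by-N with one point per column (the coordinates and image size are illustrative):

objPts := mat64.NewDense(3, 4, []float64{
	0, 1, 1, 0, // x
	0, 0, 1, 1, // y
	0, 0, 0, 0, // z: a planar calibration target
})
imgPts := mat64.NewDense(2, 4, []float64{
	10, 110, 110, 10, // u
	10, 10, 110, 110, // v
})
camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{640, 480}, 1.0)
fmt.Println(mat64.Formatted(camMat))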
Example #10
// LatinHypercube generates rows(batch) samples using Latin hypercube sampling
// from the given distribution. If src is not nil, it will be used to generate
// random numbers, otherwise rand.Float64 will be used.
//
// Latin hypercube sampling divides the cumulative distribution function into equally
// spaced bins and guarantees that one sample is generated per bin. Within each bin,
// the location is randomly sampled. The distmv.UnitNormal variable can be used
// for easy generation from the unit interval.
func LatinHypercube(batch *mat64.Dense, q distmv.Quantiler, src *rand.Rand) {
	r, c := batch.Dims()
	var f64 func() float64
	var perm func(int) []int
	if src != nil {
		f64 = src.Float64
		perm = src.Perm
	} else {
		f64 = rand.Float64
		perm = rand.Perm
	}
	r64 := float64(r)
	for i := 0; i < c; i++ {
		p := perm(r)
		for j := 0; j < r; j++ {
			v := f64()/r64 + float64(j)/r64
			batch.Set(p[j], i, v)
		}
	}
	p := make([]float64, c)
	for i := 0; i < r; i++ {
		copy(p, batch.RawRowView(i))
		q.Quantile(batch.RawRowView(i), p)
	}
}
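A hedged sketch. indepUniform is a hypothetical Quantiler (independent uniforms on [min, max) per dimension); the Quantile(x, p []float64) []float64 shape is assumed from the q.Quantile call above:

type indepUniform struct{ min, max float64 }

func (u indepUniform) Quantile(x, p []float64) []float64 {
	for i, v := range p {
		x[i] = u.min + v*(u.max-u.min)
	}
	return x
}

func latinSketch() {
	batch := mat64.NewDense(10, 3, nil) // 10 stratified samples in 3 dimensions
	LatinHypercube(batch, indepUniform{min: -1, max: 1}, nil)
	fmt.Println(mat64.Formatted(batch))
}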
Example #11
func toFeatureNodes(X *mat64.Dense) []*C.struct_feature_node {
	featureNodes := []*C.struct_feature_node{}

	nRows, nCols := X.Dims()

	for i := 0; i < nRows; i++ {
		row := []C.struct_feature_node{}
		for j := 0; j < nCols; j++ {
			val := X.At(i, j)
			if val != 0 {
				row = append(row, C.struct_feature_node{
					index: C.int(j + 1),
					value: C.double(val),
				})
			}
		}

		row = append(row, C.struct_feature_node{
			index: C.int(-1),
			value: C.double(0),
		})
		featureNodes = append(featureNodes, &row[0])
	}

	return featureNodes
}
Example #12
func DfFromMat(mat *mat64.Dense) *DataFrame {
	rows, cols := mat.Dims()
	return &DataFrame{
		data: mat,
		rows: rows,
		cols: cols,
	}
}
Example #13
func rowSum(matrix *mat64.Dense, rowId int) float64 {
	_, col := matrix.Dims()
	sum := float64(0)
	for c := 0; c < col; c++ {
		sum += matrix.At(rowId, c)
	}
	return sum
}
Example #14
func colSum(matrix *mat64.Dense, colId int) float64 {
	row, _ := matrix.Dims()
	sum := float64(0)
	for r := 0; r < row; r++ {
		sum += matrix.At(r, colId)
	}
	return sum
}
Example #15
// double predict(const struct model *model_, const struct feature_node *x);
func Predict(model *Model, X *mat64.Dense) *mat64.Dense {
	nRows, nCols := X.Dims()
	cX := mapCDouble(X.RawMatrix().Data)
	y := mat64.NewDense(nRows, 1, nil)
	result := doubleToFloats(C.call_predict(
		model.cModel, &cX[0], C.int(nRows), C.int(nCols)), nRows)
	y.SetCol(0, result)
	return y
}
Example #16
File: scale.go Project: reggo/scale
// SetScale sets a linear scale between 0 and 1. If there are fewer than
// two data points, an error is returned. If the minimum and maximum value
// are identical in a dimension, the minimum and maximum values will be set
// to that value +/- 0.5 and a UniformDimension error is returned.
func (l *Linear) SetScale(data *mat64.Dense) error {

	rows, dim := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}

	// Generate data for min and max if they don't already exist
	if len(l.Min) < dim {
		l.Min = make([]float64, dim)
	} else {
		l.Min = l.Min[0:dim]
	}
	if len(l.Max) < dim {
		l.Max = make([]float64, dim)
	} else {
		l.Max = l.Max[0:dim]
	}
	for i := range l.Min {
		l.Min[i] = math.Inf(1)
	}
	for i := range l.Max {
		l.Max[i] = math.Inf(-1)
	}
	// Find the minimum and maximum in each dimension
	for i := 0; i < rows; i++ {
		for j := 0; j < dim; j++ {
			val := data.At(i, j)
			if val < l.Min[j] {
				l.Min[j] = val
			}
			if val > l.Max[j] {
				l.Max[j] = val
			}
		}
	}
	l.Scaled = true
	l.Dim = dim

	var unifError *UniformDimension

	// Check that the maximum and minimum values are not identical
	for i := range l.Min {
		if l.Min[i] == l.Max[i] {
			if unifError == nil {
				unifError = &UniformDimension{}
			}
			unifError.Dims = append(unifError.Dims, i)
			l.Min[i] -= 0.5
			l.Max[i] += 0.5
		}
	}
	if unifError != nil {
		return unifError
	}
	return nil
}
Example #17
// double predict(const struct model *model_, const struct feature_node *x);
func Predict(model *Model, X *mat64.Dense) *mat64.Dense {
	nRows, _ := X.Dims()
	cXs := toFeatureNodes(X)
	y := mat64.NewDense(nRows, 1, nil)
	for i, cX := range cXs {
		y.Set(i, 0, float64(C.predict(model.cModel, cX)))
	}
	return y
}
Example #18
func regionQuery(p int, ret *big.Int, dist *mat64.Dense, eps float64) *big.Int {
	rows, _ := dist.Dims()
	// Return any points within the Eps neighbourhood
	for i := 0; i < rows; i++ {
		if dist.At(p, i) <= eps {
			ret = ret.SetBit(ret, i, 1) // Mark as neighbour
		}
	}
	return ret
}
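A sketch of the neighbourhood query; dist is a precomputed pairwise-distance matrix and the big.Int serves as a bitset over point indices:

dist := mat64.NewDense(3, 3, []float64{
	0.0, 0.4, 2.0,
	0.4, 0.0, 1.8,
	2.0, 1.8, 0.0,
})
neighbours := regionQuery(0, new(big.Int), dist, 0.5)
fmt.Println(neighbours.Bit(0), neighbours.Bit(1), neighbours.Bit(2)) // 1 1 0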
Example #19
// SampleWeighted generates rows(batch) samples from the embedded Sampler type
// and sets all of the weights equal to 1. If rows(batch) and len(weights)
// are not equal, SampleWeighted will panic.
func (w SampleUniformWeighted) SampleWeighted(batch *mat64.Dense, weights []float64) {
	r, _ := batch.Dims()
	if r != len(weights) {
		panic(badLengthMismatch)
	}
	w.Sample(batch)
	for i := range weights {
		weights[i] = 1
	}
}
Example #20
File: scale.go Project: reggo/scale
func (n *None) SetScale(data *mat64.Dense) error {
	rows, cols := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}

	n.Dim = cols
	n.Scaled = true
	return nil
}
Example #21
// Importance sampling generates rows(batch) samples from the proposal distribution,
// and stores the locations and importance sampling weights in place.
//
// Importance sampling is a variance reduction technique where samples are
// generated from a proposal distribution, q(x), instead of the target distribution
// p(x). This allows relatively unlikely samples in p(x) to be generated more frequently.
//
// The importance sampling weight at x is given by p(x)/q(x). To reduce variance,
// a good proposal distribution will bound this sampling weight. This implies the
// support of q(x) should be at least as broad as p(x), and q(x) should be "fatter tailed"
// than p(x).
//
// The length of weights must equal rows(batch), otherwise Importance will panic.
func Importance(batch *mat64.Dense, weights []float64, target distmv.LogProber, proposal distmv.RandLogProber) {
	r, _ := batch.Dims()
	if r != len(weights) {
		panic(badLengthMismatch)
	}
	for i := 0; i < r; i++ {
		v := batch.RawRowView(i)
		proposal.Rand(v)
		weights[i] = math.Exp(target.LogProb(v) - proposal.LogProb(v))
	}
}
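A hedged usage sketch; distmv.NewNormal(mu, sigma, src) is assumed from the stat/distmv package of the same era, and the proposal deliberately has larger variance than the target so the weight p(x)/q(x) stays bounded:

mu := []float64{0, 0}
target, _ := distmv.NewNormal(mu, mat64.NewSymDense(2, []float64{1, 0, 0, 1}), nil)
proposal, _ := distmv.NewNormal(mu, mat64.NewSymDense(2, []float64{4, 0, 0, 4}), nil)

batch := mat64.NewDense(1000, 2, nil)
weights := make([]float64, 1000)
Importance(batch, weights, target, proposal)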
Example #22
// Sample generates rows(batch) samples using the Metropolis Hastings sample
// generation method. The initial location is NOT updated during the call to Sample.
//
// The number of columns in batch must equal len(m.Initial), otherwise Sample
// will panic.
func (m MetropolisHastingser) Sample(batch *mat64.Dense) {
	burnIn := m.BurnIn
	rate := m.Rate
	if rate == 0 {
		rate = 1
	}
	r, c := batch.Dims()
	if len(m.Initial) != c {
		panic("metropolishastings: length mismatch")
	}

	// Use the optimal size for the temporary memory to allow the fewest calls
	// to MetropolisHastings. The case where tmp shadows batch must be
	// aligned with the logic after burn-in so that tmp does not shadow batch
	// during the rate portion.
	tmp := batch
	if rate > r {
		tmp = mat64.NewDense(rate, c, nil)
	}
	rTmp, _ := tmp.Dims()

	// Perform burn-in.
	remaining := burnIn
	initial := make([]float64, c)
	copy(initial, m.Initial)
	for remaining != 0 {
		newSamp := min(rTmp, remaining)
		MetropolisHastings(tmp.View(0, 0, newSamp, c).(*mat64.Dense), initial, m.Target, m.Proposal, m.Src)
		copy(initial, tmp.RawRowView(newSamp-1))
		remaining -= newSamp
	}

	if rate == 1 {
		MetropolisHastings(batch, initial, m.Target, m.Proposal, m.Src)
		return
	}

	if rTmp <= r {
		tmp = mat64.NewDense(rate, c, nil)
	}

	// Take a single sample from the chain.
	MetropolisHastings(batch.View(0, 0, 1, c).(*mat64.Dense), initial, m.Target, m.Proposal, m.Src)

	copy(initial, batch.RawRowView(0))
	// For all of the other samples, first generate Rate samples and then actually
	// accept the last one.
	for i := 1; i < r; i++ {
		MetropolisHastings(tmp, initial, m.Target, m.Proposal, m.Src)
		v := tmp.RawRowView(rate - 1)
		batch.SetRow(i, v)
		copy(initial, v)
	}
}
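A configuration sketch; the field names follow the uses in the method above, and target / gaussianWalk are the illustrative types from the Example #7 sketch:

mh := MetropolisHastingser{
	Initial:  []float64{0, 0},
	BurnIn:   1000, // samples discarded before any row of batch is written
	Rate:     10,   // keep every 10th sample to decorrelate the chain
	Target:   target,
	Proposal: gaussianWalk{sigma: 0.5},
	Src:      nil,
}
batch := mat64.NewDense(500, 2, nil)
mh.Sample(batch)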
Example #23
// Convert *mat64.Dense to Mat
func Mat64ToGcvMat(mat *mat64.Dense) GcvMat {
	row, col := mat.Dims()

	rawData := NewGcvFloat64Vector(int64(row * col))

	for i := 0; i < row; i++ {
		for j := 0; j < col; j++ {
			rawData.Set(i*col+j, mat.At(i, j))
		}
	}

	return Mat64ToGcvMat_(row, col, rawData)
}
Example #24
File: scale.go Project: reggo/scale
// SetScale Finds the appropriate scaling of the data such that the dataset has
// a mean of 0 and a variance of 1. If the standard deviation of any of
// the data is zero (all of the entries have the same value),
// the standard deviation is set to 1.0 and a UniformDimension error is
// returned
func (n *Normal) SetScale(data *mat64.Dense) error {

	rows, dim := data.Dims()
	if rows < 2 {
		return errors.New("scale: less than two inputs")
	}

	// Need to find the mean input and the std of the input
	mean := make([]float64, dim)
	for i := 0; i < rows; i++ {
		for j := 0; j < dim; j++ {
			mean[j] += data.At(i, j)
		}
	}
	for i := range mean {
		mean[i] /= float64(rows)
	}

	// TODO: Replace this with something that has better numerical properties
	std := make([]float64, dim)
	for i := 0; i < rows; i++ {
		for j := 0; j < dim; j++ {
			diff := data.At(i, j) - mean[j]
			std[j] += diff * diff
		}
	}
	for i := range std {
		std[i] /= float64(rows)
		std[i] = math.Sqrt(std[i])
	}
	n.Scaled = true
	n.Dim = dim

	var unifError *UniformDimension
	for i := range std {
		if std[i] == 0 {
			if unifError == nil {
				unifError = &UniformDimension{}
			}
			unifError.Dims = append(unifError.Dims, i)
			std[i] = 1.0
		}
	}

	n.Mu = mean
	n.Sigma = std
	if unifError != nil {
		return unifError
	}
	return nil
}
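A sketch of the UniformDimension path; the second column is constant, so its standard deviation is forced to 1 and the error reports that dimension:

data := mat64.NewDense(3, 2, []float64{
	1, 7,
	2, 7,
	3, 7,
})
n := &Normal{}
err := n.SetScale(data)
if unif, ok := err.(*UniformDimension); ok {
	fmt.Println("constant dimensions:", unif.Dims) // [1]
}
fmt.Println(n.Mu, n.Sigma) // Sigma[1] == 1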
Example #25
// Stochastic gradient descent updates the parameters of theta on a random row selection from a matrix.
// It is faster as it does not compute the cost function over the entire dataset every time.
// It instead calculates the error parameters over only one row of the dataset at a time.
// In return, there is a trade off for accuracy. This is minimised by running multiple SGD processes
// (the number of goroutines spawned is specified by the procs variable) in parallel and taking an average of the result.
func StochasticGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch, procs int) *mat64.Dense {
	m, _ := y.Dims()
	resultPipe := make(chan *mat64.Dense)
	results := make([]*mat64.Dense, 0)

	for p := 0; p < procs; p++ {
		go func() {
			// DenseCopyOf copies theta, so each goroutine updates its own parameters.
			thetaCopy := mat64.DenseCopyOf(theta)
			for i := 0; i < epoch; i++ {
				for k := 0; k < m; k++ {
					datXtemp := x.RowView(k)
					datYtemp := y.RowView(k)
					datX := mat64.NewDense(1, len(datXtemp), datXtemp)
					datY := mat64.NewDense(1, 1, datYtemp)
					datXFlat := mat64.DenseCopyOf(datX)
					datXFlat.TCopy(datXFlat)
					datX.Mul(datX, thetaCopy)
					datX.Sub(datX, datY)
					datXFlat.Mul(datXFlat, datX)

					// Scale the per-sample gradient by alpha/m and update thetaCopy.
					// mat64 has no elementwise scalar division, but Scale covers it.
					grad := mat64.DenseCopyOf(datXFlat)
					grad.Scale(alpha/float64(m), grad)
					thetaCopy.Sub(thetaCopy, grad)
				}

			}
			resultPipe <- thetaCopy
		}()
	}

	for {
		d := <-resultPipe
		results = append(results, d)
		if len(results) == procs {
			return averageTheta(results)
		}
	}
}
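A usage sketch mirroring the batch version; four goroutines run independent SGD passes and averageTheta combines their results:

x := mat64.NewDense(4, 2, []float64{
	1, 1,
	1, 2,
	1, 3,
	1, 4,
})
y := mat64.NewDense(4, 1, []float64{6, 5, 7, 10})
theta := mat64.NewDense(2, 1, nil)
theta = StochasticGradientDescent(x, y, theta, 0.01, 100, 4)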
Example #26
// LinearSolve trains a Linear algorithm.
// Assumes inputs and outputs are already scaled
// If features is nil will call featurize
// Will return nil if regularizer is not a linear regularizer
// Is destructive if any of the weights are zero
// Losser is always the two-norm
// Does not set the value of the parameters (in case this is called in parallel with a different routine)
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64

	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}

	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}

	_, nFeatures := features.Dims()

	var weightedFeatures, weightedOutput *mat64.Dense

	if weights != nil {
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}

		nSamples, outputDim := trueOutputs.Dims()
		// The weight matrix must be nSamples x nSamples and hold the square
		// roots of the weights so the two-norm solve minimizes the weighted
		// squared error.
		diagWeight := diagonal.NewDiagonal(nSamples, scaledWeight)

		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)

		weightedOutput.Mul(diagWeight, trueOutputs)
		weightedFeatures.Mul(diagWeight, features)
	}

	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}
	if weights == nil {
		parameterMat := mat64.Solve(features, trueOutputs)
		return parameterMat.RawMatrix().Data

	}
	parameterMat := mat64.Solve(weightedFeatures, weightedOutput)

	return parameterMat.RawMatrix().Data
}
Example #27
File: cost.go Project: erubboli/mlt
func Cost(x *mat64.Dense, y, theta *mat64.Vector) float64 {
	// initialize receivers
	m, _ := x.Dims()
	h := mat64.NewDense(m, 1, make([]float64, m))
	squaredErrors := mat64.NewDense(m, 1, make([]float64, m))

	// actual calculation
	h.Mul(x, theta)
	squaredErrors.Apply(func(r, c int, v float64) float64 {
		return math.Pow(h.At(r, c)-y.At(r, c), 2)
	}, h)
	j := mat64.Sum(squaredErrors) * 1.0 / (2.0 * float64(m))

	return j
}
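Cost computes the usual squared-error objective J(theta) = 1/(2m) * sum((x*theta - y)^2). A tiny check with theta = 0, where the residuals are just -y:

x := mat64.NewDense(3, 2, []float64{
	1, 1,
	1, 2,
	1, 3,
})
y := mat64.NewVector(3, []float64{2, 3, 4})
theta := mat64.NewVector(2, nil)
fmt.Println(Cost(x, y, theta)) // (4 + 9 + 16) / (2 * 3) ≈ 4.833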
Example #28
// double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
func PredictProba(model *Model, X *mat64.Dense) *mat64.Dense {
	nRows, nCols := X.Dims()
	nrClasses := int(C.get_nr_class(model.cModel))

	cX := mapCDouble(X.RawMatrix().Data)
	y := mat64.NewDense(nRows, nrClasses, nil)

	result := doubleToFloats(C.call_predict_proba(
		model.cModel, &cX[0], C.int(nRows), C.int(nCols), C.int(nrClasses)),
		nRows*nrClasses)
	for i := 0; i < nRows; i++ {
		y.SetRow(i, result[i*nrClasses:(i+1)*nrClasses])
	}
	return y
}
Example #29
// double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
func PredictProba(model *Model, X *mat64.Dense) *mat64.Dense {
	nRows, _ := X.Dims()
	nrClasses := int(C.get_nr_class(model.cModel))

	cXs := toFeatureNodes(X)
	y := mat64.NewDense(nRows, nrClasses, nil)

	proba := make([]C.double, nrClasses)
	for i, cX := range cXs {
		C.predict_probability(model.cModel, cX, &proba[0])
		for j := 0; j < nrClasses; j++ {
			y.Set(i, j, float64(proba[j]))
		}
	}
	return y
}
Example #30
// Manhattan distance, also known as L1 distance.
// Computes the sum of absolute differences between corresponding elements.
func (self *Manhattan) Distance(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	r1, c1 := vectorX.Dims()
	r2, c2 := vectorY.Dims()
	if r1 != r2 || c1 != c2 {
		panic(mat64.ErrShape)
	}

	result := .0

	for i := 0; i < r1; i++ {
		for j := 0; j < c1; j++ {
			result += math.Abs(vectorX.At(i, j) - vectorY.At(i, j))
		}
	}
	return result
}
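A quick check of the distance on two 1x3 row vectors:

a := mat64.NewDense(1, 3, []float64{1, 2, 3})
b := mat64.NewDense(1, 3, []float64{2, 0, 5})
m := &Manhattan{}
fmt.Println(m.Distance(a, b)) // |1-2| + |2-0| + |3-5| = 5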