Example No. 1
// BatchGradientDescent finds a local minimum of a function by batch
// gradient descent. See http://en.wikipedia.org/wiki/Gradient_descent
// for more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
	m, _ := y.Dims()
	for i := 0; i < epoch; i++ {
		// Calculate our best prediction, given theta.
		var pred mat64.Dense
		pred.Mul(x, theta)

		// Calculate our error from the real values.
		pred.Sub(&pred, y)

		// gradient = (alpha / m) * x^T * (x*theta - y). Scale handles the
		// scalar division by m, so no elementwise hack is needed.
		var grad mat64.Dense
		grad.Mul(x.T(), &pred)
		grad.Scale(alpha/float64(m), &grad)
		theta.Sub(theta, &grad)
	}
	return theta
}
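
A minimal usage sketch; the toy data, learning rate, and epoch count below are illustrative rather than from the original project. The first column of x is all ones so that theta's first entry acts as an intercept:

// Fit y ≈ x*theta on four points of the line y = 1 + 2*t.
x := mat64.NewDense(4, 2, []float64{
	1, 1,
	1, 2,
	1, 3,
	1, 4,
})
y := mat64.NewDense(4, 1, []float64{3, 5, 7, 9})
theta := mat64.NewDense(2, 1, []float64{0, 0})
theta = BatchGradientDescent(x, y, theta, 0.1, 1000)
// theta now approximates [1, 2]^T.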
Example No. 2
// Update applies one learning step to the layer: it scales the
// accumulated deltas by the learning rate and subtracts them from the
// weights, applying optional weight decay first.
func (self *Layer) Update(learningConfiguration LearningConfiguration) {
	var deltas mat64.Dense
	deltas.Mul(self.Deltas, self.Input)
	// Work on a view that drops Weight's final row so its shape matches
	// the transposed deltas.
	rows, cols := self.Weight.Dims()
	weight := self.Weight.View(0, 0, rows-1, cols).(*mat64.Dense)
	if *learningConfiguration.Decay > 0 {
		var decay mat64.Dense
		decay.Scale(*learningConfiguration.Decay, weight)
		deltas.Sub(&deltas, decay.T())
	}
	deltas.Scale(*learningConfiguration.Rate, &deltas)
	weight.Sub(weight, deltas.T())
}
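
Since Rate and Decay are dereferenced above, LearningConfiguration evidently carries pointer fields. A hypothetical call, with illustrative values and a layer variable assumed to be in scope:

// Illustrative only; the real LearningConfiguration definition lives
// elsewhere in the project.
rate, decay := 0.01, 0.0001
config := LearningConfiguration{Rate: &rate, Decay: &decay}
layer.Update(config)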
Example No. 3
File: nmf.go Project: postfix/nmf
// nnlsSubproblem approximately solves min_{H >= 0} ||V - W*H||^2 by
// projected gradient descent with a backtracking line search on the
// step size alpha. It returns the updated factor H, the final gradient
// G, the number of outer iterations performed, and whether a
// sufficient-decrease step was accepted.
func nnlsSubproblem(V, W, Ho *mat64.Dense, tol float64, outer, inner int) (H, G *mat64.Dense, i int, ok bool) {
	H = new(mat64.Dense)
	H.Clone(Ho)

	var WtV, WtW mat64.Dense
	WtV.Mul(W.T(), V)
	WtW.Mul(W.T(), W)

	alpha, beta := 1., 0.1

	decFilt := func(r, c int, v float64) float64 {
		// decFilt is applied to G, so v = G.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	G = new(mat64.Dense)
	for i = 0; i < outer; i++ {
		// Gradient of 0.5*||V - W*H||^2 with respect to H, restricted by
		// decFilt to the entries that can still decrease the objective.
		G.Mul(&WtW, H)
		G.Sub(G, &WtV)
		G.Apply(decFilt, G)

		// Converged once the projected gradient is small.
		if mat64.Norm(G, 2) < tol {
			break
		}

		var (
			reduce bool
			Hp     *mat64.Dense
			d, dQ  mat64.Dense
		)
		// Backtracking line search: try a projected step of size alpha and
		// test a sufficient-decrease condition, shrinking or growing alpha
		// as needed.
		for j := 0; j < inner; j++ {
			var Hn mat64.Dense
			Hn.Scale(alpha, G)
			Hn.Sub(H, &Hn)
			Hn.Apply(posFilt, &Hn) // project the step onto H >= 0

			d.Sub(&Hn, H)
			dQ.Mul(&WtW, &d)
			dQ.MulElem(&dQ, &d)
			d.MulElem(G, &d)

			// Sufficient decrease: (1-sigma)*<G,d> + 0.5*<d, WtW*d> < 0,
			// with sigma = 0.01.
			sufficient := 0.99*mat64.Sum(&d)+0.5*mat64.Sum(&dQ) < 0

			if j == 0 {
				reduce = !sufficient
				Hp = H
			}
			if reduce {
				if sufficient {
					H = &Hn
					ok = true
					break
				} else {
					alpha *= beta
				}
			} else {
				if !sufficient || mat64.Equal(Hp, &Hn) {
					H = Hp
					break
				} else {
					alpha /= beta
					Hp = &Hn
				}
			}
		}
	}

	return H, G, i, ok
}
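
posFilt is defined elsewhere in nmf.go and is not part of this excerpt; from how it is used to project the stepped iterate, a plausible definition is simply:

// posFilt zeroes negative entries, projecting onto the nonnegative
// orthant. (Assumed definition; the original lives elsewhere in nmf.go.)
func posFilt(r, c int, v float64) float64 {
	if v > 0 {
		return v
	}
	return 0
}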
Example No. 4
// BackwardOutput computes the output layer's deltas from the target
// values: the prediction error (output - target) multiplied elementwise
// with the activation derivatives.
func (self *Layer) BackwardOutput(values *mat64.Dense,
	error_function ErrorFunction) {
	// TODO(ariw): ErrorFunction Delta use goes here.
	values.Sub(self.Output, values)
	self.Deltas.MulElem(values.T(), self.Derivatives)
}
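
For a squared-error loss this matches the standard output-layer backpropagation rule: with activations a, targets y, and activation derivatives s', the delta is (a - y) elementwise-times s', which a weight step like the Update method in Example No. 2 then applies to the weights.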