Example #1
// Update adjusts the layer's weights from its stored deltas and inputs,
// applying the configured learning rate and optional weight decay.
func (self *Layer) Update(learningConfiguration LearningConfiguration) {
	// Weight changes for this step: Deltas * Input.
	var deltas mat64.Dense
	deltas.Mul(self.Deltas, self.Input)
	// View all but the last row of the weight matrix; the view shares its
	// backing data with self.Weight, so updating it updates the layer.
	rows, cols := self.Weight.Dims()
	weight := self.Weight.View(0, 0, rows-1, cols).(*mat64.Dense)
	// Optional weight decay: subtract decay*weight from the deltas.
	if *learningConfiguration.Decay > 0 {
		var decay mat64.Dense
		decay.Scale(*learningConfiguration.Decay, weight)
		deltas.Sub(&deltas, decay.T())
	}
	// Scale by the learning rate and apply the (transposed) step.
	deltas.Scale(*learningConfiguration.Rate, &deltas)
	weight.Sub(weight, deltas.T())
}
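The body above is a plain gradient-style update: the deltas are formed from Deltas*Input, optionally adjusted by a decay term, scaled by the learning rate, and subtracted from a view of the weights. The following self-contained sketch replays the same sequence of mat64 calls outside the Layer type; the 2x4/4x3/3x2 matrices and the rate and decay values are made up for illustration, and the View that skips the last weight row is omitted to keep the dimensions simple.

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
)

func main() {
	// Hypothetical stand-ins for self.Deltas, self.Input and self.Weight.
	deltasIn := mat64.NewDense(2, 4, []float64{
		0.1, -0.2, 0.3, 0.0,
		0.0, 0.4, -0.1, 0.2,
	})
	input := mat64.NewDense(4, 3, []float64{
		1, 0, 1,
		0, 1, 1,
		1, 1, 0,
		0, 0, 1,
	})
	weight := mat64.NewDense(3, 2, nil) // all zeros, just for the sketch

	rate, decay := 0.5, 0.01 // made-up learning configuration

	// Weight changes: deltas * input (2x3).
	var deltas mat64.Dense
	deltas.Mul(deltasIn, input)

	// Decay term, as in the example: decay * weight (3x2), transposed
	// before being subtracted from the 2x3 deltas.
	var dec mat64.Dense
	dec.Scale(decay, weight)
	deltas.Sub(&deltas, dec.T())

	// Scale by the learning rate and apply the transposed step.
	deltas.Scale(rate, &deltas)
	weight.Sub(weight, deltas.T())

	fmt.Printf("updated weights:\n%v\n", mat64.Formatted(weight))
}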
Example #2
// CovarianceMatrix calculates a covariance matrix (also known as a
// variance-covariance matrix) from a matrix of data, using a two-pass
// algorithm. The matrix returned will be symmetric and square.
//
// The weights wts should have length equal to the number of rows in the
// input data matrix x, or be nil. If cov is nil, a new matrix of the
// appropriate size is constructed. If cov is not nil, it must be a square
// matrix with the same number of columns as the input data matrix x, and it
// is used as the receiver for the covariance data. Weights cannot be negative.
func CovarianceMatrix(cov *mat64.Dense, x mat64.Matrix, wts []float64) *mat64.Dense {
	// This is the matrix version of the two-pass algorithm. It doesn't use the
	// additional floating point error correction that the Covariance function uses
	// to reduce the impact of rounding during centering.

	// TODO(jonlawlor): indicate that the resulting matrix is symmetric, and change
	// the returned type from a *mat64.Dense to a *mat64.Symmetric.

	r, c := x.Dims()

	if cov == nil {
		cov = mat64.NewDense(c, c, nil)
	} else if covr, covc := cov.Dims(); covr != covc || covc != c {
		panic(mat64.ErrShape)
	}

	var xt mat64.Dense
	xt.TCopy(x)
	// Subtract the mean of each of the columns.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		// This will panic with ErrShape if len(wts) != len(v), so
		// we don't have to check the size later.
		mean := Mean(v, wts)
		floats.AddConst(-mean, v)
	}

	var n float64
	if wts == nil {
		n = float64(r)
		cov.MulTrans(&xt, false, &xt, true)

		// Scale by the sample size.
		cov.Scale(1/(n-1), cov)
		return cov
	}

	// Multiply by the sqrt of the weights, so that multiplication is symmetric.
	sqrtwts := make([]float64, r)
	for i, w := range wts {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		sqrtwts[i] = math.Sqrt(w)
	}
	// Weight the rows.
	for i := 0; i < c; i++ {
		v := xt.RawRowView(i)
		floats.Mul(v, sqrtwts)
	}

	// Calculate the normalization factor.
	n = floats.Sum(wts)
	cov.MulTrans(&xt, false, &xt, true)

	// Scale by the sample size.
	cov.Scale(1/(n-1), cov)
	return cov
}
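The doc comment spells out the receiver and weight semantics, so a minimal unweighted call may be the quickest way to see them in action. The sketch below assumes the function above is exported from a stat package that also provides Mean; the import path is a guess based on gonum's layout, and the data matrix is made up.

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
	"github.com/gonum/stat" // assumed home of CovarianceMatrix and Mean
)

func main() {
	// Four observations of two variables, stored row-major.
	x := mat64.NewDense(4, 2, []float64{
		1.0, 2.0,
		2.0, 4.1,
		3.0, 6.2,
		4.0, 7.9,
	})

	// nil receiver: a new 2x2 matrix is allocated for the result.
	// nil weights: every row counts equally and the 1/(n-1) scaling is used.
	cov := stat.CovarianceMatrix(nil, x, nil)

	fmt.Printf("covariance:\n%v\n", mat64.Formatted(cov))
}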
Example #3
// nnlsSubproblem approximately solves the non-negative least squares
// subproblem min over H >= 0 of ||V - W*H|| by projected gradient descent
// with a backtracking step-size search, starting from Ho. It returns the
// final factor H, the projected gradient G, the number of outer iterations
// used, and ok, which is set when a step satisfying the sufficient-decrease
// condition was accepted.
func nnlsSubproblem(V, W, Ho *mat64.Dense, tol float64, outer, inner int) (H, G *mat64.Dense, i int, ok bool) {
	// Work on a copy of Ho so the caller's matrix is left untouched.
	H = new(mat64.Dense)
	H.Clone(Ho)

	// W'V and W'W are constant across iterations, so compute them once.
	var WtV, WtW mat64.Dense
	WtV.Mul(W.T(), V)
	WtW.Mul(W.T(), W)

	// alpha is the current step size; beta is the backtracking factor.
	alpha, beta := 1., 0.1

	decFilt := func(r, c int, v float64) float64 {
		// decFilt is applied to G, so v = G.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	G = new(mat64.Dense)
	for i = 0; i < outer; i++ {
		// Gradient of 0.5*||V - W*H||^2 with respect to H: W'W*H - W'V,
		// with decFilt zeroing entries where H is already at its zero bound
		// and a descent step would push it negative.
		G.Mul(&WtW, H)
		G.Sub(G, &WtV)
		G.Apply(decFilt, G)

		// Stop once the projected gradient is small enough.
		if mat64.Norm(G, 2) < tol {
			break
		}

		var (
			reduce bool
			Hp     *mat64.Dense
			d, dQ  mat64.Dense
		)
		// Backtracking search over the step size alpha.
		for j := 0; j < inner; j++ {
			// Candidate update: Hn = H - alpha*G, projected by posFilt.
			var Hn mat64.Dense
			Hn.Scale(alpha, G)
			Hn.Sub(H, &Hn)
			Hn.Apply(posFilt, &Hn)

			// d is the step from H to Hn; dQ holds (W'W*d) elementwise d,
			// and d is overwritten with G elementwise d for the test below.
			d.Sub(&Hn, H)
			dQ.Mul(&WtW, &d)
			dQ.MulElem(&dQ, &d)
			d.MulElem(G, &d)

			// Sufficient-decrease condition for accepting the candidate step.
			sufficient := 0.99*mat64.Sum(&d)+0.5*mat64.Sum(&dQ) < 0

			if j == 0 {
				reduce = !sufficient
				Hp = H
			}
			if reduce {
				if sufficient {
					H = &Hn
					ok = true
					break
				} else {
					alpha *= beta
				}
			} else {
				if !sufficient || mat64.Equal(Hp, &Hn) {
					H = Hp
					break
				} else {
					alpha /= beta
					Hp = &Hn
				}
			}
		}
	}

	return H, G, i, ok
}
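nnlsSubproblem depends on a posFilt helper that is not shown in the example. Assuming posFilt simply clamps negative entries to zero, and that the sketch below is compiled in the same package as the function, a call on a small hand-built problem (V chosen so that V = W*H for a known non-negative H) could look like this; the matrices, tolerance, and iteration limits are made up for illustration.

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
)

// posFilt is assumed here to clamp entries onto the non-negative orthant;
// the real helper is defined elsewhere in the package the example comes from.
func posFilt(r, c int, v float64) float64 {
	if v < 0 {
		return 0
	}
	return v
}

func main() {
	// V equals W*H for H = [[1 2 3] [4 5 6]], so an exact non-negative
	// factor exists and the subproblem should drive the gradient near zero.
	W := mat64.NewDense(4, 2, []float64{
		1, 0,
		0, 1,
		1, 1,
		2, 1,
	})
	V := mat64.NewDense(4, 3, []float64{
		1, 2, 3,
		4, 5, 6,
		5, 7, 9,
		6, 9, 12,
	})
	Ho := mat64.NewDense(2, 3, []float64{
		1, 1, 1,
		1, 1, 1,
	})

	H, G, iters, ok := nnlsSubproblem(V, W, Ho, 1e-6, 100, 20)
	fmt.Printf("ok=%v after %d outer iterations\n", ok, iters)
	fmt.Printf("H:\n%v\nprojected gradient norm: %.3g\n",
		mat64.Formatted(H), mat64.Norm(G, 2))
}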