Example #1
func (g *gradientIterator) optimalStep(d linalg.Vector) float64 {
	// The optimal step size is (d'*b - x'*A*d)/(d'*A*d)
	// where d is the direction, A is the matrix, x is
	// the current approximate solution, and b is all 1's.

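	// Compute A*d by viewing d as a len(d)-by-1 column matrix.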
	dMat := &linalg.Matrix{
		Rows: len(d),
		Cols: 1,
		Data: d,
	}
	ad := linalg.Vector(g.matrix.Mul(dMat).Data)

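	// With b all 1's, d'*b is simply the sum of d's entries;
	// accumulate it with Kahan summation to limit rounding error.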
	summer := kahan.NewSummer64()
	for _, x := range d {
		summer.Add(x)
	}

	numerator := summer.Sum() - g.solution.Dot(ad)
	denominator := d.Dot(ad)

	return numerator / denominator
}
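For reference, the step size in the comment is the exact line search along d for the quadratic f(x) = (1/2)x'Ax - b'x, whose minimizer solves Ax = b. A short derivation (standard, not part of the source, and assuming A is symmetric):

\[
\frac{d}{d\alpha}\,f(x+\alpha d) = d^\top A(x+\alpha d) - d^\top b = 0
\quad\Longrightarrow\quad
\alpha = \frac{d^\top b - d^\top A x}{d^\top A d}
\]

Since b is all 1's, d'*b reduces to the sum of d's entries, which the Kahan summer accumulates; for symmetric A, d'*A*x equals g.solution.Dot(ad).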
Example #2
// OptimalStep returns the value a which minimizes
// the squared distance between y and x0+a*x.
func (_ SquareLoss) OptimalStep(x0, x, y linalg.Vector) float64 {
	return (x.Dot(y) - x.Dot(x0)) / x.Dot(x)
}
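Setting the derivative of ||y - (x0 + a*x)||^2 with respect to a to zero gives -2*x'*(y - x0 - a*x) = 0, i.e. a = (x'*y - x'*x0)/(x'*x), which is exactly the returned expression. Below is a minimal usage sketch, not from the source: it assumes SquareLoss is an empty struct in the enclosing package, that linalg.Vector is a float64 slice that can be built from a literal, and that the import path is the one these identifiers suggest.

package loss // hypothetical: whichever package declares SquareLoss

import (
	"fmt"

	// Assumed import path; the examples above never show their imports.
	"github.com/unixpickle/num-analysis/linalg"
)

func demoOptimalStep() {
	x0 := linalg.Vector{0, 0}
	x := linalg.Vector{1, 2}
	y := linalg.Vector{2, 4}

	// a = (x·y - x·x0) / (x·x) = (10 - 0) / 5 = 2,
	// so x0 + a*x = {2, 4} reproduces y exactly.
	fmt.Println(SquareLoss{}.OptimalStep(x0, x, y)) // prints 2
}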