Example 1
// Loss returns the weighted exponential loss.
// It determines which samples are positive vs. negative
// by checking the sign of the element in the expected
// vector.
func (w *WeightedExpLoss) Loss(actual autofunc.Result, expected linalg.Vector) autofunc.Result {
	expVar := &autofunc.Variable{Vector: expected.Copy().Scale(-1)}
	dots := autofunc.Mul(actual, expVar)
	exps := autofunc.Exp{}.Apply(dots)

	weightVec := &autofunc.Variable{Vector: make(linalg.Vector, len(expected))}
	for i, x := range expected {
		if x > 0 {
			weightVec.Vector[i] = w.PosWeight
		} else {
			weightVec.Vector[i] = 1
		}
	}

	return autofunc.SumAll(autofunc.Mul(exps, weightVec))
}
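A minimal usage sketch for the loss above. It assumes the snippet sits in the same package as WeightedExpLoss, that *autofunc.Variable satisfies autofunc.Result, and that the resulting value can be read through Output(); the labels, outputs, helper name, and PosWeight value are made up for illustration.

func exampleWeightedExpLoss() linalg.Vector {
	// +1 marks positive samples, -1 negative ones.
	expected := linalg.Vector{1, -1, 1}
	actual := &autofunc.Variable{Vector: linalg.Vector{0.5, -0.25, 2}}

	// Positive samples contribute twice as much to the loss.
	loss := &WeightedExpLoss{PosWeight: 2}
	return loss.Loss(actual, expected).Output()
}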
Example 2
// Classify runs every classifier in s.Classifiers on the sample
// list and returns the weighted sum of their output vectors.
func (s *SumClassifier) Classify(list SampleList) linalg.Vector {
	if len(s.Classifiers) == 0 {
		return make(linalg.Vector, list.Len())
	} else if len(s.Classifiers) != len(s.Weights) {
		panic("classifier count must match weight count")
	}
	var res linalg.Vector
	for i, c := range s.Classifiers {
		w := s.Weights[i]
		if res == nil {
			res = c.Classify(list).Scale(w)
		} else {
			res.Add(c.Classify(list).Scale(w))
		}
	}
	return res
}
Example 3
// PropagateRGradient merges the upstream output gradients (u, uR)
// with the incoming state gradients (su) for each output of the
// wrapped block, then back-propagates the combined state gradients
// through s.WrappedOut.
func (s *stateOutBlockRResult) PropagateRGradient(u, uR []linalg.Vector, su []RStateGrad,
	rg autofunc.RGradient, g autofunc.Gradient) []RStateGrad {
	downstream := make([]RStateGrad, len(s.WrappedOut.Outputs()))
	for i := range s.WrappedOut.Outputs() {
		var vec, vecR linalg.Vector
		if u != nil {
			vec = u[i].Copy()
			vecR = uR[i].Copy()
		}
		if su != nil && su[i] != nil {
			sVec := su[i].(VecRStateGrad)
			if vec == nil {
				vec = sVec.State.Copy()
				vecR = sVec.RState.Copy()
			} else {
				vec.Add(sVec.State)
				vecR.Add(sVec.RState)
			}
		}
		if vec != nil {
			downstream[i] = VecRStateGrad{State: vec, RState: vecR}
		}
	}
	return s.WrappedOut.PropagateRGradient(nil, nil, downstream, rg, g)
}
Example 4
// optimalStep returns the optimal step size to take along the
// direction d.
func (g *gradientIterator) optimalStep(d linalg.Vector) float64 {
	// The optimal step size is (d'*b - x'*A*d)/(d'*A*d)
	// where d is the direction, A is the matrix, x is
	// the current approximate solution, and b is all 1's.

	dMat := &linalg.Matrix{
		Rows: len(d),
		Cols: 1,
		Data: d,
	}
	ad := linalg.Vector(g.matrix.Mul(dMat).Data)

	summer := kahan.NewSummer64()
	for _, x := range d {
		summer.Add(x)
	}

	numerator := summer.Sum() - g.solution.Dot(ad)
	denominator := d.Dot(ad)

	return numerator / denominator
}
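The formula in the comment comes from minimizing the quadratic f(x) = (1/2)*x'*A*x - b'*x along d for a symmetric A: setting the derivative of f(x + a*d) with respect to a to zero gives a = (d'*b - x'*A*d)/(d'*A*d). Below is a dependency-free sketch of that arithmetic on plain slices; the 2x2 system and the helper name are made up for illustration.

func optimalStepDemo() float64 {
	// Small symmetric system A*x = b with b = all 1's, mirroring
	// the assumptions of optimalStep above.
	A := [2][2]float64{{4, 1}, {1, 3}}
	x := []float64{0, 0} // current approximate solution
	d := []float64{1, 1} // search direction
	b := []float64{1, 1}

	// ad = A*d
	var ad [2]float64
	for i := range ad {
		for j, dj := range d {
			ad[i] += A[i][j] * dj
		}
	}

	var db, xad, dad float64
	for i := range d {
		db += d[i] * b[i]   // d'*b
		xad += x[i] * ad[i] // x'*A*d
		dad += d[i] * ad[i] // d'*A*d
	}
	return (db - xad) / dad // 2/9 for this system
}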
Example 5
// PropagateGradient merges the upstream output gradients (u) with
// the incoming state gradients (su) for each output of the wrapped
// block, then back-propagates the combined state gradients through
// s.WrappedOut.
func (s *stateOutBlockResult) PropagateGradient(u []linalg.Vector, su []StateGrad,
	g autofunc.Gradient) []StateGrad {
	downstream := make([]StateGrad, len(s.WrappedOut.Outputs()))
	for i := range s.WrappedOut.Outputs() {
		var vec linalg.Vector
		if u != nil {
			vec = u[i].Copy()
		}
		if su != nil && su[i] != nil {
			sVec := su[i].(VecStateGrad)
			if vec == nil {
				vec = linalg.Vector(sVec).Copy()
			} else {
				vec.Add(linalg.Vector(sVec))
			}
		}
		if vec != nil {
			downstream[i] = VecStateGrad(vec)
		}
	}
	return s.WrappedOut.PropagateGradient(nil, downstream, g)
}
Example 6
// Step adds d.Scale(amount) to coeffs.
// If any of the entries in coeffs hits a
// constraint, then the step is stopped
// short and true is returned to indicate
// that a new constraint has been added.
//
// This may modify d in any way it pleases.
func (a *activeSet) Step(coeffs, d linalg.Vector, amount float64) bool {
	var maxStep, minStep float64
	var maxIdx, minIdx int
	isFirst := true
	for i, x := range d {
		if x == 0 {
			continue
		}
		coeff := coeffs[i]
		maxValue := (a.MaxCoeff - coeff) / x
		minValue := -coeff / x
		if x < 0 {
			maxValue, minValue = minValue, maxValue
		}
		if isFirst {
			isFirst = false
			minStep, maxStep = minValue, maxValue
			maxIdx, minIdx = i, i
		} else {
			if minValue > minStep {
				minStep = minValue
				minIdx = i
			}
			if maxValue < maxStep {
				maxStep = maxValue
				maxIdx = i
			}
		}
	}

	if isFirst {
		return false
	}

	if amount < minStep {
		coeffs.Add(d.Scale(minStep))
		a.addConstraint(coeffs, minIdx)
	} else if amount > maxStep {
		coeffs.Add(d.Scale(maxStep))
		a.addConstraint(coeffs, maxIdx)
	} else {
		coeffs.Add(d.Scale(amount))
		return false
	}
	return true
}
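For intuition: every coordinate with d[i] != 0 restricts the step a to the interval that keeps coeffs[i] + a*d[i] inside [0, a.MaxCoeff]; the loop intersects those intervals, and the requested amount is clamped to the intersection. A standalone sketch of just that clamping logic follows; the names are made up, the addConstraint bookkeeping is omitted, and only the standard math package is used.

// clampStep intersects the per-coordinate step intervals implied by
// the box constraints 0 <= coeffs[i] <= maxCoeff and clamps amount
// to the intersection. It reports whether a bound was hit.
func clampStep(coeffs, d []float64, amount, maxCoeff float64) (float64, bool) {
	lo, hi := math.Inf(-1), math.Inf(1)
	for i, x := range d {
		if x == 0 {
			continue
		}
		upper := (maxCoeff - coeffs[i]) / x
		lower := -coeffs[i] / x
		if x < 0 {
			upper, lower = lower, upper
		}
		if lower > lo {
			lo = lower
		}
		if upper < hi {
			hi = upper
		}
	}
	if amount < lo {
		return lo, true
	}
	if amount > hi {
		return hi, true
	}
	return amount, false
}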
Example 7
// Loss returns the exponential loss, as given by
// exp(-actual*expected).
func (_ ExpLoss) Loss(actual autofunc.Result, expected linalg.Vector) autofunc.Result {
	expVar := &autofunc.Variable{Vector: expected.Copy().Scale(-1)}
	dots := autofunc.Mul(actual, expVar)
	exps := autofunc.Exp{}.Apply(dots)
	return autofunc.SumAll(exps)
}
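A minimal sketch of calling the loss above, under the same same-package and Output() assumptions as the sketch after Example 1; the batch values are arbitrary.

// The result is exp(-(0.5*1)) + exp(-(-0.25*-1)) = exp(-0.5) + exp(-0.25).
func exampleExpLoss() float64 {
	actual := &autofunc.Variable{Vector: linalg.Vector{0.5, -0.25}}
	expected := linalg.Vector{1, -1}
	return ExpLoss{}.Loss(actual, expected).Output()[0]
}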
Example 8
// OptimalStep returns the value a which minimizes
// the squared distance between y and x0+a*x.
func (_ SquareLoss) OptimalStep(x0, x, y linalg.Vector) float64 {
	return (x.Dot(y) - x.Dot(x0)) / x.Dot(x)
}
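The expression follows from setting the derivative of ||y - (x0 + a*x)||^2 with respect to a to zero: x.Dot(y - x0 - a*x) = 0, so a = (x.Dot(y) - x.Dot(x0)) / x.Dot(x), which is exactly what the method returns. A quick sanity-check sketch, under the same same-package assumption and with arbitrary vectors:

func exampleOptimalStep() float64 {
	x0 := linalg.Vector{1, 2}
	x := linalg.Vector{2, -1}

	// Construct y = x0 + 0.3*x, so the optimal step should be exactly 0.3.
	y := x0.Copy()
	y.Add(x.Copy().Scale(0.3))

	return SquareLoss{}.OptimalStep(x0, x, y)
}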
Example 9
// Loss returns the squared magnitude of the difference
// between actual and expected.
func (_ SquareLoss) Loss(actual autofunc.Result, expected linalg.Vector) autofunc.Result {
	expVar := &autofunc.Variable{Vector: expected.Copy().Scale(-1)}
	return autofunc.SumAll(autofunc.Square(autofunc.Add(actual, expVar)))
}
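A matching sketch for Loss, under the same assumptions as above: the returned value is the squared Euclidean distance between actual and expected.

// (1-3)^2 + (2-2)^2 = 4.
func exampleSquareLoss() float64 {
	actual := &autofunc.Variable{Vector: linalg.Vector{1, 2}}
	expected := linalg.Vector{3, 2}
	return SquareLoss{}.Loss(actual, expected).Output()[0]
}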
Example 10
// CostR returns the squared distance between the expected output a
// and the actual output x, as an autofunc.RResult.
func (_ MeanSquaredCost) CostR(v autofunc.RVector, a linalg.Vector,
	x autofunc.RResult) autofunc.RResult {
	aVar := &autofunc.Variable{Vector: a.Copy().Scale(-1)}
	aVarR := autofunc.NewRVariable(aVar, v)
	return autofunc.SquaredNorm{}.ApplyR(v, autofunc.AddR(aVarR, x))
}