Example #1
File: logreg.go Project: c4e8ece0/ml
// GradientRegularized returns the regularized gradient vector with respect to:
// the current sample: wi
// the current target value: yi
// the current weights: WReg
//
func (lr *LogisticRegression) GradientRegularized(wi []float64, yi float64) ([]float64, error) {
	v := make([]float64, len(wi)+1)
	v[0] = yi
	for i, x := range wi {
		v[i+1] = yi * x
	}
	a := make([]float64, len(wi)+1)
	a[0] = 1
	for i := range wi {
		a[i+1] = wi[i]
	}
	b := make([]float64, len(lr.WReg))
	copy(b, lr.WReg)
	dot, err := ml.Vector(a).Dot(b)
	if err != nil {
		return nil, err
	}
	d := 1 + math.Exp(yi*dot)

	// vG = [-1.0 * x / d for x in vector] + lambda/N * ||wi||^2
	wi2, err := ml.Vector(wi).Dot(wi)
	if err != nil {
		log.Println("skiping regularizer step due to %v", err)
		wi2 = 1
	}
	reg := (lr.Lambda / float64(len(lr.WReg))) * wi2
	vg := make([]float64, len(v))
	for i := range v {
		vg[i] = -v[i]/d + reg
	}
	return vg, nil
}
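
To make the arithmetic concrete, here is a standalone sketch of the same computation on a hypothetical toy sample, using plain slices instead of the ml.Vector helpers; all values are made up for illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	xi := []float64{0.5, -1.2}        // toy sample
	yi := 1.0                         // toy target
	wReg := []float64{0.1, 0.3, -0.2} // toy weights, wReg[0] is the bias
	lambda := 0.01

	// dot product of [1, xi...] with wReg
	dot := wReg[0]
	for i, x := range xi {
		dot += wReg[i+1] * x
	}
	d := 1 + math.Exp(yi*dot)

	// regularizer term: lambda/N * ||xi||^2
	var xi2 float64
	for _, x := range xi {
		xi2 += x * x
	}
	reg := lambda / float64(len(wReg)) * xi2

	// gradient: -yi*[1, xi...]/d plus the regularizer on each component
	grad := make([]float64, len(wReg))
	grad[0] = -yi/d + reg
	for i, x := range xi {
		grad[i+1] = -yi*x/d + reg
	}
	fmt.Println(grad)
}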
Example #2
File: logreg.go Project: c4e8ece0/ml
// CrossEntropyError computes the cross entropy error
// given a sample X and its target, with respect to weight
// vector Wn, based on the formula:
// log(1 + exp(-y*sample*w))
//
func (lr *LogisticRegression) CrossEntropyError(sample []float64, Y float64) (float64, error) {
	dot, err := ml.Vector(sample).Dot(lr.Wn)
	if err != nil {
		return 0, err
	}
	return math.Log(1 + math.Exp(-Y*dot)), nil
}
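
Since the function is just the formula above applied to one point, a self-contained version is short; the crossEntropy helper below is ours, not part of the library:

package main

import (
	"fmt"
	"math"
)

// crossEntropy mirrors CrossEntropyError on plain slices:
// log(1 + exp(-y * <x, w>))
func crossEntropy(x, w []float64, y float64) float64 {
	var dot float64
	for i := range x {
		dot += x[i] * w[i]
	}
	return math.Log(1 + math.Exp(-y*dot))
}

func main() {
	w := []float64{0.2, -0.4}
	x := []float64{1, 0.5} // leading 1 for the bias weight
	fmt.Println(crossEntropy(x, w, 1)) // log(2) ≈ 0.693, since <x, w> = 0
}

A well-classified point with a large margin drives this error toward zero, while a badly misclassified one contributes roughly linearly in the margin.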
Example #3
File: logreg.go Project: c4e8ece0/ml
// EAugIn is the fraction of in-sample points that were misclassified, plus the term
// lambda / N * Sum(Wi^2).
// todo(santiaago): change this to use the ml.Vector type.
//
func (lr *LogisticRegression) EAugIn() float64 {

	gInSample := make([]float64, len(lr.Xn))
	for i := 0; i < len(lr.Xn); i++ {
		gi := float64(0)
		for j := 0; j < len(lr.Xn[0]); j++ {
			gi += lr.Xn[i][j] * lr.WReg[j]
		}
		gInSample[i] = ml.Sign(gi)
	}
	nEin := 0
	for i := 0; i < len(gInSample); i++ {
		if gInSample[i] != lr.Yn[i] {
			nEin++
		}
	}

	wi2, err := ml.Vector(lr.WReg).Dot(lr.WReg)
	if err != nil {
		log.Println("skiping regularizer step due to %v", err)
		wi2 = 1
	}
	reg := (lr.Lambda / float64(len(lr.WReg))) * wi2

	return float64(nEin)/float64(len(gInSample)) + reg
}
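
A toy illustration of the first half of the computation, with hypothetical predictions and labels; EAugIn then adds lambda/N * ||WReg||^2 to this fraction:

package main

import "fmt"

func main() {
	predicted := []float64{1, -1, 1, 1} // signs of Xn[i] . WReg (toy values)
	labels := []float64{1, -1, -1, 1}

	nEin := 0
	for i := range predicted {
		if predicted[i] != labels[i] {
			nEin++
		}
	}
	fmt.Println(float64(nEin) / float64(len(predicted))) // 0.25
}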
Example #4
File: logreg.go Project: c4e8ece0/ml
// Gradient returns the gradient vector with respect to:
// the current sample: wi
// the current target value: yi
// the current weights: Wn
//
func (lr *LogisticRegression) Gradient(wi []float64, yi float64) ([]float64, error) {
	v := make([]float64, len(wi)+1)
	v[0] = yi
	for i, x := range wi {
		v[i+1] = yi * x
	}
	a := make([]float64, len(wi)+1)
	a[0] = 1
	for i := range wi {
		a[i+1] = wi[i]
	}
	b := make([]float64, len(lr.Wn))
	copy(b, lr.Wn)
	dot, err := ml.Vector(a).Dot(b)
	if err != nil {
		return nil, err
	}
	d := 1 + math.Exp(yi*dot)

	// vG = [-1.0 * x / d for x in vector]
	vg := make([]float64, len(v))
	for i := range v {
		vg[i] = -v[i] / d
	}
	return vg, nil
}
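
The returned vector would typically feed an update of the form w ← w - eta*g. A minimal sketch of that step, where the helper name step and the learning rate eta are ours (the library's actual update loop is not shown in these examples):

// step applies one gradient-descent update in place: w <- w - eta*g,
// where g is the vector returned by Gradient.
func step(w, g []float64, eta float64) {
	for i := range w {
		w[i] -= eta * g[i]
	}
}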
Example #5
File: logreg.go Project: c4e8ece0/ml
// ConvergedRegularized reports whether the old weight vector and the new
// weight vector have converged, based on the epsilon value.
//
func (lr *LogisticRegression) ConvergedRegularized(wOld []float64) bool {
	diff := make([]float64, len(wOld))
	for i := range wOld {
		diff[i] = lr.WReg[i] - wOld[i]
	}
	norm, err := ml.Vector(diff).Norm()
	if err != nil {
		log.Println("forcing convergence as we fail to compute norm.")
		return true
	}
	return norm < lr.Epsilon
}
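
The same test written against plain slices, for reference; the function name converged is ours:

package main

import (
	"fmt"
	"math"
)

// converged reports whether ||wNew - wOld|| < epsilon.
func converged(wNew, wOld []float64, epsilon float64) bool {
	var sum float64
	for i := range wNew {
		d := wNew[i] - wOld[i]
		sum += d * d
	}
	return math.Sqrt(sum) < epsilon
}

func main() {
	fmt.Println(converged([]float64{0.5, 0.301}, []float64{0.5, 0.3}, 0.01)) // true
}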
Example #6
File: svm.go Project: c4e8ece0/ml
// Learn will update the weight vector with the output of an SVM algorithm,
// with respect to the training examples and labels (svm.Xn, svm.Yn).
//
// We use an implementation of the Mini-Batch Pegasos Algorithm.
//
// The implementation of the Mini-Batch Pegasos Algorithm is based on the work done by:
// Shalev-Shwartz, Shai and Singer, Yoram and Srebro, Nathan.
// Pegasos: Primal Estimated sub-GrAdient SOlver for SVM.
//
// Note: We might support other methods in the future like SMO, ...
//
func (svm *SVM) Learn() error {

	var err error
	svm.TrainingPoints = len(svm.Xn)
	svm.Wn = make([]float64, svm.VectorSize)

	for t := 0; t <= svm.T; t++ {

		// choose At where |At| = k, uniformly at random
		var At []int // vector of the selected indexes of size K
		for i := 0; i < svm.K; i++ {
			at := rand.Intn(svm.TrainingPoints)
			At = append(At, at)
		}

		// set At+ = {i in At : yi<wt, xi> < 1}
		var Atplus []int
		for _, i := range At {

			xi, yi := ml.Vector(svm.Xn[i]), svm.Yn[i]
			wt := ml.Vector(svm.Wn)
			var dot float64

			if dot, err = wt.Dot(xi); err != nil {
				return err
			}

			if yi*dot < 1 {
				Atplus = append(Atplus, i)
			}
		}

		// update eta = 1 / (lambda * t); t+1 avoids a division by zero at t = 0
		svm.Eta = float64(1) / (svm.Lambda * float64(t+1))

		// set wt+1 = (1 - eta*lambda)*wt + eta/k * sum(for i in At+ of yixi)
		term1 := ml.Vector(svm.Wn).Scale(1 - svm.Eta*svm.Lambda)
		term2 := ml.Vector(make([]float64, svm.VectorSize))
		for _, i := range Atplus {
			xi, yi := ml.Vector(svm.Xn[i]), svm.Yn[i]
			xiyi := xi.Scale(yi)
			if term2, err = term2.Add(xiyi); err != nil {
				return err
			}
		}
		if svm.Wn, err = term1.Add(term2); err != nil {
			return err
		}

		// wt+1 = min{1, (1/sqrt(lambda))/||wt+1||} wt+1
		var norm float64
		if norm, err = ml.Vector(svm.Wn).Norm(); err != nil {
			return err
		}
		projection := 1 / (math.Sqrt(svm.Lambda) * norm)
		if projection < 1 {
			svm.Wn = ml.Vector(svm.Wn).Scale(projection)
		}
	}
	return nil
}
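
The final projection step is easy to check in isolation: after rescaling, ||w|| never exceeds 1/sqrt(lambda). A standalone sketch with toy values:

package main

import (
	"fmt"
	"math"
)

func main() {
	w := []float64{3, 4} // ||w|| = 5 (toy values)
	lambda := 0.25       // so 1/sqrt(lambda) = 2

	var norm float64
	for _, v := range w {
		norm += v * v
	}
	norm = math.Sqrt(norm)

	// wt+1 = min{1, (1/sqrt(lambda))/||wt+1||} * wt+1
	if p := 1 / (math.Sqrt(lambda) * norm); p < 1 {
		for i := range w {
			w[i] *= p
		}
	}
	fmt.Println(w) // [1.2 1.6], whose norm is exactly 2
}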
Example #7
File: logreg.go Project: c4e8ece0/ml
// Predict returns the result of the dot product between the x vector passed as param
// and the logistic regression vector of weights.
//
func (lr *LogisticRegression) Predict(x []float64) (float64, error) {
	if len(x) != len(lr.Wn) {
		return 0, fmt.Errorf("logreg.Predict: x and Wn have different sizes")
	}
	return ml.Vector(x).Dot(lr.Wn)
}
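
A standalone illustration of what Predict computes; the weights and query point below are hypothetical, and x[0] is the constant 1 matching the bias weight:

package main

import "fmt"

func main() {
	w := []float64{-0.5, 2.0, 1.0} // toy trained weights
	x := []float64{1, 0.8, -0.3}   // query point with leading 1

	var p float64
	for i := range x {
		p += x[i] * w[i]
	}
	fmt.Println(p) // 0.8; the sign of p gives the predicted label
}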