// Solve minimizes problem with the go-lbfgsb L-BFGS-B optimizer, starting
// from x, and returns the minimizing point together with its value.
func (solver *LbfgsbSolver) Solve(problem *optimization.Problem, x optimization.Point) (optimization.Point, float64) {
	optimizer := new(lbfgsb.Lbfgsb).SetFTolerance(1e-10).SetGTolerance(1e-10)
	point := optimization.VectorToDense(x)
	// Log progress, repeating the column header every 10 iterations.
	optimizer.SetLogger(func(info *lbfgsb.OptimizationIterationInformation) {
		if (info.Iteration-1)%10 == 0 {
			solver.Log(1000, info.Header())
		}
		solver.Log(1000, info.String())
	})
	// Adapt the problem's value and gradient callbacks to the go-lbfgsb API.
	objective := lbfgsb.GeneralObjectiveFunction{
		Function: func(p []float64) float64 {
			return problem.Value(optimization.VectorDensePoint(p))
		},
		Gradient: func(p []float64) []float64 {
			g := problem.Gradient(optimization.VectorDensePoint(p))
			return optimization.VectorToDense(g)
		},
	}
	xfg, status := optimizer.Minimize(objective, point)
	stats := optimizer.OptimizationStatistics()
	log.Printf("stats: iters: %v; F evals: %v; G evals: %v", stats.Iterations, stats.FunctionEvaluations, stats.GradientEvaluations)
	log.Printf("status: %v", status)
	x = optimization.VectorDensePoint(xfg.X)
	// Sanity check: the optimizer's reported value should match a fresh
	// evaluation at the returned point.
	if y := problem.Value(x); xfg.F != y {
		log.Printf("value mismatch: %v != %v", xfg.F, y)
	}
	return x, xfg.F
}
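A minimal self-contained sketch of the same go-lbfgsb calls used in Solve, minimizing a simple quadratic. The import path github.com/afbarnard/go-lbfgsb is an assumption; only API already visible above (Lbfgsb, GeneralObjectiveFunction, Minimize) is used:

package main

import (
	"log"

	lbfgsb "github.com/afbarnard/go-lbfgsb" // assumed import path
)

func main() {
	// f(x) = sum x_i^2 with gradient 2x; the minimum is at the origin.
	objective := lbfgsb.GeneralObjectiveFunction{
		Function: func(p []float64) float64 {
			s := 0.0
			for _, v := range p {
				s += v * v
			}
			return s
		},
		Gradient: func(p []float64) []float64 {
			g := make([]float64, len(p))
			for i, v := range p {
				g[i] = 2 * v
			}
			return g
		},
	}
	optimizer := new(lbfgsb.Lbfgsb).SetFTolerance(1e-10).SetGTolerance(1e-10)
	xfg, status := optimizer.Minimize(objective, []float64{3, -4})
	log.Printf("min %v at %v (status %v)", xfg.F, xfg.X, status)
}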
Example #2
func OptimizeWeights(init_weights []float64, all_labels [][]float64) []float64 {
	log.Printf("optimize ...\n")
	// Pick the solver named by the solver_name flag; L-BFGS is the default.
	var solver optimization.Solver
	switch *solver_name {
	case "gradient":
		solver = &optimization.GradientDescentSolver{}
	case "conjugate":
		// NOTE: the original listing assigns GradientDescentSolver here as
		// well, which looks like a copy-paste slip (a conjugate-gradient
		// solver was presumably intended).
		solver = &optimization.GradientDescentSolver{}
	default:
		solver = &optimization.LmBFGSSolver{}
	}
	solver.Init(map[string]interface{}{
		"MaxIter": *max_iter,
		"LogFunc": func(level int, message string) {
			log.Printf("solver[level=%v]:%v", level, message)
		},
	})
	problem := &optimization.Problem{
		ValueAndGradientFunc: func(p optimization.Point) (float64, optimization.Point) { return opt_func_grad(p, all_labels) },
	}
	m, v := solver.Solve(problem, optimization.VectorDensePoint(init_weights))
	log.Printf("solver min value %v #f=%v #g=%v at %v\n", v, problem.NumValue, problem.NumGradient, m.String())
	weights := optimization.VectorToDense(m)
	normalize(weights)
	return weights
}
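The normalize helper is not shown in this example. A plausible sketch, assuming it rescales the weights to unit L2 norm (purely illustrative; requires the math import):

// normalize rescales w in place to unit L2 norm. This body is an
// illustrative assumption; the example's actual normalize is not shown.
func normalize(w []float64) {
	s := 0.0
	for _, v := range w {
		s += v * v
	}
	if s == 0 {
		return
	}
	n := math.Sqrt(s)
	for i := range w {
		w[i] /= n
	}
}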
// opt_func evaluates the multidimensional Rosenbrock function
// sum_i [(1 - x_i)^2 + 100*(x_{i+1} - x_i^2)^2], with x = a.Factor * vs.
func opt_func(a optimization.Point) float64 {
	value := 0.0
	vs := optimization.VectorToDense(a)
	for i := 0; i < len(vs)-1; i++ {
		value += square(1.0-a.Factor*vs[i]) +
			100.0*square(a.Factor*vs[i+1]-square(a.Factor*vs[i]))
	}
	return value
}
Example #4
// opt_grad computes the gradient of the least-squares objective
// ||A*x - b||^2, namely 2 * A^T * (A*x - b), with x = v.Factor * v.
func opt_grad(A []float64, b []float64, v optimization.Point) optimization.Point {
	// Residual r = A*x - b.
	r := mv(A, optimization.VectorToDense(v))
	for i := 0; i < len(r); i++ {
		r[i] *= v.Factor
		r[i] -= b[i]
	}
	// Gradient g = 2 * A^T * r.
	gd := mtv(A, r)
	g := optimization.VectorDensePoint(gd).Scale(2.0)
	// log.Printf("calculated grad(%s) = %s\n", v.String(), g.String())
	return g
}
// opt_grad returns the analytic gradient of the Rosenbrock objective
// above, with x = a.Factor * vs.
func opt_grad(a optimization.Point) optimization.Point {
	vs := optimization.VectorToDense(a)
	gradient := make([]float64, len(vs))
	// First coordinate.
	gradient[0] = -400.0*a.Factor*vs[0]*(a.Factor*vs[1]-square(a.Factor*vs[0])) -
		2.0*(1.0-a.Factor*vs[0])
	// Interior coordinates; i retains its final value for the last entry below.
	var i int
	for i = 1; i < len(vs)-1; i++ {
		gradient[i] = -400.0*a.Factor*vs[i]*(a.Factor*vs[i+1]-square(a.Factor*vs[i])) -
			2.0*(1.0-101.0*a.Factor*vs[i]+100.0*square(a.Factor*vs[i-1]))
	}
	// Last coordinate (i == len(vs)-1 after the loop).
	gradient[i] = 200.0 * (a.Factor*vs[i] - square(a.Factor*vs[i-1]))
	return optimization.VectorDensePoint(gradient)
}
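When porting derivative formulas like the one above, a central-difference check catches sign and index slips quickly. A self-contained sketch on plain slices (rosenbrock restates opt_func locally, so nothing from the optimization package is needed):

package main

import "fmt"

// rosenbrock restates the objective from opt_func on plain slices.
func rosenbrock(x []float64) float64 {
	v := 0.0
	for i := 0; i < len(x)-1; i++ {
		d := x[i+1] - x[i]*x[i]
		v += (1-x[i])*(1-x[i]) + 100*d*d
	}
	return v
}

func main() {
	x := []float64{0.3, -0.7, 1.2}
	const h = 1e-6
	for i := range x {
		// Central difference approximates the i-th partial derivative;
		// compare against the analytic values from opt_grad.
		x[i] += h
		fp := rosenbrock(x)
		x[i] -= 2 * h
		fm := rosenbrock(x)
		x[i] += h
		fmt.Printf("df/dx[%d] ~= %g\n", i, (fp-fm)/(2*h))
	}
}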
Example #6
// opt_func evaluates the least-squares objective ||A*x - b||^2 for a
// row-major *row x *col matrix A, with x = v.Factor * v.
func opt_func(A []float64, b []float64, v optimization.Point) float64 {
	if len(A) != *row**col || len(b) != *row {
		log.Fatalf("invalid size row=%v col=%v len(A)=%v len(b)=%v", *row, *col, len(A), len(b))
	}
	}
	// Residual r = A*x - b; s accumulates ||r||^2.
	r := mv(A, optimization.VectorToDense(v))
	s := 0.0
	for i := 0; i < len(r); i++ {
		r[i] *= v.Factor
		r[i] -= b[i]
		s += square(r[i])
	}
	// log.Printf("caled func(%s) = %f\n", v.String(), s)
	return s
}
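The mv and mtv helpers are not shown in these examples. The size check above implies A is a row-major *row x *col matrix, so plausible sketches (illustrative, not the originals) are:

// mv returns A*x for a row-major matrix A with len(x) columns
// (illustrative sketch; the examples' own mv is not shown).
func mv(A, x []float64) []float64 {
	cols := len(x)
	rows := len(A) / cols
	r := make([]float64, rows)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			r[i] += A[i*cols+j] * x[j]
		}
	}
	return r
}

// mtv returns A^T*r under the same row-major layout assumption.
func mtv(A, r []float64) []float64 {
	rows := len(r)
	cols := len(A) / rows
	g := make([]float64, cols)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			g[j] += A[i*cols+j] * r[i]
		}
	}
	return g
}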
Example #7
// opt_func_grad returns the average loss over all_labels and its gradient
// with respect to the weights; each row of all_labels is [label, features...].
func opt_func_grad(p optimization.Point, all_labels [][]float64) (float64, optimization.Point) {
	weights := optimization.VectorToDense(p)
	grads := make([]float64, len(weights))
	f := 0.0
	if *lost == "exp" {
		// e(-y)
		for _, labels := range all_labels {
			if len(labels) != len(weights)+1 {
				log.Fatalf("# label(%v) != # weight(%v) + 1", len(labels), len(weights))
			}
			s := 0.0
			for j, w := range weights {
				s += w * labels[j+1]
			}
			y := s * labels[0]
			// Clamp the exponent so exp(-y) cannot overflow for very negative y.
			var emy float64
			if y >= -100 {
				emy = math.Exp(-y)
			} else {
				emy = math.Exp(100)
			}
			f += emy
			for j := range weights {
				grads[j] += -emy * labels[0] * labels[j+1]
			}
		}
	} else {
		for _, labels := range all_labels {
			if len(labels) != len(weights)+1 {
				log.Fatalf("# label(%v) != # weight(%v) + 1", len(labels), len(weights))
			}
			s := 0.0
			for j, w := range weights {
				s += w * labels[j+1]
			}
			y := s * labels[0]
			// Logistic loss log(1 + exp(-y)), computed in a numerically
			// stable form on each side of y = 0.
			if y > 0 {
				emy := math.Exp(-y)
				f += math.Log(1 + emy)
				for j := range weights {
					grads[j] += -labels[0] * emy / (1 + emy) * labels[j+1]
				}
			} else {
				// For y <= 0, use the identity log(1+exp(-y)) = -y + log(1+exp(y)).
				ey := math.Exp(y)
				f += -y + math.Log(1+ey)
				for j := range weights {
					grads[j] += -labels[0] / (ey + 1) * labels[j+1]
				}
			}
		}
	}
	for j := range grads {
		grads[j] /= float64(len(all_labels))
	}
	f /= float64(len(all_labels))
	// Optional L2 regularization: add lambda*||w||^2 to f and 2*lambda*w to the gradient.
	if *regular2 != 0.0 {
		s := 0.0
		for j, w := range weights {
			s += w * w
			grads[j] += 2 * *regular2 * w
		}
		f += *regular2 * s
	}
	return f, optimization.VectorDensePoint(grads)
}
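The two logistic branches above evaluate the same quantity via the identity log(1 + exp(-y)) = -y + log(1 + exp(y)); the split only avoids overflow. A standalone check:

package main

import (
	"fmt"
	"math"
)

// stableLogistic mirrors the branching in opt_func_grad: it evaluates
// log(1 + exp(-y)) without overflowing for large |y|.
func stableLogistic(y float64) float64 {
	if y > 0 {
		return math.Log(1 + math.Exp(-y))
	}
	return -y + math.Log(1+math.Exp(y))
}

func main() {
	for _, y := range []float64{-750, -5, 0, 5, 750} {
		fmt.Printf("y=%6.1f  loss=%g\n", y, stableLogistic(y))
	}
	// The naive form overflows for very negative y:
	fmt.Println("naive at y=-750:", math.Log(1+math.Exp(750))) // +Inf
}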