Example #1
func (solver *LbfgsbSolver) Solve(problem *optimization.Problem, x optimization.Point) (optimization.Point, float64) {
	optimizer := new(lbfgsb.Lbfgsb).SetFTolerance(1e-10).SetGTolerance(1e-10)
	point := optimization.VectorToDense(x)
	// Forward the solver's per-iteration output through the Log hook,
	// repeating the column header every 10 iterations.
	optimizer.SetLogger(func(info *lbfgsb.OptimizationIterationInformation) {
		if (info.Iteration-1)%10 == 0 {
			solver.Log(1000, info.Header())
		}
		solver.Log(1000, info.String())
	})
	objective := lbfgsb.GeneralObjectiveFunction{
		Function: func(p []float64) float64 {
			y := problem.Value(optimization.VectorDensePoint(p))
			return y
		},
		Gradient: func(p []float64) []float64 {
			g := problem.Gradient(optimization.VectorDensePoint(p))
			return optimization.VectorToDense(g)
		},
	}
	xfg, status := optimizer.Minimize(objective, point)
	stats := optimizer.OptimizationStatistics()
	log.Printf("stats: iters: %v; F evals: %v; G evals: %v", stats.Iterations, stats.FunctionEvaluations, stats.GradientEvaluations)
	log.Printf("status: %v", status)
	x = optimization.VectorDensePoint(xfg.X)
	// Sanity check: the minimum reported by the optimizer should match a
	// fresh evaluation of the problem at the returned point.
	if xfg.F != problem.Value(x) {
		log.Printf("error of value, %v != %v", xfg.F, problem.Value(x))
	}
	return x, xfg.F
}
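The LbfgsbSolver type itself is never shown in these examples. Below is a minimal sketch that would satisfy the Solve method above and the optimization.Solver call sites in the other examples; the field name and the Init/Log behavior are assumptions (the "LogFunc" key mirrors Example #3), not the original declaration.

type LbfgsbSolver struct {
	LogFunc func(level int, message string)
}

// Init accepts the same argument map the optimization solvers take.
func (solver *LbfgsbSolver) Init(args map[string]interface{}) {
	if f, ok := args["LogFunc"].(func(level int, message string)); ok {
		solver.LogFunc = f
	}
}

// Log forwards a message to the configured LogFunc, if any.
func (solver *LbfgsbSolver) Log(level int, message string) {
	if solver.LogFunc != nil {
		solver.LogFunc(level, message)
	}
}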
Example #2
func main() {
	flag.Parse()
	A := make([]float64, (*row)*(*col))
	x := make([]float64, *col)
	for i := 0; i < len(A); i++ {
		A[i] = rand.Float64() * 10
	}
	for i := 0; i < len(x); i++ {
		x[i] = rand.Float64() * 10
	}
	b := mv(A, x)
	log.Printf("perfect solution at %v", x)
	log.Printf("perfect value %v", opt_func(A, b, optimization.VectorDensePoint(x)))
	log.Printf("perfect gradient %v", opt_grad(A, b, optimization.VectorDensePoint(x)).String())

	pd := make([]float64, *col)
	for i := 0; i < *col; i++ {
		pd[i] = rand.Float64()
	}
	p := optimization.VectorDensePoint(pd)
	log.Printf("init solution at %v", pd)
	test_solver(A, b, p, "lm_bfgs", &optimization.LmBFGSSolver{})
	test_solver(A, b, p, "gradient", &optimization.GradientDescentSolver{})
	test_solver(A, b, p, "conjugate", &optimization.ConjugateGradientSolver{})
	test_solver(A, b, p, "lbfgsb", &LbfgsbSolver{})
}
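main relies on helpers the examples never define. Below is a minimal sketch of mv (matrix-vector product) and mtv (transpose product), assuming A is stored row-major with *row rows and *col columns; these are illustrative reconstructions, not the original code.

// mv computes y = A*x for a row-major A with len(x) columns.
func mv(A, x []float64) []float64 {
	rows := len(A) / len(x)
	y := make([]float64, rows)
	for i := 0; i < rows; i++ {
		for j := 0; j < len(x); j++ {
			y[i] += A[i*len(x)+j] * x[j]
		}
	}
	return y
}

// mtv computes y = A^T*r for a row-major A with len(r) rows.
func mtv(A, r []float64) []float64 {
	cols := len(A) / len(r)
	y := make([]float64, cols)
	for i := 0; i < len(r); i++ {
		for j := 0; j < cols; j++ {
			y[j] += A[i*cols+j] * r[i]
		}
	}
	return y
}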
Example #3
func OptimizeWeights(init_weights []float64, all_labels [][]float64) []float64 {
	log.Printf("optimize ...\n")
	var solver optimization.Solver
	if *solver_name == "gradient" {
		solver = &optimization.GradientDescentSolver{}
	} else if *solver_name == "conjugate" {
		solver = &optimization.ConjugateGradientSolver{}
	}
	if solver == nil {
		// default to LmBFGS when no recognized solver name is given
		solver = &optimization.LmBFGSSolver{}
	}
	solver.Init(map[string]interface{}{
		"MaxIter": *max_iter,
		"LogFunc": func(level int, message string) {
			log.Printf("solver[level=%v]:%v", level, message)
		},
	})
	problem := &optimization.Problem{
		ValueAndGradientFunc: func(p optimization.Point) (float64, optimization.Point) { return opt_func_grad(p, all_labels) },
	}
	m, v := solver.Solve(problem, optimization.VectorDensePoint(init_weights))
	log.Printf("solver min value %v #f=%v #g=%v at %v\n", v, problem.NumValue, problem.NumGradient, m.String())
	weights := optimization.VectorToDense(m)
	normalize(weights)
	return weights
}
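normalize is not shown in the example; a plausible sketch that rescales the weight vector to unit L2 norm (an assumption about its behavior, using math.Sqrt):

func normalize(weights []float64) {
	s := 0.0
	for _, w := range weights {
		s += w * w
	}
	if s == 0 {
		return
	}
	s = math.Sqrt(s)
	for i := range weights {
		weights[i] /= s
	}
}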
Example #4
func main() {
	x := []float64{11.0, 10.0}
	log.Printf("init solution at %v\n", x)
	p := optimization.VectorDensePoint(x)
	log.Printf("perfect solution at %v\n", []float64{1.0, 1.0})
	test_solver(p, "lm_bfgs", &optimization.LmBFGSSolver{})
	test_solver(p, "gradient", &optimization.GradientDescentSolver{})
	test_solver(p, "conjugate", &optimization.ConjugateGradientSolver{})
	test_solver(p, "lbfgsb", &LbfgsbSolver{})
}
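test_solver here takes only a starting point, a name, and a solver, and the perfect solution (1, 1) matches the Rosenbrock function whose gradient appears in Example #6. A plausible reconstruction wiring that opt_func/opt_grad pair into an optimization.Problem via the ValueAndGradientFunc field shown in Example #3; the body is an assumption, not the original helper.

func test_solver(p optimization.Point, name string, solver optimization.Solver) {
	solver.Init(map[string]interface{}{
		"MaxIter": 100,
		"LogFunc": func(level int, message string) {
			log.Printf("%s[level=%v]: %v", name, level, message)
		},
	})
	problem := &optimization.Problem{
		ValueAndGradientFunc: func(x optimization.Point) (float64, optimization.Point) {
			return opt_func(x), opt_grad(x)
		},
	}
	m, v := solver.Solve(problem, p)
	log.Printf("%s: min value %v at %v", name, v, m.String())
}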
Example #5
func opt_grad(A []float64, b []float64, v optimization.Point) optimization.Point {
	// residual r = A*x - b, where the effective solution is x = v.Factor * v
	r := mv(A, optimization.VectorToDense(v))
	for i := 0; i < len(r); i++ {
		r[i] *= v.Factor
		r[i] -= b[i]
	}
	// gradient of ||A*x - b||^2 is 2 * A^T * r
	gd := mtv(A, r)
	g := optimization.VectorDensePoint(gd).Scale(2.0)
	// log.Printf("calced grad(%s) = %s\n", v.String(), g.String())
	return g
}
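This gradient implies a matching objective: the squared residual ||A*x - b||^2. A minimal sketch of the corresponding opt_func (the real definition is not shown in these examples):

func opt_func(A []float64, b []float64, v optimization.Point) float64 {
	// squared residual ||A*x - b||^2 with x = v.Factor * v
	r := mv(A, optimization.VectorToDense(v))
	s := 0.0
	for i := 0; i < len(r); i++ {
		d := v.Factor*r[i] - b[i]
		s += d * d
	}
	return s
}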
Example #6
func opt_grad(a optimization.Point) optimization.Point {
	// Analytic gradient of the generalized Rosenbrock function
	// f(x) = sum_i 100*(x[i+1] - x[i]^2)^2 + (1 - x[i])^2,
	// evaluated at the effective coordinates a.Factor*vs[i].
	vs := optimization.VectorToDense(a)
	gradient := make([]float64, len(vs))
	gradient[0] = -400.0*a.Factor*vs[0]*(a.Factor*vs[1]-square(a.Factor*vs[0])) -
		2.0*(1.0-a.Factor*vs[0])
	var i int
	for i = 1; i < len(vs)-1; i++ {
		gradient[i] = -400.0*a.Factor*vs[i]*(a.Factor*vs[i+1]-square(a.Factor*vs[i])) -
			2.0*(1.0-101.0*a.Factor*vs[i]+100.0*square(a.Factor*vs[i-1]))
	}
	gradient[i] = 200.0 * (a.Factor*vs[i] - square(a.Factor*vs[i-1]))
	return optimization.VectorDensePoint(gradient)
}
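The gradient above corresponds to the generalized Rosenbrock function, whose minimum is at (1, ..., 1), matching the "perfect solution" logged in Example #4. A minimal sketch of the matching opt_func and the square helper (assumed, not shown in the examples):

func square(x float64) float64 { return x * x }

func opt_func(a optimization.Point) float64 {
	vs := optimization.VectorToDense(a)
	f := 0.0
	for i := 0; i+1 < len(vs); i++ {
		xi := a.Factor * vs[i]
		xn := a.Factor * vs[i+1]
		f += 100.0*square(xn-square(xi)) + square(1.0-xi)
	}
	return f
}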
Example #7
func opt_func_grad(p optimization.Point, all_labels [][]float64) (float64, optimization.Point) {
	weights := optimization.VectorToDense(p)
	grads := make([]float64, len(weights))
	f := 0.0
	if *lost == "exp" {
		// exponential loss: exp(-y), where y = label * score
		for _, labels := range all_labels {
			if len(labels) != len(weights)+1 {
				log.Fatalf("# label(%v) != # weight(%v) + 1", len(labels), len(weights))
			}
			s := 0.0
			for j, w := range weights {
				s += w * labels[j+1]
			}
			y := s * labels[0]
			var emy float64
			if y >= -100 {
				emy = math.Exp(-y)
			} else {
				// clamp the exponent to avoid overflow for very negative margins
				emy = math.Exp(100)
			}
			f += emy
			for j := range weights {
				grads[j] += -emy * labels[0] * labels[j+1]
			}
		}
	} else {
		for _, labels := range all_labels {
			if len(labels) != len(weights)+1 {
				log.Fatalf("# label(%v) != # weight(%v) + 1", len(labels), len(weights))
			}
			s := 0.0
			for j, w := range weights {
				s += w * labels[j+1]
			}
			y := s * labels[0]
			// logistic loss: log(1 + exp(-y)), evaluated in a numerically stable form
			if y > 0 {
				emy := math.Exp(-y)
				f += math.Log(1 + emy)
				for j := range weights {
					grads[j] += -labels[0] * emy / (1 + emy) * labels[j+1]
				}
			} else {
				ey := math.Exp(y)
				f += -y + math.Log(1+ey)
				for j := range weights {
					grads[j] += -labels[0] / (ey + 1) * labels[j+1]
				}
			}
		}
	}
	for j := range grads {
		grads[j] /= float64(len(all_labels))
	}
	f /= float64(len(all_labels))
	if *regular2 != 0.0 {
		// L2 regularization: adds regular2*||w||^2 to f and 2*regular2*w to the gradient
		s := 0.0
		for j, w := range weights {
			s += w * w
			grads[j] += 2 * *regular2 * w
		}
		f += *regular2 * s
	}
	return f, optimization.VectorDensePoint(grads)
}
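The y > 0 / y <= 0 split above is the standard numerically stable evaluation of log(1 + exp(-y)): for non-positive y, log(1 + e^-y) = -y + log(1 + e^y), so the exponent passed to Exp stays non-positive. A compact equivalent using math.Log1p (a sketch, not part of the original example):

// logisticLoss computes log(1 + exp(-y)) without overflow.
func logisticLoss(y float64) float64 {
	if y > 0 {
		return math.Log1p(math.Exp(-y))
	}
	return -y + math.Log1p(math.Exp(y))
}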