Code Example #1
import (
	"fmt"
	"log"

	"github.com/gonum/optimize"
	"github.com/gonum/optimize/functions"
)

func ExampleLocal() {
	p := optimize.Problem{
		Func: functions.ExtendedRosenbrock{}.Func,
		Grad: functions.ExtendedRosenbrock{}.Grad,
	}

	x := []float64{1.3, 0.7, 0.8, 1.9, 1.2}
	settings := optimize.DefaultSettings()
	settings.Recorder = nil
	settings.GradientThreshold = 1e-12
	settings.FunctionConverge = nil

	result, err := optimize.Local(p, x, settings, &optimize.BFGS{})
	if err != nil {
		log.Fatal(err)
	}
	if err = result.Status.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("result.Status: %v\n", result.Status)
	fmt.Printf("result.X: %v\n", result.X)
	fmt.Printf("result.F: %v\n", result.F)
	fmt.Printf("result.Stats.FuncEvaluations: %d\n", result.Stats.FuncEvaluations)
	// Output:
	// result.Status: GradientThreshold
	// result.X: [1 1 1 1 1]
	// result.F: 0
	// result.Stats.FuncEvaluations: 35
}
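
BFGS recovers the known global minimum of the extended Rosenbrock function, x = (1, ..., 1) with function value 0, and the GradientThreshold status reports that the run stopped because the gradient norm fell below the 1e-12 threshold set above.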
Code Example #2
File: gp.go Project: btracey/gaussproc
// Train sets the parameters of the Gaussian process. If trainNoise == true,
// the noise parameter is adjusted; otherwise it is not.
// TODO(btracey): Need to implement barrier method for parameters. Steps get crazy.
func (g *GP) Train(trainNoise bool) error {
	// TODO(btracey): Implement a memory struct that can be passed around with
	// all of this data.

	initHyper := g.kernel.Hyper(nil)
	nKerHyper := len(initHyper)
	if trainNoise {
		initHyper = append(initHyper, math.Log(g.noise))
	}

	mem := newMargLikeMemory(len(initHyper), len(g.outputs))

	// Objective wrapper around the marginal likelihood; the prints are
	// debug output left in by the author.
	f := func(x []float64) float64 {
		fmt.Println("x =", x)
		obj := g.marginalLikelihood(x, trainNoise, mem)
		fmt.Println("obj =", obj)
		return obj
	}
	// Gradient wrapper; fills grad in place.
	df := func(x, grad []float64) {
		g.marginalLikelihoodDerivative(x, grad, trainNoise, mem)
		fmt.Println("x = ", x)
		fmt.Println("grad = ", grad)
	}

	// Disabled finite-difference gradient check left over from debugging:
	//	grad =  [0.4500442759224154 -3.074041876494095 0.42568788880060204]
	/*
		x := []float64{0.7287793210009457, -0.9371471942974932, -14.017213937483529}
		fofx := f(x)
		fmt.Println("fofx", fofx)

		set := fd.DefaultSettings()
		set.Method.Step = 1e-4
		fdGrad := fd.Gradient(nil, f, x, nil)
		fmt.Println("fd grad = ", fdGrad)
		grad := make([]float64, len(fdGrad))
		df(x, grad)
		fmt.Println("real grad = ", grad)
		os.Exit(1)
	*/

	problem := optimize.Problem{
		Func: f,
		Grad: df,
	}
	settings := optimize.DefaultSettings()
	settings.GradientThreshold = 1e-4
	result, err := optimize.Local(problem, initHyper, settings, nil)
	if err != nil {
		return err
	}
	// Set the noise parameter only if it was trained; otherwise the last
	// element of result.X is a kernel hyperparameter, not the noise.
	if trainNoise {
		g.noise = math.Exp(result.X[len(result.X)-1])
	}
	g.kernel.SetHyper(result.X[:nKerHyper])
	g.setKernelMat(g.k, g.noise)
	ok := g.cholK.Factorize(g.k)
	if !ok {
		return errors.New("gp: final kernel matrix is not positive definite")
	}
	v := mat64.NewVector(len(g.outputs), g.outputs)
	g.sigInvY.SolveCholeskyVec(g.cholK, v)
	return nil
}
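
The disabled block in Train compares the analytic gradient against a finite-difference estimate. Below is a minimal standalone sketch of that check; checkGradient is a hypothetical helper, assuming the same f and df signatures as in Train and the fd package (github.com/gonum/diff/fd) used in the disabled block:

// checkGradient is a hypothetical helper that compares an analytic
// gradient df against fd.Gradient's finite-difference estimate of f.
func checkGradient(f func([]float64) float64, df func(x, grad []float64), x []float64) {
	fdGrad := fd.Gradient(nil, f, x, nil) // finite-difference estimate
	grad := make([]float64, len(x))
	df(x, grad) // analytic gradient
	for i := range grad {
		fmt.Printf("dim %d: analytic %g, finite difference %g\n", i, grad[i], fdGrad[i])
	}
}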
Code Example #3
File: testopt.go Project: jacobxk/talks
import (
	"fmt"
	"log"

	"github.com/gonum/optimize"
)

func main() {
	settings := optimize.DefaultSettings()
	settings.Recorder = nil
	settings.GradientThreshold = 1e-6
	// Rastrigin is defined elsewhere in this file and satisfies the
	// optimize.Function interface used by this older gonum API.
	f := Rastrigin{}
	// An arbitrary starting point in five dimensions.
	x := []float64{9.50160783681757, 0.3523567525151421, -8.042810467718468, -9.320723586564494, 0.025196429450302205}
	result, err := optimize.Local(f, x, settings, &optimize.LBFGS{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.F)
}
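
Rastrigin is a standard, highly multimodal test function whose global minimum is 0 at the origin, so a single gradient-based run from an arbitrary starting point like this one typically stops at a nearby local minimum. The next example attacks this with random restarts.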
Code Example #4
File: restart_gradientopt.go Project: jacobxk/talks
import (
	"math"

	"github.com/gonum/optimize"
)

// worker runs a local optimization from each starting point received on
// locs and sends the minimum value found on minima. A failed run sends
// +Inf so it can never be mistaken for the best minimum.
func worker(f optimize.Function, locs chan []float64, minima chan float64) {
	for x := range locs {
		settings := optimize.DefaultSettings()
		settings.Recorder = nil
		settings.GradientThreshold = 1e-4
		result, err := optimize.Local(f, x, settings, &optimize.LBFGS{})
		if err != nil {
			minima <- math.Inf(1)
		} else {
			minima <- result.F
		}
	}
}
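
The worker above is only half of the restart pattern. Below is a minimal driver sketch, hypothetical rather than taken from restart_gradientopt.go, assuming the worker and the Rastrigin type from the examples above plus imports of fmt, math, and math/rand. It fans random starting points out to a fixed pool of workers and keeps the best minimum found:

func main() {
	const nWorkers, nRestarts = 4, 100

	locs := make(chan []float64)
	minima := make(chan float64)

	// Start a fixed pool of workers, all minimizing the same function.
	f := Rastrigin{}
	for i := 0; i < nWorkers; i++ {
		go worker(f, locs, minima)
	}

	// Feed random starting points in [-10, 10)^5, then close the channel
	// so the workers' range loops terminate.
	go func() {
		for i := 0; i < nRestarts; i++ {
			x := make([]float64, 5)
			for j := range x {
				x[j] = 20*rand.Float64() - 10
			}
			locs <- x
		}
		close(locs)
	}()

	// Collect one result per restart and keep the smallest value seen.
	best := math.Inf(1)
	for i := 0; i < nRestarts; i++ {
		best = math.Min(best, <-minima)
	}
	fmt.Println("best minimum found:", best)
}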