Example #1
func main() {
	g := GA.New()

	N := len(dataPoint) / 2

	// is this "metric" (the sum of squared residuals) a good fitness in this case?
	// could we borrow ideas from classical, more efficient fitting strategies to
	// choose a better fitness func?
	g.CalcFitnessFunc = func(m GA.Gene) float64 {
		c1 := m[0].(float64)
		lam1 := m[1].(float64)
		c2 := m[2].(float64)
		lam2 := m[3].(float64)
		s := 0.0
		for i := 0; i < N; i++ {
			d := fitFunc(dataPoint[2*i], c1, lam1, c2, lam2) - dataPoint[2*i+1]
			s += d * d
		}
		return s
	}

	// crossover takes the midpoint of each parameter (is that the best choice
	// here? an alternative is sketched after the function)
	g.CrossOverFunc = func(a GA.Gene, b GA.Gene) GA.Gene {
		var c1, c2, lam1, lam2 [2]float64
		c1[0] = a[0].(float64)
		lam1[0] = a[1].(float64)
		c2[0] = a[2].(float64)
		lam2[0] = a[3].(float64)
		c1[1] = b[0].(float64)
		lam1[1] = b[1].(float64)
		c2[1] = b[2].(float64)
		lam2[1] = b[3].(float64)
		return GA.Gene{
			(c1[0] + c1[1]) / 2.0,
			(lam1[0] + lam1[1]) / 2.0,
			(c2[0] + c2[1]) / 2.0,
			(lam2[0] + lam2[1]) / 2.0,
		}
	}
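
	// A possible alternative to the fixed midpoint (an assumption, not part of
	// the original example): a random convex combination per parameter, which
	// preserves more diversity in the population, e.g. for each parameter i:
	//
	//	t := rand.Float64()
	//	child[i] = t*a[i].(float64) + (1.0-t)*b[i].(float64)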

	// mutate a "gene"; this function should be tuned so that the fit converges
	// faster (one possibility is sketched after it)
	g.MutationFunc = func(a GA.Gene) GA.Gene {
		f := [4]float64{a[0].(float64), a[1].(float64), a[2].(float64), a[3].(float64)}
		mutationProb := []float64{0.5, 0.5, 0.5, 0.5}
		for i, v := range mutationProb {
			p := rand.Float64()
			if p < v {
				f[i] += (2.0*rand.Float64() - 1.0) / 10.0
			}
		}
		return GA.Gene{f[0], f[1], f[2], f[3]}
	}
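
	// One way to speed up convergence, as hinted above (an assumption, not part
	// of the original example): shrink the mutation step as the generations go
	// by, so that late mutations fine-tune instead of jumping around, e.g.
	//
	//	step := 0.1 / (1.0 + float64(generation)/100.0) // generation tracked by the caller
	//	f[i] += (2.0*rand.Float64() - 1.0) * step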

	// generate initial genes; w should be chosen so that
	// expected values are in (-w, w)
	g.ProduceGeneFunc = func() GA.Gene {
		w := 20.0 // the larger the value, the less we assume about the correct values
		c1 := 2.0*rand.Float64() - 1.0
		lam1 := 2.0*rand.Float64() - 1.0
		c2 := 2.0*rand.Float64() - 1.0
		lam2 := 2.0*rand.Float64() - 1.0
		return GA.Gene{
			c1 * w, lam1 * w, c2 * w, lam2 * w,
		}
	}

	g.Populate(100)
	g.Evolve(1500)
	r := g.Winner()

	// result obtained:
	// [2.9054365850536343 1.4071391340733141 2.9903330149276592 10.678580361408077]
	// compare with the reference values 2.8891, 1.4003, 3.0068, 10.5869, which
	// classical methods reach after far fewer iterations (6, 30, ...) :-)
	// conclusion: a GA is not good for fitting when compared to classical methods ...

	fmt.Fprintf(os.Stderr, "data = %v\n", r)

	fmt.Println("x real fitted")
	for i := 0; i < N; i++ {
		fmt.Printf("%.5f %.5f %.5f\n",
			dataPoint[2*i],
			dataPoint[2*i+1],
			fitFunc(dataPoint[2*i], r[0].(float64), r[1].(float64), r[2].(float64), r[3].(float64)))
	}
}
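
Example #1 relies on a dataPoint slice and a fitFunc defined elsewhere in the file. A minimal sketch of what they might look like, assuming a flat slice of (x, y) pairs and a two-exponential model; both the model and the sample values are assumptions, not the original definitions:

// Hypothetical supporting declarations for Example #1 (assumed, not taken
// from the original source).
// dataPoint holds (x, y) pairs flattened into a single slice.
var dataPoint = []float64{
	0.0, 5.9,
	0.1, 8.1,
	0.2, 11.3,
	// ...
}

// One plausible four-parameter model: a sum of two exponentials.
func fitFunc(x, c1, lam1, c2, lam2 float64) float64 {
	return c1*math.Exp(lam1*x) + c2*math.Exp(lam2*x)
}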
Example #2
func main() {
	flag.Parse()

	g := GA.New()

	// the crossover takes the midpoint between a_i and b_i
	g.CrossOverFunc = func(a GA.Gene, b GA.Gene) GA.Gene {
		v00 := a[0].(float64)
		v01 := a[1].(float64)
		v02 := a[2].(float64)
		v10 := b[0].(float64)
		v11 := b[1].(float64)
		v12 := b[2].(float64)
		return GA.Gene{(v00 + v10) / 2.0, (v01 + v11) / 2.0, (v02 + v12) / 2.0}
	}

	// mutate a "gene"
	g.MutationFunc = func(a GA.Gene) GA.Gene {
		f := [3]float64{a[0].(float64), a[1].(float64), a[2].(float64)}
		for i, v := range mutationProb {
			p := rand.Float64()
			if p < v {
				f[i%len(f)] += (2.0*rand.Float64() - 1.0) / 10.0
			}
		}
		return GA.Gene{f[0], f[1], f[2]}
	}

	// the generic model function we fit (here a quadratic)
	theFunc := func(x, a, b, c float64) float64 {
		return a*x*x + b*x + c
	}

	// this generated map stands in for the data; we could (should?) replace it
	// with real discrete data points; because of the random perturbation, let's
	// compute the points only once and cache them
	fakeData := make(map[float64]float64)

	for x := *minVal; x < *maxVal; x += *step {
		fakeData[x] = theFunc(x, gfuncA, gfuncB, gfuncC) + *perturbation*(2.0*rand.Float64()-1.0)
	}
	actualFunc := func(x float64) float64 {
		if val, ok := fakeData[x]; ok {
			return val
		}
		if x > *maxVal || x < *minVal {
			return 0.0 // there are no data outside the sampled interval
		}
		// otherwise x falls between two sampled values: we should find them and
		// interpolate (TODO; a possible shape is sketched below); for now this
		// won't happen, so just compute a new value in case it is ever needed
		return theFunc(x, gfuncA, gfuncB, gfuncC) + *perturbation*(2.0*rand.Float64()-1.0)
	}
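
	// A possible shape for the interpolation left as TODO above (an assumption,
	// not part of the original): keep the sampled xs in a sorted slice built
	// alongside fakeData and interpolate linearly between the two neighbours:
	//
	//	i := sort.SearchFloat64s(xs, x)
	//	x0, x1 := xs[i-1], xs[i]
	//	y0, y1 := fakeData[x0], fakeData[x1]
	//	return y0 + (y1-y0)*(x-x0)/(x1-x0)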

	// fitness: the L2 norm of the residuals (sqrt of the summed squared
	// distances) divided by the number of sampled points
	g.CalcFitnessFunc = func(m GA.Gene) float64 {
		c2 := m[0].(float64)
		c1 := m[1].(float64)
		c0 := m[2].(float64)
		s := 0.0
		for x := *minVal; x < *maxVal; x += *step {
			d := actualFunc(x) - theFunc(x, c2, c1, c0)
			s += d * d
		}
		return math.Sqrt(s) / ((*maxVal - *minVal) / *step)
	}

	// generate initial genes; w should be chosen so that
	// expected values are in (-w, w)
	g.ProduceGeneFunc = func() GA.Gene {
		w := *absRange // the larger the value, the less we assume about the correct values
		a := 2.0*rand.Float64() - 1.0
		b := 2.0*rand.Float64() - 1.0
		c := 2.0*rand.Float64() - 1.0
		return GA.Gene{
			a * w, b * w, c * w,
		}
	}

	g.Populate(*initialPop)
	g.Evolve(*iterations)
	res := g.Winner()

	if *printWinner {
		fmt.Printf("data = %v\nfitness = %v\n", res, g.CalcFitnessFunc(res))
	}

	if *dumpValues {
		fmt.Println("x real fitted")
		for x := *minVal; x < *maxVal; x += *dumpStep {
			fmt.Printf("%.5f %.5f %.5f\n", x, actualFunc(x), theFunc(x, res[0].(float64), res[1].(float64), res[2].(float64)))
		}
	}
}
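
Example #2 reads its configuration from command-line flags and a few package-level globals that are not shown in the snippet. A sketch of the assumed declarations follows; the names come from the snippet itself, while the defaults and help strings are illustrative guesses:

var (
	minVal       = flag.Float64("min", -5.0, "lower bound of the sampled interval")
	maxVal       = flag.Float64("max", 5.0, "upper bound of the sampled interval")
	step         = flag.Float64("step", 0.1, "sampling step used to build the fake data")
	perturbation = flag.Float64("perturbation", 0.5, "amplitude of the noise added to each sample")
	absRange     = flag.Float64("absrange", 20.0, "initial genes are drawn from (-absrange, absrange)")
	initialPop   = flag.Int("pop", 100, "initial population size")
	iterations   = flag.Int("iter", 1500, "number of generations to evolve")
	printWinner  = flag.Bool("winner", true, "print the winning gene and its fitness")
	dumpValues   = flag.Bool("dump", false, "dump x, real and fitted values")
	dumpStep     = flag.Float64("dumpstep", 0.1, "sampling step used when dumping values")
)

// Per-coefficient mutation probabilities and the coefficients used to
// generate the fake data (values here are illustrative).
var (
	mutationProb           = []float64{0.5, 0.5, 0.5}
	gfuncA, gfuncB, gfuncC = 2.0, -3.0, 1.0
)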