Example #1
// QuasiNewtonMin carries out multi-dimensional minimisation using the Quasi-Newton (DFP) method
func QuasiNewtonMin(x0 matrix.Matrix, N int, f func(matrix.Matrix, int) float64) (x1 matrix.Matrix) {
	// G approximates the inverse Hessian; start from the identity matrix
	G, _ := matrix.Identity(x0.Size().Y)

	// Central difference scheme to estimate the gradient:
	//   df/dx_i ≈ (f(x + h*e_i) - f(x - h*e_i)) / (2h), with h = 1e-8
	CDS := func(in matrix.Matrix) matrix.Matrix {
		res, _ := matrix.New(in.Size())
		var p, m float64
		res.Foreach(func(r, c int) {
			in[r][c] += 1e-8
			p = f(in, N)
			in[r][c] -= 2e-8
			m = f(in, N)
			in[r][c] += 1e-8
			res[r][c] = (p - m) / 2e-8
		})
		return res
	}

	a := 1.0
	r0 := CDS(x0)
	x1, _ = matrix.New(x0)
	var cond int
	var dx, ddx, tmp, tmp1, tmp2, r1, dr matrix.Matrix

	// Main minimisation loop (capped at 1000 iterations)
	for i := 0; i < 1000; i++ {

		// Determine the step size a using the Wolfe conditions
		a, cond = 1.0, 0
		for {
			// tmp = p_k = -G * grad(x_k), the search direction
			tmp, _ = G.Multiply(r0)
			tmp, _ = tmp.Multiply(-1)

			tmp1, _ = tmp.Multiply(a)
			tmp1, _ = x0.Add(tmp1)

			// Test the sufficient-decrease condition only while it has not yet been met
			if cond == 0 {
				tmp2, _ = tmp.Transpose().Multiply(r0)

				// Sufficient decrease (Armijo) condition, c1 = 1e-4
				if f(tmp1, N) <= f(x0, N)+a*1e-4*tmp2[0][0] {
					cond++
				}
			}

			// If first condition met, try second condition
			if cond > 0 {
				tmp1, _ = tmp.Transpose().Multiply(CDS(tmp1))
				tmp2, _ = tmp.Transpose().Multiply(r0)

				// Curvature condition, c2 = 0.9
				if math.Abs(tmp1[0][0]) >= 0.9*math.Abs(tmp2[0][0]) {
					break
				}
			}

			// Backtracking line search: halve the step size and try again
			a *= 0.5
			cond = 0 // Reset the condition counter
		}

		// Find next x
		tmp, _ = G.Multiply(a)
		tmp, _ = tmp.Multiply(r0)
		x1, _ = x0.Subtract(tmp)
		// fmt.Println(i, "x1:", x1)

		// Calculate change in x and grad
		r1 = CDS(x1)
		// fmt.Println(i, "r1:", r1)

		// Convergence condition: stop once the gradient no longer changes
		// (this element-wise check assumes a two-parameter problem)
		if r0[0][0] == r1[0][0] && r0[1][0] == r1[1][0] {
			break
		}

		dx, _ = x1.Subtract(x0)
		dr, _ = r1.Subtract(r0)
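
		// The two terms computed below form the DFP update of the
		// inverse-Hessian approximation:
		//   G_new = G + (dx dx^T)/(dx^T dr) - (G dr dr^T G)/(dr^T G dr)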

		// First term in increment of G
		tmp, _ = dr.Transpose().Multiply(dx)
		ddx, _ = dx.OuterProduct(dx)
		tmp1, _ = ddx.Multiply(1 / tmp[0][0])
		// fmt.Println(i, "G:", G)

		// Second term in increment of G
		tmp, _ = dr.Transpose().Multiply(G)
		tmp, _ = tmp.Multiply(dr)
		tmp2, _ = G.Multiply(dr)
		tmp2, _ = tmp2.OuterProduct(dr)
		tmp2, _ = tmp2.OuterProduct(G)
		tmp2, _ = tmp2.Multiply(1 / tmp[0][0])

		// Update G for next x
		G, _ = G.Add(tmp1)
		G, _ = G.Subtract(tmp2)

		// Carry x and the gradient forward for the next iteration's dx and dr
		x0 = x1
		r0 = r1
	}
	return
}
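
A minimal usage sketch (not part of the original listing). It relies only on the matrix API already used above (matrix.New with the "a;b" string form and element access via m[r][c]); the toy quadratic objective and the helper name exampleQuadraticMin are illustrative assumptions.

// exampleQuadraticMin shows how QuasiNewtonMin might be called on a toy
// objective f(x, y) = (x-1)^2 + 2(y+0.5)^2. The N argument is ignored here;
// it only matters for the NLL objectives used elsewhere in this project.
func exampleQuadraticMin() {
	f := func(m matrix.Matrix, N int) float64 {
		x, y := m[0][0], m[1][0]
		return (x-1)*(x-1) + 2*(y+0.5)*(y+0.5)
	}
	start, _ := matrix.New("0.0;0.0")
	min := QuasiNewtonMin(start, 0, f)
	// The minimiser should converge towards (1, -0.5)
	fmt.Println("minimum found at:", min[0][0], min[1][0])
}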
Example #2
func main() {
	/*
		Plot histograms of sigma with varying bin widths
	*/
	createHistogram(data[0], 50)
	createHistogram(data[0], 100)
	createHistogram(data[0], 500)
	createHistogram(data[0], 1000)
	createHistogram(data[0], 5000)
	fmt.Println("Plotted histogram of source data")

	/*
		Plot fit function graphs
	*/
	var x, y []float64
	N := len(data[0])
	tau := 0.1
	sigma := 0.1

	// vary sigma
	xs := make([][]float64, 5)
	ys := make([][]float64, 5)
	yls := make([]string, 5)
	for i := 0; i < 5; i++ {
		xs[i], ys[i] = calcFitFunc(0.0, 0.01, tau, sigma)
		yls[i] = fmt.Sprintf("sigma = %.1f", sigma)
		sigma += 0.2
	}
	createLine(xs, ys, yls, fmt.Sprintf("Fit Function (tau = %.1f)", tau))

	// vary tau
	tau, sigma = 0.1, 0.1
	for i := 0; i < 5; i++ {
		xs[i], ys[i] = calcFitFunc(0.0, 0.01, tau, sigma)
		yls[i] = fmt.Sprintf("tau = %.1f", tau)
		tau += 0.2
	}
	createLine(xs, ys, yls, fmt.Sprintf("Fit Function (sigma = %.1f)", sigma))
	fmt.Println("Plotted fit function with varying variables")

	/*
		Plot NLL vs tau to get an idea of what we are minimising
	*/
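	// Scan tau from 0.1 to 1.09 in steps of 0.01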
	tau = 0.1
	x = make([]float64, 0, 100)
	y = make([]float64, 0, 100)
	for i := 0; i < 100; i++ {
		x = append(x, tau)
		y = append(y, NLLFunc(tau, N))
		tau += 0.01
	}
	createScatter(x, y, "NLL vs tau")
	fmt.Println("Plotted NLL against tau")

	/*
		Parabolic Minimisation
	*/
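	// parabolicMin presumably fits a parabola through trial points and steps to
	// its vertex iteratively; the 0.5 below is the initial guess for tau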
	tau = parabolicMin(0.5, N, NLLFunc)
	fmt.Println("Minimum of NLL (no Background) found using parabolic method: tau =", tau, "NLL =", NLLFunc(tau, N))

	/*
		Plot std dev of 1-d fit result
	*/
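	// tau_p and tau_m presumably bracket the fitted minimum (e.g. the points
	// where the NLL has risen by 0.5), so tau_p - tau_m below serves as the
	// standard-deviation estimate for the fitted tau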
	var min, tau_p, tau_m float64
	x = make([]float64, 0, 100)
	y = make([]float64, 0, 100)
	mins := make([]float64, 0, 100)
	for i := 0; i < 100; i++ {
		N = len(data[0]) - i*50
		tau_p, min, tau_m = tauFromNLL(0.5, N)
		x = append(x, float64(N))
		mins = append(mins, min)
		y = append(y, tau_p-tau_m)
	}
	createScatter(x, y, "Standard Deviation vs Sample Size")
	fmt.Println("Plotted graph of standard dev vs sample size")
	saveFile("stddev v samp size.txt", [][]float64{x, y})

	createScatter(x, mins, "Determined Minimum vs Sample Size")
	fmt.Println("Plotted graph of minimum found through parabolic minimisation vs sample size")
	saveFile("parabolic min v samp size.txt", [][]float64{x, mins})
	N = len(data[0])

	/*
		Find minimum tau and a for NLLFuncWithBG
	*/
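	// Initial guess as a column vector "tau;a": tau = 0.4, a = 0.1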
	x0, _ := matrix.New("0.4;0.1")
	x0 = QuasiNewtonMin(x0, N, NLLFuncWithBG)
	fmt.Println("Minimum of NLL (w Background) found using Quasi-Newton DFP method: tau =",
		x0[0][0], "a =", x0[1][0], "NLL =", NLLFuncWithBG(x0, N))

	/*
		Plot of NLLFuncWithBG against tau to confirm minimum found in previous step
	*/
	tau = 0.1
	var mat matrix.Matrix
	x = make([]float64, 0, 100)
	y = make([]float64, 0, 100)
	for i := 0; i < 100; i++ {
		x = append(x, tau)
		mat, _ = matrix.New(fmt.Sprintf("%f;%f", tau, x0[1][0])) // hold a fixed at its fitted value
		y = append(y, NLLFuncWithBG(mat, N))
		tau += 0.01
	}
	createScatter(x, y, fmt.Sprintf("NLL with BG, a = %f", x0[1][0]))

	/*
		Find the error on tau from the NLL (with background) fit
	*/
	tau_p, tau_m = tauFromNLLBG(x0, 0.5, N)
	fmt.Println("Error of calculated NLL (w BG) is: ", tau_p-tau_m)
}