// TestRng prints n uniform deviates from the default generator.
func TestRng(t *testing.T) {
	var n int = 10

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	for i := 0; i < n; i++ {
		u := rng.Uniform(r)
		fmt.Printf("%.5f\n", u)
	}
	fmt.Println()
}
// TestRandist draws n Poisson variates with mean mu.
func TestRandist(t *testing.T) {
	var n int = 10
	var mu float64 = 3.0

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	for i := 0; i < n; i++ {
		k := randist.Poisson(r, mu)
		fmt.Printf(" %d", k)
	}
	fmt.Println()
}
// MakeData generates noisy samples of exp(x) on [start, end) and returns them
// as a flat slice of (x, observed y, sigma) triples.
func MakeData(start float64, end float64, inc float64) []float64 {
	var data []float64

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	for x := start; x < end; x += inc {
		y0 := math.Exp(x)
		sigma := inc * y0
		dy := randist.Gaussian(r, sigma)
		data = append(data, []float64{x, y0 + dy, sigma}...)
	}
	return data
}
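// The example below is an illustrative sketch, not part of the original tests:
// it assumes only MakeData as defined above and shows how the flat return
// slice is laid out as consecutive (x, observed y, sigma) triples.
func ExampleMakeData_layout() {
	data := MakeData(0.0, 1.0, 0.1)
	// walk the slice in strides of three: x, noisy exp(x), and its sigma
	for i := 0; i+2 < len(data); i += 3 {
		fmt.Printf("x=%g y=%g sigma=%g\n", data[i], data[i+1], data[i+2])
	}
}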
// TestNtupleWrite writes 10000 rows of standard Gaussian (x, y, z) values
// to an ntuple file.
func TestNtupleWrite(t *testing.T) {
	dataBuf := make([]Data, 1)
	nt := ntuple.Create(TEMP_FILE_NAME, dataBuf)

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	for i := 0; i < 10000; i++ {
		dataBuf[0].x = randist.Ugaussian(r)
		dataBuf[0].y = randist.Ugaussian(r)
		dataBuf[0].z = randist.Ugaussian(r)
		ntuple.Write(nt)
	}
	ntuple.Close(nt)
}
// TestSort selects the k smallest of n uniform deviates.
func TestSort(t *testing.T) {
	var n int = 100000
	var k int = 5

	x := make([]float64, n)
	small := make([]float64, k)

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	for i := 0; i < n; i++ {
		x[i] = rng.Uniform(r)
	}

	// stride 1 over all n elements
	sort.SortSmallest(small, k, x, 1, n)

	fmt.Printf("%d smallest values from %d\n", k, n)
	for i := 0; i < k; i++ {
		fmt.Printf("%d: %.18f\n", i, small[i])
	}
}
// TestSortVectorIndex sorts a vector indirectly through an index permutation
// and prints the k smallest entries.
func TestSortVectorIndex(t *testing.T) {
	var n int = 10000
	var k int = 5

	v := vector.VectorAlloc(n)
	p := permutation.PermutationAlloc(n)

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	for i := 0; i < n; i++ {
		vector.Set(v, i, rng.Uniform(r))
	}

	sort.SortVectorIndex(p, v)

	pData := p.Slice_().([]int)
	for i := 0; i < k; i++ {
		vpi := vector.Get(v, pData[i])
		fmt.Printf("order = %d, value = %g\n", i, vpi)
	}
}
// TestPermutation prints the identity permutation, a random shuffle of it,
// and the inverse of the shuffle.
func TestPermutation(t *testing.T) {
	var N int = 15

	p := permutation.PermutationAlloc(N)
	q := permutation.PermutationAlloc(N)

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	fmt.Printf("initial permutation: ")
	permutation.PermutationInit(p)
	permutation.Fprintf(os.Stdout, p, " %u")
	os.Stdout.Sync()
	fmt.Printf("\n")

	fmt.Printf(" random permutation: ")
	randist.Shuffle(r, p.Slice_(), p.Len())
	permutation.Fprintf(os.Stdout, p, " %u")
	fmt.Printf("\n")

	fmt.Printf(" inverse permutation: ")
	permutation.Inverse(q, p)
	permutation.Fprintf(os.Stdout, q, " %u")
	fmt.Printf("\n")
}
// TestSiman runs simulated annealing on a one-dimensional energy function,
// starting the search from xInitial.
func TestSiman(t *testing.T) {
	params := &siman.GslSimanParams{
		NumTries:   N_TRIES,
		ItersFixed: ITERS_FIXED_T,
		StepSize:   STEP_SIZE,
		K:          K,
		TInitial:   T_INITIAL,
		Mu:         MU_T,
		TMin:       T_MIN,
	}
	siman.InitializeGslSimanParams(params)

	var xInitial float64 = 15.5

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)
	fmt.Println(rng.Uniform(r))

	siman.Solve(r, &xInitial, E1, S1, M1, P1, nil, nil, nil, params)
}
// TestHistogram fills a small 2-D histogram and draws 1000 samples from its
// probability distribution.
func TestHistogram(t *testing.T) {
	h := histogram.Histogram2dAlloc(10, 10)
	histogram.Histogram2dSetRangesUniform(h, 0.0, 1.0, 0.0, 1.0)

	histogram.Histogram2dAccumulate(h, 0.3, 0.3, 1)
	histogram.Histogram2dAccumulate(h, 0.8, 0.1, 5)
	histogram.Histogram2dAccumulate(h, 0.7, 0.9, 0.5)

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	hDim := h.Dim()
	p := histogram.Histogram2dPdfAlloc(hDim[0], hDim[1])
	histogram.Histogram2dPdfInit(p, h)

	for i := 0; i < 1000; i++ {
		u := rng.Uniform(r)
		v := rng.Uniform(r)
		_, x, y := histogram.Histogram2dPdfSample(p, u, v)
		fmt.Printf("%g %g\n", x, y)
	}
}
// TestMonte integrates g over the cube [0, pi]^3 with the plain, MISER, and
// VEGAS Monte Carlo algorithms.
func TestMonte(t *testing.T) {
	xl := []float64{0, 0, 0}
	xu := []float64{math.Pi, math.Pi, math.Pi}

	G := &monte.GslMonteFunction{
		Function: g,
		Dim:      3,
	}
	monte.InitializeGslMonteFunction(G)

	var calls int = 500000

	rng.EnvSetup()
	T := rng.DefaultRngType()
	r := rng.RngAlloc(T)

	sp := monte.PlainAlloc(3)
	_, res, err := monte.PlainIntegrate(G, xl, xu, 3, calls, r, sp)
	DisplayResults("plain", res, err)

	sm := monte.MiserAlloc(3)
	_, res, err = monte.MiserIntegrate(G, xl, xu, 3, calls, r, sm)
	DisplayResults("miser", res, err)

	sv := monte.VegasAlloc(3)
	_, res, err = monte.VegasIntegrate(G, xl, xu, 3, 10000, r, sv)
	DisplayResults("vegas warm-up", res, err)

	fmt.Printf("converging...\n")
	for {
		_, res, err = monte.VegasIntegrate(G, xl, xu, 3, calls/5, r, sv)
		fmt.Printf("result = % .6f sigma = % .6f "+
			"chisq/dof = %.1f\n", res, err, monte.VegasChisq(sv))
		// stop once chisq/dof is consistent with 1
		if math.Abs(monte.VegasChisq(sv)-1.0) <= 0.5 {
			break
		}
	}
}
func TestRobust(t *testing.T) {
	var p int = 2        // linear fit
	var a float64 = 1.45 // data slope
	var b float64 = 3.88 // data intercept
	var n int = 20

	X := matrix.MatrixAlloc(n, p)
	x := vector.VectorAlloc(n)
	y := vector.VectorAlloc(n)
	c := vector.VectorAlloc(p)
	cOls := vector.VectorAlloc(p)
	cov := matrix.MatrixAlloc(p, p)
	r := rng.RngAlloc(rng.DefaultRngType())

	// generate linear dataset
	for i := 0; i < n-3; i++ {
		dx := 10.0 / (float64(n) - 1.0)
		ei := rng.Uniform(r)
		xi := -5.0 + float64(i)*dx
		yi := a*xi + b
		vector.Set(x, i, xi)
		vector.Set(y, i, yi+ei)
	}

	// add a few outliers
	vector.Set(x, n-3, 4.7)
	vector.Set(y, n-3, -8.3)
	vector.Set(x, n-2, 3.5)
	vector.Set(y, n-2, -6.7)
	vector.Set(x, n-1, 4.1)
	vector.Set(y, n-1, -6.0)

	// construct design matrix X for linear fit
	for i := 0; i < n; i++ {
		xi := vector.Get(x, i)
		matrix.Set(X, i, 0, 1.0)
		matrix.Set(X, i, 1, xi)
	}

	// perform robust and OLS fit
	DoFit(multifit.GSL_MULTIFIT_ROBUST_OLS, X, y, cOls, cov)
	DoFit(multifit.GSL_MULTIFIT_ROBUST_BISQUARE, X, y, c, cov)

	// output data and model
	for i := 0; i < n; i++ {
		xi := vector.Get(x, i)
		yi := vector.Get(y, i)
		v := matrix.Row(X, i).Vector()

		_, yRob, _ := multifit.RobustEst(v, c, cov)
		_, yOls, _ := multifit.RobustEst(v, cOls, cov)

		fmt.Printf("%g %g %g %g\n", xi, yi, yRob, yOls)
	}

	fmt.Printf("# best fit: Y = %g + %g X\n",
		vector.Get(c, 0), vector.Get(c, 1))
	fmt.Printf("# covariance matrix:\n")
	fmt.Printf("# [ %+.5e, %+.5e\n",
		matrix.Get(cov, 0, 0), matrix.Get(cov, 0, 1))
	fmt.Printf("# %+.5e, %+.5e\n",
		matrix.Get(cov, 1, 0), matrix.Get(cov, 1, 1))
}
func TestBspline(t *testing.T) {
	var n int = 200
	var ncoeffs int = 12
	var nbreak int = ncoeffs - 2

	rng.EnvSetup()
	r := rng.RngAlloc(rng.DefaultRngType())

	// allocate a cubic bspline workspace (k = 4)
	bw := bspline.Alloc(4, nbreak)
	B := vector.VectorAlloc(ncoeffs)

	x := vector.VectorAlloc(n)
	y := vector.VectorAlloc(n)
	X := matrix.MatrixAlloc(n, ncoeffs)
	c := vector.VectorAlloc(ncoeffs)
	w := vector.VectorAlloc(n)
	cov := matrix.MatrixAlloc(ncoeffs, ncoeffs)
	mw := multifit.LinearAlloc(n, ncoeffs)

	fmt.Printf("#m=0,S=0\n")

	// this is the data to be fitted
	for i := 0; i < n; i++ {
		xi := (15.0 / (float64(n) - 1)) * float64(i)
		yi := math.Cos(xi) * math.Exp(-0.1*xi)
		sigma := 0.1 * yi
		dy := randist.Gaussian(r, sigma)

		vector.Set(x, i, xi)
		vector.Set(y, i, yi+dy)
		vector.Set(w, i, 1.0/(sigma*sigma))

		fmt.Printf("%f %f\n", xi, yi+dy)
	}

	// use uniform breakpoints on [0, 15]
	bspline.KnotsUniform(0.0, 15.0, bw)

	// construct the fit matrix X
	for i := 0; i < n; i++ {
		xi := vector.Get(x, i)

		// compute B_j(xi) for all j
		bspline.Eval(xi, B, bw)

		// fill in row i of X
		for j := 0; j < ncoeffs; j++ {
			matrix.Set(X, i, j, vector.Get(B, j))
		}
	}

	// do the fit
	_, chisq := multifit.Wlinear(X, w, y, c, cov, mw)

	dof := float64(n - ncoeffs)
	tss := stats.Wtss(w.Data_(), w.Stride(), y.Data_(), y.Stride(), n)
	rsq := 1.0 - chisq/tss
	fmt.Printf("chisq/dof = %e, Rsq = %f\n", chisq/dof, rsq)

	fmt.Printf("#m=1,S=0\n")
	for xi := 0.0; xi < 15.0; xi += 0.1 {
		bspline.Eval(xi, B, bw)
		_, yi, _ := multifit.LinearEst(B, c, cov)
		fmt.Printf("%f %f\n", xi, yi)
	}
}