Example #1
func TestLegendre(t *testing.T) {
	for i, test := range []struct {
		f        func(float64) float64
		min, max float64
		n        []int
		tol      []float64
		ans      float64
	}{
		// Tolerances determined from intuition and a bit of post-hoc tweaking.
		{
			f:   func(x float64) float64 { return math.Exp(x) },
			min: -3,
			max: 5,
			n:   []int{3, 4, 6, 7, 15, 16, 300, 301},
			tol: []float64{5e-2, 5e-3, 5e-6, 1e-7, 1e-14, 1e-14, 1e-14, 1e-14},
			ans: math.Exp(5) - math.Exp(-3),
		},
	} {
		for j, n := range test.n {
			ans := Fixed(test.f, test.min, test.max, n, Legendre{}, 0)
			if !floats.EqualWithinAbsOrRel(ans, test.ans, test.tol[j], test.tol[j]) {
				t.Errorf("Mismatch. Case = %d, n = %d. Want %v, got %v", i, n, test.ans, ans)
			}
			ans2 := Fixed(test.f, test.min, test.max, n, Legendre{}, 3)
			if !floats.EqualWithinAbsOrRel(ans2, test.ans, test.tol[j], test.tol[j]) {
				t.Errorf("Mismatch concurrent. Case = %d, n = %d. Want %v, got %v", i, n, test.ans, ans)
			}
		}
	}
}
Example #2
func NewActivationFunction(name ActivationName) ActivationFunction {
	switch name {
	case ActivationName_LINEAR:
		return func(x mat64.Matrix, y *mat64.Dense) { y.Clone(x) }
	case ActivationName_LOGISTIC:
		return func(x mat64.Matrix, y *mat64.Dense) {
			y.Apply(func(r, c int, v float64) float64 {
				return 1 / (1 + math.Exp(-v))
			}, x)
		}
	case ActivationName_RELU:
		return func(x mat64.Matrix, y *mat64.Dense) {
			y.Apply(func(r, c int, v float64) float64 { return math.Max(0, v) }, x)
		}
	case ActivationName_TANH:
		return func(x mat64.Matrix, y *mat64.Dense) {
			y.Apply(func(r, c int, v float64) float64 { return math.Tanh(v) }, x)
		}
	case ActivationName_SOFTMAX:
		return func(x mat64.Matrix, y *mat64.Dense) {
			r, c := x.Dims()
			for i := 0; i < r; i++ {
				exp_sum := 0.0
				for j := 0; j < c; j++ {
					exp_sum = exp_sum + math.Exp(x.At(i, j))
				}
				for j := 0; j < c; j++ {
					y.Set(i, j, math.Exp(x.At(i, j))/exp_sum)
				}
			}
		}
	}
	return nil
}
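A minimal usage sketch for the constructor above (assuming the snippet's ActivationName constants and the legacy gonum mat64 package; the demo function and values are illustrative, not part of that project):
func softmaxActivationDemo() {
	x := mat64.NewDense(1, 3, []float64{1, 2, 3})
	y := mat64.NewDense(1, 3, nil)
	act := NewActivationFunction(ActivationName_SOFTMAX)
	act(x, y) // each row of y now sums to 1
	fmt.Printf("%v\n", mat64.Formatted(y))
}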
Example #3
func gradient(x float64, y float64) (xd float64, yd float64) {
	xd = (2 * x * math.Cos(math.Pow(x, 2)+math.Pow(y, 2)) * math.Cos(y+math.Exp(x))) -
		(math.Exp(x) * math.Sin(math.Pow(x, 2)+math.Pow(y, 2)) * math.Sin(y+math.Exp(x)))
	yd = (2 * y * math.Cos(math.Pow(x, 2)+math.Pow(y, 2)) * math.Cos(y+math.Exp(x))) -
		(math.Sin(math.Pow(x, 2)+math.Pow(y, 2)) * math.Sin(y+math.Exp(x)))
	return
}
Example #4
/* Test integrating e^(-x) over infinite domains */
func TestNegativeExponential(t *testing.T) {
	const (
		h       = 1e-8
		correct = 1
	)

	f := func(x float64) float64 { return math.Exp(-x) }

	// Check (-Inf, 0]; should be +Inf
	if msg, ok := test_integral(f, math.Inf(-1), 0, h, math.Inf(1)); !ok {
		t.Error(msg)
	}

	// Check [0, +Inf); should be 1
	if msg, ok := test_integral(f, 0, math.Inf(1), h, 1); !ok {
		t.Error(msg)
	}

	// Now check that these results hold for -f
	f = func(x float64) float64 { return -math.Exp(-x) }

	// Check (-Inf, 0]; should be -Inf
	if msg, ok := test_integral(f, math.Inf(-1), 0, h, math.Inf(-1)); !ok {
		t.Error(msg)
	}

	// Check [0, +Inf); should be -1
	if msg, ok := test_integral(f, 0, math.Inf(1), h, -1); !ok {
		t.Error(msg)
	}
}
Example #5
File: exp.go Project: swook/gogsl
func Exp_mult_e(x float64, y float64, result *Result) error {
	ay := math.Abs(y)

	if y == 0.0 {
		result.val = 0.0
		result.err = 0.0
		return err.SUCCESS
	} else if (x < 0.5*gsl.LOG_DBL_MAX && x > 0.5*gsl.LOG_DBL_MIN) && (ay < 0.8*gsl.SQRT_DBL_MAX && ay > 1.2*gsl.SQRT_DBL_MIN) {
		ex := math.Exp(x)
		result.val = y * ex
		result.err = (2.0 + math.Abs(x)) * gsl.DBL_EPSILON * math.Abs(result.val)
		return err.SUCCESS
	}

	ly := math.Log(ay)
	lnr := x + ly

	if lnr > gsl.LOG_DBL_MAX-0.01 {
		return OverflowError(result)
	} else if lnr < gsl.LOG_DBL_MIN+0.01 {
		return UnderflowError(result)
	}

	sy := gsl.Sign(y)
	M := math.Floor(x)
	N := math.Floor(ly)
	a := x - M
	b := ly - N
	berr := 2.0 * gsl.DBL_EPSILON * (math.Abs(ly) + math.Abs(N))
	result.val = float64(sy) * math.Exp(M+N) * math.Exp(a+b)
	result.err = berr * math.Abs(result.val)
	result.err += 2.0 * gsl.DBL_EPSILON * (M + N + 1.0) * math.Abs(result.val)
	return err.SUCCESS
}
Example #6
// NormalArea_ calculates the area below the normal curve from the series
//
//	x - x³/den + x⁵/den - x⁷/den + ...
//
// where den = 2^n * n! * (2n+1) for the n-th term.
//
func NormalArea_(x float64) float64 {

	a := x
	// xx = x*x
	n := 1.0
	f := 1.0

	sign := true

	for {

		f = float64(Factorial(int64(n))) * math.Pow(2, n) * (2*n + 1)
		n += 2
		println(n, f, a)

		if sign {
			a -= math.Pow(x, n) / f
			sign = false
		} else {
			a += math.Pow(x, n) / f
			sign = true
		}

		if n > 10 {
			break
		}
	}

	println("a", a)
	println("/", math.Exp(-x*x/2.0)/k)

	return a * math.Exp(-x*x/2.0) / k
}
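The snippet above appears to be work in progress (debug printlns; the denominator and the power are computed with different values of n, and k is defined elsewhere in its project). For comparison, a minimal sketch of one standard series for the same quantity, assuming the goal is the area under the standard normal density between 0 and x:
// normalAreaSeries returns ∫₀ˣ φ(t) dt for the standard normal density,
// using the Maclaurin series ∫₀ˣ e^(-t²/2) dt = Σ (-1)ⁿ x^(2n+1)/(2ⁿ·n!·(2n+1)).
func normalAreaSeries(x float64) float64 {
	sum := 0.0
	fact, pow2 := 1.0, 1.0 // n! and 2^n
	for n := 0; n < 200; n++ {
		if n > 0 {
			fact *= float64(n)
			pow2 *= 2
		}
		term := math.Pow(x, float64(2*n+1)) / (pow2 * fact * float64(2*n+1))
		if n%2 == 1 {
			term = -term
		}
		sum += term
		if math.Abs(term) < 1e-16 {
			break
		}
	}
	return sum / math.Sqrt(2*math.Pi)
}
For example, normalAreaSeries(1) ≈ 0.3413, matching Φ(1) − 0.5.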
Example #7
func rbmExactExpectation(r *RBM, layer []bool, hidden bool) linalg.Vector {
	var normalizer kahan.Summer64
	var outcomeSum []kahan.Summer64
	if hidden {
		outcomeSum = make([]kahan.Summer64, rbmTestHiddenSize)
	} else {
		outcomeSum = make([]kahan.Summer64, rbmTestVisibleSize)
	}

	for i := 0; i < (1 << uint(len(outcomeSum))); i++ {
		variableVec := boolVecFromInt(i, len(outcomeSum))
		var prob float64
		if hidden {
			prob = math.Exp(-rbmEnergy(r, layer, variableVec))
		} else {
			prob = math.Exp(-rbmEnergy(r, variableVec, layer))
		}
		normalizer.Add(prob)
		for j, b := range variableVec {
			if b {
				outcomeSum[j].Add(prob)
			}
		}
	}

	expectation := make(linalg.Vector, len(outcomeSum))
	norm := 1.0 / normalizer.Sum()
	for i, s := range outcomeSum {
		expectation[i] = norm * s.Sum()
	}
	return expectation
}
Example #8
func (sh *ssHeap) Less(i, j int) bool {
	ss := sh.ss
	a, b := &ss.buckets[sh.h[i]], &ss.buckets[sh.h[j]]
	rateA, rateB := a.rate, b.rate
	lastA, lastB := a.lastTs, b.lastTs

	// Same formula as in recount(); inlining it here is faster.
	if lastA >= lastB {
		// Optimization: if rateB is already smaller than rateA there is
		// no need to compute the real rate. Decay can only shrink it, so
		// we can avoid running the expensive math.Exp().
		if rateB >= rateA {
			rateB *= math.Exp(float64(lastA-lastB) * ss.weightHelper)
		}
	} else {
		if rateA >= rateB {
			rateA *= math.Exp(float64(lastB-lastA) * ss.weightHelper)
		}
	}

	if rateA != rateB {
		return rateA < rateB
	} else {
		// This makes a difference for uninitialized buckets: the rate is
		// zero but lastTs has been modified. In that case make sure to use
		// the uninitialized bucket first.
		return lastA < lastB
	}
}
Example #9
// Boost performs categorical adaptive boosting using the specified partition and
// returns the weight that the tree that generated the partition should be given.
func (t *AdaBoostTarget) Boost(leaves *[][]int) (weight float64) {
	weight = 0.0
	for _, cases := range *leaves {
		weight += t.Impurity(&cases, nil)
	}
	if weight >= .5 {
		return 0.0
	}
	weight = .5 * math.Log((1-weight)/weight)

	for _, cases := range *leaves {
		m := t.Modei(&cases)
		for _, c := range cases {
			if t.IsMissing(c) == false {
				cat := t.Geti(c)
				if cat != m {
					t.Weights[c] = t.Weights[c] * math.Exp(weight)
				} else {
					t.Weights[c] = t.Weights[c] * math.Exp(-weight)
				}
			}

		}
	}
	normfactor := 0.0
	for _, v := range t.Weights {
		normfactor += v
	}
	for i, v := range t.Weights {
		t.Weights[i] = v / normfactor
	}
	return
}
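The same update on plain slices, as a minimal sketch (the helper name and arguments are hypothetical, not part of the package above): the tree gets vote α = ½·ln((1−err)/err), misclassified cases are up-weighted by e^α, correctly classified ones are down-weighted by e^(−α), and the weights are renormalized.
func adaBoostUpdate(weights []float64, miss []bool, errRate float64) float64 {
	alpha := 0.5 * math.Log((1-errRate)/errRate)
	total := 0.0
	for i := range weights {
		if miss[i] {
			weights[i] *= math.Exp(alpha) // up-weight misclassified cases
		} else {
			weights[i] *= math.Exp(-alpha) // down-weight correct ones
		}
		total += weights[i]
	}
	for i := range weights {
		weights[i] /= total // renormalize to sum to 1
	}
	return alpha // the vote given to this tree
}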
Example #10
// Init initialises the model
func (o *RefIncRL1) Init(prms Prms) (err error) {

	// parameters
	for _, p := range prms {
		switch p.N {
		case "lam0":
			o.λ0 = p.V
		case "lam1":
			o.λ1 = p.V
		case "alp":
			o.α = p.V
		case "bet":
			o.β = p.V
		default:
			return chk.Err("ref-inc-rl1: parameter named %q is invalid", p.N)
		}
	}

	// set b
	o.b = -1.0 // not flipped
	if o.λ1 < o.λ0 {
		o.b = 1.0 // flipped
	}

	// constants
	o.c1 = o.β * o.b * (o.λ1 - o.λ0)
	o.c2 = math.Exp(o.β * o.b * o.α)
	o.c3 = math.Exp(o.β*o.b*(1.0-o.λ0)) - o.c2*math.Exp(o.c1)
	return
}
Example #11
// Upper incomplete gamma.
func ugamma(x, s float64, regularized bool) float64 {
	if x <= 1.1 || x <= s {
		if regularized {
			return 1 - lgamma(x, s, regularized)
		}
		return math.Gamma(s) - lgamma(x, s, regularized)
	}

	f := 1.0 + x - s
	C := f
	D := 0.0
	var a, b, chg float64

	for i := 1; i < 10000; i++ {
		a = float64(i) * (s - float64(i))
		b = float64(i<<1) + 1.0 + x - s
		D = b + a*D
		C = b + a/C
		D = 1.0 / D
		chg = C * D
		f *= chg
		if math.Abs(chg-1) < eps {
			break
		}
	}
	if regularized {
		logg, _ := math.Lgamma(s)
		return math.Exp(s*math.Log(x) - x - logg - math.Log(f))
	}
	return math.Exp(s*math.Log(x) - x - math.Log(f))
}
Example #12
// Evolve object within likelihood constraint
// logLstar: Likelihood constraint L > Lstar
func (Obj *Object) Explore(logLstar float64) {
	step := 0.1    // Initial guess suitable step-size in (0,1)
	m := 20        // MCMC counter (pre-judged # steps)
	accept := 0    // # MCMC acceptances
	reject := 0    // # MCMC rejections
	var Try Object // Trial object
	for ; m > 0; m-- {
		// Trial object
		Try.u = Obj.u + step*(2.*rand.Float64()-1.) // |move| < step
		Try.v = Obj.v + step*(2.*rand.Float64()-1.) // |move| < step
		Try.u -= math.Floor(Try.u)                  // wraparound to stay within (0,1)
		Try.v -= math.Floor(Try.v)                  // wraparound to stay within (0,1)
		Try.x = 4.0*Try.u - 2.0                     // map to x
		Try.y = 2.0 * Try.v                         // map to y
		Try.logL = logLhood(Try.x, Try.y)           // trial likelihood value
		// Accept if and only if within hard likelihood constraint
		if Try.logL > logLstar {
			*Obj = Try
			accept++
		} else {
			reject++
		}
		// Refine step-size to let acceptance ratio converge around 50%
		if accept > reject {
			step *= math.Exp(1.0 / float64(accept))
		}
		if accept < reject {
			step /= math.Exp(1.0 / float64(reject))
		}
	}
}
Example #13
// Init initialises the model
func (o *RefDecSp1) Init(prms Prms) (err error) {

	// parameters
	e := prms.Connect(&o.β, "bet", "ref-dec-sp1 function")
	e += prms.Connect(&o.λ1, "lam1", "ref-dec-sp1 function")
	e += prms.Connect(&o.ya, "ya", "ref-dec-sp1 function")
	e += prms.Connect(&o.yb, "yb", "ref-dec-sp1 function")
	if e != "" {
		err = chk.Err("%v\n", e)
		return
	}

	// check
	if o.yb >= o.ya {
		return chk.Err("yb(%g) must be smaller than ya(%g)", o.yb, o.ya)
	}

	// constants
	o.c1 = o.β * o.λ1
	o.c2 = math.Exp(-o.β * o.ya)
	o.c3 = math.Exp(-o.β*o.yb) - o.c2
	o.c1timestmax = 400

	// check
	if math.IsInf(o.c2, 0) || math.IsInf(o.c3, 0) {
		return chk.Err("β*ya or β*yb is too large:\n β=%v, ya=%v, yb=%v\n c1=%v, c2=%v, c3=%v", o.β, o.ya, o.yb, o.c1, o.c2, o.c3)
	}
	return
}
Example #14
func Anneal(state Annealable, maxTemp, minTemp float64, steps int) Annealable {
	factor := -math.Log(maxTemp / minTemp)
	state = state.Copy()
	bestState := state.Copy()
	bestEnergy := state.Energy()
	previousEnergy := bestEnergy
	for step := 0; step < steps; step++ {
		if step%100000 == 0 {
			showProgress(step, steps)
		}
		pct := float64(step) / float64(steps-1)
		temp := maxTemp * math.Exp(factor*pct)
		undo := state.DoMove()
		energy := state.Energy()
		change := energy - previousEnergy
		if change > 0 && math.Exp(-change/temp) < rand.Float64() {
			state.UndoMove(undo)
		} else {
			previousEnergy = energy
			if energy < bestEnergy {
				// pct := float64(step*100) / float64(steps)
				// fmt.Printf("step: %d of %d (%.1f%%), temp: %.3f, energy: %.3f\n",
				//     step, steps, pct, temp, energy)
				bestEnergy = energy
				bestState = state.Copy()
			}
		}
	}
	showProgress(steps, steps)
	return bestState
}
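The schedule above decays geometrically: with factor = −ln(maxTemp/minTemp), temp = maxTemp·e^(factor·pct) equals maxTemp at pct = 0 and minTemp at pct = 1. A small sketch with illustrative values:
func coolingScheduleDemo() {
	maxTemp, minTemp := 10.0, 0.01
	factor := -math.Log(maxTemp / minTemp)
	for _, pct := range []float64{0, 0.5, 1} {
		fmt.Printf("pct=%.1f temp=%.4f\n", pct, maxTemp*math.Exp(factor*pct))
	}
	// prints 10.0000, 0.3162 (= √(maxTemp·minTemp)) and 0.0100
}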
Example #15
// Init initialises the function
func (o *RefDecGen) Init(prms Prms) (err error) {

	// parameters
	for _, p := range prms {
		switch p.N {
		case "bet":
			o.β = p.V
		case "a":
			o.a = p.V
		case "b":
			o.b = p.V
		case "c":
			o.c = p.V
		case "A":
			o.A = p.V
		case "B":
			o.B = p.V
		case "xini":
			o.xini = p.V
		case "yini":
			o.yini = p.V
		default:
			return chk.Err("ref-dec-gen: parameter named %q is invalid", p.N)
		}
	}

	// constants
	o.c1 = o.β * (o.b*o.A - o.a)
	o.c2 = ((o.A - o.B) / (o.A - o.a/o.b)) * math.Exp(-o.β*o.c)
	o.c3 = math.Exp(o.β*o.b*(o.yini+o.A*o.xini)) - o.c2*math.Exp(o.c1*o.xini)
	return
}
Example #16
func Test_ExponentiallyDecayingSample_Kolmogorov(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	end := 10000000
	scale := 100000.0

	s := NewExponentiallyDecayingSample(1024, 0.3)
	s.last_t = 0

	for i := 0; i < end; i++ {
		s.Update(float64(i)/scale, float64(i)/scale)
	}

	cdf := func(x float64) float64 {
		w := math.Exp(0.3 * (x - s.last_t))
		top_w := math.Exp(0.3 * (float64(end-1)/scale - s.last_t))
		return w / top_w
	}
	sample := s.Sample()
	D, pvalue := KolmogorovTest(sample, cdf)

	if pvalue < 0.005 {
		t.Errorf("KolmogorovTest(sample) == %f, D == %f", pvalue, D)
	}

}
Example #17
func initExp() (testKe []uint32, testWe, testFe []float32) {
	const m2 = 1 << 32
	var (
		de float64 = re
		te         = de
		ve float64 = 3.9496598225815571993e-3
	)

	testKe = make([]uint32, 256)
	testWe = make([]float32, 256)
	testFe = make([]float32, 256)

	q := ve / math.Exp(-de)
	testKe[0] = uint32((de / q) * m2)
	testKe[1] = 0
	testWe[0] = float32(q / m2)
	testWe[255] = float32(de / m2)
	testFe[0] = 1.0
	testFe[255] = float32(math.Exp(-de))
	for i := 254; i >= 1; i-- {
		de = -math.Log(ve/de + math.Exp(-de))
		testKe[i+1] = uint32((de / te) * m2)
		te = de
		testFe[i] = float32(math.Exp(-de))
		testWe[i] = float32(de / m2)
	}
	return
}
Example #18
func initNorm() (testKn []uint32, testWn, testFn []float32) {
	const m1 = 1 << 31
	var (
		dn float64 = rn
		tn         = dn
		vn float64 = 9.91256303526217e-3
	)

	testKn = make([]uint32, 128)
	testWn = make([]float32, 128)
	testFn = make([]float32, 128)

	q := vn / math.Exp(-0.5*dn*dn)
	testKn[0] = uint32((dn / q) * m1)
	testKn[1] = 0
	testWn[0] = float32(q / m1)
	testWn[127] = float32(dn / m1)
	testFn[0] = 1.0
	testFn[127] = float32(math.Exp(-0.5 * dn * dn))
	for i := 126; i >= 1; i-- {
		dn = math.Sqrt(-2.0 * math.Log(vn/dn+math.Exp(-0.5*dn*dn)))
		testKn[i+1] = uint32((dn / tn) * m1)
		tn = dn
		testFn[i] = float32(math.Exp(-0.5 * dn * dn))
		testWn[i] = float32(dn / m1)
	}
	return
}
Example #19
// SelectArm returns 1 indexed arm to be tried next.
func (s *softmax) SelectArm() int {
	max, _ := bmath.Max(s.values)

	normalizer := 0.0
	for _, value := range s.values {
		normalizer += math.Exp((value - max) / s.tau)
	}

	if math.IsInf(normalizer, 0) {
		panic("normalizer in softmax too large")
	}

	cumulativeProb := 0.0
	draw := len(s.values) - 1
	z := s.rand.Float64()
	for i, value := range s.values {
		cumulativeProb = cumulativeProb + math.Exp((value-max)/s.tau)/normalizer
		if cumulativeProb > z {
			draw = i
			break
		}
	}
	s.counts[draw]++
	return draw + 1
}
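Subtracting the max before exponentiation keeps the arguments to math.Exp non-positive so the normalizer cannot overflow, and the shift cancels when dividing by the normalizer. A minimal sketch of the resulting probabilities (hypothetical helper, not part of the package above):
func softmaxProbs(values []float64, tau float64) []float64 {
	max := values[0]
	for _, v := range values {
		if v > max {
			max = v
		}
	}
	probs := make([]float64, len(values))
	var z float64
	for i, v := range values {
		probs[i] = math.Exp((v - max) / tau) // e^((v-max)/τ) = e^(v/τ)·e^(-max/τ)
		z += probs[i]
	}
	for i := range probs {
		probs[i] /= z // the common e^(-max/τ) factor cancels here
	}
	return probs
}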
Example #20
// check that the integration function works
func TestIntegrateMid(t *testing.T) {
	tests := []struct {
		fn     smoothFn
		x1, x2 float64
		Tot    float64
	}{
		// linear
		{func(x float64) float64 { return 0.5 * x }, 0.0, 1.0, 0.25},
		// normal distribution
		{func(x float64) float64 { return 1 / math.Sqrt(2*math.Pi) * math.Exp(-(x*x)/2) }, -100, 100, 1.0},
		// normal distribution half
		{func(x float64) float64 { return 1 / math.Sqrt(2*math.Pi) * math.Exp(-(x*x)/2) }, -100, 0, 0.5},
		// normal distribution segment
		{func(x float64) float64 { return 1 / math.Sqrt(2*math.Pi) * math.Exp(-(x*x)/2) }, -2, -1, .1359051219835},
		// scaled gamma distribution (similar to my dissertation experiment 3)
		{func(x float64) float64 {
			k, theta, a := 1.5, 2.0, 1.0/600
			return a / (math.Gamma(k) * math.Pow(theta, k)) * math.Sqrt(x*a) * math.Exp(-x*a/2)
		}, 0, 2400, 0.73853606463},
	}

	for i, test := range tests {
		got := integrateMid(test.fn, test.x1, test.x2, 10000)
		if diff := math.Abs(got - test.Tot); diff > 1e-10 {
			t.Errorf("case %v (integral from %v to %v): got %v, want %v", i+1, test.x1, test.x2, got, test.Tot)
		}
	}
}
Example #21
// https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function
func erfinv(y float64) float64 {
	if y < -1.0 || y > 1.0 {
		panic("invalid input")
	}

	var (
		a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331}
		b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801}
		c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311}
		d = [2]float64{3.543889200, 1.637067800}
	)

	const y0 = 0.7
	var x, z float64

	if math.Abs(y) == 1.0 {
		x = -y * math.Log(0.0)
	} else if y < -y0 {
		z = math.Sqrt(-math.Log((1.0 + y) / 2.0))
		x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0)
	} else {
		if y < y0 {
			z = y * y
			x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[2])*z+b[1])*z+b[0])*z + 1.0)
		} else {
			z = math.Sqrt(-math.Log((1.0 - y) / 2.0))
			x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0)
		}
		x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x))
		x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x))
	}

	return x
}
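A quick round-trip sketch for the function above: after the two Newton refinement steps, erf(erfinv(y)) should reproduce y to high precision (the test name, values, and tolerance are illustrative):
func TestErfinvRoundTrip(t *testing.T) {
	for _, y := range []float64{-0.5, 0, 0.3, 0.9, 0.999} {
		if got := math.Erf(erfinv(y)); math.Abs(got-y) > 1e-9 {
			t.Errorf("erf(erfinv(%v)) = %v", y, got)
		}
	}
}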
Example #22
// Random gamma variable when shape<1
// See Kundu and Gupta 2007
// "A convenient way of generating gamma random variables using generalized exponential distribution"
func (rg RandGen) rgamma2(shape float64) float64 {
	if shape <= 0.0 || shape >= 1.0 {
		panic("Illegal parameter. Shape must be positive and no greater than one")
	}

	d := 1.0334 - 0.0766*math.Exp(2.2942*shape) // Constants from paper
	a := math.Exp2(shape) * math.Pow(-math.Expm1(-d/2), shape)
	pdsh := math.Pow(d, shape-1.0)
	b := shape * pdsh * math.Exp(-d)
	c := a + b

start:
	u := rg.Float64()
	var x float64
	if u <= a/c {
		x = -2.0 * math.Log1p(-math.Pow(c*u, 1.0/shape)/2.0)
	} else {
		x = -math.Log(c * (1.0 - u) / (shape * pdsh))
	}
	v := rg.Float64()
	if x <= d {
		p := math.Pow(x, shape-1.0) * math.Exp(-x/2.0) / (math.Exp2(shape-1.0) * math.Pow(-math.Expm1(-x/2.0), shape-1.0))
		if v > p {
			goto start
		}
	} else {
		if v > math.Pow(d/x, 1.0-shape) {
			goto start
		}
	}

	return x
}
Example #23
// InitBoundedLog initializes a Histogram instance from the given array
// of values, with the given number of bins falling between the given limits.
// The logarithms of the bin centers are uniformly distributed. Any
// values outside of these limits are ignored. The returned integer is the
// number of such ignored values. Because of this, infinite and non-positive
// values do not cause a panic.
//
// The first returned value is the initialized Histogram.
//
// InitBoundedLog panics if given a non-positive number of bins, a low bound
// as large as or larger than the high bound, or infinite bounds.
func (hist *Histogram) InitBoundedLog(xs []float64, binNum int, low, high float64) (*Histogram, int) {
	if hist.init {
		panic("stats.Histogram.InitBoundedLog called on initialized struct.")
	} else if binNum < 1 {
		panic(fmt.Sprintf("stats.Histogram.InitBoundedLog given binNum of %d", binNum))
	} else if low >= high || low <= 0 || math.IsInf(low, 0) ||
		math.IsInf(high, 0) || math.IsNaN(low) || math.IsNaN(high) {
		panic(fmt.Sprintf("stats.Histogram.InitBoundedLog given range [%d, %d]", low, high))
	}

	hist.init = true
	hist.Bins = make([]int, binNum)
	hist.BinValues = make([]float64, binNum)
	hist.BinEdges = make([]float64, binNum+1)

	hist.logHistogram = true

	hist.lowLim = math.Log(low)
	hist.highLim = math.Log(high)
	hist.binWidth = (hist.highLim - hist.lowLim) / float64(binNum)

	for i := 0; i < binNum; i++ {
		hist.BinEdges[i] = math.Exp(hist.lowLim + hist.binWidth*float64(i))
		hist.BinValues[i] = math.Exp(hist.lowLim + hist.binWidth*(float64(i)+0.5))
	}

	hist.BinEdges[binNum] = high

	return hist, hist.AddArray(xs)
}
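A small illustration of the log-spaced edges computed above, with illustrative values: low = 1, high = 1000 and binNum = 3 put the edges on powers of 10.
func logBinEdgesDemo() {
	low, high, binNum := 1.0, 1000.0, 3
	width := (math.Log(high) - math.Log(low)) / float64(binNum)
	for i := 0; i <= binNum; i++ {
		fmt.Printf("%.0f ", math.Exp(math.Log(low)+width*float64(i)))
	}
	// prints: 1 10 100 1000
}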
Example #24
// Init initialises the model
func (o *RefDecSp1) Init(prms Prms) (err error) {

	// parameters
	for _, p := range prms {
		switch p.N {
		case "bet":
			o.β = p.V
		case "lam1":
			o.λ1 = p.V
		case "ya":
			o.ya = p.V
		case "yb":
			o.yb = p.V
		default:
			return chk.Err("ref-dec-sp1: parameter named %q is invalid", p.N)
		}
	}

	// check
	if o.yb >= o.ya {
		return chk.Err("yb(%g) must be smaller than ya(%g)", o.yb, o.ya)
	}

	// constants
	o.c1 = o.β * o.λ1
	o.c2 = math.Exp(-o.β * o.ya)
	o.c3 = math.Exp(-o.β*o.yb) - o.c2
	o.c1timestmax = 400

	// check
	if math.IsInf(o.c2, 0) || math.IsInf(o.c3, 0) {
		return chk.Err("β*ya or β*yb is too large:\n β=%v, ya=%v, yb=%v\n c1=%v, c2=%v, c3=%v", o.β, o.ya, o.yb, o.c1, o.c2, o.c3)
	}
	return
}
Example #25
// H returns ∂²y/∂t²_cteX = H(t, x)
func (o RefDecSp1) H(t float64, x []float64) float64 {
	if o.c1*t > o.c1timestmax {
		return 0.0
	}
	d := o.c3 + o.c2*math.Exp(o.c1*t)
	return -(o.c1 * o.c1 * o.c2 * o.c3 * math.Exp(o.c1*t)) / (o.β * d * d)
}
Example #26
func Test_dist_lognormal_01(tst *testing.T) {

	//verbose()
	chk.PrintTitle("dist_lognormal_01")

	_, dat, err := io.ReadTable("data/lognormal.dat")
	if err != nil {
		tst.Errorf("cannot read comparison results:\n%v\n", err)
		return
	}

	X, ok := dat["x"]
	if !ok {
		tst.Errorf("cannot get x values\n")
		return
	}
	N, ok := dat["n"]
	if !ok {
		tst.Errorf("cannot get n values\n")
		return
	}
	Z, ok := dat["z"]
	if !ok {
		tst.Errorf("cannot get z values\n")
		return
	}
	YpdfCmp, ok := dat["ypdf"]
	if !ok {
		tst.Errorf("cannot get ypdf values\n")
		return
	}
	YcdfCmp, ok := dat["ycdf"]
	if !ok {
		tst.Errorf("cannot get ycdf values\n")
		return
	}

	var dist DistLogNormal

	nx := len(X)
	for i := 0; i < nx; i++ {
		w := Z[i] * Z[i]
		μ := math.Exp(N[i] + w/2.0)
		σ := μ * math.Sqrt(math.Exp(w)-1.0)
		dist.Init(&VarData{M: μ, S: σ})
		Ypdf := dist.Pdf(X[i])
		Ycdf := dist.Cdf(X[i])
		err := chk.PrintAnaNum("ypdf", 1e-14, YpdfCmp[i], Ypdf, chk.Verbose)
		if err != nil {
			tst.Errorf("pdf failed: %v\n", err)
			return
		}
		err = chk.PrintAnaNum("ycdf", 1e-15, YcdfCmp[i], Ycdf, chk.Verbose)
		if err != nil {
			tst.Errorf("cdf failed: %v\n", err)
			return
		}
	}
}
Example #27
func CreateGaussianFilter(params *ParamSet) *GaussianFilter {
	xw := params.FindFloatParam("xwidth", 2.0)
	yw := params.FindFloatParam("ywidth", 2.0)
	alpha := params.FindFloatParam("alpha", 2.0)
	expX := math.Exp(-alpha * xw * xw)
	expY := math.Exp(-alpha * yw * yw)
	return &GaussianFilter{FilterData{xw, yw, 1.0 / xw, 1.0 / yw}, alpha, expX, expY}
}
Example #28
func TestLogAddExp(t *testing.T) {
	x := LogAddExp(1e-10, math.Inf(-1))
	if x != 1e-10 {
		t.Errorf("expected exactly 1e-10, got %g", x)
	}
	t.Logf("naïve version returns %g",
		math.Log(math.Exp(1e-10)+math.Exp(math.Inf(-1))))
}
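The package's LogAddExp implementation is not shown here; a minimal sketch of the usual trick is max + log1p(exp(min − max)), which is exact when one argument is −Inf, matching the test above:
func logAddExp(a, b float64) float64 {
	max, min := a, b
	if b > a {
		max, min = b, a
	}
	if math.IsInf(max, -1) {
		return math.Inf(-1) // both arguments are -Inf
	}
	return max + math.Log1p(math.Exp(min-max))
}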
Example #29
// Skewness returns the skewness of the distribution.
func (w Weibull) Skewness() float64 {
	stdDev := w.StdDev()
	firstGamma, firstGammaSign := math.Lgamma(1 + 3/w.K)
	logFirst := firstGamma + 3*(math.Log(w.Lambda)-math.Log(stdDev))
	logSecond := math.Log(3) + math.Log(w.Mean()) + 2*math.Log(stdDev) - 3*math.Log(stdDev)
	logThird := 3 * (math.Log(w.Mean()) - math.Log(stdDev))
	return float64(firstGammaSign)*math.Exp(logFirst) - math.Exp(logSecond) - math.Exp(logThird)
}
Example #30
func (r *Rocket) compute_max_velocity(thrust float64, time float64) {
	//max velocity given a rocket's burn time and thrust
	g := 9.8 // 9.8 m / s^2
	q := math.Sqrt((thrust - r.mass.value*g) / r.wind_resistance.value)
	x := 2 * r.wind_resistance.value * q / r.mass.value
	r.max_velocity.value = q * (1 - math.Exp(-1*x*time)) /
		(1 + math.Exp(-1*x*time))
}
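Since (1 − e^(−xt))/(1 + e^(−xt)) = tanh(xt/2), the same value can be computed with a single call (a sketch reusing the Rocket fields assumed above; the method name is illustrative):
func (r *Rocket) compute_max_velocity_tanh(thrust float64, time float64) {
	g := 9.8 // m / s^2
	q := math.Sqrt((thrust - r.mass.value*g) / r.wind_resistance.value)
	x := 2 * r.wind_resistance.value * q / r.mass.value
	r.max_velocity.value = q * math.Tanh(x*time/2)
}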