func (pdf *PdfBergstrom) ScaledValue(x, alpha, beta float64) (float64, error) {
	var err error
	zeta := beta * math.Tan(0.5*math.Pi*alpha)
	eps := pdf.eps / x * math.Pi
	done := false
	n := 1
	sum := 0.0
	for !done {
		a := 1.0
		if n%2 == 0 {
			a = -1.0
		}
		a *= math.Gamma(float64(n)*alpha+1) / math.Gamma(float64(n)+1)
		a *= math.Pow(1+zeta*zeta, 0.5*float64(n))
		a *= math.Sin(float64(n) * (0.5*math.Pi*alpha + math.Atan(zeta)))
		delta := a * math.Pow(x, -alpha*float64(n))
		sum += delta
		if math.Abs(delta) < eps {
			done = true
		}
		if n >= pdf.limit {
			done = true
			err = fmt.Errorf("Iteration limit in tail approximation exceeded (%d)", pdf.limit)
		}
		n++
	}
	sum /= x * math.Pi
	return sum, err
}
func (b *Beta) CalcPDF(x float64) (float64, error) {
	if x <= 0.0 || x >= 1.0 {
		return 0, &RangeError{
			Offender: x,
			Min:      0.0,
			Max:      1.0,
		}
	}
	if b.A <= 0.0 {
		return 0.0, &RangeError{
			ValueName: "Alpha",
			Offender:  b.A,
			Min:       0,
			Max:       math.MaxFloat64,
		}
	}
	if b.B <= 0.0 {
		return 0.0, &RangeError{
			ValueName: "Beta",
			Offender:  b.B,
			Min:       0.0,
			Max:       math.MaxFloat64,
		}
	}
	// The normalization is Γ(A+B) / (Γ(A)·Γ(B)); the gammas in the denominator
	// multiply, they are not summed.
	p1 := math.Gamma(b.A+b.B) / (math.Gamma(b.A) * math.Gamma(b.B))
	p2 := math.Pow(x, b.A-1.0) * math.Pow(1.0-x, b.B-1.0)
	return p1 * p2, nil
}
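// Hedged usage sketch (not from the original source): a quick numerical check
// that the corrected normalization Γ(A+B)/(Γ(A)·Γ(B)) makes the Beta density
// integrate to roughly 1. It assumes the Beta type above exposes the A and B
// fields and lives in the same package; the midpoint grid size is arbitrary.
func betaPDFIntegratesToOne() float64 {
	b := &Beta{A: 2.5, B: 4.0}
	const n = 100000
	h := 1.0 / float64(n)
	sum := 0.0
	for i := 0; i < n; i++ {
		x := (float64(i) + 0.5) * h // midpoint rule keeps x strictly inside (0, 1)
		if p, err := b.CalcPDF(x); err == nil {
			sum += p * h
		}
	}
	return sum // expected to be close to 1.0
}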
func Test_2dinteg02(tst *testing.T) {

	//verbose()
	chk.PrintTitle("2dinteg02. bidimensional integral")

	// Γ(1/4, 1)
	gamma_1div4_1 := 0.2462555291934987088744974330686081384629028737277219

	x := utl.LinSpace(0, 1, 11)
	y := utl.LinSpace(0, 1, 11)
	m, n := len(x), len(y)
	f := la.MatAlloc(m, n)
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			f[i][j] = 8.0 * math.Exp(-math.Pow(x[i], 2)-math.Pow(y[j], 4))
		}
	}
	dx, dy := x[1]-x[0], y[1]-y[0]
	Vt := Trapz2D(dx, dy, f)
	Vs := Simps2D(dx, dy, f)
	Vc := math.Sqrt(math.Pi) * math.Erf(1) * (math.Gamma(1.0/4.0) - gamma_1div4_1)
	io.Pforan("Vt = %v\n", Vt)
	io.Pforan("Vs = %v\n", Vs)
	io.Pfgreen("Vc = %v\n", Vc)
	chk.Scalar(tst, "Vt", 0.0114830435645548, Vt, Vc)
	chk.Scalar(tst, "Vs", 1e-4, Vs, Vc)
}
// Upper incomplete gamma.
func ugamma(x, s float64, regularized bool) float64 {
	if x <= 1.1 || x <= s {
		if regularized {
			return 1 - lgamma(x, s, regularized)
		}
		return math.Gamma(s) - lgamma(x, s, regularized)
	}

	f := 1.0 + x - s
	C := f
	D := 0.0
	var a, b, chg float64

	for i := 1; i < 10000; i++ {
		a = float64(i) * (s - float64(i))
		b = float64(i<<1) + 1.0 + x - s
		D = b + a*D
		C = b + a/C
		D = 1.0 / D
		chg = C * D
		f *= chg
		if math.Abs(chg-1) < eps {
			break
		}
	}
	if regularized {
		logg, _ := math.Lgamma(s)
		return math.Exp(s*math.Log(x) - x - logg - math.Log(f))
	}
	return math.Exp(s*math.Log(x) - x - math.Log(f))
}
// Chisquare returns the p-value of Pr(X^2 > cv).
// Compare this value to the significance level assumed. If chisquare < sigval, then we cannot
// accept the null hypothesis and thus the two variables are dependent.
//
// Thanks to Jacob F. W. for a tutorial on chi-square distributions.
// Source: http://www.codeproject.com/Articles/432194/How-to-Calculate-the-Chi-Squared-P-Value
func Chisquare(df int, cv float64) float64 {
	//fmt.Println("Running chi-square...")
	if cv < 0 || df < 1 {
		return 0.0
	}
	k := float64(df) / 2.0
	x := cv / 2.0
	//if df == 1 {
	//	return math.Exp(-x/2.0) / (math.Sqrt2 * math.SqrtPi * math.Sqrt(x))
	//	return (math.Pow(x, (k/2.0)-1.0) * math.Exp(-x/2.0)) / (math.Pow(2, k/2.0) * math.Gamma(k/2.0))
	//	return lgamma(k/2.0, x/2.0, false) / math.Gamma(k/2.0)
	//} else if df == 2 {
	if df == 2 {
		return math.Exp(-x)
	}
	//fmt.Println("Computing incomplete lower gamma function...")
	pval := lgamma(x, k, false)
	if math.IsNaN(pval) || math.IsInf(pval, 0) || pval <= 1e-8 {
		return 1e-14
	}
	//fmt.Println("Computing gamma function...")
	pval /= math.Gamma(k)
	//fmt.Println("Returning chi-square value...")
	return 1.0 - pval
}
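// Hedged usage sketch (assumption: Chisquare above is in scope): with one
// degree of freedom the 95th percentile of the chi-square distribution is
// about 3.8415, so the returned p-value should land near 0.05 if the lower
// incomplete gamma helper is accurate.
func exampleChisquarePValue() float64 {
	return Chisquare(1, 3.841458820694124) // ≈ 0.05
}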
// Lower incomplete gamma.
func lgamma(x, s float64, regularized bool) float64 {
	if x == 0 {
		return 0
	}
	if x < 0 || s <= 0 {
		return math.NaN()
	}

	if x > 1.1 && x > s {
		if regularized {
			return 1.0 - ugamma(x, s, regularized)
		}
		return math.Gamma(s) - ugamma(x, s, regularized)
	}

	var ft float64
	r := s
	c := 1.0
	pws := 1.0

	if regularized {
		logg, _ := math.Lgamma(s)
		ft = s*math.Log(x) - x - logg
	} else {
		ft = s*math.Log(x) - x
	}
	ft = math.Exp(ft)
	for c/pws > eps {
		r++
		c *= x / r
		pws += c
	}
	return pws * ft / s
}
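// Hedged sanity check (assumes ugamma and lgamma above share a package and a
// package-level eps tolerance, both taking arguments in (x, s) order): the
// unregularized halves should reconstruct the complete gamma function,
// Γ(s) = γ(s, x) + Γ(s, x), for moderate x and s.
func incompleteGammaHalvesSum(x, s float64) (sum, complete float64) {
	sum = lgamma(x, s, false) + ugamma(x, s, false)
	complete = math.Gamma(s)
	return sum, complete // expect sum ≈ complete
}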
// check that the integration function works
func TestIntegrateMid(t *testing.T) {
	tests := []struct {
		fn     smoothFn
		x1, x2 float64
		Tot    float64
	}{
		// linear
		{func(x float64) float64 { return 0.5 * x }, 0.0, 1.0, 0.25},
		// normal distribution
		{func(x float64) float64 { return 1 / math.Sqrt(2*math.Pi) * math.Exp(-(x*x)/2) }, -100, 100, 1.0},
		// normal distribution half
		{func(x float64) float64 { return 1 / math.Sqrt(2*math.Pi) * math.Exp(-(x*x)/2) }, -100, 0, 0.5},
		// normal distribution segment
		{func(x float64) float64 { return 1 / math.Sqrt(2*math.Pi) * math.Exp(-(x*x)/2) }, -2, -1, .1359051219835},
		// scaled gamma distribution (similar to my dissertation experiment 3)
		{func(x float64) float64 {
			k, theta, a := 1.5, 2.0, 1.0/600
			return a / (math.Gamma(k) * math.Pow(theta, k)) * math.Sqrt(x*a) * math.Exp(-x*a/2)
		}, 0, 2400, 0.73853606463},
	}

	for i, test := range tests {
		got := integrateMid(test.fn, test.x1, test.x2, 10000)
		if diff := math.Abs(got - test.Tot); diff > 1e-10 {
			t.Errorf("case %v (integral from %v to %v): got %v, want %v", i+1, test.x1, test.x2, got, test.Tot)
		}
	}
}
func main() {
	// Build the buffered reader once; recreating it inside the loop would
	// discard anything already buffered.
	r := bufio.NewReader(os.Stdin)
	for {
		s, err := r.ReadString('\n')
		if err == io.EOF {
			break
		}
		s = strings.TrimRight(s, "\n")
		a := strings.Split(s, " ")
		f := a[0]
		x, err := strconv.ParseFloat(a[1], 64)
		switch f {
		case "erf":
			fmt.Println(math.Erf(x))
		case "expm1":
			fmt.Println(math.Expm1(x))
		case "phi":
			fmt.Println(phi.Phi(x))
		case "NormalCDFInverse":
			fmt.Println(normal_cdf_inverse.NormalCDFInverse(x))
		case "Gamma":
			fmt.Println(math.Gamma(x))
		case "LogGamma":
			lg, _ := math.Lgamma(x)
			fmt.Println(lg)
		case "LogFactorial":
			fmt.Println(log_factorial.LogFactorial(int(x)))
		default:
			fmt.Println("Unknown function: " + f)
			return
		}
	}
}
func lgammafn(x float64) float64 {
	/* For IEEE double precision DBL_EPSILON = 2^-52 = 2.220446049250313e-16 :
	   xmax  = DBL_MAX / log(DBL_MAX) = 2^1024 / (1024 * log(2)) = 2^1014 / log(2)
	   dxrel = sqrt(DBL_EPSILON) = 2^-26 = 5^26 * 1e-26 (is *exact* below !)
	*/
	const (
		xmax  = 2.5327372760800758e+305
		dxrel = 1.490116119384765696e-8
	)

	if isNaN(x) {
		return x
	}
	if x <= 0 && x == trunc(x) { /* Negative integer argument */
		return posInf /* +Inf, since lgamma(x) = log|gamma(x)| */
	}

	y := abs(x)

	if y < 1e-306 { // denormalized range
		return -log(x)
	}
	if y <= 10 {
		return log(abs(math.Gamma(x)))
	}

	// ELSE  y = |x| > 10

	if y > xmax {
		return posInf
	}

	if x > 0 { /* i.e. y = x > 10 */
		if x > 1e17 {
			return x * (log(x) - 1)
		} else if x > 4934720. {
			return lnSqrt2π + (x-0.5)*log(x) - x
		} else {
			return lnSqrt2π + (x-0.5)*log(x) - x + lgammacor(x)
		}
	}

	/* else: x < -10; y = -x */
	sinpiy := abs(sin(π * y))

	if sinpiy == 0 { // Negative integer argument
		// Now UNNECESSARY: caught above, should NEVER happen!
		return nan
	}

	ans := lnSqrtπd2 + (x-0.5)*log(y) - x - log(sinpiy) - lgammacor(y)

	if abs((x-trunc(x-0.5))*ans/x) < dxrel {
		panic("precision")
	}
	return ans
}
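// Hedged spot check (assumes lgammafn above is in scope): for positive,
// non-huge arguments the result should agree with the first return value of
// the standard library's math.Lgamma to within a few ulps.
func lgammafnVsStdlib(x float64) (ours, stdlib float64) {
	ours = lgammafn(x)
	stdlib, _ = math.Lgamma(x)
	return ours, stdlib
}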
func main() { fmt.Println("Running...") start := time.Now() A2 = make([]float64, 2) A3 = make([]float64, 3) A1 = 1 A2[0] = 1.0 / (3.0 * LaguerreD(2, z2[0]) * Laguerre(3, z2[0])) A2[1] = 1.0 / (3.0 * LaguerreD(2, z2[1]) * Laguerre(3, z2[1])) A3[0] = 1.0 / (4.0 * LaguerreD(3, z3[0]) * Laguerre(4, z3[0])) A3[1] = 1.0 / (4.0 * LaguerreD(3, z3[1]) * Laguerre(4, z3[1])) A3[2] = 1.0 / (4.0 * LaguerreD(3, z3[2]) * Laguerre(4, z3[2])) pt = make(plotter.XYs, nPlot) x = make([]float64, nPlot) dx := (xmax - xmin) / float64(nPlot-1) for i := range x { x[i] = dx*float64(i) + xmin pt[i].X = x[i] pt[i].Y = (math.Gamma(x[i])) } var p1, p2, p3 plotter.XYs p1 = make(plotter.XYs, nPlot) p2 = make(plotter.XYs, nPlot) p3 = make(plotter.XYs, nPlot) for i := range x { p1[i].X = x[i] p2[i].X = x[i] p3[i].X = x[i] p1[i].Y = (A1 * math.Pow(z1, x[i]-1)) p2[i].Y = (A2[0]*math.Pow(z2[0], x[i]-1) + A2[1]*math.Pow(z2[1], x[i]-1)) p3[i].Y = (A3[0]*math.Pow(z3[0], x[i]-1) + A3[1]*math.Pow(z3[1], x[i]-1) + A3[2]*math.Pow(z3[2], x[i]-1)) } p, err := plot.New() if err != nil { panic(err) } p.Title.Text = fmt.Sprintf("Gamma Function Approximations") p.Y.Label.Text = "Log(y)" p.X.Label.Text = "x" plotutil.AddLinePoints(p, "Log(Gamma)", pt, "m=0", p1, "m=1", p2, "m=2", p3) // Save the plot to a PNG file. if err := p.Save(6, 4, "gammaLow.png"); err != nil { panic(err) } fmt.Println(time.Since(start)) fmt.Println("...program terminated successfully!") }
func chiSquaredPdf(k float64, x float64) float64 {
	if x < 0 {
		return 0
	}
	top := math.Pow(x, (k/2)-1) * math.Exp(-x/2)
	bottom := math.Pow(2, k/2) * math.Gamma(k/2)
	return top / bottom
}
// Probability density function
func Gamma_PDF(k float64, θ float64) func(x float64) float64 {
	return func(x float64) float64 {
		if x < 0 {
			return 0
		}
		return math.Pow(x, k-1) * math.Exp(-x/θ) / (math.Gamma(k) * math.Pow(θ, k))
	}
}
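// Hedged usage sketch: Gamma_PDF returns a closure, so the density can be
// bound once and evaluated repeatedly. With k = 2 and θ = 2 the distribution
// coincides with a chi-square distribution with 4 degrees of freedom.
func exampleGammaPDF() float64 {
	pdf := Gamma_PDF(2.0, 2.0)
	return pdf(3.0) // density of Gamma(k=2, θ=2) at x = 3, roughly 0.167
}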
// LnBeta returns the value of the log beta function. Translation of the Fortran code by W. Fullerton of Los Alamos Scientific Laboratory.
func LnBeta(a, b float64) float64 {
	var corr float64

	if isNaN(a) || isNaN(b) {
		return a + b
	}
	q := a
	p := q
	if b < p {
		p = b
	}
	if b > q {
		q = b
	}

	/* both arguments must be >= 0 */
	if p < 0 {
		return nan
	} else if p == 0 {
		return posInf
	} else if isInf(q, 0) { /* q == +Inf */
		return negInf
	}

	if p >= 10 {
		/* p and q are big. */
		corr = lgammacor(p) + lgammacor(q) - lgammacor(p+q)
		return log(q)*-0.5 + lnSqrt2π + corr + (p-0.5)*log(p/(p+q)) + q*log1p(-p/(p+q))
	} else if q >= 10 {
		/* p is small, but q is big. */
		corr = lgammacor(q) - lgammacor(p+q)
		return lgammafn(p) + corr + p - p*log(p+q) + (q-0.5)*log1p(-p/(p+q))
	}
	/* p and q are small: p <= q < 10. */
	if p < 1e-306 {
		return LnΓ(p) + (LnΓ(q) - LnΓ(p+q))
	}
	return log(math.Gamma(p) * (math.Gamma(q) / math.Gamma(p+q)))
}
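// Hedged sketch (assumes LnBeta above is in scope): for small arguments the
// log-beta value can be checked directly against the gamma identity
// B(a, b) = Γ(a)Γ(b)/Γ(a+b).
func lnBetaMatchesGammaIdentity(a, b float64) (fromLnBeta, fromGamma float64) {
	fromLnBeta = math.Exp(LnBeta(a, b))
	fromGamma = math.Gamma(a) * math.Gamma(b) / math.Gamma(a+b)
	return fromLnBeta, fromGamma // expect agreement for, e.g., a = 2.5, b = 3.5
}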
func gammaIncQ(a, x float64) float64 {
	aa1 := a - 1
	var f ifctn = func(t float64) float64 {
		return math.Pow(t, aa1) * math.Exp(-t)
	}
	y := aa1
	h := 1.5e-2
	for f(y)*(x-y) > 2e-8 && y < x {
		y += .4
	}
	if y > x {
		y = x
	}
	return 1 - simpson38(f, 0, y, int(y/h/math.Gamma(a)))
}
// this was used in my dissertation to generate equi-probable sample points
// for my disruption probability distribution.
func testSamplePoints(t *testing.T) {
	fn := func(x float64) float64 {
		k, theta, a := 1.5, 2.0, 1.0/600
		return a / (math.Gamma(k) * math.Pow(theta, k)) * math.Sqrt(x*a) * math.Exp(-x*a/2)
	}

	x1, x2 := 0.0, 2400.0
	xs := sampleUniformProb(fn, x1, x2, 10, 10000)
	fmt.Println(xs)
	fmt.Printf("x1-x0 = %v\n", xs[0])
	for i, x := range xs[:len(xs)-1] {
		fmt.Printf("x%v-x%v = %v\n", i+2, i+1, xs[i+1]-x)
	}
}
func (self *JSDivFingerprint) calcSignificance(other *JSDivFingerprint) float64 {
	p := self.histogram
	q := other.histogram // compare against the other fingerprint, not self
	n := len(p)
	m := make([]float64, n)
	for i := range p {
		m[i] = 0.5 * (p[i] + q[i])
	}
	v := 0.5 * float64(n-1)
	D := calcS(m) - (0.5*calcS(p) + 0.5*calcS(q))
	inc := apporxIncompleteGamma(v, float64(n)*ln2*D)
	gamma := math.Gamma(v)
	return inc / gamma
}
func LowerGamma(a, x float64) (z float64) {
	// x**a Γ(a) e**-x Σ{k=0..∞} x**k/Γ(a+k+1)
	const ε = 1e-20
	if x == 0 {
		return 0 // γ(a, x) is an integral from 0 to x
	}
	d := math.Gamma(a)
	m := math.Pow(x, a) * d * math.Exp(-x)
	if m == 0 { // overflow
		return d // lim{x→∞} γ(a, x) = Γ(a)
	}
	s := 1 / (d * a) // x**0 / Γ(a+0+1)
	z = s
	for k := a + 1.0; s > ε; k += 1.0 {
		s *= x / k
		z += s
	}
	return m * z
}
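// Hedged spot check (assumes LowerGamma above is in scope): for a = 1 the
// lower incomplete gamma function has the closed form γ(1, x) = 1 − e^(−x),
// which the series implementation should reproduce closely.
func lowerGammaSpotCheck(x float64) (approx, exact float64) {
	approx = LowerGamma(1, x)
	exact = 1 - math.Exp(-x)
	return approx, exact
}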
// SurvivalParity applies Survival to the parity of the random values.
func SurvivalParity(r rand.Source) float64 {
	consec := 0
	var parity int64 = -1 // prevent the first iteration from matching
	counts := map[int]int{0: -1}
	for i := 0; i < survivalParityN; i++ {
		// software parity because I do not enjoy setting up asm to be used
		// http://www-graphics.stanford.edu/~seander/bithacks.html#ParityMultiply
		x := r.Int63()
		x ^= x >> 1
		x ^= x >> 2
		x = (x & 0x1111111111111111) * 0x1111111111111111
		x = x >> 60 & 1
		// parity complete

		if x == parity {
			consec++
		} else {
			parity = x
			counts[consec] = counts[consec] + 1
			consec = 0
		}
	}

	// copypasta
	var maximum int
	for i := range counts {
		if i > maximum {
			maximum = i
		}
	}

	// If the source is truly random, then there should be half as many hits
	// for counts[n] as there were for counts[n-1], with counts[0] being the
	// maximum.
	//TODO: E should be calculated from the median. This is causing crc64-ecma to NaN.
	E := float64(counts[0])
	var chi2 float64
	for i := 1; i < maximum; i++ {
		E /= 2
		d := float64(counts[i]) - E
		chi2 += d * d / E
	}

	k_2 := float64(maximum-2) / 2
	return 1 - LowerGamma(k_2, chi2/2)/math.Gamma(k_2)
}
// LaggedSurvival applies the survival test to an RNG, skipping N iterates
// between each sample.
func LaggedSurvival(r rand.Source, N int) float64 {
	// hax teh copypasta
	//TODO: prevent first iteration from matching
	consec := [63]int{}
	bits := [63]bool{}
	counts := make(map[int]int)
	for i := 0; i < laggedSurvN; i++ {
		x := r.Int63()
		for b := 0; b < 63; b++ {
			if ((x & (1 << uint(b))) != 0) == bits[b] {
				// bit survived
				consec[b]++
			} else {
				// bit changed
				bits[b] = !bits[b]
				counts[consec[b]] = counts[consec[b]] + 1
				consec[b] = 0
			}
		}
		for n := 0; n < N; n++ {
			r.Int63()
		}
	}

	maximum := len(counts)
	for i := range counts {
		if i > maximum {
			maximum = i
		}
	}

	// If the source is truly random, then there should be half as many hits
	// for counts[n] as there were for counts[n-1], with counts[0] being the
	// maximum.
	//TODO: E should be calculated from the median
	E := float64(counts[0])
	var chi2 float64
	for i := 1; i < maximum; i++ {
		E /= 2
		d := float64(counts[i]) - E
		chi2 += d * d / E
	}

	k_2 := float64(maximum-2) / 2
	return 1 - LowerGamma(k_2, chi2/2)/math.Gamma(k_2)
}
func (self *JSDivFingerprint) calcSignificance(other *JSDivFingerprint) float64 {
	p := self.histogram
	q := other.histogram
	m := make(histogram, len(p)+len(q))
	min := self.minIndex
	max := self.maxIndex
	for i := range p {
		if i < min {
			min = i
		}
		if i > max {
			max = i
		}
		m[i] = 0.5 * p[i]
	}
	for i := range q {
		if i < min {
			min = i
		}
		if i > max {
			max = i
		}
		m[i] += 0.5 * q[i]
	}
	k := max - min
	v := 0.5 * float64(k-1)
	D := calcS(m) - (0.5*calcS(p) + 0.5*calcS(q))
	inc := apporxIncompleteGamma(v, float64(self.count+other.count)*ln2*D)
	gamma := math.Gamma(v)
	return inc / gamma
}
func Test_frechet_03(tst *testing.T) {

	//verbose()
	chk.PrintTitle("dist_frechet_03")

	μ := 10.0
	σ := 5.0
	δ := σ / μ
	d := 1.0 + δ*δ
	io.Pforan("μ=%v σ=%v δ=%v d=%v\n", μ, σ, δ, d)
	if chk.Verbose {
		plt.AxHline(d, "color='k'")
		FrechetPlotCoef("/tmp/gosl", "fig_frechet_coef.eps", 3.0, 5.0)
	}

	k := 0.2441618
	α := 1.0 / k
	l := μ - math.Gamma(1.0-k)
	io.Pfpink("l=%v α=%v\n", l, α)

	l = 8.782275
	α = 4.095645

	var dist DistFrechet
	dist.Init(&VarData{L: l, A: α})
	io.Pforan("dist = %+#v\n", dist)
	io.Pforan("mean = %v\n", dist.Mean())
	io.Pforan("var = %v\n", dist.Variance())
	io.Pforan("σ = %v\n", math.Sqrt(dist.Variance()))

	if chk.Verbose {
		plot_frechet(l, 1, α, 8, 16)
		plt.SaveD("/tmp/gosl", "rnd_dist_frechet_03.eps")
	}
}
// Variance returns the variance
func (o DistFrechet) Variance() float64 {
	if o.A > 2.0 {
		return o.C * o.C * (math.Gamma(1.0-2.0/o.A) - math.Pow(math.Gamma(1.0-1.0/o.A), 2.0))
	}
	return math.Inf(1)
}
// Gamma is a float32 version of math.Gamma.
func Gamma(x float32) float32 {
	return float32(math.Gamma(float64(x)))
}
// gammaIPow is a shortcut for raising math.Gamma(1+i/w.K) to the given power.
func (w Weibull) gammaIPow(i, pow float64) float64 {
	return math.Pow(math.Gamma(1+i/w.K), pow)
}
func (pdf *PdfZ) ScaledValue(x, alpha, beta float64) (float64, error) {
	if closeTo(alpha, 2, pdf.alpha_tol) {
		// Gaussian case, for appropriately normalised Levy distribution
		return math.Exp(-0.25*x*x) / math.Sqrt(4.0*math.Pi), nil
	} else if closeTo(alpha, 1, pdf.alpha_tol) && !closeTo(beta, 0, pdf.beta_tol) {
		// This tends to suffer from small oscillations in the integrated distribution.
		// Need further integration to sort this out.
		gamma := math.Exp(-0.5 * math.Pi * x / beta)
		a := -0.5 * math.Pi
		b := 0.5 * math.Pi
		p, err := pdf.integrate(
			func(theta float64) float64 { return componentEq1(theta, beta)*gamma - 1.0 },
			func(theta float64) float64 { return integrandEq1(theta, beta, gamma) },
			a, b)
		p *= 0.5 * gamma / math.Abs(beta)
		return p, err
	} else if closeTo(alpha, 1, pdf.alpha_tol) && closeTo(beta, 0, pdf.beta_tol) {
		// Cauchy distribution
		return 1.0 / ((1.0 + x*x) * math.Pi), nil
	} else if !closeTo(alpha, 1, pdf.alpha_tol) {
		zeta := -beta * math.Tan(0.5*math.Pi*alpha)
		if x == zeta {
			eps := math.Atan(-zeta) / alpha
			p := math.Gamma(1+1/alpha) * math.Cos(eps) / (math.Pi * math.Pow(1+zeta*zeta, 0.5/alpha))
			return p, nil
		} else if x > zeta {
			eps := math.Atan(-zeta) / alpha
			gamma := math.Pow(x-zeta, alpha/(alpha-1.0))
			a := -eps
			b := 0.5 * math.Pi
			p, err := pdf.integrate(
				func(theta float64) float64 { return componentNeq1(theta, alpha, beta, eps)*gamma - 1.0 },
				func(theta float64) float64 { return integrandNeq1(theta, alpha, beta, eps, gamma) },
				a, b)
			p *= alpha * math.Pow(x-zeta, 1.0/(alpha-1.0)) / (math.Pi * math.Abs(alpha-1))
			return p, err
		} else if x < zeta {
			return pdf.ScaledValue(-x, alpha, -beta)
		}
	}
	return 0, nil
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (w Weibull) ExKurtosis() float64 {
	num := -6*w.gammaIPow(1, 4) +
		12*w.gammaIPow(1, 2)*math.Gamma(1+2/w.K) -
		3*w.gammaIPow(2, 2) -
		4*math.Gamma(1+1/w.K)*math.Gamma(1+3/w.K) +
		math.Gamma(1+4/w.K)
	den := math.Pow(math.Gamma(1+2/w.K)-w.gammaIPow(1, 2), 2)
	return num / den
}
// Variance returns the variance of the probability distribution.
func (w Weibull) Variance() float64 {
	return math.Pow(w.Lambda, 2) * (math.Gamma(1+2/w.K) - w.gammaIPow(1, 2))
}
// Mean returns the mean of the probability distribution.
func (w Weibull) Mean() float64 {
	return w.Lambda * math.Gamma(1+1/w.K)
}
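// Hedged usage sketch (assumes the Weibull type above exposes the K and Lambda
// fields its methods reference): with K = 1 the Weibull reduces to an
// exponential distribution, so the gamma-based formulas should give mean
// Lambda, variance Lambda², and excess kurtosis 6.
func weibullMomentsExample() (mean, variance, exKurtosis float64) {
	w := Weibull{K: 1.0, Lambda: 2.0}
	return w.Mean(), w.Variance(), w.ExKurtosis()
}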
func main() { fmt.Println(" x math.Gamma Lanczos7") for _, x := range []float64{-.5, .1, .5, 1, 1.5, 2, 3, 10, 140, 170} { fmt.Printf("%5.1f %24.16g %24.16g\n", x, math.Gamma(x), lanczos7(x)) } }
func nchoosek(n int, k int) float64 {
	return math.Gamma(float64(n+1)) / (math.Gamma(float64(k+1)) * math.Gamma(float64(n-k+1)))
}
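// Hedged alternative (not part of the original snippet): math.Gamma overflows
// to +Inf once its argument exceeds roughly 171, so for large n a log-gamma
// formulation of the binomial coefficient is safer.
func nchoosekLog(n, k int) float64 {
	a, _ := math.Lgamma(float64(n + 1))
	b, _ := math.Lgamma(float64(k + 1))
	c, _ := math.Lgamma(float64(n - k + 1))
	return math.Exp(a - b - c)
}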