// NormPostSimNoPrior returns a simulated sample from the joint posterior distribution of the mean
// and variance for a normal sampling model with a noninformative prior.
func NormPostSimNoPrior(data []float64, m int) (postMu, postS2 []float64) {
	// Arguments:
	// data - vector of observations
	// m - number of simulations desired
	// Returns:
	// postMu - vector of simulated draws of the normal mean
	// postS2 - vector of simulated draws of the normal variance
	xbar := mean(data)
	n := len(data)
	diff2 := make([]float64, n)
	for i, val := range data {
		diff2[i] = (val - xbar) * (val - xbar)
	}
	s := sum(diff2)
	postS2 = make([]float64, m)
	postMu = make([]float64, m)
	for i := range postMu {
		postS2[i] = s / dst.ChiSquareNext(int64(n)-1)
		sd := sqrt(postS2[i]) / sqrt(float64(n))
		postMu[i] = dst.NormalNext(xbar, sd)
	}
	return
}
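// Usage sketch (illustrative, not part of the original source): drawing 10,000
// posterior samples for a small made-up data set and summarising them with the
// package's mean helper; assumes fmt is available in the calling file.
//
//	data := []float64{9.2, 10.1, 11.4, 8.7, 10.9, 10.3}
//	postMu, postS2 := NormPostSimNoPrior(data, 10000)
//	fmt.Println("posterior mean of mu:     ", mean(postMu))
//	fmt.Println("posterior mean of sigma2: ", mean(postS2))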
// Gibbs implements a Metropolis-within-Gibbs sampling algorithm for a posterior distribution.
func Gibbs(logpost func([]float64) float64, start []float64, m int, scale []float64) (vth [][]float64, arate []float64) {
	// Arguments:
	// logpost - function defining the log posterior density
	// start - vector giving the starting value of the parameter vector
	// m - the number of iterations of the chain
	// scale - vector of scale parameters for the random walk Metropolis steps
	// Returns:
	// vth - matrix of simulated values where each row corresponds to a value of the parameter vector
	// arate - vector of acceptance rates of the Metropolis steps of the algorithm
	p := len(start)

	// allocate the m x p output matrix over a single backing slice
	nCol := p
	s := make([]float64, m*nCol)
	vth = make([][]float64, m)
	for i, off := 0, 0; i < m; i++ {
		vth[i] = s[off : off+nCol]
		off += nCol
	}

	f0 := logpost(start)
	arate = make([]float64, p)
	th0 := make([]float64, p)
	copy(th0, start)

	for i := 0; i < m; i++ {
		for j := 0; j < p; j++ {
			// propose a random walk step for component j only
			th1 := make([]float64, p)
			copy(th1, th0)
			th1[j] = th0[j] + dst.NormalNext(0, 1)*scale[j]
			f1 := logpost(th1)
			// accept with probability min(1, exp(f1-f0))
			if rand.Float64() < exp(f1-f0) {
				th0[j] = th1[j]
				f0 = f1
				arate[j]++
			}
			vth[i][j] = th0[j]
		}
	}
	for i := range arate {
		arate[i] /= float64(m)
	}
	return
}
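// Usage sketch (illustrative, not part of the original source): running Gibbs on a
// simple two-parameter target, here an independent standard-normal log density,
// starting from (2, 2) with unit Metropolis scales; any real log posterior can be
// substituted for logpost.
//
//	logpost := func(th []float64) float64 {
//		return -0.5 * (th[0]*th[0] + th[1]*th[1])
//	}
//	par, accept := Gibbs(logpost, []float64{2, 2}, 10000, []float64{1, 1})
//	fmt.Println("last draw:        ", par[len(par)-1])
//	fmt.Println("acceptance rates: ", accept)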
// NormPostSim returns a simulated sample from the joint posterior distribution of the mean and
// variance for a normal sampling model with an informative prior. The prior assumes mu and sigma2
// are independent, with mu assigned a normal prior with mean mu0 and variance tau2, and sigma2
// assigned an inverse gamma prior with parameters a and b. The sample is produced by Gibbs sampling.
func NormPostSim(data []float64, a, b, mu0, tau2 float64, m int) (postMu, postS2 []float64) {
	// Arguments:
	// data - vector of observations
	// a, b - parameters of the inverse gamma prior on sigma2
	// mu0, tau2 - mean and variance of the normal prior on mu
	// m - number of simulations desired
	// Returns:
	// postMu - vector of simulated draws of the normal mean
	// postS2 - vector of simulated draws of the normal variance
	xbar := mean(data)
	n := len(data)
	diff2 := make([]float64, n)
	for i, val := range data {
		diff2[i] = (val - xbar) * (val - xbar)
	}
	s := sum(diff2)
	postS2 = make([]float64, m)
	postMu = make([]float64, m)
	sigma2 := s / float64(n)
	for j := 0; j < m; j++ {
		// draw mu from its full conditional given the current sigma2
		prec := float64(n)/sigma2 + 1/tau2
		mu1 := (xbar*float64(n)/sigma2 + mu0/tau2) / prec
		v1 := 1 / prec
		mu := dst.NormalNext(mu1, sqrt(v1))
		// draw sigma2 from its full conditional given the current mu
		a1 := a + float64(n)/2
		d2 := make([]float64, n)
		for i, val := range data {
			d2[i] = (val - mu) * (val - mu)
		}
		b1 := b + sum(d2)/2
		sigma2 = rigamma(a1, b1) // assign (not shadow) sigma2 so the chain actually advances
		postS2[j] = sigma2
		postMu[j] = mu
	}
	return
}
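// Usage sketch (illustrative, not part of the original source): an informative-prior
// analysis with NormPostSim; the prior settings below are arbitrary example values.
//
//	data := []float64{9.2, 10.1, 11.4, 8.7, 10.9, 10.3}
//	a, b := 0.5, 1.0       // inverse gamma prior parameters for sigma2
//	mu0, tau2 := 10.0, 4.0 // normal prior mean and variance for mu
//	postMu, postS2 := NormPostSim(data, a, b, mu0, tau2, 10000)
//	fmt.Println("posterior means:", mean(postMu), mean(postS2))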
// Test against R:moments
func TestSampleSkewness(t *testing.T) {
	fmt.Println("Testing Skewness")
	m := 10000000
	mu := 0.0
	sd := 1.0
	d := make([]float64, m)
	for i := range d {
		d[i] = dst.NormalNext(mu, sd)
	}
	x := Skew(d)
	y := 0.0 // skewness of a normal distribution
	if abs(x-y) > 1e-3 {
		fmt.Println("failed: x, y ", x, y)
		t.Error()
	}
}
func TestSampleGeary(t *testing.T) {
	fmt.Println("Testing Geary kurtosis")
	m := 1000000
	mu := 0.0
	sd := 1.0
	d := make([]float64, m)
	for i := range d {
		d[i] = dst.NormalNext(mu, sd)
	}
	x := Geary(d)
	y := 0.7979113 // Geary kurtosis of a normal distribution
	if abs(x-y) > 1e-2 {
		fmt.Println("failed: x, y ", x, y)
		t.Error()
	}
}
func TestSampleMoments(t *testing.T) {
	fmt.Println("Testing Moments")
	m := 10000000
	mu := 0.0
	sd := 1.0
	d := make([]float64, m)
	for i := range d {
		d[i] = dst.NormalNext(mu, sd)
	}
	order := 4
	central := false
	absolute := false
	x := moment(d, order, central, absolute)
	y := 3.0 // fourth moment of a standard normal distribution
	if abs(x-y) > 1e-2 {
		fmt.Println("failed: x, y ", x, y)
		t.Error()
	}
}
// NormPostNoPriorNext returns a single draw from the joint posterior distribution of the mean and
// variance for a normal sampling model with a noninformative prior.
func NormPostNoPriorNext(data []float64) (postMu, postS2 float64) {
	// Arguments:
	// data - vector of observations
	// Returns:
	// postMu - simulated draw of the normal mean
	// postS2 - simulated draw of the normal variance
	xbar := mean(data)
	n := len(data)
	diff2 := make([]float64, n)
	for i, val := range data {
		diff2[i] = (val - xbar) * (val - xbar)
	}
	s := sum(diff2)
	postS2 = s / dst.ChiSquareNext(int64(n)-1)
	sd := sqrt(postS2) / sqrt(float64(n))
	postMu = dst.NormalNext(xbar, sd)
	return
}
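// Usage sketch (illustrative, not part of the original source): drawing a handful of
// independent posterior samples one at a time, which is equivalent to a short run of
// NormPostSimNoPrior.
//
//	data := []float64{9.2, 10.1, 11.4, 8.7, 10.9, 10.3}
//	for i := 0; i < 5; i++ {
//		mu, s2 := NormPostNoPriorNext(data)
//		fmt.Println(mu, s2)
//	}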
// NormPostInfPriorNext returns a single draw from the joint posterior distribution of the mean and
// variance for a normal sampling model with an informative prior. The prior assumes mu and sigma2
// are independent, with mu assigned a normal prior with mean mu0 and variance tau2, and sigma2
// assigned an inverse gamma prior with parameters a and b.
func NormPostInfPriorNext(data []float64, a, b, mu0, tau2 float64) (postMu, postS2 float64) {
	// Arguments:
	// data - vector of observations
	// a, b - parameters of the inverse gamma prior on sigma2
	// mu0, tau2 - mean and variance of the normal prior on mu
	// Returns:
	// postMu - simulated draw of the normal mean
	// postS2 - simulated draw of the normal variance
	xbar := mean(data)
	n := len(data)
	diff2 := make([]float64, n)
	for i, val := range data {
		diff2[i] = (val - xbar) * (val - xbar)
	}
	s := sum(diff2)
	// initialize sigma2 at the sample variance, then take one sweep of the Gibbs conditionals
	postS2 = s / float64(n)
	prec := float64(n)/postS2 + 1/tau2
	mu1 := (xbar*float64(n)/postS2 + mu0/tau2) / prec
	v1 := 1 / prec
	postMu = dst.NormalNext(mu1, sqrt(v1))
	a1 := a + float64(n)/2
	d2 := make([]float64, n)
	for i, val := range data {
		d2[i] = (val - postMu) * (val - postMu)
	}
	b1 := b + sum(d2)/2
	postS2 = rigamma(a1, b1)
	return
}
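// Usage sketch (illustrative, not part of the original source): a single posterior
// draw under an informative prior; the prior values (a, b, mu0, tau2) are arbitrary
// example settings.
//
//	data := []float64{9.2, 10.1, 11.4, 8.7, 10.9, 10.3}
//	mu, s2 := NormPostInfPriorNext(data, 0.5, 1.0, 10.0, 4.0)
//	fmt.Println("mu:", mu, "sigma2:", s2)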
// Test against R:LearnBayes:bfindep()
func TestFactCTableIndep(t *testing.T) {
	y := [][]float64{
		{1, 10},
		{5, 20},
	}
	k := 20.0
	m := 1000000

	fmt.Println("Testing ldirichlet")
	c5 := ldirichlet(y)
	fmt.Println("ldirichlet: ", c5)
	fmt.Println("should be: -2.302585 -12.266791")

	fmt.Println("Testing meanSd")
	nrm := make([]float64, m)
	for i := range nrm {
		nrm[i] = dst.NormalNext(5, 0.6)
	}
	mu, sd := meanSd(nrm)
	fmt.Println("mu, sd :", mu, sd)

	fmt.Println("Testing cols2vec")
	fmt.Println(cols2vec(y))
	fmt.Println("should be: 1 5 10 20")

	fmt.Println("Testing FactCTableIndep #1")
	c1, c2 := FactCTableIndep(y, k, m)
	c3, c4 := 0.6768298, 0.002508772
	if !check(c1, c3) || !check(c2, c4) {
		t.Error()
		fmt.Println("FactCTableIndep: bf  = ", c1, "should be ", c3, "+/- 0.015")
		fmt.Println("                 nse = ", c2, "should be ", c4, "+/- 0.003")
	}
}