Example #1
// NormMeanTestOneSided performs a Bayesian test of the hypothesis that a normal mean is less than or
// equal to a specified value, assuming a normal prior and a known population standard deviation.
func NormMeanTestOneSided(m0, priMean, priSD, smpMean float64, smpSize int, popSd float64) (bf, priOdds, postOdds, postH float64) {
	//
	// Arguments
	// m0 - value of the normal mean to be tested
	// priMean - mean of the normal prior distribution
	// priSD -  standard deviation of the normal prior distribution
	// smpMean - sample mean
	// smpSize - sample size
	// popSd - known value of the population standard deviation
	//
	// Returns
	// bf Bayes factor in support of the null hypothesis
	// priOdds prior odds of the null hypothesis
	// postOdds posterior odds of the null hypothesis
	// postH posterior probability of the null hypothesis
	//
	n := float64(smpSize)
	priVar := priSD * priSD
	priH := dst.NormalCDFAt(priMean, priSD, m0)
	priA := 1 - priH
	priOdds = priH / priA

	popVar := popSd * popSd
	postPrecision := 1/priVar + n/popVar
	postVar := 1 / postPrecision
	postSd := sqrt(postVar)
	postMean := (smpMean*n/popVar + priMean/priVar) / postPrecision
	postH = dst.NormalCDFAt(postMean, postSd, m0)
	postA := 1 - postH
	postOdds = postH / postA
	bf = postOdds / priOdds
	return
}
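A minimal usage sketch for the test above. The import path below is hypothetical (the package holding NormMeanTestOneSided is simply assumed to be named stat), and the input numbers are illustrative only, not taken from the source.

package main

import (
	"fmt"

	"example.org/stat" // hypothetical import path for the package shown above
)

func main() {
	// Test H0: mu <= 170 against a N(175, 3^2) prior, given a sample mean of 176
	// from 10 observations and a known population standard deviation of 4.
	bf, priOdds, postOdds, postH := stat.NormMeanTestOneSided(170, 175, 3, 176, 10, 4)
	fmt.Printf("BF = %.4f  prior odds = %.4f  posterior odds = %.4f  P(H0|data) = %.4f\n",
		bf, priOdds, postOdds, postH)
}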
Example #2
// Anscombe performs the Anscombe-Glynn test of kurtosis for a normally distributed data vector.
func Anscombe(x []float64, alternative int) (kurt, z, pVal float64) {
	// Arguments:
	// x - vector of observations
	// alternative - 0 = "twoSided", 1 = "less", 2 = "greater"
	//
	// Details:
	// Under the null hypothesis of normality, the data have kurtosis equal to 3. The test detects a
	// significant departure of the sample kurtosis from this value.
	//
	// Returns:
	// kurt - kurtosis estimator
	// z - its transformation
	// pVal - the p-value for the test.

	const (
		twoSided = iota
		less
		greater
	)

	sort.Float64s(x)
	n := float64(len(x))
	dm := diffMean(x)
	d4 := make([]float64, len(dm))
	for i, val := range dm {
		d4[i] = val * val * val * val
	}

	d2 := make([]float64, len(dm))
	for i, val := range dm {
		d2[i] = val * val
	}

	//b <- n*sum( (x-mean(x))^4 )/(sum( (x-mean(x))^2 )^2);
	sum2 := sum(d2)
	kurt = n * sum(d4) / (sum2 * sum2)

	eb2 := 3 * (n - 1) / (n + 1)
	vb2 := 24 * n * (n - 2) * (n - 3) / ((n + 1) * (n + 1) * (n + 3) * (n + 5))
	m3 := (6 * (n*n - 5*n + 2) / ((n + 7) * (n + 9))) * sqrt((6*(n+3)*(n+5))/(n*(n-2)*(n-3)))
	a := 6 + (8/m3)*(2/m3+sqrt(1+4/(m3*m3)))
	xx := (kurt - eb2) / sqrt(vb2)
	z0 := (1 - 2/a) / (1 + xx*sqrt(2/(a-4)))
	z = (1 - 2/(9*a) - pow(z0, 1.0/3.0)) / sqrt(2/(9*a))
	pVal = 1 - dst.NormalCDFAt(0, 1, z) // upper-tail probability P(Z >= z)

	switch alternative {
	case twoSided:
		pVal = 2 * pVal
		if pVal > 1 {
			pVal = 2 - pVal
		}
	case less:
		pVal = 1 - pVal // lower-tail alternative: P(Z <= z)
	case greater: // pVal already holds the upper-tail probability
	}

	return kurt, z, pVal
}
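The normality tests in Examples #2-#4 call unexported helpers (sum, diffMean) and lower-case aliases of math functions (sqrt, pow, log, abs) that are not shown in these examples, and they rely on dst.NormalCDFAt(mean, sd, x) returning the normal CDF evaluated at x. Below is a minimal sketch of what the missing helpers are assumed to look like; the actual library may define them differently.

package stat // hypothetical package name

import "math"

// Lower-case aliases for the math functions used in the examples.
var (
	sqrt = math.Sqrt
	pow  = math.Pow
	log  = math.Log
	abs  = math.Abs
)

// sum returns the sum of the elements of x.
func sum(x []float64) float64 {
	s := 0.0
	for _, v := range x {
		s += v
	}
	return s
}

// diffMean returns the deviations of x from its mean, x[i] - mean(x).
func diffMean(x []float64) []float64 {
	m := sum(x) / float64(len(x))
	d := make([]float64, len(x))
	for i, v := range x {
		d[i] = v - m
	}
	return d
}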
Example #3
// Agostino performs the D’Agostino test of skewness for a normally distributed data vector.
func Agostino(x []float64, alternative int) (skew, z, pVal float64) {
	// Arguments:
	// x - vector of observations
	// alternative - 0 = "twoSided", 1 = "less", 2 = "greater"
	//
	// Details:
	// Under the null hypothesis of normality, the data are symmetric, i.e. the skewness equals zero.
	// The test detects significant skewness in normally distributed data.
	//
	// Returns:
	// skew - skewness estimator
	// z - its transformation
	// pVal - the p-value for the test.

	const (
		twoSided = iota
		less
		greater
	)

	sort.Float64s(x)
	n := float64(len(x))
	dm := diffMean(x)
	d3 := make([]float64, len(dm))
	for i, val := range dm {
		d3[i] = val * val * val
	}

	d2 := make([]float64, len(dm))
	for i, val := range dm {
		d2[i] = val * val
	}

	//skew <- (sum((x-mean(x))^3)/n)/(sum((x-mean(x))^2)/n)^(3/2)

	skew = (sum(d3) / n) / pow((sum(d2)/n), 1.5)
	y := skew * sqrt((n+1)*(n+3)/(6*(n-2)))
	b2 := 3 * (n*n + 27*n - 70) * (n + 1) * (n + 3) / ((n - 2) * (n + 5) * (n + 7) * (n + 9))
	w := sqrt(-1 + sqrt(2*(b2-1)))
	d := 1 / sqrt(log(w)) // natural logarithm: the D’Agostino transformation uses ln, not log10
	a := sqrt(2 / (w*w - 1))
	z = d * log(y/a+sqrt((y/a)*(y/a)+1))
	pVal = 1 - dst.NormalCDFAt(0, 1, z) // upper-tail probability P(Z >= z)

	switch alternative {
	case twoSided:
		pVal = 2 * pVal
		if pVal > 1 {
			pVal = 2 - pVal
		}
	case less:
		pVal = 1 - pVal // lower-tail alternative: P(Z <= z)
	case greater: // pVal already holds the upper-tail probability
	}
	return skew, z, pVal
}
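A usage sketch for the skewness test, under the same assumptions as above (hypothetical import path, illustrative data). Note that Agostino sorts the input slice in place, so pass a copy if the original order matters.

package main

import (
	"fmt"

	"example.org/stat" // hypothetical import path
)

func main() {
	x := []float64{2.1, 3.9, 1.8, 4.4, 2.6, 7.2, 3.1, 2.4, 5.0, 2.2, 3.3, 6.1}
	skew, z, p := stat.Agostino(x, 0) // 0 = two-sided alternative
	fmt.Printf("skewness = %.4f  z = %.4f  p = %.4f\n", skew, z, p)
}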
Example #4
// Bonett performs the Bonett-Seier test of Geary’s measure of kurtosis for a normally distributed data vector.
func Bonett(x []float64, alternative int) (kurt, z, pVal float64) {
	// Arguments:
	// x - vector of observations
	// alternative - 0 = "twoSided", 1 = "less", 2 = "greater"
	//
	// Details:
	// Under the null hypothesis of normality, the data have Geary’s kurtosis equal to sqrt(2/pi)
	// (approximately 0.7979). The test detects a significant departure of the sample value from
	// this constant.
	//
	// Returns:
	// kurt - kurtosis estimator
	// z - its transformation
	// pVal - the p-value for the test.

	const (
		twoSided = iota
		less
		greater
	)

	sort.Float64s(x)
	n := float64(len(x))
	dm := diffMean(x)

	d2 := make([]float64, len(dm))
	for i, val := range dm {
		d2[i] = val * val
	}

	adm := make([]float64, len(dm))
	for i, val := range dm {
		adm[i] = abs(val)
	}

	rho := sqrt(sum(d2) / n)
	kurt = sum(adm) / n
	omega := 13.29 * (log(rho) - log(kurt))
	z = sqrt(n+2) * (omega - 3) / 3.54
	pVal = 1 - dst.NormalCDFAt(0, 1, z) // upper-tail probability P(Z >= z)

	switch alternative {
	case twoSided:
		pVal = 2 * pVal
		if pVal > 1 {
			pVal = 2 - pVal
		}
	case less:
		pVal = 1 - pVal // lower-tail alternative: P(Z <= z)
	case greater: // pVal already holds the upper-tail probability
	}

	return kurt, z, pVal
}
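Finally, a sketch that runs both kurtosis tests on the same data, again with a hypothetical import path and illustrative numbers; copies are passed because each call sorts its argument in place.

package main

import (
	"fmt"

	"example.org/stat" // hypothetical import path
)

func main() {
	x := []float64{1.2, 2.3, 2.9, 3.1, 3.4, 3.6, 4.0, 4.3, 5.1, 6.8, 7.5, 9.9}

	xa := append([]float64(nil), x...) // copy: Anscombe sorts its input
	kurtA, zA, pA := stat.Anscombe(xa, 0)

	xb := append([]float64(nil), x...) // copy: Bonett sorts its input
	kurtB, zB, pB := stat.Bonett(xb, 0)

	fmt.Printf("Anscombe-Glynn: kurtosis = %.4f  z = %.4f  p = %.4f\n", kurtA, zA, pA)
	fmt.Printf("Bonett-Seier:   Geary's kurtosis = %.4f  z = %.4f  p = %.4f\n", kurtB, zB, pB)
}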