Beispiel #1
0
// Classify returns the most probable class label for doc under the trained
// multinomial naive Bayes model. It returns NoClassificationError when the
// model holds no class priors to score against.
func (nba *Multinomial) Classify(doc string) (string, error) {
	const unscored = -math.MaxFloat64

	winner := ""
	winnerScore := unscored

	for label, prior := range nba.classPriors {
		// Log-likelihood of the document given this class, with add-one
		// (Laplace) smoothing on the per-class word counts.
		var logLikelihood float64
		for _, w := range words(doc) {
			smoothed := float64(nba.wordCount[label][w]+1) / float64(nba.classSize[label]+nba.vocabularySize)
			logLikelihood += math.Log(smoothed)
		}

		// Bayes theorem: P(c|e) = (P(e|c)P(c)) / P(e); the evidence P(e)
		// is constant across classes and can be dropped.
		score := logLikelihood + math.Log(prior)
		if score > winnerScore {
			winnerScore, winner = score, label
		}
	}

	if winnerScore == unscored {
		return "", NoClassificationError
	}
	return winner, nil
}
Beispiel #2
0
// Generate a Mel Scale for sampling frequency _sampfreq_ and return a
// normalized vector of length _vectorlength_ containing equally
// spaced points between 0 and (sampfreq/2)
func MelScaleVector(sampfreq int, vectorlength int) []float64 {
	var i int
	var step float64
	var melscalevector []float64

	step = (float64(sampfreq) / 2.0) / float64(vectorlength)

	melscalevector = make([]float64, vectorlength, vectorlength)

	for i = 0; i < vectorlength; i++ {
		var melscale float64

		// Equations taken from Wikipedia
		f := step * float64(i)
		melscale = (1000.0 / math.Log(2)) * math.Log(1.0+(f/1000.0))

		melscalevector[i] = melscale
	}

	// Normalize the Vector. Values are already positive, and
	// monotonically increasing, so divide by max
	max := melscalevector[vectorlength-1]
	for i = 0; i < vectorlength; i++ {
		melscalevector[i] /= max
	}

	return melscalevector
}
Beispiel #3
0
// Classify scores point against every trained class model and returns the
// label with the highest posterior log-probability. It returns
// WrongDimensionError when point does not match the model dimensionality
// and NoClassificationError when no class could be scored.
func (nba *Gaussian) Classify(point Point) (string, error) {
	if len(point) != nba.dimensionality {
		return "", WrongDimensionError
	}

	const unscored = -math.MaxFloat64
	winner, winnerScore := "", unscored

	for label, prior := range nba.classPriors {
		// Log-likelihood of the point conditioned on this class.
		var logLikelihood float64
		for dim, dist := range nba.classModel[label] {
			logLikelihood += math.Log(dist.Likelihood(point[dim]))
		}

		// Bayes theorem: P(c|e) = (P(e|c)P(c)) / P(e); the evidence
		// P(e) is constant across classes and is dropped.
		score := logLikelihood + math.Log(prior)
		if score > winnerScore {
			winner, winnerScore = label, score
		}
	}

	if winnerScore == unscored {
		return "", NoClassificationError
	}
	return winner, nil
}
Beispiel #4
0
// estimates64 returns the optimal Bloom-filter parameters for n expected
// items and target false-positive rate p: the bit-array size m and the
// hash-function count k, using the standard formulas
//   m = -n*ln(p) / (ln 2)^2,  k = ceil(ln 2 * m / n).
func estimates64(n uint64, p float64) (uint64, uint64) {
	items := float64(n)
	ln2 := math.Log(2)
	bits := -1 * items * math.Log(p) / math.Pow(ln2, 2)
	hashes := math.Ceil(ln2 * bits / items)
	return uint64(bits), uint64(hashes)
}
Beispiel #5
0
// LogarithmicRegression fits y = b + a*ln(x) to the series by least squares
// and returns the fitted value at every X of the input. An error is
// returned when the input series is empty.
func LogarithmicRegression(s Series) (regressions Series, err error) {
	if len(s) == 0 {
		return nil, errors.New("Input must not be empty")
	}

	// Sufficient statistics of the fit:
	// Σ ln x, Σ y·ln x, Σ y, Σ (ln x)².
	var sumLogX, sumYLogX, sumY, sumLogX2 float64
	for _, c := range s {
		lx := math.Log(c.X)
		sumLogX += lx
		sumYLogX += c.Y * lx
		sumY += c.Y
		sumLogX2 += lx * lx
	}

	// Closed-form least-squares solution for slope a and intercept b.
	n := float64(len(s))
	a := (n*sumYLogX - sumY*sumLogX) / (n*sumLogX2 - sumLogX*sumLogX)
	b := (sumY - a*sumLogX) / n

	for _, c := range s {
		regressions = append(regressions, Coordinate{
			X: c.X,
			Y: b + a*math.Log(c.X),
		})
	}

	return regressions, nil
}
Beispiel #6
0
// scalarMercatorProject converts a lng/lat pair (in degrees) to integer
// tile coordinates at the given zoom level using the spherical (web)
// Mercator projection, where (0, 0) is the northwest corner of the world.
func scalarMercatorProject(lng, lat float64, level uint64) (x, y uint64) {
	factor := uint64(1) << level
	maxtiles := float64(factor)

	// Longitude maps linearly: -180 → 0, +180 → maxtiles.
	x = uint64((lng/360.0 + 0.5) * maxtiles)

	// Latitude uses the Mercator formula
	//   y = 0.5 - ln((1+sin φ)/(1-sin φ)) / (4π)
	// which diverges at the poles ("top of the world problem"), so clamp
	// near |sin φ| = 1. Per the formula, northern latitudes map to small
	// y (top row 0) and southern latitudes to large y (bottom row
	// factor-1); the old code had these two clamps swapped.
	siny := math.Sin(lat * math.Pi / 180.0)
	switch {
	case siny > 0.9999:
		y = 0
	case siny < -0.9999:
		y = factor - 1
	default:
		fy := 0.5 + 0.5*math.Log((1.0+siny)/(1.0-siny))/(-2*math.Pi)
		y = uint64(fy * maxtiles)
	}

	return
}
Beispiel #7
0
// NormFloat64 returns a normally distributed float64 in the range
// [-math.MaxFloat64, +math.MaxFloat64] with
// standard normal distribution (mean = 0, stddev = 1).
// To produce a different normal distribution, callers can
// adjust the output using:
//
//  sample = NormFloat64() * desiredStdDev + desiredMean
//
// The implementation is the ziggurat method; kn, wn, fn are the
// precomputed strip tables and rn is the right-most strip edge
// (NOTE(review): tables are defined elsewhere in the package — assumed
// to follow the standard 128-strip ziggurat construction; confirm there).
func (r *Rand) NormFloat64() float64 {
	for {
		j := int32(r.Uint32()) // Possibly negative
		// The low 7 bits of the draw select one of 128 ziggurat strips.
		i := j & 0x7F
		// Candidate sample scaled to the chosen strip's width.
		x := float64(j) * float64(wn[i])
		if absInt32(j) < kn[i] {
			// This case should be hit better than 99% of the time.
			return x
		}

		if i == 0 {
			// This extra work is only required for the base strip:
			// sample from the exponential tail beyond rn
			// (Marsaglia's tail algorithm).
			for {
				x = -math.Log(r.Float64()) * (1.0 / rn)
				y := -math.Log(r.Float64())
				if y+y >= x*x {
					break
				}
			}
			// The sign of the original draw picks the tail side.
			if j > 0 {
				return rn + x
			}
			return -rn - x
		}
		// Wedge case: accept x with probability proportional to the gap
		// between the strip line and the true normal density.
		if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
			return x
		}
	}
}
Beispiel #8
0
// HarmonicMean returns the weighted harmonic mean of the dataset
//  \sum_i {w_i} / ( sum_i {w_i / x_i} )
// This only applies with positive x and positive weights.
// If weights is nil then all of the weights are 1. If weights is not nil, then
// len(x) must equal len(weights).
func HarmonicMean(x, weights []float64) float64 {
	if weights != nil && len(x) != len(weights) {
		panic("stat: slice length mismatch")
	}
	// TODO: Fix this to make it more efficient and avoid allocation

	// Work in log space for numerical stability (x may be very small):
	//   hm = exp(log W - log Σ_i w_i/x_i),  W = Σ_i w_i.
	// Each term log(w_i/x_i) is collected and combined via LogSumExp.
	logTerms := make([]float64, len(x))
	var totalWeight float64
	for i, xi := range x {
		if weights == nil {
			logTerms[i] = -math.Log(xi)
			totalWeight++
		} else {
			logTerms[i] = math.Log(weights[i]) - math.Log(xi)
			totalWeight += weights[i]
		}
	}

	// log(Σ_i w_i/x_i), computed without overflow.
	logDenominator := floats.LogSumExp(logTerms)
	return math.Exp(math.Log(totalWeight) - logDenominator)
}
Beispiel #9
0
// Grad evaluates the gradient of the Gulf Research and Development test
// function at x and stores it in grad. The objective is a sum over 99
// residuals t_i = exp(-|r_i|^x2 / x0) - i/100 with
// r_i = (-50 ln(i/100))^(2/3) + 25 - x1; the loop accumulates the three
// partial-derivative sums, which are scaled afterwards.
//
// Panics unless len(x) == 3 and len(grad) == len(x).
func (GulfResearchAndDevelopment) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 99; i++ {
		c := float64(i) / 100
		r := math.Pow(-50*math.Log(c), 2.0/3.0) + 25 - x[1]
		u := math.Pow(math.Abs(r), x[2]) / x[0]
		e := math.Exp(-u)
		residual := e - c
		// Shared factor of all three partials.
		shared := u * e * residual
		grad[0] += shared
		grad[1] += shared / r
		grad[2] -= shared * math.Log(math.Abs(r))
	}
	grad[0] *= 2 / x[0]
	grad[1] *= 2 * x[2]
	grad[2] *= 2
}
Beispiel #10
0
// TestLogSquared checks the LogSquared loss against a hand-computed value,
// verifies Loss and LossDeriv agree, compares the analytic derivative with
// a finite-difference estimate, and round-trips the losser through
// marshaling.
func TestLogSquared(t *testing.T) {
	pred := []float64{1, -2, 3}
	truth := []float64{1.1, -2.2, 2.7}
	// Mean of log(diff^2 + 1) over the three components.
	want := (math.Log(.1*.1+1) + math.Log(.2*.2+1) + math.Log(.3*.3+1)) / 3
	deriv := []float64{0, 0, 0}

	losser := LogSquared{}
	if got := losser.Loss(pred, truth); math.Abs(got-want) > TOL {
		t.Errorf("loss doesn't match from Loss(). Expected %v, Found: %v", want, got)
	}

	if got := losser.LossDeriv(pred, truth, deriv); math.Abs(got-want) > TOL {
		t.Errorf("loss doesn't match from LossDeriv()")
	}

	deriv, fdDeriv := finiteDifferenceLosser(losser, pred, truth)
	if !floats.EqualApprox(deriv, fdDeriv, FDTol) {
		t.Errorf("Derivative doesn't match. \n deriv: %v \n fdDeriv: %v ", deriv, fdDeriv)
	}

	if err := common.InterfaceTestMarshalAndUnmarshal(losser); err != nil {
		t.Errorf("Error marshaling and unmarshaling")
	}
}
Beispiel #11
0
// Primes returns the primes up to n using a segmented Sieve of
// Eratosthenes. This reduces the memory use of a plain sieve considerably:
// besides the result slice, only O(sqrt(n)) extra memory is required.
// You can learn more about it in https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes.
func Primes(n uint64) (allPrimes []uint64) {
	// For tiny n, ln(n)-1 truncates to 0 and the capacity estimate below
	// would divide by zero, so fall back to the plain sieve.
	if uint64(math.Log(float64(n))-1) == 0 {
		return SieveOfEratosthenes(n)
	}

	// Pre-size the result using the prime-counting approximation
	// pi(n) ≈ n / (ln(n) - 1).
	allPrimes = make([]uint64, 0, n/uint64(math.Log(float64(n))-1))
	segSize := uint64(math.Sqrt(float64(n)))

	// Segment buffers are pooled so concurrent workers can reuse them.
	csegPool.New = func() interface{} {
		return make([]bool, segSize)
	}

	// Primes up to sqrt(n) are sufficient to sieve every later segment.
	basePrimes := SieveOfEratosthenes(segSize)
	allPrimes = append(allPrimes, basePrimes...)

	cores := runtime.NumCPU()
	// next bounds the number of in-flight workers to one per core.
	// nextTurn is presumably used by fillSegments to hand segments their
	// append turn so results land in ascending order — confirm against
	// fillSegments, which is defined elsewhere.
	next := make(chan bool, cores)
	var nextTurn []chan bool
	nextTurn = make([]chan bool, n/segSize+1)
	for i := uint64(0); i < n/segSize+1; i++ {
		nextTurn[i] = make(chan bool)
	}
	for segNum := uint64(1); segNum <= n/segSize; segNum++ {
		go fillSegments(n, basePrimes, &allPrimes, segSize, segNum, next, nextTurn)
		next <- true
	}
	// Refill the semaphore completely: this blocks until every worker has
	// released its slot, i.e. all segments are done.
	for i := 0; i < cores; i++ {
		next <- true
	}

	return allPrimes
}
Beispiel #12
0
// Predict takes in a document, predicts the
// class of the document based on the training
// data passed so far, and returns the class
// estimated for the document.
func (b *NaiveBayes) Predict(sentence string) uint8 {
	// One running log-probability per class.
	scores := make([]float64, len(b.Count))

	sentence, _, _ = transform.String(b.sanitize, sentence)
	for _, token := range b.Tokenizer.Tokenize(sentence) {
		record, ok := b.Words.Get(token)
		if !ok {
			// Words never seen in training carry no evidence.
			continue
		}

		// Add-one smoothed log-likelihood of the word for each class.
		for class := range scores {
			scores[class] += math.Log(float64(record.Count[class]+1) / float64(record.Seen+b.DictCount))
		}
	}

	// Fold in the class priors.
	for class := range scores {
		scores[class] += math.Log(b.Probabilities[class])
	}

	// Argmax over class scores.
	best := 0
	for class := 1; class < len(scores); class++ {
		if scores[class] > scores[best] {
			best = class
		}
	}

	return uint8(best)
}
Beispiel #13
0
// Predict takes in a document, predicts the
// class of the document based on the training
// data passed so far, and returns the class
// estimated for the document.
func (b *NaiveBayes) Predict(sentence string) uint8 {
	sums := make([]float64, len(b.Count))

	sentence, _, _ = transform.String(b.sanitize, sentence)
	w := strings.Split(strings.ToLower(sentence), " ")
	for _, word := range w {
		// Look the word up once instead of re-hashing the map for
		// every class (the old code did three lookups per word).
		info, ok := b.Words[word]
		if !ok {
			// Words never seen in training carry no evidence.
			continue
		}

		for i := range sums {
			// Add-one (Laplace) smoothed log-likelihood of the word
			// given class i.
			sums[i] += math.Log(float64(info.Count[i]+1) / float64(info.Seen+b.DictCount))
		}
	}

	// Fold in the class priors.
	for i := range sums {
		sums[i] += math.Log(b.Probabilities[i])
	}

	// find best class
	var maxI int
	for i := range sums {
		if sums[i] > sums[maxI] {
			maxI = i
		}
	}

	return uint8(maxI)
}
// mandelbrotColor computes a Mandelbrot value and then assigns a color from the
// color table.
func mandelbrotColor(c complex128, zoom int) color.RGBA {
	// Scale so we can fit the entire set in one tile when zoomed out.
	c = c*3.5 - complex(2.5, 1.75)

	z := complex(0, 0)
	for iter := 0; iter < iterations; iter++ {
		z = z*z + c
		re, im := real(z), imag(z)
		if re*re+im*im < 4 {
			continue
		}

		// Escaped: apply the "Continuous (smooth) coloring" described in
		// http://en.wikipedia.org/wiki/Mandelbrot_set#Continuous_.28smooth.29_coloring
		v := float64(iter) - math.Log2(math.Log(cmplx.Abs(z))/math.Log(4))

		// Scale the value based on the zoom level so things don't get
		// too busy as we get further in.
		v = math.Abs(v) * float64(colorDensity) / math.Max(float64(zoom), 1)
		minValue = math.Min(v, minValue)
		maxValue = math.Max(v, maxValue)
		colorIdx := (int(v) + numColors*zoom/len(colorStops)) % numColors
		return colors[colorIdx]
	}

	return centerColor
}
Beispiel #15
0
// startBenchmarkClient builds a benchmarkClient from the given test
// configuration and starts issuing RPCs via performRPCs. It configures the
// process environment, dials the configured connections, and sizes the
// latency histogram from the configured resolution and maximum; the
// connections are torn down again if starting the RPCs fails.
func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
	printClientConfig(config)

	// Set running environment like how many cores to use.
	setupClientEnv(config)

	conns, closeConns, err := createConns(config)
	if err != nil {
		return nil, err
	}

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
	bc := &benchmarkClient{
		histogramOptions: stats.HistogramOptions{
			// Number of geometrically growing buckets needed to cover
			// [0, MaxPossible] with growth factor (1 + Resolution):
			// log_{1+res}(max) + 1 via the change-of-base rule.
			NumBuckets:     int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
			GrowthFactor:   config.HistogramParams.Resolution,
			BaseBucketSize: (1 + config.HistogramParams.Resolution),
			MinValue:       0,
		},
		// One histogram per outstanding RPC per connection — presumably
		// so concurrent workers don't share a histogram; confirm in
		// performRPCs.
		lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)),

		stop:          make(chan bool),
		lastResetTime: time.Now(),
		closeConns:    closeConns,
	}

	if err = performRPCs(config, conns, bc); err != nil {
		// Close all connections if performRPCs failed.
		closeConns()
		return nil, err
	}

	return bc, nil
}
Beispiel #16
0
// PlotSpectogram computes the DFT of sr over samples [from, to) and writes
// a grayscale spectrogram PNG to outpath. Amplitudes are drawn on a log
// scale so low amplitudes remain visible.
func PlotSpectogram(sr Reader, outpath string, from, to int) error {
	d, err := dft(sr, from, to)
	if err != nil {
		return err
	}

	// Make the image. (Renamed from `image` to avoid shadowing the
	// image package.)
	height, width := len(d[0]), len(d)
	img := image.NewGray(image.Rect(0, 0, width, height))

	// Find the maximum value in the DFT; the +1 keeps the log
	// non-negative.
	max := math.Log(1 + maxAmp(d))

	// Plot each point on the image.
	for i, col := range d {
		for j, amp := range col {
			// Log(Xk+1) will give us a positive value.
			// Using log here allows low amplitudes to be more visible.
			bright := uint8(float64(255) * math.Log(1+amp) / max)
			img.Set(i, height-j, color.RGBA{bright, bright, bright, 255})
		}
	}

	of, err := os.Create(outpath)
	if err != nil {
		return err
	}
	// The old code never closed the file, leaking the descriptor and
	// potentially losing buffered writes. Close on both paths and report
	// the Close error on success.
	if err := png.Encode(of, img); err != nil {
		of.Close()
		return err
	}
	return of.Close()
}
Beispiel #17
0
// Returns the offset into the color slices (PixelDiffColor,
// or PixelAlphaDiffColor) based on the delta passed in.
//
// The number passed in is the difference between two colors,
// on a scale from 1 to 1024.
func deltaOffset(n int) int {
	// Bucket n logarithmically (base 3, shifted by 0.5) into 1..7.
	bucket := int(math.Ceil(math.Log(float64(n))/math.Log(3) + 0.5))
	if bucket < 1 || bucket > 7 {
		glog.Fatalf("Input: %d", n)
	}
	// Convert the 1-based bucket to a zero-based slice offset.
	return bucket - 1
}
Beispiel #18
0
// InitBoundedLog initializes a Histogram instance from the given array
// of values with the given number of bins which fall between the given limits.
// The logarithms of bin centers are uniformly distributed. Any
// values outside of these limits are ignored. The returned integer is the
// number of such ignored values. Because of this, infinite and non-positive
// values do not cause a panic.
//
// The first returned value is the initialized Histogram.
//
// InitBoundedLog panics if given a non-positive number of bins or
// a low bound as large or larger than the high bound or if given infinite bounds.
func (hist *Histogram) InitBoundedLog(xs []float64, binNum int, low, high float64) (*Histogram, int) {
	if hist.init {
		panic("stats.Histogram.InitBoundedLog called on initialized struct.")
	} else if binNum < 1 {
		panic(fmt.Sprintf("stats.Histogram.InitBoundedLog given binNum of %d", binNum))
	} else if low >= high || low <= 0 || math.IsInf(low, 0) ||
		math.IsInf(high, 0) || math.IsNaN(low) || math.IsNaN(high) {
		// %g, not %d: low and high are float64 (the old %d verb printed
		// "%!d(float64=...)").
		panic(fmt.Sprintf("stats.Histogram.InitBoundedLog given range [%g, %g]", low, high))
	}

	hist.init = true
	hist.Bins = make([]int, binNum)
	hist.BinValues = make([]float64, binNum)
	hist.BinEdges = make([]float64, binNum+1)

	hist.logHistogram = true

	// Binning is uniform in log space, so the limits are stored as logs.
	hist.lowLim = math.Log(low)
	hist.highLim = math.Log(high)
	hist.binWidth = (hist.highLim - hist.lowLim) / float64(binNum)

	// Edges and centers are exponentiated back into linear space.
	for i := 0; i < binNum; i++ {
		hist.BinEdges[i] = math.Exp(hist.lowLim + hist.binWidth*float64(i))
		hist.BinValues[i] = math.Exp(hist.lowLim + hist.binWidth*(float64(i)+0.5))
	}

	// The final edge is the upper bound itself in linear space, matching
	// every other entry of BinEdges. (The old code stored the *log* of
	// the bound here, leaving BinEdges with inconsistent units.)
	hist.BinEdges[binNum] = high

	return hist, hist.AddArray(xs)
}
Beispiel #19
0
// LogProb computes the natural logarithm of the value of the probability
// density function at x. Zero is returned if x is less than zero.
//
// Special cases occur when x == 0, and the result depends on the shape
// parameter as follows:
//  If 0 < K < 1, LogProb returns +Inf.
//  If K == 1, LogProb returns 0.
//  If K > 1, LogProb returns -Inf.
func (w Weibull) LogProb(x float64) float64 {
	if x < 0 {
		return 0
	}
	// log pdf = log(k) - log(λ) + (k-1)(log x - log λ) - (x/λ)^k.
	logLambda := math.Log(w.Lambda)
	return math.Log(w.K) - logLambda + (w.K-1)*(math.Log(x)-logLambda) - math.Pow(x/w.Lambda, w.K)
}
Beispiel #20
0
// genNormal draws one sample from the standard normal distribution using
// the ziggurat method driven by rng. normalK, normalW, normalF are the
// precomputed strip tables and normalR the tail cutoff (defined elsewhere
// in the package; assumed to follow the standard 256-strip construction —
// confirm at their definition).
func genNormal(rng RNG) float64 {
	for {
		x := rng.Uint64()
		// Low 32 bits: signed candidate; next 8 bits: strip index.
		j := int32(x)
		i := int(x >> 32 & 255)
		// (j + j>>31) ^ j>>31 is a branchless absolute value of j;
		// |j| < normalK[i] accepts the candidate immediately.
		if uint32(j+j>>31^j>>31) < normalK[i] {
			return float64(j) * float64(normalW[i])
		}
		if i != 0 {
			// Wedge case: accept with probability proportional to the
			// gap between the strip line and the true density.
			v := float64(j) * float64(normalW[i])
			if (normalF[i] + rng.Float32()*(normalF[i-1]-normalF[i])) < float32(math.Exp(-0.5*v*v)) {
				return v
			}
		} else {
			// Base strip: sample from the exponential tail beyond
			// normalR (Marsaglia's tail algorithm).
			var v float64
			for {
				// Do 1-x to prevent log(0).
				x := -math.Log(1-rng.Float64()) / normalR
				y := -math.Log(1 - rng.Float64())
				if y+y >= x*x {
					v = x + normalR
					break
				}
			}
			// The top bit of the original draw picks the sign.
			if x>>63 != 0 {
				return -v
			}
			return v
		}
	}
}
Beispiel #21
0
// NewSpherical builds a Spherical LSH instance for d-dimensional data with
// k hash functions and L search copies. k is capped so that k hashes of
// ceil(log2(2d)) bits each still fit in HashBits.
func NewSpherical(d, k, L int) *Spherical {
	// A d-dimensional cross-polytope has 2d vertices; addressing one of
	// them takes ceil(log2(2d)) bits per hash.
	nvertex := 2.0 * d
	hbits := int(math.Ceil(math.Log(float64(nvertex)) / math.Log(2)))
	kmax := int(HashBits / hbits)
	if k > kmax {
		k = kmax
	}

	// One rotation-matrix slot per (hash, copy) pair. Population of the
	// slots is intentionally left out here (see the disabled code below).
	vAll := make([][][]float64, k*L)
	// r := make([]*rand.Rand, d);
	// for i := 0; i < d; i++ {
	//     r[i] = rand.New();
	// }
	// rotationMatrices := vAll;
	// for i := 0; i < k * L; i++ {
	//     rotationMatrices[i] = utils.RandomRotation(d, r);
	// }
	return &Spherical{
		vAll:     vAll,
		hbits:    hbits,
		d:        d,
		k:        k,
		l:        L,
		distance: 0.0,
		variance: 1.0,
	}
}
Beispiel #22
0
// NewSpherical builds a Spherical LSH instance for numDimensions-dimensional
// data with numHashFuncs hash functions and numSearchCopies search copies.
// numHashFuncs is capped so that that many hashes of ceil(log2(2d)) bits
// each still fit in HashBits. Every (hash, copy) slot receives a random
// rotation matrix generated from deterministically seeded RNGs.
func NewSpherical(numDimensions, numHashFuncs, numSearchCopies int) *Spherical {
	// A cross-polytope in numDimensions dimensions has 2d vertices;
	// selecting one takes ceil(log2(2d)) bits per hash.
	nvertex := 2.0 * numDimensions
	hbits := int(math.Ceil(math.Log(float64(nvertex)) / math.Log(2)))
	kmax := int(HashBits / hbits)
	if numHashFuncs > kmax {
		numHashFuncs = kmax
	}

	// Deterministic per-dimension RNGs (seeded by index) keep the
	// rotations reproducible across runs.
	r := make([]*rand.Rand, numDimensions)
	for i := range r {
		r[i] = rand.New(rand.NewSource(int64(i)))
	}

	// Fill vAll directly; the old code aliased it through a second
	// variable (rotationMatrices) and assigned the slice header back,
	// which was a no-op.
	vAll := make([][][]float64, numHashFuncs*numSearchCopies)
	for i := range vAll {
		vAll[i] = utils.RandomRotation(numDimensions, r)
	}

	return &Spherical{
		vAll:            vAll,
		hbits:           hbits,
		numDimensions:   numDimensions,
		numHashFuncs:    numHashFuncs,
		numSearchCopies: numSearchCopies,
		distance:        0.0,
		variance:        1.0,
	}
}
Beispiel #23
0
// ExponentialRegression returns an exponential regression on data series.
// It fits y = a*exp(b*x) by weighted least squares on log(y) and returns
// the fitted value at every X of the input; the Y values must be positive
// since their logarithm is taken. An error is returned for an empty series.
func ExponentialRegression(s Series) (regressions Series, err error) {

	if len(s) == 0 {
		return nil, errors.New("Input must not be empty")
	}

	// Sufficient statistics for the weighted log-linear fit.
	var sum [6]float64

	for i := 0; i < len(s); i++ {
		logY := math.Log(s[i].Y)
		sum[0] += s[i].X
		sum[1] += s[i].Y
		sum[2] += s[i].X * s[i].X * s[i].Y
		sum[3] += s[i].Y * logY
		sum[4] += s[i].X * s[i].Y * logY
		sum[5] += s[i].X * s[i].Y
	}

	denominator := (sum[1]*sum[2] - sum[5]*sum[5])
	// Use math.Exp in both places: the old code mixed math.Pow(math.E, …)
	// with a hand-typed Euler constant, which was inconsistent and
	// slightly less accurate than the dedicated exponential.
	a := math.Exp((sum[2]*sum[3] - sum[5]*sum[4]) / denominator)
	b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator

	for j := 0; j < len(s); j++ {
		regressions = append(regressions, Coordinate{
			X: s[j].X,
			Y: a * math.Exp(b*s[j].X),
		})
	}

	return regressions, nil

}
Beispiel #24
0
// NewHistogram returns a pointer to a new Histogram object that was created
// with the provided options.
func NewHistogram(opts HistogramOptions) *Histogram {
	// Apply defaults for unset options.
	if opts.NumBuckets == 0 {
		opts.NumBuckets = 32
	}
	if opts.BaseBucketSize == 0.0 {
		opts.BaseBucketSize = 1.0
	}

	h := Histogram{
		Buckets: make([]HistogramBucket, opts.NumBuckets),
		Min:     math.MaxInt64,
		Max:     math.MinInt64,

		opts: opts,
		// Cache the log terms used by later bucket-index computations.
		logBaseBucketSize:             math.Log(opts.BaseBucketSize),
		oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor),
	}

	// Bucket widths grow geometrically by (1 + GrowthFactor), starting
	// from BaseBucketSize at MinValue.
	growth := 1.0 + opts.GrowthFactor
	width := opts.BaseBucketSize
	h.Buckets[0].LowBound = float64(opts.MinValue)
	for i := 1; i < opts.NumBuckets; i++ {
		h.Buckets[i].LowBound = float64(opts.MinValue) + width
		width *= growth
	}
	return &h
}
Beispiel #25
0
// num2str renders a non-negative integer as English words, recursing on
// thousands groups ("X thousand Y"), hundreds ("X hundred and Y") and tens
// ("twenty-one"). Negative input yields the empty string.
func num2str(n int) (r string) {
	switch {
	case n < 0:
		return ""
	case n >= 1000:
		// Index into groups (thousand, million, ...) by the number of
		// factors of 1000 in n.
		group := int(math.Floor(math.Log(float64(n)) / math.Log(1000)))
		r = num2str(n/1000) + " " + groups[group] + " " + num2str(n%1000)
	case n >= 100:
		r = num2str(n/100) + " hundred"
		if n%100 > 0 {
			r += " and " + num2str(n%100)
		}
	case n >= 20:
		r = tens[n/10]
		if n%10 > 0 {
			r += "-" + num2str(n%10)
		}
	default:
		r = numbers[n]
	}
	return r
}
Beispiel #26
0
Datei: nn.go Projekt: jcla1/nn
// CostFunction returns the regularized cross-entropy cost of the network
// parameters thetas over the given training data. lambda controls the L2
// regularization strength; the bias parameters (theta0, the first column
// of each matrix) are excluded from the penalty.
func CostFunction(data []TrainingExample, thetas Parameters, lambda float64) float64 {
	m := float64(len(data))

	// Cross-entropy cost: Σ y·log(h) + (1-y)·log(1-h) over all outputs.
	var cost float64
	for _, datum := range data {
		estimation := Hypothesis(thetas, datum).Values()
		// Renamed from expected_output: Go naming uses mixedCaps, never
		// underscores.
		expectedOutput := datum.ExpectedOutput.Values()

		for k, y := range expectedOutput {
			// heart of the cost function
			cost += y*math.Log(estimation[k]) + (1-y)*math.Log(1-estimation[k])
		}
	}

	// L2 regularization: sum of squares of all non-bias parameters.
	var regularizationCost float64
	for _, theta := range thetas {
		for i, param := range theta.Values() {
			// ignore theta0 (first column of each row)
			if i%theta.C() == 0 {
				continue
			}
			regularizationCost += param * param
		}
	}

	return -cost/m + (lambda/(2*m))*regularizationCost
}
Beispiel #27
0
// LogSpan returns a set of n equally spaced points in log space between,
// l and u where N is equal to len(dst). The first element of the
// resulting dst will be l and the final element of dst will be u.
// Panics if len(dst) < 2
// Note that this call will return NaNs if either l or u are negative, and
// will return all zeros if l or u is zero.
// Also returns the mutated slice dst, so that it can be used in range, like:
//
//     for i, x := range LogSpan(dst, l, u) { ... }
func LogSpan(dst []float64, l, u float64) []float64 {
	// Lay the points out linearly between log(l) and log(u), then map
	// them back through exp so they are evenly spaced in log space.
	Span(dst, math.Log(l), math.Log(u))
	for i, v := range dst {
		dst[i] = math.Exp(v)
	}
	return dst
}
// I returns the I complexity of the indexed string (Becher & Heiber, 2012):
// the sum over LCP values v of log4((v+2)/(v+1)).
func (idx *Index) I() float64 {
	var total float64
	for _, v := range idx.lcp {
		total += (math.Log(float64(v+2)) - math.Log(float64(v+1))) / math.Log(4.0)
	}
	return total
}
Beispiel #29
0
// https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function
func erfinv(y float64) float64 {
	if y < -1.0 || y > 1.0 {
		panic("invalid input")
	}

	var (
		a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331}
		b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801}
		c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311}
		d = [2]float64{3.543889200, 1.637067800}
	)

	const y0 = 0.7
	var x, z float64

	if math.Abs(y) == 1.0 {
		x = -y * math.Log(0.0)
	} else if y < -y0 {
		z = math.Sqrt(-math.Log((1.0 + y) / 2.0))
		x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0)
	} else {
		if y < y0 {
			z = y * y
			x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0)
		} else {
			z = math.Sqrt(-math.Log((1.0 - y) / 2.0))
			x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0)
		}
		x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x))
		x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x))
	}

	return x
}
Beispiel #30
0
// BetaInc returns the value of the regularized incomplete beta
// function Iₓ(a, b).
//
// This is not to be confused with the "incomplete beta function",
// which can be computed as BetaInc(x, a, b)*Beta(a, b).
//
// If x < 0 or x > 1, returns NaN.
func BetaInc(x, a, b float64) float64 {
	// Based on Numerical Recipes in C, section 6.4: evaluate the
	// continued-fraction form of I:
	//
	//  (xᵃ*(1-x)ᵇ)/(a*B(a,b)) * (1/(1+(d₁/(1+(d₂/(1+...))))))
	//
	// where B(a,b) is the beta function and
	//
	//  d_{2m+1} = -(a+m)(a+b+m)x/((a+2m)(a+2m+1))
	//  d_{2m}   = m(b-m)x/((a+2m-1)(a+2m))
	if x < 0 || x > 1 {
		return math.NaN()
	}

	// Leading coefficient xᵃ(1-x)ᵇ/B(a,b), computed in log space; it is
	// zero at the endpoints x == 0 and x == 1.
	var bt float64
	if 0 < x && x < 1 {
		bt = math.Exp(lgamma(a+b) - lgamma(a) - lgamma(b) +
			a*math.Log(x) + b*math.Log(1-x))
	}

	// The continued fraction converges fastest below the split point
	// (a+1)/(a+b+2); above it, use the symmetry Iₓ(a,b) = 1 - I₁₋ₓ(b,a).
	if x < (a+1)/(a+b+2) {
		return bt * betacf(x, a, b) / a
	}
	return 1 - bt*betacf(1-x, b, a)/b
}