Example #1
// Cheesy "tournament" selection: everyone is in the tournament, so this
// amounts to truncation selection (keep the fittest half). Alternatively we
// could draw a random subset of genomes from the population and keep the
// fittest among those.
func (pop *Population) makeSelection() {
	scores := make([]float64, 0)
	for i := 0; i < len(pop.genomes); i++ {
		scores = append(scores, pop.genomes[i].score)
	}
	sort.Float64s(scores)

	// choose the top 50% of scores; scores is sorted ascending, so the
	// cutoff is topN positions from the end
	topN := int(float64(len(scores)) * 0.5)
	min := scores[len(scores)-topN]
	max := scores[len(scores)-1]

	// build a new list of genomes that made the cutoff (ties at min may
	// keep more than topN genomes)
	selectedGenomes := make([]*Genome, 0)
	fmt.Printf("min, max scores: %f, %f\n", min, max)
	for i := 0; i < len(pop.genomes); i++ {
		genome := pop.genomes[i]
		if genome.score >= min && genome.score <= max {
			selectedGenomes = append(selectedGenomes, genome)
		}
	}

	pop.genomes = selectedGenomes
}
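The same truncation can be written more compactly by sorting the genomes themselves instead of a parallel score slice. A minimal self-contained sketch with stand-in Genome/Population types (the real ones live in the surrounding project):

package main

import (
	"fmt"
	"sort"
)

type Genome struct{ score float64 }
type Population struct{ genomes []*Genome }

// keepTopHalf sorts genomes by descending score and truncates to the fittest 50%.
func (pop *Population) keepTopHalf() {
	sort.Slice(pop.genomes, func(i, j int) bool {
		return pop.genomes[i].score > pop.genomes[j].score
	})
	pop.genomes = pop.genomes[:len(pop.genomes)/2]
}

func main() {
	pop := &Population{genomes: []*Genome{{0.2}, {0.9}, {0.5}, {0.7}}}
	pop.keepTopHalf()
	for _, g := range pop.genomes {
		fmt.Println(g.score) // 0.9, then 0.7
	}
}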
Example #2
File: reg.go Project: enodev0/rat
// GridScale determines what scale should be used on the graph, for
// both the X and Y axes
func (n *New) GridScale(column string, gridCount float64) float64 {
	// Courtesy: Stack Overflow
	microAdjust := func(span, grids float64) float64 {
		width := (span / (grids - 1))
		x := math.Ceil(math.Log10(width) - 1)
		p := math.Pow(10, x)
		rspace := math.Ceil(width/p) * p
		return rspace
	}

	switch column {
	case "X":
		rangeX := n.DataRange("X")
		return microAdjust(rangeX, gridCount)
	case "Y":
		rangeY := n.DataRange("Y")
		return microAdjust(rangeY, gridCount)
	default:
		return 0
	}
}
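To see what the rounding helper does, here is a standalone sketch of the same formula: a span of 7.3 split across 5 grid lines gives a raw step of 7.3/4 = 1.825, which is rounded up to a "nice" step of 2.

package main

import (
	"fmt"
	"math"
)

func main() {
	microAdjust := func(span, grids float64) float64 {
		width := span / (grids - 1)
		p := math.Pow(10, math.Ceil(math.Log10(width)-1))
		return math.Ceil(width/p) * p
	}
	fmt.Println(microAdjust(7.3, 5)) // 2
}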
Example #3
func TestRunIncomes(t *testing.T) {
	JsonObj := []byte(`{"Age":22, "Retirement_age":65, "Terminal_age":90, "Effective_tax_rate":0.3, "Returns_tax_rate":0.3, "N": 20000, 
						"Non_Taxable_contribution":17500, "Taxable_contribution": 0, "Non_Taxable_balance":0, "Taxable_balance": 0, 
						"Yearly_social_security_income":0, "Asset_volatility": 0.15, "Expected_rate_of_return": 0.07, "Inflation_rate":0.035}`)
	rc, err := NewRetCalcFromJSON(JsonObj)
	if err != nil {
		t.Fatal(err) // rc would be invalid below, so stop the test here
	}
	runIncomes := rc.RunIncomes()
	sort.Float64s(runIncomes)
	incomePerRun := make([]float64, rc.N)
	for i := range incomePerRun {
		incomePerRun[i] = rc.IncomeOnPath(i)
	}
	sort.Float64s(incomePerRun)
	incomesOk := true
	for i := range incomePerRun {
		if incomePerRun[i] != runIncomes[i] {
			incomesOk = false
			fmt.Printf("RunIncomes: %f, IncomeOnPath: %f\n", runIncomes[i], incomePerRun[i])
		}
	}
	if !incomesOk {
		t.Errorf("Incomes do not calculate correctly for RunIncomes()")
	}
	if !sort.Float64sAreSorted(runIncomes) {
		t.Errorf("Incomes from RetCalc.RunIncomes() should be sorted on return")
	}
}
Example #4
// MedianDist computes the median (med) and the median absolute deviation (mad)
// of the spherical distances between point q and points p₀,p₁,...
// on the unit sphere S². This function panics if len(p) = 0.
func MedianDist(q Geo, p []Geo) (med, mad float64) {
	n := len(p)
	if n == 0 {
		panic("MedianDist: len(p) = 0")
	}

	m := n / 2

	d := make([]float64, n)
	for i, t := range p {
		d[i] = q.Dist(t)
	}
	sort.Float64s(d)
	if !odd(n) {
		a, b := d[m-1], d[m]
		med = a + 0.5*(b-a)
	} else {
		med = d[m]
	}

	for i := range d {
		d[i] = math.Abs(med - d[i])
	}
	sort.Float64s(d)
	if !odd(n) {
		a, b := d[m-1], d[m]
		mad = a + 0.5*(b-a)
	} else {
		mad = d[m]
	}

	return
}
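The snippet relies on an odd helper that is not shown; presumably something like:

// odd reports whether n is odd.
func odd(n int) bool {
	return n%2 != 0
}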
Example #5
File: reg.go Project: enodev0/rat
// DataRange determines the range of both the
// predictor and response variables
func (n *New) DataRange(column string) float64 {
	// TODO: some cleanup
	xval := make([]float64, len(n.Xvalues))
	copy(xval, n.Xvalues)
	yval := make([]float64, len(n.Yvalues))
	copy(yval, n.Yvalues)
	switch column {
	case "X":
		sort.Float64s(xval)
		upperX := (xval[len(xval)-1])
		lowerX := (xval[0])
		if upperX == lowerX { // integrate with error handler
			panic("Range calculates to zero on X")
		}
		return (upperX - lowerX)
	case "Y":
		sort.Float64s(yval)
		upperY := (yval[len(yval)-1])
		lowerY := (yval[0])
		if upperY == lowerY {
			panic("Range calculates to zero on Y")
		}
		return (upperY - lowerY)
	default:
		return -1
	}
}
Example #6
// A simple comparison checking whether the minimums and maximums in both datasets are within allowedVariance.
// If this function changes, PrintToStdout should be updated accordingly.
func isResourceUsageSimilarEnough(left, right percentileUsageData, allowedVariance float64) bool {
	if len(left.cpuData) == 0 || len(left.memData) == 0 || len(right.cpuData) == 0 || len(right.memData) == 0 {
		glog.V(4).Infof("Length of at least one data vector is zero. Returning false for the lack of data.")
		return false
	}

	sort.Float64s(left.cpuData)
	sort.Float64s(right.cpuData)
	sort.Sort(int64arr(left.memData))
	sort.Sort(int64arr(right.memData))

	leftCPUMin := math.Max(left.cpuData[0], minCPU)
	leftCPUMax := math.Max(left.cpuData[len(left.cpuData)-1], minCPU)
	leftMemMin := max(left.memData[0], minMem)
	leftMemMax := max(left.memData[len(left.memData)-1], minMem)
	rightCPUMin := math.Max(right.cpuData[0], minCPU)
	rightCPUMax := math.Max(right.cpuData[len(right.cpuData)-1], minCPU)
	rightMemMin := max(right.memData[0], minMem)
	rightMemMax := max(right.memData[len(right.memData)-1], minMem)

	return leq(leftCPUMin, allowedVariance*rightCPUMin) &&
		leq(rightCPUMin, allowedVariance*leftCPUMin) &&
		leq(leftCPUMax, allowedVariance*rightCPUMax) &&
		leq(rightCPUMax, allowedVariance*leftCPUMax) &&
		leq(float64(leftMemMin), allowedVariance*float64(rightMemMin)) &&
		leq(float64(rightMemMin), allowedVariance*float64(leftMemMin)) &&
		leq(float64(leftMemMax), allowedVariance*float64(rightMemMax)) &&
		leq(float64(rightMemMax), allowedVariance*float64(leftMemMax))
}
Example #7
//HistoAnalyze uses a Scanner to read items, converts each item to a number, and computes the Top N and a histogram of those numbers.
// It returns an error if an item cannot be converted to a float64 or if the scanner encounters an error.
// When no error is detected, it returns
// 		1) the count of numbers read in
// 		2) the top N numbers as a slice
// 		3) the histogram data as a map
//		4) nil (no error)
func histoAnalyze(s Scanner, N int, binWidth float64) (int64, []float64, map[float64]int64, error) {

	//init return parameters
	count := int64(0)
	topN := []float64{}
	histo := map[float64]int64{}
	var err error

	//check the passed-in parameters; binWidth must be strictly positive
	//because the bin calculation below divides by it
	if N < 1 || binWidth <= 0.0 {
		return -1, nil, nil, fmt.Errorf("HistoAnalyze: invalid N (%d) or bin width (%f)", N, binWidth)
	}

	for s.Scan() {
		count++

		num, err := strconv.ParseFloat(s.Text(), 64)
		if err != nil {
			return -1, nil, nil, err
		}

		//calculate the lower boundary of the bin this number falls in
		lowerB := math.Floor(num/binWidth) * binWidth

		// increase the frequency of the bin
		histo[lowerB]++

		//Now handling Top N
		switch {
		//append the first TOP_N items when the TopN slice is not long enough
		case len(topN) < N:
			topN = append(topN, num)
			sort.Float64s(topN)
		//the new number is greater than one of the existing TopN
		case num > topN[0]:
			// topN is kept sorted, so topN[0] is the current minimum;
			// replace it with the new number
			topN[0] = num
			sort.Float64s(topN)
		}

	}
	//check if the scanner has encountered any error
	if err = s.Err(); err != nil {
		return -1, nil, nil, err
	}

	return count, topN, histo, err
}
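Assuming the Scanner parameter exposes the same Scan/Text/Err methods as bufio.Scanner, a quick usage sketch (with histoAnalyze above in the same package):

package main

import (
	"bufio"
	"fmt"
	"log"
	"strings"
)

func main() {
	s := bufio.NewScanner(strings.NewReader("1.5\n2.25\n2.75\n9.0\n"))
	count, top, histo, err := histoAnalyze(s, 2, 1.0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(count, top, histo) // 4 [2.75 9] map[1:1 2:2 9:1]
}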
Example #8
func (node *Node) Split(depth int) {
	if len(node.shapes) < 8 {
		return
	}
	xs := make([]float64, 0, len(node.shapes)*2)
	ys := make([]float64, 0, len(node.shapes)*2)
	zs := make([]float64, 0, len(node.shapes)*2)
	for _, shape := range node.shapes {
		box := shape.Box()
		xs = append(xs, box.Min.X)
		xs = append(xs, box.Max.X)
		ys = append(ys, box.Min.Y)
		ys = append(ys, box.Max.Y)
		zs = append(zs, box.Min.Z)
		zs = append(zs, box.Max.Z)
	}
	sort.Float64s(xs)
	sort.Float64s(ys)
	sort.Float64s(zs)
	mx, my, mz := Median(xs), Median(ys), Median(zs)
	best := int(float64(len(node.shapes)) * 0.85)
	bestAxis := AxisNone
	bestPoint := 0.0
	sx := node.PartitionScore(AxisX, mx)
	if sx < best {
		best = sx
		bestAxis = AxisX
		bestPoint = mx
	}
	sy := node.PartitionScore(AxisY, my)
	if sy < best {
		best = sy
		bestAxis = AxisY
		bestPoint = my
	}
	sz := node.PartitionScore(AxisZ, mz)
	if sz < best {
		best = sz
		bestAxis = AxisZ
		bestPoint = mz
	}
	if bestAxis == AxisNone {
		return
	}
	l, r := node.Partition(best, bestAxis, bestPoint)
	node.axis = bestAxis
	node.point = bestPoint
	node.left = NewNode(l)
	node.right = NewNode(r)
	node.left.Split(depth + 1)
	node.right.Split(depth + 1)
	node.shapes = nil // only needed at leaf nodes
}
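Median here is applied to slices that are already sorted; a minimal version consistent with that usage (the project's own helper may differ):

// Median returns the middle value of an already-sorted slice,
// averaging the two central values when the length is even.
func Median(sorted []float64) float64 {
	n := len(sorted)
	if n%2 == 0 {
		return (sorted[n/2-1] + sorted[n/2]) / 2
	}
	return sorted[n/2]
}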
Example #9
// KS performs a Kolmogorov-Smirnov test for the two datasets, and returns the
// p-value for the null hypothesis that the two sets come from the same distribution.
func KS(data1, data2 []float64) float64 {

	// Note: both inputs are sorted in place. sort.Float64s leaves NaNs in
	// unspecified positions, so the loops below only strip NaNs that land
	// at the front of each slice.
	sort.Float64s(data1)
	sort.Float64s(data2)

	for len(data1) > 0 && math.IsNaN(data1[0]) {
		data1 = data1[1:]
	}

	for len(data2) > 0 && math.IsNaN(data2[0]) {
		data2 = data2[1:]
	}

	n1, n2 := len(data1), len(data2)
	en1, en2 := float64(n1), float64(n2)

	var d float64
	var fn1, fn2 float64

	j1, j2 := 0, 0
	for j1 < n1 && j2 < n2 {
		d1 := data1[j1]
		d2 := data2[j2]

		if d1 <= d2 {
			for j1 < n1 && d1 == data1[j1] {
				j1++
				fn1 = float64(j1) / en1
			}
		}

		if d2 <= d1 {
			for j2 < n2 && d2 == data2[j2] {
				j2++
				fn2 = float64(j2) / en2
			}
		}

		if dt := math.Abs(fn2 - fn1); dt > d {
			d = dt
		}

	}
	en := math.Sqrt((en1 * en2) / (en1 + en2))
	// R and Octave don't use this approximation that NR does
	//return qks((en + 0.12 + 0.11/en) * d)
	return qks(en * d)
}
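The final call relies on a qks helper that is not shown. Q_KS is the complementary CDF of the Kolmogorov distribution, Q(λ) = 2 Σ_{j≥1} (-1)^{j-1} e^{-2 j² λ²}; a minimal sketch of it (the project's version may add the convergence safeguards used in Numerical Recipes):

// qks approximates the Kolmogorov distribution complement Q_KS(lambda).
func qks(lambda float64) float64 {
	if lambda <= 0 {
		return 1
	}
	sum, sign := 0.0, 1.0
	for j := 1; j <= 100; j++ {
		term := 2 * sign * math.Exp(-2*float64(j*j)*lambda*lambda)
		sum += term
		if math.Abs(term) < 1e-12*math.Abs(sum) {
			break
		}
		sign = -sign
	}
	return sum
}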
Example #10
File: reg.go Project: enodev0/rat
// GraphInit initializes and returns a Graphdata
// TODO: massive cleanups required
func (n *New) GraphInit(gridcountX, gridcountY int) Graphdata {
	a := make([]float64, len(n.Xvalues))
	b := make([]float64, len(n.Yvalues))
	copy(a, n.Xvalues)
	copy(b, n.Yvalues)
	sort.Float64s(a)
	sort.Float64s(b) // naive
	leastX := a[0]
	leastY := b[0]
	var xs []float64
	var ys []float64     // scaled grid marks
	xs = append(xs, 0.0) // origin
	ys = append(ys, 0.0)
	m, p, _ := Translate(0, 0)
	x := n.GridScale("X", float64(gridcountX))
	y := n.GridScale("Y", float64(gridcountY))
	for i := 1; i <= gridcountX; i++ {
		if i == 1 {
			xs = append(xs, leastX)
		} else {
			xs = append(xs, leastX+(float64(i)*x))
		}
	}
	for i := 1; i <= gridcountY; i++ {
		if i == 1 {
			ys = append(ys, leastY)
		} else {
			ys = append(ys, leastY+(float64(i)*y))
		}
	}
	var xg []float64
	for i := 1; i <= gridcountX+1; i++ { // +1 to account for origin
		xg = append(xg, m)
		m = m + 20 //x coordinate
	}
	var yg []float64 // actual translated coordinates
	for i := 1; i <= gridcountY+1; i++ {
		yg = append(yg, p)
		p = p - 20 //y coordinate
	}
	g := Graphdata{xs, ys, xg, yg}
	return g
}
Example #11
func StepFunction(data map[float64][]float64) func(float64) []float64 {
	sorted := make([]float64, len(data))
	i := 0
	for key := range data {
		sorted[i] = key
		i++
	}

	sort.Float64s(sorted)

	return func(x float64) []float64 {
		// advance to the first key strictly greater than x
		curPoint := 0
		for ; curPoint < len(sorted) && sorted[curPoint] <= x; curPoint++ {
		}
		if curPoint == 0 {
			fmt.Printf("values at x=%f are undefined, earliest point in step function defined at x=%f\n", x, sorted[0])
			panic("undefined value")
		}

		// the step holds the values of the greatest key <= x; queries past
		// the last key fall through to the last step
		return data[sorted[curPoint-1]]
	}
}
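A quick usage sketch (with StepFunction above in the same package): queries between keys hold the value of the greatest key at or below x.

package main

import "fmt"

func main() {
	f := StepFunction(map[float64][]float64{
		0: {1, 10},
		5: {2, 20},
	})
	fmt.Println(f(3)) // [1 10]
	fmt.Println(f(7)) // [2 20]; past the last key the last step persists
}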
Example #12
func StumpPool(samples SampleList) boosting.Pool {
	dims := len(samples[0])
	res := make([]boosting.Classifier, 0, len(samples)*dims)
	for d := 0; d < dims; d++ {
		values := make([]float64, 0, len(samples))
		seenValues := map[float64]bool{}
		for _, s := range samples {
			val := s[d]
			if !seenValues[val] {
				seenValues[val] = true
				values = append(values, val)
			}
		}
		sort.Float64s(values)
		for i, val := range values {
			var t TreeStump
			if i == 0 {
				t = TreeStump{FieldIndex: d, Threshold: val - 1}
			} else {
				lastVal := values[i-1]
				average := (lastVal + val) / 2
				t = TreeStump{FieldIndex: d, Threshold: average}
			}
			res = append(res, t)
		}
	}
	return boosting.NewStaticPool(res, samples)
}
Example #13
func createPrintableResult(slice []*Result) *PrintableResult {
	if len(slice) == 0 {
		return nil
	}
	pr := PrintableResult{}
	pr.count = uint64(0)
	pr.totalT = float64(0)
	pr.times = []float64{}
	pr.totalSize = uint64(0)
	for _, result := range slice {
		pr.count++
		pr.totalT += float64(result.t)
		pr.times = append(pr.times, float64(result.t))
		pr.totalSize += uint64(result.size)
	}
	pr.avgSize = float64(pr.totalSize) / float64(pr.count)
	sort.Float64s(pr.times)
	pr.min = pr.times[0]
	pr.p25 = pr.times[int(0.25*float64(pr.count))]
	pr.p50 = pr.times[int(0.50*float64(pr.count))]
	pr.avg = pr.totalT / float64(pr.count)
	pr.p75 = pr.times[int(0.75*float64(pr.count))]
	pr.p80 = pr.times[int(0.80*float64(pr.count))]
	pr.p85 = pr.times[int(0.85*float64(pr.count))]
	pr.p90 = pr.times[int(0.90*float64(pr.count))]
	pr.p95 = pr.times[int(0.95*float64(pr.count))]
	pr.p99 = pr.times[int(0.99*float64(pr.count))]
	pr.max = pr.times[pr.count-1]
	return &pr
}
Example #14
// Histogram mimics numpy.histogram; note that it sorts series in place.
func Histogram(series []float64, bins int) ([]int, []float64) {
	var binEdges []float64
	var hist []int
	l := len(series)
	if l == 0 {
		return hist, binEdges
	}
	sort.Float64s(series)
	w := (series[l-1] - series[0]) / float64(bins)
	for i := 0; i < bins; i++ {
		binEdges = append(binEdges, w*float64(i)+series[0])
		if binEdges[len(binEdges)-1] >= series[l-1] {
			break
		}
	}
	binEdges = append(binEdges, w*float64(bins)+series[0])
	bl := len(binEdges)
	hist = make([]int, bl-1)
	for i := 0; i < bl-1; i++ {
		for _, val := range series {
			if val >= binEdges[i] && val < binEdges[i+1] {
				hist[i]++
				continue
			}
			if i == (bl-2) && val >= binEdges[i] && val <= binEdges[i+1] {
				hist[i]++
			}
		}
	}
	return hist, binEdges
}
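A quick usage sketch (with Histogram above in the same package; remember the input slice gets sorted in place):

package main

import "fmt"

func main() {
	series := []float64{9, 1, 2, 2.5, 3, 7}
	hist, edges := Histogram(series, 4) // bin width (9-1)/4 = 2
	fmt.Println(hist)  // [3 1 0 2]
	fmt.Println(edges) // [1 3 5 7 9]
}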
Example #15
func TestQuantRandQuery(t *testing.T) {
	s := NewTargeted(0.5, 0.90, 0.99)
	a := make([]float64, 0, 1e5)
	rand.Seed(42)
	for i := 0; i < cap(a); i++ {
		v := float64(rand.Int63())
		s.Insert(v)
		a = append(a, v)
	}
	t.Logf("len: %d", s.Count())
	sort.Float64s(a)
	w := getPerc(a, 0.50)
	if g := s.Query(0.50); math.Abs(w-g)/w > 0.03 {
		t.Errorf("perc50: want %v, got %v", w, g)
		t.Logf("e: %f", math.Abs(w-g)/w)
	}
	w = getPerc(a, 0.90)
	if g := s.Query(0.90); math.Abs(w-g)/w > 0.03 {
		t.Errorf("perc90: want %v, got %v", w, g)
		t.Logf("e: %f", math.Abs(w-g)/w)
	}
	w = getPerc(a, 0.99)
	if g := s.Query(0.99); math.Abs(w-g)/w > 0.03 {
		t.Errorf("perc99: want %v, got %v", w, g)
		t.Logf("e: %f", math.Abs(w-g)/w)
	}
}
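getPerc is not shown in the snippet; a plausible stand-in that reads the p-quantile out of a sorted slice:

// getPerc returns the element at rank p of an already-sorted slice.
func getPerc(x []float64, p float64) float64 {
	k := int(float64(len(x)) * p)
	if k >= len(x) {
		k = len(x) - 1
	}
	return x[k]
}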
Example #16
// fillAvgPct caches the average and 95th percentile of the DayStore.
func (ds *DayStore) fillAvgPct() {
	ds.validAvgPct = true

	// If no past Hourly data has been flushed to the window,
	// return the average and 95th percentile of the past hour.
	if ds.size == 0 {
		ds.cachedAverage, _ = ds.Hour.Average()
		ds.cachedNinetyFifth, _ = ds.Hour.Percentile(0.95)
		return
	}
	// Otherwise, ignore the past one hour and use the window values

	// generate a slice of the window
	day := ds.window.Slice()

	// calculate the average value of the hourly averages
	// also create a sortable slice of float64
	var sum uint64
	var nf []float64
	for _, elem := range day {
		he := elem.(hourEntry)
		sum += he.average
		nf = append(nf, float64(he.ninetyFifth))
	}
	ds.cachedAverage = sum / uint64(ds.size)

	// sort and calculate the 95th percentile
	sort.Float64s(nf)
	pcIdx := int(math.Trunc(0.95 * float64(ds.size+1)))
	if pcIdx >= len(nf) {
		pcIdx = len(nf) - 1
	}
	ds.cachedNinetyFifth = uint64(nf[pcIdx])
}
Example #17
// ReducePercentile computes the percentile of values for each key.
func ReducePercentile(values []interface{}, c *influxql.Call) interface{} {
	// Checks that this arg exists and is a valid type are done in the parsing validation
	// and have test coverage there
	lit, _ := c.Args[1].(*influxql.NumberLiteral)
	percentile := lit.Val

	var allValues []float64

	for _, v := range values {
		if v == nil {
			continue
		}

		vals := v.([]interface{})
		for _, v := range vals {
			switch v := v.(type) {
			case int64:
				allValues = append(allValues, float64(v))
			case float64:
				allValues = append(allValues, v)
			}
		}
	}

	sort.Float64s(allValues)
	length := len(allValues)
	index := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1

	if index < 0 || index >= len(allValues) {
		return nil
	}

	return allValues[index]
}
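A worked example of the index rule above: for ten sorted values and percentile = 90, the index is floor(10*90/100 + 0.5) - 1 = 8, i.e. the ninth value.

package main

import (
	"fmt"
	"math"
)

func main() {
	vals := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} // already sorted
	percentile := 90.0
	index := int(math.Floor(float64(len(vals))*percentile/100.0+0.5)) - 1
	fmt.Println(index, vals[index]) // 8 9
}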
Example #18
// getDatacenterDistance will return the median round trip time estimate for
// the given DC from the given serfer, in seconds. This will return positive
// infinity if no coordinates are available.
func getDatacenterDistance(s serfer, dc string) (float64, error) {
	// If this is the serfer's DC then just bail with zero RTT.
	if dc == s.GetDatacenter() {
		return 0.0, nil
	}

	// Otherwise measure from the serfer to the nodes in the other DC.
	coord, err := s.GetCoordinate()
	if err != nil {
		return 0.0, err
	}

	// Fetch all the nodes in the DC and record their distance, if available.
	nodes := s.GetNodesForDatacenter(dc)
	subvec := make([]float64, 0, len(nodes))
	for _, node := range nodes {
		if other, ok := s.GetCachedCoordinate(node); ok {
			subvec = append(subvec, computeDistance(coord, other))
		}
	}

	// Compute the median by sorting and taking the middle item.
	if len(subvec) > 0 {
		sort.Float64s(subvec)
		return subvec[len(subvec)/2], nil
	}

	// Return the default infinity value.
	return computeDistance(coord, nil), nil
}
Example #19
// contourPaths returns a collection of vg.Paths describing contour lines based
// on the input data in m cut at the given levels. The trX and trY functions
// are coordinate transforms. The returned map contains slices of paths keyed
// on the value of the contour level. contourPaths sorts levels ascending as a
// side effect.
func contourPaths(m GridXYZ, levels []float64, trX, trY func(float64) vg.Length) map[float64][]vg.Path {
	sort.Float64s(levels)

	ends := make(map[float64]endMap)
	conts := make(contourSet)
	conrec(m, levels, func(_, _ int, l line, z float64) {
		paths(l, z, ends, conts)
	})
	ends = nil

	// TODO(kortschak): Check that all non-loop paths have
	// both ends at a boundary. If any end is not at a boundary
	// it may have a partner nearby. Find this partner and join
	// the two conts by merging the nearby ends at their mean
	// location. This operation is done level by level to ensure
	// close contours of different heights are not joined.
	// A partner end should differ only by floating point error, but I
	// suspect that a bi- or higher order furcation is possible,
	// so it may be that the path ends at a middle node of
	// another path. This needs to be investigated.

	// Excise loops from crossed paths.
	for c := range conts {
		// Always try to do quick excision in production if possible.
		c.exciseLoops(conts, true)
	}

	// Build vg.Paths.
	paths := make(map[float64][]vg.Path)
	for c := range conts {
		paths[c.z] = append(paths[c.z], c.path(trX, trY))
	}

	return paths
}
Example #20
func Print(xyvec map[float64]float64, xlabel string, ylabel string) []byte {
	x := vlib.NewVectorF(len(xyvec))
	y := vlib.NewVectorF(len(xyvec))
	cnt := 0
	for vx := range xyvec {
		x[cnt] = vx
		cnt++
	}

	sort.Float64s([]float64(x))

	for indx, vx := range x {
		y[indx] = xyvec[vx]
	}
	type temp struct {
		SNR vlib.VectorF
		BER vlib.VectorF
	}
	data := temp{x, y}
	fmt.Printf("\n%s=%1.2e\n %s=%1.2e", xlabel, x, ylabel, y)
	result, err := json.Marshal(data)
	if err != nil {
		return nil
	}
	return result
}
Example #21
func buildComplexGauge(k string, t []float64, pct float64) *ComplexGauge {
	threshold := ((100.0 - pct) / 100.0) * float64(len(t))
	threshold = math.Floor(threshold + 0.5)

	count := len(t) - int(threshold)
	if count <= 0 {
		return nil
	}

	g := &ComplexGauge{}
	g.Name, g.Source = parseSource(k)
	if pct != 100.0 {
		if float64(int(pct)) != pct {
			rem := int(math.Ceil((pct - float64(int(pct))) * 10))
			g.Name += fmt.Sprintf(".%d_%d", int(pct), rem)
		} else {
			g.Name += fmt.Sprintf(".%d", int(pct))
		}
	}
	g.Count = count

	sort.Float64s(t)
	g.Min = t[0]
	g.Max = t[count-1]
	for i := 0; i < count; i++ {
		g.Sum += t[i]
		g.SumSquares += (t[i] * t[i])
	}

	return g
}
Example #22
func median(rows [][]string, idx int) float64 {
	var sorted []float64
	for i, row := range rows {
		if i != 0 { // skip the header row
			fs := strings.Fields(row[0])
			val, _ := strconv.ParseFloat(fs[idx], 64) // parse errors are ignored; malformed cells contribute 0
			sorted = append(sorted, val)
		}
	}

	sort.Float64s(sorted)

	if len(sorted)%2 == 0 {
		// even number of items
		// for example: 3, 5, 8, 9
		// median is (5 + 8) / 2 = 6.5
		middle := len(sorted) / 2
		higher := sorted[middle]
		lower := sorted[middle-1]
		return (higher + lower) / 2
	}
	// odd number of items
	// for example: 3, 5, 8
	// median is 5
	return sorted[len(sorted)/2]
}
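A quick usage sketch (with the median function above in the same package); the first row is treated as a header and skipped:

package main

import "fmt"

func main() {
	rows := [][]string{
		{"name value"},
		{"a 3"},
		{"b 8"},
		{"c 5"},
	}
	fmt.Println(median(rows, 1)) // 5
}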
Example #23
//LinearInterpolationFunction assumes that the data slices retrieved from the map are
//ordered such that data[key][n] and data[anotherkey][n] are the values to be interpolated between
func LinearInterpolationFunction(data map[float64][]float64) func(float64) []float64 {

	sorted := make([]float64, len(data))
	size := -1
	i := 0
	for key, slice := range data {
		sorted[i] = key
		if i == 0 {
			size = len(slice)
		} else if size != len(slice) {
			panic("LinearInterpolationFunction -- mismatched []float64 sizes, they must all be equal to interpolate")
		}
		i++
	}

	sort.Float64s(sorted)

	return func(x float64) []float64 {
		point2 := 0
		for ; point2 < len(sorted) && sorted[point2] < x; point2++ {
		}
		if point2 == 0 && len(sorted) > 1 && x == sorted[0] {
			point2 = 1 // x coincides with the first key; use the first interval
		}
		if point2 >= len(sorted) || point2 == 0 {
			fmt.Printf("value x=%f exceeds defined range x=%f -> x=%f\n", x, sorted[0], sorted[len(sorted)-1])
			panic("undefined value")
		}

		point1 := point2 - 1
		x1, x2 := sorted[point1], sorted[point2]
		// weight the two bracketing slices by where x falls in the interval
		t := (x - x1) / (x2 - x1)
		values := make([]float64, len(data[x1]))
		for i := range values {
			values[i] = data[x1][i] + t*(data[x2][i]-data[x1][i])
		}

		return values
	}
}
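A quick usage sketch (with LinearInterpolationFunction above in the same package): a query a quarter of the way into the [0, 10] interval blends each component accordingly.

package main

import "fmt"

func main() {
	f := LinearInterpolationFunction(map[float64][]float64{
		0:  {0, 100},
		10: {10, 200},
	})
	fmt.Println(f(2.5)) // [2.5 125]
}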
Example #24
func (b *AccumulatingBucket) ValueForIndex(index int) float64 {
	b.mutex.RLock()
	defer b.mutex.RUnlock()

	elementCount := len(b.elements)

	if elementCount == 0 {
		return math.NaN()
	}

	sortedElements := make([]float64, elementCount)

	for i, element := range b.elements {
		sortedElements[i] = element.Value.(float64)
	}

	sort.Float64s(sortedElements)

	/*
		N.B.(mtp): Interfacing components should not need to comprehend what
		           eviction and storage container strategies are used; therefore,
		           we adjust this silently.
	*/
	targetIndex := int(float64(elementCount-1) * (float64(index) / float64(b.observations)))

	return sortedElements[targetIndex]
}
Example #25
func (r *report) print() {
	if r.output == "csv" {
		r.printCSV()
		if r.outputFile == "" {
			return
		}
	}

	sort.Float64s(r.lats)

	if len(r.lats) > 0 {
		r.fastest = r.lats[0]
		r.slowest = r.lats[len(r.lats)-1]
		fmt.Printf("\nSummary:\n")
		fmt.Printf("  Total:\t%4.4f secs.\n", r.total.Seconds())
		fmt.Printf("  Slowest:\t%4.4f secs.\n", r.slowest)
		fmt.Printf("  Fastest:\t%4.4f secs.\n", r.fastest)
		fmt.Printf("  Average:\t%4.4f secs.\n", r.average)
		fmt.Printf("  Requests/sec:\t%4.4f\n", r.rps)
		if r.sizeTotal > 0 {
			fmt.Printf("  Total Data Received:\t%d bytes.\n", r.sizeTotal)
			fmt.Printf("  Response Size per Request:\t%d bytes.\n", r.sizeTotal/int64(len(r.lats)))
		}
		r.printStatusCodes()
		r.printHistogram()
		r.printLatencies()
	}

	if len(r.errorDist) > 0 {
		r.printErrors()
	}
}
Example #26
// ReducePercentile computes the percentile of values for each key.
func ReducePercentile(percentile float64) ReduceFunc {
	return func(values []interface{}) interface{} {
		var allValues []float64

		for _, v := range values {
			if v == nil {
				continue
			}

			vals := v.([]interface{})
			for _, v := range vals {
				switch v := v.(type) {
				case int64:
					allValues = append(allValues, float64(v))
				case float64:
					allValues = append(allValues, v)
				}
			}
		}

		sort.Float64s(allValues)
		length := len(allValues)
		index := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1

		if index < 0 || index >= len(allValues) {
			return nil
		}

		return allValues[index]
	}
}
Example #27
func (b *Boom) Print() {
	total := b.end.Sub(b.start)
	var avgTotal float64
	var fastest, slowest time.Duration

	for {
		select {
		case r := <-b.results:
			latencies = append(latencies, r.duration.Seconds())
			statusCodeDist[r.statusCode]++

			avgTotal += r.duration.Seconds()
			if fastest.Nanoseconds() == 0 || r.duration.Nanoseconds() < fastest.Nanoseconds() {
				fastest = r.duration
			}
			if r.duration.Nanoseconds() > slowest.Nanoseconds() {
				slowest = r.duration
			}
		default:
			rps := float64(b.N) / total.Seconds()
			fmt.Printf("\nSummary:\n")
			fmt.Printf("  Total:\t%4.4f secs.\n", total.Seconds())
			fmt.Printf("  Slowest:\t%4.4f secs.\n", slowest.Seconds())
			fmt.Printf("  Fastest:\t%4.4f secs.\n", fastest.Seconds())
			fmt.Printf("  Average:\t%4.4f secs.\n", avgTotal/float64(b.N))
			fmt.Printf("  Requests/sec:\t%4.4f\n", rps)
			fmt.Printf("  Speed index:\t%v\n", speedIndex(rps))
			sort.Float64s(latencies)
			b.printHistogram()
			b.printLatencies()
			b.printStatusCodes()
			return
		}
	}
}
Example #28
// Mode returns the most common value(s), if any.
// See https://en.wikipedia.org/wiki/Mode_%28statistics%29.
func Mode(vals []float64) []float64 {
	// Sort the values so we can compare for near equality.
	sort.Float64s(vals)
	cnts := make(map[float64]int)
	max := 0
	prev := 0.0
	for _, val := range vals {
		if calc.Near(val, prev) {
			// Discard trivial differences between floating point values.
			val = prev
		} else {
			// Only save the change once a sufficient difference has occurred.
			prev = val
		}
		cnts[val]++
		if cnts[val] > max {
			max = cnts[val]
		}
	}
	modes := make([]float64, 0, len(cnts))

	// A mode must occur more than once.
	if max > 1 {
		for val, cnt := range cnts {
			if cnt == max {
				// Every val with a max count is a mode.
				modes = append(modes, val)
			}
		}
	}
	return modes
}
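calc.Near is not shown; it presumably treats two floats as equal within a small tolerance, e.g.:

// Near reports whether a and b differ by less than a small epsilon.
func Near(a, b float64) bool {
	return math.Abs(a-b) < 1e-9
}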
Example #29
func AnalyzeBenchmarkRuns(label string, times []float64) {
	sorted := times // note: this aliases times, so the caller's slice is sorted in place
	sort.Float64s(sorted)

	tot := 0.0
	for _, v := range times {
		tot += v
	}
	n := float64(len(times))

	avg := tot / n
	variance := 0.0
	for _, v := range times {
		variance += (v - avg) * (v - avg)
	}
	variance /= n

	stddev := math.Sqrt(variance)

	median := sorted[len(times)/2]
	perc90 := sorted[int(n*0.9)]
	perc10 := sorted[int(n*0.1)]

	fmt.Printf(
		"%s: %d samples\n"+
			"avg %.3fms +/- %.0f%% "+
			"median %.3fms, 10%%tiles: [-%.0f%%, +%.0f%%]\n",
		label,
		len(times), avg, 100.0*2*stddev/avg,
		median, 100*(median-perc10)/median, 100*(perc90-median)/median)
}
Example #30
// thresholdEdgeWeight sorts all weights in ascending order and finds the top N
// weights that accumulate to frac of the total weight; it then returns the
// weight of rank N.
func (v *Visualizer) thresholdEdgeWeight(frac float64) float64 {
	if frac < 0 || frac > 1 {
		panic(fmt.Sprintf("frac (%d) out of range [0,1]", frac))
	}

	w := make([]float64, 0)
	sum := 0.0
	for s, row := range v.Σξ {
		for _, tr := range row {
			w = append(w, tr/v.Σγ[s])
			sum += tr / v.Σγ[s]
		}
	}

	sort.Float64s(w)

	partial := 0.0
	for i := len(w) - 1; i >= 0; i-- {
		partial += w[i]
		if partial >= frac*sum {
			return w[i]
		}
	}
	return 0 // Display all edges.
}