// calculateConfidenceInterval computes a z-based confidence interval for the
// difference between the treatment-0 and treatment-1 sample means of s,
// using the Welch-style standard error sqrt(s1²/n1 + s0²/n0).
// The z multiplier comes from the package-level zScore constant.
func calculateConfidenceInterval(s scoreResult) confInterval {
	var t0s []float64
	var t1s []float64
	// Partition the data into treatment 0 and treatment 1
	// and save the score for evaluation
	for _, each := range s.t0 {
		t0s = append(t0s, each.y)
	}
	for _, each := range s.t1 {
		t1s = append(t1s, each.y)
	}
	var ci confInterval
	// Alternative z multipliers kept from earlier experiments:
	//var z = 1.96  // 95% — http://www.dummies.com/how-to/content/creating-a-confidence-interval-for-the-difference-.html
	//var z = 1.645 // 90% — same reference
	//var z = 2.58  // 99%
	var m0, _ = stats.Mean(t0s)
	var n0 = float64(len(t0s))
	var sd0, _ = stats.StandardDeviation(t0s)
	var m1, _ = stats.Mean(t1s)
	var n1 = float64(len(t1s))
	var sd1, _ = stats.StandardDeviation(t1s)
	// Difference in sample means; positive when treatment 0 responds higher.
	var mDiff = m0 - m1
	var sd0s = sd0 * sd0
	var sd1s = sd1 * sd1
	// Difference in sample means +- confidence interval.
	ci.min = mDiff - zScore*math.Sqrt(sd1s/n1+sd0s/n0)
	ci.max = mDiff + zScore*math.Sqrt(sd1s/n1+sd0s/n0)
	// NOTE(review): min-max is always <= 0, so ci.diff is the negated
	// interval width — confirm downstream consumers expect the sign.
	ci.diff = ci.min - ci.max
	ci.t1Max = m1 + ci.max
	ci.t1Min = m1 + ci.min
	// Interval width expressed in units of the t1 standard deviation.
	ci.diffSd = ci.diff / sd1
	// how close is the score to the middle of the confidence interval
	ci.middle = (ci.min + ci.max) / 2
	//ci.closeness = math.Abs(s.score - ci.middle)
	//ci.closeness = math.Abs(ci.diffSd - s.score)
	//fmt.Printf("conf interval: %f to %f, conf diff: %f, t1: %f, t1max: %f, t1min: %f, diffSd: %f\n", ci.min, ci.max, ci.diff, m1, ci.t1Max, ci.t1Min, ci.diffSd)
	return ci
}
// https://github.com/hermanschaaf/stats/blob/master/stats.go func NormalConfidenceInterval(nums []float64) (lower float64, upper float64) { conf := 1.95996 // 95% confidence for the mean, http://bit.ly/Mm05eZ mean, _ := stats.Mean(nums) dev, _ := stats.StandardDeviation(nums) dev = dev / math.Sqrt(float64(len(nums))) return mean - dev*conf, mean + dev*conf }
func bench(requests, concurrency int, images []string, args []string) { start := time.Now() timings := make([]float64, requests) // Create a buffered channel so our display goroutine can't slow down the workers. completeCh := make(chan time.Duration, requests) doneCh := make(chan struct{}) current := 0 go func() { for timing := range completeCh { timings = append(timings, timing.Seconds()) current++ percent := float64(current) / float64(requests) * 100 fmt.Printf("[%3.f%%] %d/%d containers started\n", percent, current, requests) } doneCh <- struct{}{} }() session(requests, concurrency, images, args, completeCh) close(completeCh) <-doneCh total := time.Since(start) mean, _ := stats.Mean(timings) p90th, _ := stats.Percentile(timings, 90) p99th, _ := stats.Percentile(timings, 99) meanMillis := mean * MILLIS_IN_SECOND p90thMillis := p90th * MILLIS_IN_SECOND p99thMillis := p99th * MILLIS_IN_SECOND fmt.Printf("\n") fmt.Printf("Time taken for tests: %.3fs\n", total.Seconds()) fmt.Printf("Time per container: %.3fms [mean] | %.3fms [90th] | %.3fms [99th]\n", meanMillis, p90thMillis, p99thMillis) }
func updateComplexity(v *Web, pop neat.Population) { // Build complexity slice x := make([]float64, len(pop.Genomes)) for i, g := range pop.Genomes { x[i] = float64(g.Complexity()) } var b neat.Genome max := -1.0 for _, g := range pop.Genomes { if g.Fitness > max { b = g max = g.Fitness } } // Append the record min, _ := stats.Min(x) max, _ = stats.Max(x) mean, _ := stats.Mean(x) v.complexity = append(v.complexity, [4]float64{ min, mean, max, float64(b.Complexity()), }) }
// startStats blocks and periodically logs transaction statistics (throughput,
// success rates, durations, ...). Runs until tc.stopper signals shutdown.
// TODO(tschottdorf): Use a proper metrics subsystem for this (+the store-level
// stats).
// TODO(mrtracy): Add this to TimeSeries.
func (tc *TxnCoordSender) startStats() {
	res := time.Millisecond // for duration logging resolution
	lastNow := tc.clock.PhysicalNow()
	for {
		select {
		case <-time.After(statusLogInterval):
			// Skip the work entirely unless verbose logging is enabled.
			if !log.V(1) {
				continue
			}
			// Swap the accumulated stats out under the lock so workers can
			// keep recording into a fresh txnCoordStats.
			tc.Lock()
			curStats := tc.txnStats
			tc.txnStats = txnCoordStats{}
			tc.Unlock()
			now := tc.clock.PhysicalNow()
			// Tests have weird clocks.
			if now-lastNow <= 0 {
				continue
			}
			num := len(curStats.durations)
			// NOTE(review): these stats calls are fed possibly-empty slices;
			// this assumes the stats package returns a usable zero in that
			// case — confirm against the library in use.
			dMax := time.Duration(stats.Max(curStats.durations))
			dMean := time.Duration(stats.Mean(curStats.durations))
			dDev := time.Duration(stats.StdDevP(curStats.durations))
			rMax := stats.Max(curStats.restarts)
			rMean := stats.Mean(curStats.restarts)
			rDev := stats.StdDevP(curStats.restarts)
			// Transactions per second over the elapsed wall-clock window.
			rate := float64(int64(num)*int64(time.Second)) / float64(now-lastNow)
			var pCommitted, pAbandoned, pAborted float32
			if num > 0 {
				pCommitted = 100 * float32(curStats.committed) / float32(num)
				pAbandoned = 100 * float32(curStats.abandoned) / float32(num)
				pAborted = 100 * float32(curStats.aborted) / float32(num)
			}
			log.Infof("txn coordinator: %.2f txn/sec, %.2f/%.2f/%.2f %%cmmt/abrt/abnd, %s/%s/%s avg/σ/max duration, %.1f/%.1f/%.1f avg/σ/max restarts (%d samples)", rate, pCommitted, pAborted, pAbandoned, util.TruncateDuration(dMean, res), util.TruncateDuration(dDev, res), util.TruncateDuration(dMax, res), rMean, rDev, rMax, num)
			lastNow = now
		case <-tc.stopper.ShouldStop():
			return
		}
	}
}
// Finalize calculation of the risk using available datapoints func riskFinalize(op opContext, rs *slib.RRAServiceRisk) error { var ( rvals []float64 err error ) for _, x := range rs.Scenarios { // If the scenario had no data, don't include it in the // final scoring if x.NoData { continue } rvals = append(rvals, x.Score) } // Note the highest business impact value that was determined from // the RRA. This can be used as an indication of the business impact // for the service. rs.Risk.Impact = rs.UsedRRAAttrib.Impact rs.Risk.ImpactLabel, err = slib.ImpactLabelFromValue(rs.Risk.Impact) if err != nil { return err } if len(rvals) == 0 { // This can occur if we have no metric data, including no valid // information in the RRA logf("error in risk calculation: %q has no valid scenarios", rs.RRA.Name) rs.Risk.Median = 0.0 rs.Risk.Average = 0.0 rs.Risk.WorstCase = 0.0 rs.Risk.MedianLabel = "unknown" rs.Risk.AverageLabel = "unknown" rs.Risk.WorstCaseLabel = "unknown" rs.Risk.DataClass, err = slib.DataValueFromLabel(rs.RRA.DefData) return nil } rs.Risk.Median, err = stats.Median(rvals) if err != nil { return err } rs.Risk.MedianLabel = slib.NormalLabelFromValue(rs.Risk.Median) rs.Risk.Average, err = stats.Mean(rvals) if err != nil { return err } rs.Risk.AverageLabel = slib.NormalLabelFromValue(rs.Risk.Average) rs.Risk.WorstCase, err = stats.Max(rvals) if err != nil { return err } rs.Risk.WorstCaseLabel = slib.NormalLabelFromValue(rs.Risk.WorstCase) rs.Risk.DataClass, err = slib.DataValueFromLabel(rs.RRA.DefData) if err != nil { return err } return nil }
func (s *statistics) report() { for range time.Tick(time.Second) { s.Lock() writeTimes := s.writeTimes s.writeTimes = nil s.Unlock() // The stats functions return an error only when the input is empty. mean, _ := stats.Mean(writeTimes) stddev, _ := stats.StandardDeviation(writeTimes) log.Infof("wrote %d messages, latency mean=%s, stddev=%s", len(writeTimes), time.Duration(mean), time.Duration(stddev)) } }
func updateFitness(v *Web, pop neat.Population) { // Build fitness slice x := make([]float64, len(pop.Genomes)) for i, g := range pop.Genomes { x[i] = g.Fitness } // Append the record min, _ := stats.Min(x) max, _ := stats.Max(x) mean, _ := stats.Mean(x) v.fitness = append(v.fitness, [3]float64{ min, mean, max, }) }
// for a partition in the set of data, calculate the effective treatement
// score using (mean t1 - mean t0) / population standard deviation
// (Cohen's d style effect size, then remapped to a bounded score).
// Returns a zero score when the partition is too small, a group is empty,
// or the groups' confidence intervals overlap.
func evalScore(d []coreData, rc []rowCriteria, dataSetId int) scoreResult {
	var s scoreResult
	s.rc = rc
	s.d = d
	s.score = 0
	s.dataSetId = dataSetId
	// check for minimum row threshhold
	if len(d) <= rowThreshhold {
		return s
	}
	var t0 []coreData
	var t1 []coreData
	var t0s []float64
	var t1s []float64
	var allTs []float64
	// Partition the data into treatment 0 and treatment 1
	// and save the score for evaluation
	for _, each := range d {
		// Save all responses for later SD calculation
		allTs = append(allTs, each.y)
		if each.treatment == 0 {
			t0 = append(t0, each)
			t0s = append(t0s, each.y)
		} else {
			t1 = append(t1, each)
			t1s = append(t1s, each.y)
		}
	}
	// Must have minimum threshhold of records
	if len(t0)+len(t1) < rowThreshhold {
		return s
	}
	// Must have at least one in each group
	if len(t0) == 0 || len(t1) == 0 {
		return s
	}
	// then calculate the median, also experiment with average
	var mean0, _ = stats.Mean(t0s)
	var mean1, _ = stats.Mean(t1s)
	//var meanAll, _ = stats.Mean(allTs)
	//var sd, _ = stats.StandardDeviationPopulation(allTs)
	// subtract the two t0-t1, we want t1 to be smaller
	// Note: use spooled
	// square root of ((Nt-1)St^2 + (Nc-1)Sc^2)/(Nt+Nc))
	var St, _ = stats.StandardDeviation(t1s)
	var Sc, _ = stats.StandardDeviation(t0s)
	var Nt = float64(len(t1s))
	var Nc = float64(len(t0s))
	//func calculateConfidenceInterval2(nt, nc, mt, mc, sdt, sdc float64) confInterval2
	var ci = calculateConfidenceInterval2(Nt, Nc, mean1, mean0, St, Sc)
	// If the confidence intervals overlap then not valid range
	if ci.overlap {
		return s
	}
	var St2 = St * St
	var Sc2 = Sc * Sc
	// Earlier pooled-SD experiments, kept for reference:
	//var Ntm1 = float64(Nt - 1)
	//var Ncm1 = float64(Nc - 1)
	//var kt = Ntm1 * St2
	//var kc = Ncm1 * Sc2
	//var ksum = kt + kc
	//var Nsum = Nt + Nc
	//var sPooled = math.Sqrt(ksum / Nsum)
	// Equal-weight pooled SD, per http://www.uccs.edu/~lbecker/
	var sPooled = math.Sqrt((St2 + Sc2) / 2)
	s.t0 = t0
	s.t1 = t1
	//sPooled = math.Sqrt((St2 * Sc2) / 2)
	//var _, t1confh = NormalConfidenceInterval(t1s)
	//var _, t0confh = NormalConfidenceInterval(t0s)
	//var meanValue = mean1 - mean0
	//var meanValue = (mean1/St - mean0/Sc) / sPooled
	// Score Type 1
	var meanDifference = mean1 - mean0
	//s.score = meanDifference / meanAll
	//var max, _ = stats.Max(allTs)
	//s.score = meanDifference / sPooled
	var cohensd = meanDifference / sPooled
	// NOTE(review): ((Nt+Nc)*(Nt+Nc))/(Nt+Nc) simplifies to Nt+Nc —
	// confirm whether a different normalizer was intended here.
	var a = ((Nt + Nc) * (Nt + Nc)) / (Nt + Nc)
	// Score type 5
	// NOTE(review): immediately overwritten by score type 6 below; type 5
	// is effectively dead code kept for experimentation.
	s.score = cohensd / math.Sqrt((cohensd*cohensd)+4)
	// Score type 6
	s.score = cohensd / math.Sqrt((cohensd*cohensd)+a)
	//s.score = (mean1/St - mean0/Sc) / St
	return s
}
// main is a linear tour of the stats package API; each call is followed by
// a comment showing its expected printed output.
func main() {
	// LoadRawData coerces mixed numeric/string input to float64 values.
	d := stats.LoadRawData([]interface{}{1.1, "2", 3.0, 4, "5"})

	a, _ := stats.Min(d)
	fmt.Println(a) // 1.1

	a, _ = stats.Max(d)
	fmt.Println(a) // 5

	a, _ = stats.Sum([]float64{1.1, 2.2, 3.3})
	fmt.Println(a) // 6.6

	a, _ = stats.Mean([]float64{1, 2, 3, 4, 5})
	fmt.Println(a) // 3

	a, _ = stats.Median([]float64{1, 2, 3, 4, 5, 6, 7})
	fmt.Println(a) // 4

	m, _ := stats.Mode([]float64{5, 5, 3, 3, 4, 2, 1})
	fmt.Println(m) // [5 3]

	// Variance and deviation variants (population vs. sample).
	a, _ = stats.PopulationVariance([]float64{1, 2, 3, 4, 5})
	fmt.Println(a) // 2

	a, _ = stats.SampleVariance([]float64{1, 2, 3, 4, 5})
	fmt.Println(a) // 2.5

	a, _ = stats.MedianAbsoluteDeviationPopulation([]float64{1, 2, 3})
	fmt.Println(a) // 1

	a, _ = stats.StandardDeviationPopulation([]float64{1, 2, 3})
	fmt.Println(a) // 0.816496580927726

	a, _ = stats.StandardDeviationSample([]float64{1, 2, 3})
	fmt.Println(a) // 1

	a, _ = stats.Percentile([]float64{1, 2, 3, 4, 5}, 75)
	fmt.Println(a) // 4

	a, _ = stats.PercentileNearestRank([]float64{35, 20, 15, 40, 50}, 75)
	fmt.Println(a) // 40

	// Regression over (x, y) coordinates.
	c := []stats.Coordinate{
		{1, 2.3}, {2, 3.3}, {3, 3.7}, {4, 4.3}, {5, 5.3},
	}

	r, _ := stats.LinearRegression(c)
	fmt.Println(r) // [{1 2.3800000000000026} {2 3.0800000000000014} {3 3.7800000000000002} {4 4.479999999999999} {5 5.179999999999998}]

	r, _ = stats.ExponentialRegression(c)
	fmt.Println(r) // [{1 2.5150181024736638} {2 3.032084111136781} {3 3.6554544271334493} {4 4.406984298281804} {5 5.313022222665875}]

	r, _ = stats.LogarithmicRegression(c)
	fmt.Println(r) // [{1 2.1520822363811702} {2 3.3305559222492214} {3 4.019918836568674} {4 4.509029608117273} {5 4.888413396683663}]

	// Sampling with and without replacement (outputs depend on the
	// package's random source).
	s, _ := stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 3, false)
	fmt.Println(s) // [0.2,0.4,0.3]

	s, _ = stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 10, true)
	fmt.Println(s) // [0.2,0.2,0.4,0.1,0.2,0.4,0.3,0.2,0.2,0.1]

	// Quartile-based summaries.
	q, _ := stats.Quartile([]float64{7, 15, 36, 39, 40, 41})
	fmt.Println(q) // {15 37.5 40}

	iqr, _ := stats.InterQuartileRange([]float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118})
	fmt.Println(iqr) // 10

	mh, _ := stats.Midhinge([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
	fmt.Println(mh) // 7.5

	tr, _ := stats.Trimean([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
	fmt.Println(tr) // 7.25

	o, _ := stats.QuartileOutliers([]float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100})
	fmt.Printf("%+v\n", o) // {Mild:[15 18] Extreme:[-1000 100]}

	// Alternative means and rounding.
	gm, _ := stats.GeometricMean([]float64{10, 51.2, 8})
	fmt.Println(gm) // 15.999999999999991

	hm, _ := stats.HarmonicMean([]float64{1, 2, 3, 4, 5})
	fmt.Println(hm) // 2.18978102189781

	a, _ = stats.Round(2.18978102189781, 3)
	fmt.Println(a) // 2.189
}
// main replays recorded physics frames from delta_data.bin through the
// server-side encoder and client-side decoder, verifying that the decoded
// state matches, and reports per-frame bandwidth and timing statistics.
func main() {
	verbose := flag.Bool("v", false, "verbose output")
	flag.Parse()

	file, err := os.Open("delta_data.bin")
	check(err)
	defer file.Close()
	buffer := bufio.NewReader(file)

	// Per-frame snapshot sizes (kb) and implied bandwidth (kbps at 60 fps).
	sizes := make([]float64, 0)
	speeds := make([]float64, 0)

	encode := qpc.NewHistory("encode")
	decode := qpc.NewHistory("decode")

	server := physics.NewState(901)
	client := physics.NewState(901)

	// initialize the base state
	for i := 0; i < 6; i += 1 {
		server.ReadNext(buffer)
		client.IncFrame()
		client.Current().Assign(server.Current())
	}

	frame := 6
	for {
		err = server.ReadNext(buffer)
		if err == io.EOF {
			break
		}
		check(err)
		frame += 1

		// GC before each timed section so collection pauses don't pollute
		// the encode/decode measurements.
		runtime.GC()

		// Server side
		encode.Start()
		snapshot := server.Encode()
		encode.Stop()

		// ===
		runtime.GC()

		// Client side
		decode.Start()
		client.IncFrame()
		client.Decode(snapshot)
		decode.Stop()

		// ===
		// Snapshot size in kilobits; speed assumes 60 frames per second.
		size := float64(len(snapshot)*8) / 1000.0
		sizes = append(sizes, size)
		speed := size * 60.0
		speeds = append(speeds, speed)

		// Verify the client reconstructed the server state exactly.
		equal := server.Current().Equals(client.Current())
		if *verbose {
			if !equal {
				fmt.Print("! ")
			}
			fmt.Printf("%04d %8.3fkbps %10s %10s\n", frame, speed, encode.Last(), decode.Last())
		} else {
			// Compact progress: "." for match, "X" for mismatch.
			if equal {
				fmt.Print(".")
			} else {
				fmt.Print("X")
			}
		}
	}
	fmt.Println()

	// Bandwidth summary: mean ± sample stddev, then percentiles.
	fmt.Printf("#%d %.3fkbps ±%.3fkbps\n", len(sizes), stats.Mean(speeds), stats.StdDevS(speeds))
	fmt.Println()
	fmt.Printf("MIN %10.3f kbps\n", stats.Min(speeds))
	for _, p := range []float64{5, 10, 25, 50, 75, 90, 95} {
		fmt.Printf("P%02.f %10.3f kbps\n", p, stats.Percentile(speeds, p))
	}
	fmt.Printf("MAX %10.3f kbps\n", stats.Max(speeds))
	fmt.Println()
	fmt.Printf("TOTAL %10.3f kb\n", stats.Sum(sizes))
	fmt.Printf(" AVG %10.3f kb per frame\n", stats.Mean(sizes))
	fmt.Printf(" AVG %10.3f bits per cube\n", stats.Mean(sizes)*1000/float64(len(sizes)))
	fmt.Println()
	fmt.Println("TIMING:")
	qpc.PrintSummary(encode, decode)
}
//apply transforms an array of data func apply(data []string, transformation templates.Transformation) ([]string, []Mapping) { p := transformation.Parameters var wg sync.WaitGroup var mapping []Mapping switch transformation.Operation { case "toDate": if len(p) != 2 { log.Fatal("toDate transformation requires 2 parameters: current format, new format") } oldFormat := p[0] newFormat := p[1] for i, x := range data { y, err := time.Parse(oldFormat, x) if err != nil { log.Print("Error parsing date with index ", i, " with format: ", oldFormat) } else { data[i] = y.Format(newFormat) } } case "setNull": for i, x := range data { if arrayPos(x, p) != -1 { data[i] = "" } } case "standardize": if len(p) != 1 { log.Fatal("standardize transformation requires 1 parameter: type (min-max|z-score)") } stype := p[0] switch stype { case "min-max": newData := strArrToFloatArr(data) min, err := stats.Min(newData) if err != nil { log.Fatal("Error finding minimum of data: ", err) } max, err := stats.Max(newData) if err != nil { log.Fatal("Error finding maximum of data: ", err) } srange := max - min for i, x := range newData { data[i] = floatToString((x - min) / srange) } case "z-score": newData := strArrToFloatArr(data) mean, err := stats.Mean(newData) if err != nil { log.Fatal("Error finding mean of data: ", err) } sd, err := stats.StandardDeviation(newData) if err != nil { log.Fatal("Error finding standard deviation of data: ", err) } for i, x := range newData { data[i] = floatToString((x - mean) / sd) } case "decimal": newData := strArrToFloatArr(data) max, err := stats.Max(newData) if err != nil { log.Fatal("Error finding maximum of data: ", err) } min, err := stats.Min(newData) if err != nil { log.Fatal("Error finding minimum of data: ", err) } var maxAbs float64 if math.Abs(max) > math.Abs(min) { maxAbs = math.Abs(max) } else { maxAbs = math.Abs(min) } c := math.Ceil(math.Log10(maxAbs)) for i, x := range newData { data[i] = floatToString(x / math.Pow10(int(c))) } } case "binPercent": 
table := NewPivotTable(data) intP := strArrToIntArr(p) sort.Ints(intP) ps := NewPercentileService(*table, intP) mapping = ps.CreateMappings() ps.Bin(mapping, data) case "fuzzyMap": if len(p) != 3 { log.Fatal("fuzzyMap transformation requires 3 parameters: datasource GUID, match, put") } dsGUID := p[0] ds := datasources.NewDatasourceService(database.GetDatabase()) dsObj, err := ds.GetDatasource(dsGUID) if err != nil { log.Fatal("Error finding Datasource: ", err) } distinctValues := getDistinctValues(data) for i, datum := range distinctValues { wg.Add(1) go func(i int, datum string, dsObj datasources.Datasource) { result := fuzzyMap(datum, dsObj.Settings) fuzzyMapping := NewMapping(datum, result) mapping = append(mapping, *fuzzyMapping) defer wg.Done() }(i, datum, dsObj) } wg.Wait() data = applyMappings(mapping, data) } return data, mapping }
func main() { flag.Parse() n := *concurrency m := *total / n fmt.Printf("concurrency: %d\nrequests per client: %d\n\n", n, m) serviceMethodName := "Hello.Say" args := prepareArgs() b := make([]byte, 1024*1024) i, _ := args.MarshalTo(b) fmt.Printf("message size: %d bytes\n\n", i) var wg sync.WaitGroup wg.Add(n * m) var trans uint64 var transOK uint64 d := make([][]int64, n, n) //it contains warmup time but we can ignore it totalT := time.Now().UnixNano() for i := 0; i < n; i++ { dt := make([]int64, 0, m) d = append(d, dt) go func(i int) { s := &rpcx.DirectClientSelector{Network: "tcp", Address: *host} client := rpcx.NewClient(s) client.ClientCodecFunc = codec.NewProtobufClientCodec var reply BenchmarkMessage //warmup for j := 0; j < 5; j++ { client.Call(serviceMethodName, args, &reply) } for j := 0; j < m; j++ { t := time.Now().UnixNano() err := client.Call(serviceMethodName, args, &reply) t = time.Now().UnixNano() - t d[i] = append(d[i], t) if err == nil && reply.Field1 == "OK" { atomic.AddUint64(&transOK, 1) } atomic.AddUint64(&trans, 1) wg.Done() } client.Close() }(i) } wg.Wait() totalT = time.Now().UnixNano() - totalT totalT = totalT / 1000000 fmt.Printf("took %d ms for %d requests", totalT, n*m) totalD := make([]int64, 0, n*m) for _, k := range d { totalD = append(totalD, k...) 
} totalD2 := make([]float64, 0, n*m) for _, k := range totalD { totalD2 = append(totalD2, float64(k)) } mean, _ := stats.Mean(totalD2) median, _ := stats.Median(totalD2) max, _ := stats.Max(totalD2) min, _ := stats.Min(totalD2) fmt.Printf("sent requests : %d\n", n*m) fmt.Printf("received requests : %d\n", atomic.LoadUint64(&trans)) fmt.Printf("received requests_OK : %d\n", atomic.LoadUint64(&transOK)) fmt.Printf("throughput (TPS) : %d\n", int64(n*m)*1000/totalT) fmt.Printf("mean: %.f ns, median: %.f ns, max: %.f ns, min: %.f ns\n", mean, median, max, min) fmt.Printf("mean: %d ms, median: %d ms, max: %d ms, min: %d ms\n", int64(mean/1000000), int64(median/1000000), int64(max/1000000), int64(min/1000000)) }
// summarize prints the sample count, mean, and sample standard deviation of vs.
func summarize(vs []float64) {
	mean := stats.Mean(vs)
	dev := stats.StdDevS(vs)
	fmt.Printf("%d %.3f ±%.3f\n", len(vs), mean, dev)
}
func main() { flag.Parse() n := *concurrency m := *total / n fmt.Printf("concurrency: %d\nrequests per client: %d\n\n", n, m) args := prepareArgs() b, _ := proto.Marshal(args) fmt.Printf("message size: %d bytes\n\n", len(b)) var wg sync.WaitGroup wg.Add(n * m) var trans uint64 var transOK uint64 d := make([][]int64, n, n) //it contains warmup time but we can ignore it totalT := time.Now().UnixNano() for i := 0; i < n; i++ { dt := make([]int64, 0, m) d = append(d, dt) go func(i int) { conn, err := grpc.Dial(*host, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } c := NewHelloClient(conn) //warmup for j := 0; j < 5; j++ { c.Say(context.Background(), args) } for j := 0; j < m; j++ { t := time.Now().UnixNano() reply, err := c.Say(context.Background(), args) t = time.Now().UnixNano() - t d[i] = append(d[i], t) if err == nil && *(reply.Field1) == "OK" { atomic.AddUint64(&transOK, 1) } atomic.AddUint64(&trans, 1) wg.Done() } conn.Close() }(i) } wg.Wait() totalT = time.Now().UnixNano() - totalT totalT = totalT / 1000000 fmt.Printf("took %d ms for %d requests", totalT, n*m) totalD := make([]int64, 0, n*m) for _, k := range d { totalD = append(totalD, k...) } totalD2 := make([]float64, 0, n*m) for _, k := range totalD { totalD2 = append(totalD2, float64(k)) } mean, _ := stats.Mean(totalD2) median, _ := stats.Median(totalD2) max, _ := stats.Max(totalD2) min, _ := stats.Min(totalD2) fmt.Printf("sent requests : %d\n", n*m) fmt.Printf("received requests : %d\n", atomic.LoadUint64(&trans)) fmt.Printf("received requests_OK : %d\n", atomic.LoadUint64(&transOK)) fmt.Printf("throughput (TPS) : %d\n", int64(n*m)*1000/totalT) fmt.Printf("mean: %.f ns, median: %.f ns, max: %.f ns, min: %.f ns\n", mean, median, max, min) fmt.Printf("mean: %d ms, median: %d ms, max: %d ms, min: %d ms\n", int64(mean/1000000), int64(median/1000000), int64(max/1000000), int64(min/1000000)) }
// startStats blocks and periodically logs transaction statistics (throughput,
// success rates, durations, ...). Note that this only captures write txns,
// since read-only txns are stateless as far as TxnCoordSender is concerned.
// Runs until tc.stopper signals shutdown.
// TODO(mrtracy): Add this to TimeSeries.
func (tc *TxnCoordSender) startStats() {
	res := time.Millisecond // for duration logging resolution
	lastNow := tc.clock.PhysicalNow()
	for {
		select {
		case <-time.After(statusLogInterval):
			// Skip the work entirely unless verbose logging is enabled.
			if !log.V(1) {
				continue
			}
			// Swap the accumulated stats out under the lock so workers can
			// keep recording into a fresh txnCoordStats.
			tc.Lock()
			curStats := tc.txnStats
			tc.txnStats = txnCoordStats{}
			tc.Unlock()
			now := tc.clock.PhysicalNow()
			// Tests have weird clocks.
			if now-lastNow <= 0 {
				continue
			}
			num := len(curStats.durations)
			// Only compute when non-empty input.
			var dMax, dMean, dDev, rMax, rMean, rDev float64
			var err error
			if num > 0 {
				// There should never be an error in the below
				// computations.
				dMax, err = stats.Max(curStats.durations)
				if err != nil {
					panic(err)
				}
				dMean, err = stats.Mean(curStats.durations)
				if err != nil {
					panic(err)
				}
				dDev, err = stats.StdDevP(curStats.durations)
				if err != nil {
					panic(err)
				}
				rMax, err = stats.Max(curStats.restarts)
				if err != nil {
					panic(err)
				}
				rMean, err = stats.Mean(curStats.restarts)
				if err != nil {
					panic(err)
				}
				rDev, err = stats.StdDevP(curStats.restarts)
				if err != nil {
					panic(err)
				}
			}
			// Transactions per second over the elapsed wall-clock window.
			rate := float64(int64(num)*int64(time.Second)) / float64(now-lastNow)
			var pCommitted, pAbandoned, pAborted float32
			if fNum := float32(num); fNum > 0 {
				pCommitted = 100 * float32(curStats.committed) / fNum
				pAbandoned = 100 * float32(curStats.abandoned) / fNum
				pAborted = 100 * float32(curStats.aborted) / fNum
			}
			log.Infof(
				"txn coordinator: %.2f txn/sec, %.2f/%.2f/%.2f %%cmmt/abrt/abnd, %s/%s/%s avg/σ/max duration, %.1f/%.1f/%.1f avg/σ/max restarts (%d samples)",
				rate, pCommitted, pAborted, pAbandoned,
				util.TruncateDuration(time.Duration(dMean), res),
				util.TruncateDuration(time.Duration(dDev), res),
				util.TruncateDuration(time.Duration(dMax), res),
				rMean, rDev, rMax, num,
			)
			lastNow = now
		case <-tc.stopper.ShouldStop():
			return
		}
	}
}
func (c *cmdReport2) getFeatures(geneSnpChan chan SNPArr) { w, err := os.Create(c.prefix + ".detectable.gene.csv") if err != nil { log.Fatalln(err) } defer w.Close() w.WriteString("patric_id,genome,figfam,sample,pi,depth\n") fn := func(txn *lmdb.Txn) error { dbi, err := txn.OpenDBI("feature", 0) if err != nil { return err } for gs := range geneSnpChan { if len(gs.Arr) < 100 { continue } k := gs.Key v, err := txn.Get(dbi, k) if err != nil { return err } f := Feature{} if err := msgpack.Unmarshal(v, &f); err != nil { return err } seqLen := f.End - f.Start + 1 // calculate median of depth depthArr := []float64{} piArr := []float64{} for _, snp := range gs.Arr { pos := snp.Position - f.Start if f.IsComplementaryStrand() { pos = seqLen - 1 - pos } if (pos+1)%3 == 0 { depthArr = append(depthArr, float64(len(snp.Bases))) piArr = append(piArr, snp.Pi()) } } depthMedian, _ := stats.Median(depthArr) sort.Float64s(piArr) piMean, _ := stats.Mean(piArr[10 : len(piArr)-10]) w.WriteString(fmt.Sprintf("%s,%s,%s,%s,%g,%g\n", f.PatricID, f.TaxID, f.FigfamID, c.prefix, piMean, depthMedian)) } return nil } err = c.featureDB.View(fn) if err != nil { log.Panicln(err) } }