func bench(requests, concurrency int, image string) {
	start := time.Now()
	// timings has length 0 and capacity `requests`; each container start
	// appends one sample. (A slice pre-sized to `requests` would begin with
	// that many zeros and skew the percentiles computed below.)
	timings := make([]float64, 0, requests)
	completeCh := make(chan time.Duration)
	done := make(chan struct{})
	current := 0
	go func() {
		for timing := range completeCh {
			timings = append(timings, timing.Seconds())
			current++
			percent := float64(current) / float64(requests) * 100
			fmt.Printf("[%3.f%%] %d/%d containers started\n", percent, current, requests)
		}
		close(done)
	}()
	session(requests, concurrency, image, completeCh)
	close(completeCh)
	// Wait for the collector goroutine to finish draining the channel
	// before reading timings, so the reads below don't race with it.
	<-done
	total := time.Since(start)

	p50th, _ := stats.Median(timings)
	p90th, _ := stats.Percentile(timings, 90)
	p99th, _ := stats.Percentile(timings, 99)

	fmt.Println("")
	fmt.Printf("Time taken for tests: %s\n", total.String())
	fmt.Printf("Time per container: %vms [50th] | %vms [90th] | %vms [99th]\n",
		int(p50th*1000), int(p90th*1000), int(p99th*1000))
}
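// bench above assumes a session helper (not shown) that starts `requests`
// containers with at most `concurrency` running at once, sends each start
// duration on completeCh, and returns only after all sends are done. This is
// a hypothetical minimal stand-in for exercising the timing pipeline; the
// sleep is a placeholder for actually starting `image`.
func session(requests, concurrency int, image string, completeCh chan<- time.Duration) {
	sem := make(chan struct{}, concurrency) // bounds in-flight starts
	var wg sync.WaitGroup
	for i := 0; i < requests; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			sem <- struct{}{}
			defer func() { <-sem }()
			start := time.Now()
			time.Sleep(10 * time.Millisecond) // placeholder: start `image` here
			completeCh <- time.Since(start)
		}()
	}
	wg.Wait() // guarantee all durations are sent before returning
}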
// Finalize calculation of the risk using available datapoints
func riskFinalize(op opContext, rs *slib.RRAServiceRisk) error {
	var (
		rvals []float64
		err   error
	)
	for _, x := range rs.Scenarios {
		// If the scenario had no data, don't include it in the
		// final scoring
		if x.NoData {
			continue
		}
		rvals = append(rvals, x.Score)
	}

	// Note the highest business impact value that was determined from
	// the RRA. This can be used as an indication of the business impact
	// for the service.
	rs.Risk.Impact = rs.UsedRRAAttrib.Impact
	rs.Risk.ImpactLabel, err = slib.ImpactLabelFromValue(rs.Risk.Impact)
	if err != nil {
		return err
	}

	if len(rvals) == 0 {
		// This can occur if we have no metric data, including no valid
		// information in the RRA
		logf("error in risk calculation: %q has no valid scenarios", rs.RRA.Name)
		rs.Risk.Median = 0.0
		rs.Risk.Average = 0.0
		rs.Risk.WorstCase = 0.0
		rs.Risk.MedianLabel = "unknown"
		rs.Risk.AverageLabel = "unknown"
		rs.Risk.WorstCaseLabel = "unknown"
		rs.Risk.DataClass, err = slib.DataValueFromLabel(rs.RRA.DefData)
		// Propagate any error from the label lookup
		return err
	}

	rs.Risk.Median, err = stats.Median(rvals)
	if err != nil {
		return err
	}
	rs.Risk.MedianLabel = slib.NormalLabelFromValue(rs.Risk.Median)

	rs.Risk.Average, err = stats.Mean(rvals)
	if err != nil {
		return err
	}
	rs.Risk.AverageLabel = slib.NormalLabelFromValue(rs.Risk.Average)

	rs.Risk.WorstCase, err = stats.Max(rvals)
	if err != nil {
		return err
	}
	rs.Risk.WorstCaseLabel = slib.NormalLabelFromValue(rs.Risk.WorstCase)

	rs.Risk.DataClass, err = slib.DataValueFromLabel(rs.RRA.DefData)
	if err != nil {
		return err
	}
	return nil
}
func main() {
	d := stats.LoadRawData([]interface{}{1.1, "2", 3.0, 4, "5"})

	a, _ := stats.Min(d)
	fmt.Println(a) // 1.1

	a, _ = stats.Max(d)
	fmt.Println(a) // 5

	a, _ = stats.Sum([]float64{1.1, 2.2, 3.3})
	fmt.Println(a) // 6.6

	a, _ = stats.Mean([]float64{1, 2, 3, 4, 5})
	fmt.Println(a) // 3

	a, _ = stats.Median([]float64{1, 2, 3, 4, 5, 6, 7})
	fmt.Println(a) // 4

	m, _ := stats.Mode([]float64{5, 5, 3, 3, 4, 2, 1})
	fmt.Println(m) // [5 3]

	a, _ = stats.PopulationVariance([]float64{1, 2, 3, 4, 5})
	fmt.Println(a) // 2

	a, _ = stats.SampleVariance([]float64{1, 2, 3, 4, 5})
	fmt.Println(a) // 2.5

	a, _ = stats.MedianAbsoluteDeviationPopulation([]float64{1, 2, 3})
	fmt.Println(a) // 1

	a, _ = stats.StandardDeviationPopulation([]float64{1, 2, 3})
	fmt.Println(a) // 0.816496580927726

	a, _ = stats.StandardDeviationSample([]float64{1, 2, 3})
	fmt.Println(a) // 1

	a, _ = stats.Percentile([]float64{1, 2, 3, 4, 5}, 75)
	fmt.Println(a) // 4

	a, _ = stats.PercentileNearestRank([]float64{35, 20, 15, 40, 50}, 75)
	fmt.Println(a) // 40

	c := []stats.Coordinate{
		{1, 2.3},
		{2, 3.3},
		{3, 3.7},
		{4, 4.3},
		{5, 5.3},
	}

	r, _ := stats.LinearRegression(c)
	fmt.Println(r) // [{1 2.3800000000000026} {2 3.0800000000000014} {3 3.7800000000000002} {4 4.479999999999999} {5 5.179999999999998}]

	r, _ = stats.ExponentialRegression(c)
	fmt.Println(r) // [{1 2.5150181024736638} {2 3.032084111136781} {3 3.6554544271334493} {4 4.406984298281804} {5 5.313022222665875}]

	r, _ = stats.LogarithmicRegression(c)
	fmt.Println(r) // [{1 2.1520822363811702} {2 3.3305559222492214} {3 4.019918836568674} {4 4.509029608117273} {5 4.888413396683663}]

	s, _ := stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 3, false)
	fmt.Println(s) // [0.2,0.4,0.3]

	s, _ = stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 10, true)
	fmt.Println(s) // [0.2,0.2,0.4,0.1,0.2,0.4,0.3,0.2,0.2,0.1]

	q, _ := stats.Quartile([]float64{7, 15, 36, 39, 40, 41})
	fmt.Println(q) // {15 37.5 40}

	iqr, _ := stats.InterQuartileRange([]float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118})
	fmt.Println(iqr) // 10

	mh, _ := stats.Midhinge([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
	fmt.Println(mh) // 7.5

	tr, _ := stats.Trimean([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})
	fmt.Println(tr) // 7.25

	o, _ := stats.QuartileOutliers([]float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100})
	fmt.Printf("%+v\n", o) // {Mild:[15 18] Extreme:[-1000 100]}

	gm, _ := stats.GeometricMean([]float64{10, 51.2, 8})
	fmt.Println(gm) // 15.999999999999991

	hm, _ := stats.HarmonicMean([]float64{1, 2, 3, 4, 5})
	fmt.Println(hm) // 2.18978102189781

	a, _ = stats.Round(2.18978102189781, 3)
	fmt.Println(a) // 2.189
}
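// The calls above discard the error return for brevity. Every function in the
// stats package also reports failure through its error value (for instance on
// empty input), so real code should check it. A minimal sketch:
func checkedMedian() {
	_, err := stats.Median([]float64{})
	if err != nil {
		fmt.Println(err) // the package returns an error rather than a usable result
	}
}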
func main() { flag.Parse() n := *concurrency m := *total / n fmt.Printf("concurrency: %d\nrequests per client: %d\n\n", n, m) args := prepareArgs() b, _ := proto.Marshal(args) fmt.Printf("message size: %d bytes\n\n", len(b)) var wg sync.WaitGroup wg.Add(n * m) var trans uint64 var transOK uint64 d := make([][]int64, n, n) //it contains warmup time but we can ignore it totalT := time.Now().UnixNano() for i := 0; i < n; i++ { dt := make([]int64, 0, m) d = append(d, dt) go func(i int) { conn, err := grpc.Dial(*host, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } c := NewHelloClient(conn) //warmup for j := 0; j < 5; j++ { c.Say(context.Background(), args) } for j := 0; j < m; j++ { t := time.Now().UnixNano() reply, err := c.Say(context.Background(), args) t = time.Now().UnixNano() - t d[i] = append(d[i], t) if err == nil && *(reply.Field1) == "OK" { atomic.AddUint64(&transOK, 1) } atomic.AddUint64(&trans, 1) wg.Done() } conn.Close() }(i) } wg.Wait() totalT = time.Now().UnixNano() - totalT totalT = totalT / 1000000 fmt.Printf("took %d ms for %d requests", totalT, n*m) totalD := make([]int64, 0, n*m) for _, k := range d { totalD = append(totalD, k...) } totalD2 := make([]float64, 0, n*m) for _, k := range totalD { totalD2 = append(totalD2, float64(k)) } mean, _ := stats.Mean(totalD2) median, _ := stats.Median(totalD2) max, _ := stats.Max(totalD2) min, _ := stats.Min(totalD2) fmt.Printf("sent requests : %d\n", n*m) fmt.Printf("received requests : %d\n", atomic.LoadUint64(&trans)) fmt.Printf("received requests_OK : %d\n", atomic.LoadUint64(&transOK)) fmt.Printf("throughput (TPS) : %d\n", int64(n*m)*1000/totalT) fmt.Printf("mean: %.f ns, median: %.f ns, max: %.f ns, min: %.f ns\n", mean, median, max, min) fmt.Printf("mean: %d ms, median: %d ms, max: %d ms, min: %d ms\n", int64(mean/1000000), int64(median/1000000), int64(max/1000000), int64(min/1000000)) }
func main() { flag.Parse() n := *concurrency m := *total / n fmt.Printf("concurrency: %d\nrequests per client: %d\n\n", n, m) serviceMethodName := "Hello.Say" args := prepareArgs() b := make([]byte, 1024*1024) i, _ := args.MarshalTo(b) fmt.Printf("message size: %d bytes\n\n", i) var wg sync.WaitGroup wg.Add(n * m) var trans uint64 var transOK uint64 d := make([][]int64, n, n) //it contains warmup time but we can ignore it totalT := time.Now().UnixNano() for i := 0; i < n; i++ { dt := make([]int64, 0, m) d = append(d, dt) go func(i int) { s := &rpcx.DirectClientSelector{Network: "tcp", Address: *host} client := rpcx.NewClient(s) client.ClientCodecFunc = codec.NewProtobufClientCodec var reply BenchmarkMessage //warmup for j := 0; j < 5; j++ { client.Call(serviceMethodName, args, &reply) } for j := 0; j < m; j++ { t := time.Now().UnixNano() err := client.Call(serviceMethodName, args, &reply) t = time.Now().UnixNano() - t d[i] = append(d[i], t) if err == nil && reply.Field1 == "OK" { atomic.AddUint64(&transOK, 1) } atomic.AddUint64(&trans, 1) wg.Done() } client.Close() }(i) } wg.Wait() totalT = time.Now().UnixNano() - totalT totalT = totalT / 1000000 fmt.Printf("took %d ms for %d requests", totalT, n*m) totalD := make([]int64, 0, n*m) for _, k := range d { totalD = append(totalD, k...) } totalD2 := make([]float64, 0, n*m) for _, k := range totalD { totalD2 = append(totalD2, float64(k)) } mean, _ := stats.Mean(totalD2) median, _ := stats.Median(totalD2) max, _ := stats.Max(totalD2) min, _ := stats.Min(totalD2) fmt.Printf("sent requests : %d\n", n*m) fmt.Printf("received requests : %d\n", atomic.LoadUint64(&trans)) fmt.Printf("received requests_OK : %d\n", atomic.LoadUint64(&transOK)) fmt.Printf("throughput (TPS) : %d\n", int64(n*m)*1000/totalT) fmt.Printf("mean: %.f ns, median: %.f ns, max: %.f ns, min: %.f ns\n", mean, median, max, min) fmt.Printf("mean: %d ms, median: %d ms, max: %d ms, min: %d ms\n", int64(mean/1000000), int64(median/1000000), int64(max/1000000), int64(min/1000000)) }
func main() {
	t := time.Now()
	fmt.Println(t.Format(time.RFC3339))

	rand.Seed(1)

	// Read in data
	readData()

	// Set one level with all row criteria;
	// this is used to start the set creation
	levelOne = fullOneLevel()
	//levels = fullTwoLevel()
	outputRowCriteria(levels)

	// experiment variables
	rand_numSets = 1000
	rand_maxSetMembers = 5
	maxExperiments = 1
	var expMin []float64
	var expMax []float64
	scoreCutoff = -0.89
	rowThreshhold = 2
	zScore = 2.58

	for experiment := 1; experiment <= maxExperiments; experiment++ {
		// experiment variables, changed per experiment
		rand_numSets += 0
		rand_maxSetMembers += 0
		scoreCutoff += -0.00
		zScore += 0.0

		// Set up experiment variables
		var scores []scoreResult
		// Track the lowest and highest top score across datasets; scores
		// are negative, so start the min at 0 and the max at -100.
		var minScore float64 = 0
		var maxScore float64 = -100

		levels = fullFourLevel() //randLevels()
		fmt.Printf("sets count: %d, max set members: %d, level 1 count: %d, rowThreshhold: %d, scoreCutoff: %f, zScore: %f\n",
			len(levels), rand_maxSetMembers+2, len(levelOne), rowThreshhold, scoreCutoff, zScore)

		for dataSetId := 1; dataSetId <= datasets; dataSetId++ {
			s := levelEval(dataSetId)
			sort.Sort(scoreResults(s))
			// s contains a sorted list of scores for one dataset;
			// this is where we can get some info on that data
			//outputScoreList(s)
			if len(s) > 0 {
				//var sEval = evaluateScores(s)
				// pick the top score
				sEval := s[0]
				scores = append(scores, sEval)
				fmt.Printf("%d, %f \n", sEval.dataSetId, sEval.score)
				if sEval.score < minScore {
					minScore = sEval.score
				}
				if sEval.score > maxScore {
					maxScore = sEval.score
				}
			}

			// For all scores in this dataset, write out the median,
			// standard deviation, and range of the negative scores
			var set []float64
			for _, scoreItem := range s {
				if scoreItem.score < 0.0 {
					set = append(set, scoreItem.score)
				}
			}
			median, _ := stats.Median(set)
			sd, _ := stats.StandardDeviation(set)
			min, _ := stats.Min(set)
			max, _ := stats.Max(set)
			fmt.Printf("dataset: %d, median: %f, sd: %f, min: %f, max: %f, len: %d\n",
				dataSetId, median, sd, min, max, len(set))
		}

		expMin = append(expMin, minScore)
		expMax = append(expMax, maxScore)
		//scoreCutoff = (minScore * (percentRofMin / 100.0)) + minScore
		//fmt.Printf(" scoreCutoff: %f \n", scoreCutoff)

		outputScores(scores)

		// Write output file
		outputResults(scores)

		// Compare to training truth data
		// compareTrainingDataWithResults()
	}

	t = time.Now()
	fmt.Println(t.Format(time.RFC3339))

	// Output min and max scores per experiment
	for _, each := range expMin {
		fmt.Printf("min: %f, ", each)
	}
	fmt.Println()
	for _, each := range expMax {
		fmt.Printf("max: %f, ", each)
	}
}
func (c *cmdReport2) getFeatures(geneSnpChan chan SNPArr) {
	w, err := os.Create(c.prefix + ".detectable.gene.csv")
	if err != nil {
		log.Fatalln(err)
	}
	defer w.Close()
	w.WriteString("patric_id,genome,figfam,sample,pi,depth\n")

	fn := func(txn *lmdb.Txn) error {
		dbi, err := txn.OpenDBI("feature", 0)
		if err != nil {
			return err
		}

		for gs := range geneSnpChan {
			// skip genes with too few SNP positions
			if len(gs.Arr) < 100 {
				continue
			}
			k := gs.Key
			v, err := txn.Get(dbi, k)
			if err != nil {
				return err
			}
			f := Feature{}
			if err := msgpack.Unmarshal(v, &f); err != nil {
				return err
			}
			seqLen := f.End - f.Start + 1

			// collect depth and pi at third codon positions only
			depthArr := []float64{}
			piArr := []float64{}
			for _, snp := range gs.Arr {
				pos := snp.Position - f.Start
				if f.IsComplementaryStrand() {
					pos = seqLen - 1 - pos
				}
				if (pos+1)%3 == 0 {
					depthArr = append(depthArr, float64(len(snp.Bases)))
					piArr = append(piArr, snp.Pi())
				}
			}
			// the trimmed slice below needs at least 21 values,
			// otherwise the slice expression would panic
			if len(piArr) < 21 {
				continue
			}

			// median of depth, and a trimmed mean of pi that drops
			// the 10 smallest and 10 largest values
			depthMedian, _ := stats.Median(depthArr)
			sort.Float64s(piArr)
			piMean, _ := stats.Mean(piArr[10 : len(piArr)-10])

			w.WriteString(fmt.Sprintf("%s,%s,%s,%s,%g,%g\n",
				f.PatricID, f.TaxID, f.FigfamID, c.prefix, piMean, depthMedian))
		}
		return nil
	}

	err = c.featureDB.View(fn)
	if err != nil {
		log.Panicln(err)
	}
}