func Server(stop chan int) {
	channel := make(chan []*big.Int, 1)
	fmt.Printf("Starting server\n")
	tcpAddr, err := net.ResolveTCPAddr("tcp4", ":9999")
	checkError(err)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	checkError(err)
	fmt.Printf("Server is waiting for connection requests\n")
	conn, err := listener.Accept()
	checkError(err) // the original silently ignored Accept errors
	time0 := time.Now()
	handleClient(channel, conn)
	duration := time.Since(time0)
	fmt.Print(float64(duration.Nanoseconds()) / 1000 / 1000)
	listener.Close()
	stop <- 1
}
func run(name string) {
	var t, total time.Duration
	test, ok := tests[name]
	if !ok {
		fmt.Fprintf(os.Stderr, "test: `%s` does not exist\n", name)
		os.Exit(1)
	}
	fmt.Printf("%s:\n", strings.ToUpper(name))
	for i := 0; i < *R; i++ {
		if *mock {
			t = BenchmarkMock(test)
		} else {
			t = BenchmarkRedis(test)
		}
		total += t
		prints(t)
	}
	avg := time.Duration(total.Nanoseconds() / int64(*R))
	print("AVG ")
	printsA(avg, total)
	println()
}
// TimerL records duration (in whole milliseconds) under name, sampling at the
// given rate: a call is recorded with probability rate and skipped otherwise.
func TimerL(name string, duration time.Duration, rate float64) {
	if rand.Float64() > rate {
		return
	}
	HistogramL(name, float64(duration.Nanoseconds()/1000000), rate)
}
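// Usage sketch (added for illustration, not part of the original source): time
// a piece of work and record it with 10% sampling, so only about one call in
// ten reaches HistogramL. The metric name "request.latency" is made up here.
func timerLExample() {
	start := time.Now()
	time.Sleep(5 * time.Millisecond) // stands in for the work being measured
	TimerL("request.latency", time.Since(start), 0.1)
}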
// NewMapper returns a new instance of Mapper with a given function and interval.
func NewMapper(fn MapFunc, itr Iterator, interval time.Duration) *Mapper {
	return &Mapper{
		fn:       fn,
		itr:      itr,
		interval: interval.Nanoseconds(),
	}
}
// NewPingChecker returns a check function that checks whether a host answers
// an ICMP ping.
func NewPingChecker(host, service, ip string) CheckFunction {
	return func() Event {
		var result = Event{Host: host, Service: service, State: "critical"}

		p := fastping.NewPinger()
		p.MaxRTT = maxPingTime
		ra, err := net.ResolveIPAddr("ip4:icmp", ip)
		if err != nil {
			// Return early: AddIPAddr would dereference a nil address.
			result.Description = err.Error()
			return result
		}
		p.AddIPAddr(ra)
		p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
			result.State = "ok"
			// Use the rtt passed to the callback; the original read an
			// unassigned variable and always reported a zero metric.
			result.Metric = float32(rtt.Nanoseconds()) / 1e6
		}
		err = p.Run()
		if err != nil {
			result.Description = err.Error()
		}
		return result
	}
}
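// Usage sketch (added for illustration; the host, service and IP values are
// placeholders, with 192.0.2.10 taken from the documentation-reserved range):
// build a checker once, then invoke it whenever a fresh Event is needed.
func pingCheckerExample() Event {
	check := NewPingChecker("web-1", "ping", "192.0.2.10")
	return check() // State is "ok" on reply, "critical" otherwise
}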
// SendUsage is called at the end of processing a request and will record info
// about the request for analytics and error detection later.
func SendUsage(
	c *Context,
	statusCode int,
	contentLength int,
	dur time.Duration,
	errors []string,
) {
	m := Usage{}

	m.Method = c.GetHTTPMethod()
	m.URL = c.Request.URL.String()

	// Remove querystring and replace IDs to allow grouping
	endPointURL := regURLIDs.ReplaceAllString(
		strings.Split(m.URL, "?")[0],
		repURLIDs,
	)
	if strings.Contains(endPointURL, "/out/") {
		endPointURL = regJumpLink.ReplaceAllString(endPointURL, repJumpLink)
	}
	m.EndPointURL = endPointURL

	// Only send first 4 chars
	if c.Auth.AccessToken.TokenValue != "" {
		m.AccessToken = c.Auth.AccessToken.TokenValue[:4]
	}

	// Only send last two sections of IP address
	if c.Request.Header.Get("X-Real-IP") != "" {
		if strings.Contains(c.Request.Header.Get("X-Real-IP"), ".") {
			// IPv4
			m.IPAddr = strings.Join(
				strings.Split(c.Request.Header.Get("X-Real-IP"), ".")[2:],
				".",
			)
		} else if strings.Contains(c.Request.Header.Get("X-Real-IP"), ":") {
			// IPv6
			ipv6Split := strings.Split(c.Request.Header.Get("X-Real-IP"), ":")
			m.IPAddr = strings.Join(ipv6Split[(len(ipv6Split)-2):], ":")
		}
	}

	m.UserAgent = c.Request.UserAgent()
	m.HTTPStatus = statusCode
	m.Host = c.Request.Host
	m.ContentLength = contentLength
	m.Created = time.Now().Format(time.RFC3339)
	m.TimeSpent = dur.Nanoseconds()
	m.SiteID = c.Site.ID
	m.UserID = c.Auth.UserID
	m.ProfileID = c.Auth.ProfileID

	if len(errors) > 0 {
		m.Error = strings.Join(errors, ", ")
	}

	m.Send()
}
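// Sketch (added for illustration) of the IP-truncation rule used above: only
// the last two sections of the client address are retained, e.g. "10.1.2.3"
// becomes "2.3" and "2001:db8::8a2e:370:7334" keeps "370:7334".
func truncateIPExample(ip string) string {
	if strings.Contains(ip, ".") { // IPv4
		return strings.Join(strings.Split(ip, ".")[2:], ".")
	}
	parts := strings.Split(ip, ":") // IPv6
	return strings.Join(parts[len(parts)-2:], ":")
}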
// SendReadTimeout instructs the peer to simulate a read timeout. It then waits
// for acknowledgement of the timeout, buffering any packets received in the
// meantime. The buffered packets are then returned.
func (p *packetAdaptor) SendReadTimeout(d time.Duration) ([][]byte, error) {
	payload := make([]byte, 1+8)
	payload[0] = opcodeTimeout
	binary.BigEndian.PutUint64(payload[1:], uint64(d.Nanoseconds()))
	if _, err := p.Conn.Write(payload); err != nil {
		return nil, err
	}

	packets := make([][]byte, 0)
	for {
		opcode, err := p.readOpcode()
		if err != nil {
			return nil, err
		}
		switch opcode {
		case opcodeTimeoutAck:
			// Done! Return the buffered packets.
			return packets, nil
		case opcodePacket:
			// Buffer the packet for the caller to process.
			packet, err := p.readPacketBody()
			if err != nil {
				return nil, err
			}
			packets = append(packets, packet)
		default:
			return nil, fmt.Errorf("unexpected opcode '%s'", opcode)
		}
	}
}
// UpdateExpectedDuration updates the duration that the given task is expected
// to take when run on the given host.
func UpdateExpectedDuration(t *task.Task, timeTaken time.Duration) error {
	matcher := bson.M{
		"name":          t.DisplayName,
		"build_variant": t.BuildVariant,
		"branch":        t.Project,
	}
	taskBk, err := findOneTaskBk(matcher, bson.M{})
	if err != nil {
		return err
	}
	var averageTaskDuration time.Duration
	if taskBk == nil {
		averageTaskDuration = timeTaken
	} else {
		averageTime := ((taskBk.ExpectedDuration.Nanoseconds() * taskBk.NumStarted) +
			timeTaken.Nanoseconds()) / (taskBk.NumStarted + 1)
		averageTaskDuration = time.Duration(averageTime)
	}

	// For now, use the running average over comparable past runs as the
	// guess for upcoming tasks.
	update := bson.M{
		"$set": bson.M{"expected_duration": averageTaskDuration},
		"$inc": bson.M{"num_started": 1},
	}
	return upsertOneTaskBk(matcher, update)
}
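// Worked example (added for illustration) of the running-average update above:
// with an expected duration of 10m over 3 starts and a new 14m observation,
// the new expected duration is (10m*3 + 14m) / 4 = 11m.
func runningAverageExample() time.Duration {
	avg, n, obs := 10*time.Minute, int64(3), 14*time.Minute
	return time.Duration((avg.Nanoseconds()*n + obs.Nanoseconds()) / (n + 1)) // 11m
}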
func (t *Thread) work(id int) {
	log.LogMessage(t.TAG, " thread work, id:", id)
	var startTime time.Time
	var delay time.Duration
	warninglvl := 50 * time.Millisecond
	for {
		select {
		case rpc := <-t.Queue[id]:
			log.LogMessage(t.TAG, " thread:", id, rpc.GetSrc(), " call:", rpc.GetMethod())
			startTime = time.Now()
			err := rpc.Call()
			if err != nil {
				log.LogError("rpc error:", err)
			}
			delay = time.Since(startTime)
			if delay > warninglvl {
				log.LogWarning("rpc call ", rpc.GetMethod(), " delay:", delay.Nanoseconds()/1000000, "ms")
			}
			err = rpc.Done()
			if err != nil {
				log.LogError("rpc error:", err)
			}
			rpc.Free()
		default:
			if t.Quit {
				log.LogMessage(t.TAG, " thread ", id, " quit")
				return
			}
			time.Sleep(time.Millisecond)
		}
	}
}
// segmentSize returns the largest power-of-ten number of nanoseconds strictly
// less than the given duration; it returns 0 for durations of 1ns or less.
func segmentSize(duration time.Duration) time.Duration {
	var segmentSize time.Duration
	for i := int64(1); i < duration.Nanoseconds(); i = i * 10 {
		segmentSize = time.Duration(i)
	}
	return segmentSize
}
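// Sketch (added for illustration): segmentSize picks the largest power of ten
// below the duration, e.g. 2.5s -> 1s and 80ms -> 10ms.
func segmentSizeExample() {
	fmt.Println(segmentSize(2500 * time.Millisecond)) // 1s
	fmt.Println(segmentSize(80 * time.Millisecond))   // 10ms
}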
func dumpReadToFile(results []Result) {
	f, err := os.Create("results-read.txt")
	check(err)
	defer func() {
		if err := f.Close(); err != nil {
			panic(err)
		}
	}()

	var duration time.Duration
	for _, result := range results {
		times := strings.Split(result.ReadTimes, ",")
		durations := make([]string, len(times))
		for i := 0; i < len(times); i++ {
			duration, err = time.ParseDuration(times[i])
			if err != nil {
				fmt.Println(err)
				fmt.Println("index:", i, times[i])
				duration = time.Duration(0)
			}
			durations[i] = fmt.Sprintf("%d", duration.Nanoseconds())
		}
		_, err := f.WriteString(strings.Join(durations, ",") + "\n")
		check(err)
	}
}
func (g *Client) QuerySince(q string, ago time.Duration) Datapoints {
	if ago.Nanoseconds() <= 0 {
		return Datapoints{errors.New("Duration is expected to be positive."), "", nil}
	}

	// Cloning to be able to modify.
	url := g.URL
	url.Path = path.Join(url.Path, "/render")

	queryPart := constructQueryPart([]string{q})
	queryPart.Add("from", graphiteSinceString(ago))
	url.RawQuery = queryPart.Encode()

	resp, err := http.Get(url.String())
	if err != nil {
		return Datapoints{err, "", nil}
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return Datapoints{err, "", nil}
	}

	points, err := parseGraphiteResponse(body)
	return parseSingleGraphiteResponse(points, err)
}
// QueryMultiSince fetches one or multiple Graphite series, deferring to later
// the question of whether the results are ints or floats. Useful in clients
// that execute ad hoc queries.
func (g *Client) QueryMultiSince(q []string, ago time.Duration) (MultiDatapoints, error) {
	if ago.Nanoseconds() <= 0 {
		return nil, errors.New("Duration is expected to be positive.")
	}

	// Cloning to be able to modify.
	url := g.URL
	url.Path = path.Join(url.Path, "/render")

	queryPart := constructQueryPart(q)
	queryPart.Add("from", graphiteSinceString(ago))
	url.RawQuery = queryPart.Encode()

	resp, err := g.Client.Get(url.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	return parseGraphiteResponse(body)
}
func main() {
	runtime.GOMAXPROCS(numOfCore)

	file, err := os.Create("app_host_list.txt")
	if err != nil {
		// Return early: the original kept going and would have written to a
		// nil file.
		fmt.Println(err)
		return
	}

	queue := make(chan *pingResult)
	for i := 0; i < numOfCore; i++ {
		go pingRoutine(queue, i*hostPerCore, (i+1)*hostPerCore)
	}

	succ := 0.0
	var fastDuration *time.Duration
	var fastHostname string
	for i := 0; i < maxAppleHost; i++ {
		result := <-queue
		if result.Error == nil {
			succ = succ + 1
			if fastDuration == nil || result.Duration.Nanoseconds() < fastDuration.Nanoseconds() {
				fastDuration = result.Duration
				fastHostname = result.Hostname
			}
			file.WriteString(fmt.Sprint(result.Duration, "\t", result.Hostname, "\n"))
		} else {
			fmt.Println(result.Hostname, ": ", result.Error)
		}
		if i%100 == 0 {
			fmt.Printf(". %d ", i)
		}
	}

	fmt.Println("\nSucceeded: ", succ/maxAppleHost)
	fmt.Println("Fast: ", fastHostname, " Duration: ", fastDuration)
}
// newImagePolicyWebhook creates a temporary kubeconfig file from the provided
// arguments and attempts to load a new imagePolicyWebhook from it.
func newImagePolicyWebhook(callbackURL string, clientCert, clientKey, ca []byte, cacheTime time.Duration, defaultAllow bool) (*imagePolicyWebhook, error) {
	tempfile, err := ioutil.TempFile("", "")
	if err != nil {
		return nil, err
	}
	p := tempfile.Name()
	defer os.Remove(p)
	config := v1.Config{
		Clusters: []v1.NamedCluster{
			{
				Cluster: v1.Cluster{Server: callbackURL, CertificateAuthorityData: ca},
			},
		},
		AuthInfos: []v1.NamedAuthInfo{
			{
				AuthInfo: v1.AuthInfo{ClientCertificateData: clientCert, ClientKeyData: clientKey},
			},
		},
	}
	if err := json.NewEncoder(tempfile).Encode(config); err != nil {
		return nil, err
	}

	tempconfigfile, err := ioutil.TempFile("", "")
	if err != nil {
		return nil, err
	}
	pc := tempconfigfile.Name()
	defer os.Remove(pc)

	configTmpl, err := template.New("testconfig").Parse(defaultConfigTmplYAML)
	if err != nil {
		return nil, fmt.Errorf("failed to parse test template: %v", err)
	}
	dataConfig := struct {
		KubeConfig   string
		AllowTTL     int64
		DenyTTL      int64
		RetryBackoff int64
		DefaultAllow bool
	}{
		KubeConfig:   p,
		AllowTTL:     cacheTime.Nanoseconds(),
		DenyTTL:      cacheTime.Nanoseconds(),
		RetryBackoff: 0,
		DefaultAllow: defaultAllow,
	}
	if err := configTmpl.Execute(tempconfigfile, dataConfig); err != nil {
		return nil, fmt.Errorf("failed to execute test template: %v", err)
	}

	// Create a new admission controller
	configFile, err := os.Open(pc)
	if err != nil {
		return nil, fmt.Errorf("failed to read test config: %v", err)
	}
	defer configFile.Close()

	wh, err := NewImagePolicyWebhook(configFile)
	return wh.(*imagePolicyWebhook), err
}
// ExecuteVtctlCommand is part of the VtctlClient interface
func (client *gRPCVtctlClient) ExecuteVtctlCommand(ctx context.Context, args []string, actionTimeout, lockTimeout time.Duration) (<-chan *logutil.LoggerEvent, vtctlclient.ErrFunc, error) {
	query := &pb.ExecuteVtctlCommandRequest{
		Args:          args,
		ActionTimeout: actionTimeout.Nanoseconds(),
		LockTimeout:   lockTimeout.Nanoseconds(),
	}

	stream, err := client.c.ExecuteVtctlCommand(ctx, query)
	if err != nil {
		return nil, nil, err
	}

	results := make(chan *logutil.LoggerEvent, 1)
	var finalError error
	go func() {
		for {
			le, err := stream.Recv()
			if err != nil {
				if err != io.EOF {
					finalError = err
				}
				close(results)
				return
			}
			results <- logutil.ProtoToLoggerEvent(le.Event)
		}
	}()
	return results, func() error { return finalError }, nil
}
func (me *uberShaders) ensureProg() (prog *ugl.Program) {
	pname := thrRend.curEffect.uberPnames[thrRend.curTech.name()]
	if prog = ogl.progs.Get(pname); prog == nil {
		var job uberShaderJob
		job.init(pname)
		prog = ogl.progs.AddNew(pname)
		var err error
		if err = me.setShaderSources(prog, &job, thrRend.curTech.name(), thrRend.curEffect); err == nil {
			var dur time.Duration
			if dur, err = ogl.makeProgs(false, pname); err == nil {
				Diag.LogShaders("Built new GLSL shader program '%s' in %v", pname, dur)
				Stats.addProgCompile(1, dur.Nanoseconds())
				prog.Tag = thrRend.curTech
				if err = prog.SetAttrLocations(job.progAtts...); err == nil {
					if err = prog.SetUnifLocations(job.progUnis...); err == nil {
						// for _, meshBuf := range Core.MeshBuffers {
						// 	if err = meshBuf.setupVao(prog.Index); err != nil {
						// 		break
						// 	}
						// }
					}
				}
			}
		}
		if err != nil {
			Diag.LogErr(err)
		}
	}
	return
}
func (a *Account) work(id int) {
	log.LogMessage("db work, id:", id)
	var startTime time.Time
	var delay time.Duration
	warninglvl := 50 * time.Millisecond
	for {
		select {
		case caller := <-a.queue[id]:
			log.LogMessage(caller.GetSrc(), " rpc call:", caller.GetMethod(), ", thread:", id)
			startTime = time.Now()
			err := caller.Call()
			if err != nil {
				log.LogError("rpc error:", err)
			}
			delay = time.Since(startTime)
			if delay > warninglvl {
				log.LogWarning("rpc call ", caller.GetMethod(), " delay:", delay.Nanoseconds()/1000000, "ms")
			}
			caller.Free()
		default:
			if a.quit {
				return
			}
			time.Sleep(time.Millisecond)
		}
	}
}
// NewListener creates and initializes a new Listener object
func NewListener(addr string, port string, expire time.Duration, captureResponse bool) (l *Listener) {
	l = &Listener{captureResponse: captureResponse}

	l.packetsChan = make(chan *TCPPacket, 10000)
	l.messagesChan = make(chan *TCPMessage, 10000)
	l.quit = make(chan bool)

	l.messages = make(map[string]*TCPMessage)
	l.ackAliases = make(map[uint32]uint32)
	l.seqWithData = make(map[uint32]uint32)
	l.respAliases = make(map[uint32]*request)

	l.addr = addr
	_port, _ := strconv.Atoi(port)
	l.port = uint16(_port)

	// A zero expiry means "use the default" of 2 seconds.
	if expire.Nanoseconds() == 0 {
		expire = 2000 * time.Millisecond
	}
	l.messageExpire = expire

	go l.listen()
	go l.readRAWSocket()

	return
}
func main() {
	procLimit, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Printf("Atoi: %v", err)
		return
	}
	if procLimit < 1 {
		procLimit = len(urls)
	}
	runtime.GOMAXPROCS(1)

	const iterations = 10
	n := len(urls)
	service, quit := startServer(getURL, procLimit)

	for i := 0; i < iterations; i++ {
		reqs := make([]request, len(urls))
		// Time each batch against a real clock; the original diffed an
		// uninitialized time.Duration against itself and always printed 0.
		startInsideLoop := time.Now()
		for i := 0; i < n; i++ {
			req := &reqs[i]
			req.url = urls[i]
			req.replyc = make(chan string, 1024)
			service <- req
		}
		for i := n - 1; i >= 0; i-- { // doesn't matter what order
			fmt.Println(<-reqs[i].replyc)
		}
		fmt.Printf("Inside_loop,%d\n", time.Since(startInsideLoop).Nanoseconds())
	}
	quit <- true // shut down the server
	fmt.Printf("urls:%d\n", n)
	fmt.Printf("Server processes: %d\n", procLimit)
}
func (ss *simplexSession) get(seqno Seqno, poll time.Duration, behavior int) (ret [][]byte, err error) {
	timeout := false
	handleMessage := func(msg message) {
		ret = append(ret, msg.msg)
	}

	if poll.Nanoseconds() > 0 {
		select {
		case msg := <-ss.ch:
			handleMessage(msg)
		case <-time.After(poll):
			timeout = true
		}
	}

	if !timeout {
	loopMessages:
		for {
			select {
			case msg := <-ss.ch:
				handleMessage(msg)
			default:
				break loopMessages
			}
		}
	}

	if (behavior&BadRouterReorder) != 0 && len(ret) > 1 {
		ret[0], ret[1] = ret[1], ret[0]
	}
	if (behavior&BadRouterDrop) != 0 && len(ret) > 1 {
		ret = ret[1:]
	}
	return ret, err
}
// GetCacheStatus returns a GatewayEndPointCacheStatus representing the current gateway status.
func (gepsa *GatewayEndPointStatusAggregator) GetCacheStatus() *GatewayEndPointCacheStatus {
	status := &GatewayEndPointCacheStatus{
		Keyspace: gepsa.Keyspace,
		Shard:    gepsa.Shard,
		Name:     gepsa.Name,
	}
	gepsa.mu.RLock()
	defer gepsa.mu.RUnlock()
	status.TabletType = gepsa.TabletType
	status.Addr = gepsa.Addr
	status.QueryCount = gepsa.QueryCount
	status.QueryError = gepsa.QueryError
	var totalQuery uint64
	for _, c := range gepsa.queryCountInMinute {
		totalQuery += c
	}
	var totalLatency time.Duration
	for _, d := range gepsa.latencyInMinute {
		totalLatency += d
	}
	status.QPS = totalQuery / 60
	if totalQuery > 0 {
		// Average latency in milliseconds per query.
		status.AvgLatency = float64(totalLatency.Nanoseconds()) / float64(totalQuery) / 1000000
	}
	return status
}
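// Worked example (added for illustration) of the aggregation above: 120
// queries totaling 600ms of latency over the last minute yield QPS = 120/60
// = 2 and AvgLatency = 6e8ns / 120 / 1e6 = 5ms.
func avgLatencyExample() float64 {
	totalLatency, totalQuery := 600*time.Millisecond, uint64(120)
	return float64(totalLatency.Nanoseconds()) / float64(totalQuery) / 1000000 // 5 (ms)
}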
// calcNextSize takes the current performance metrics and attempts to
// calculate what the next block size should be so that the next run takes
// roughly the target time.
func calcNextSize(b uint64, dur time.Duration) uint64 {
	if b == 0 {
		return startBlockSize
	}
	target := time.Second * 5
	return (b * uint64(target.Nanoseconds())) / uint64(dur.Nanoseconds())
}
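// Worked example (added for illustration): calcNextSize scales the previous
// block size toward the 5s target, so 1000 bytes that took 2s suggests
// 1000 * 5e9 / 2e9 = 2500 bytes for the next run.
func calcNextSizeExample() uint64 {
	return calcNextSize(1000, 2*time.Second) // 2500
}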
// Conn creates a new net.Conn wrapping the given net.Conn that times out after
// the specified period. Once a connection has timed out, any pending reads or
// writes will return io.EOF and the underlying connection will be closed.
//
// idleTimeout specifies how long to wait for inactivity before considering the
// connection idle.
//
// If onIdle is specified, it will be called to indicate when the connection
// has idled and been closed.
func Conn(conn net.Conn, idleTimeout time.Duration, onIdle func()) *IdleTimingConn {
	c := &IdleTimingConn{
		conn:             conn,
		idleTimeout:      idleTimeout,
		halfIdleTimeout:  time.Duration(idleTimeout.Nanoseconds() / 2),
		activeCh:         make(chan bool, 1),
		closedCh:         make(chan bool, 1),
		lastActivityTime: time.Now().UnixNano(),
	}

	go func() {
		timer := time.NewTimer(idleTimeout)
		defer timer.Stop()
		for {
			select {
			case <-c.activeCh:
				// We're active, continue
				timer.Reset(idleTimeout)
				atomic.StoreInt64(&c.lastActivityTime, time.Now().UnixNano())
				continue
			case <-timer.C:
				c.Close()
				if onIdle != nil {
					onIdle()
				}
				return
			case <-c.closedCh:
				return
			}
		}
	}()

	return c
}
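// Usage sketch (added for illustration): wrap an accepted connection so it is
// closed after 30 seconds of inactivity; the onIdle callback here just logs.
func idleConnExample(raw net.Conn) *IdleTimingConn {
	return Conn(raw, 30*time.Second, func() {
		fmt.Println("connection idled out and was closed")
	})
}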
// isHealthyOffsetInterval returns true if the ClusterOffsetInterval indicates
// that the node's offset is within maxOffset, else false. For example, if the
// offset interval is [-20, -11] and the maxOffset is 10 nanoseconds, then the
// clock offset must be too great, because no point in the interval is within
// the maxOffset.
func isHealthyOffsetInterval(i ClusterOffsetInterval, maxOffset time.Duration) bool {
	return i.Lowerbound <= maxOffset.Nanoseconds() &&
		i.Upperbound >= -maxOffset.Nanoseconds()
}
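// Sketch (added for illustration) reproducing the doc-comment example: the
// interval [-20ns, -11ns] lies entirely outside ±10ns, so the check fails.
func offsetIntervalExample() bool {
	i := ClusterOffsetInterval{Lowerbound: -20, Upperbound: -11}
	return isHealthyOffsetInterval(i, 10*time.Nanosecond) // false
}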
func (ex *SamplableExperiment) Sample() {
	commands := make(map[string]Command)
	var iterations int64
	var totalTime time.Duration
	var avg time.Duration
	var lastError error
	var lastResult time.Duration
	var totalErrors int
	var workers int
	var worstResult time.Duration
	startTime := time.Now()

	for {
		sampleType := OtherSample
		select {
		case iteration, ok := <-ex.iteration:
			if !ok {
				close(ex.samples)
				return
			}
			sampleType = ResultSample
			iterations = iterations + 1
			totalTime = totalTime + iteration.Duration
			avg = time.Duration(totalTime.Nanoseconds() / iterations)
			lastResult = iteration.Duration
			if iteration.Duration > worstResult {
				worstResult = iteration.Duration
			}
			for _, step := range iteration.Steps {
				cmd := commands[step.Command]
				cmd.Count = cmd.Count + 1
				cmd.TotalTime = cmd.TotalTime + step.Duration
				cmd.LastTime = step.Duration
				cmd.Average = time.Duration(cmd.TotalTime.Nanoseconds() / cmd.Count)
				if step.Duration > cmd.WorstTime {
					cmd.WorstTime = step.Duration
				}
				commands[step.Command] = cmd
			}
			if iteration.Error != nil {
				lastError = iteration.Error
				totalErrors = totalErrors + 1
			}
		case w := <-ex.workers:
			workers = workers + w
		case seconds := <-ex.ticks:
			sampleType = ThroughputSample
			for key := range commands {
				cmd := commands[key]
				cmd.Throughput = float64(cmd.Count) / float64(seconds)
				commands[key] = cmd
			}
		}
		ex.samples <- &Sample{commands, avg, totalTime, iterations, totalErrors, workers, lastResult, lastError, worstResult, time.Since(startTime), sampleType}
	}
}
// newRestartFrequency returns an initialized restart frequency.
func newRestartFrequency(intensity int, period time.Duration) *restartFrequency {
	return &restartFrequency{
		intensity: intensity,
		period:    period.Nanoseconds(),
		restarts:  make([]int64, 0),
	}
}
func logObjectPopulate(name string, duration time.Duration) {
	var (
		value     string
		unit      string
		colorFunc func(string) string
		message   string
	)

	s := duration.Seconds()
	ms := duration.Nanoseconds() / 1000000
	us := duration.Nanoseconds() / 1000

	if s >= 1 {
		unit = "s"
		colorFunc = secondsColor
		value = fmt.Sprintf("%.2f", s)
	} else if ms >= 1 {
		unit = "ms"
		colorFunc = milliColor
		value = fmt.Sprintf("%d", ms)
	} else {
		unit = "μs"
		colorFunc = microColor
		value = fmt.Sprintf("%d", us)
	}

	message = fmt.Sprintf(
		"Loaded %s in %s",
		name,
		colorFunc(fmt.Sprintf("%s%s", value, unit)),
	)

	log.Info(message)
}
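// Sketch (added for illustration): the unit is chosen purely by magnitude —
// at least one second prints fractional seconds, at least one millisecond
// prints whole milliseconds, anything smaller prints microseconds.
func logUnitsExample() {
	logObjectPopulate("users", 2300*time.Millisecond) // logs "Loaded users in 2.30s"
	logObjectPopulate("tags", 450*time.Microsecond)   // logs "Loaded tags in 450μs"
}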
func HumanDuration(d time.Duration) string {
	maybePluralize := func(input string, num int) string {
		if num == 1 {
			return input
		}
		return input + "s"
	}
	nanos := d
	days := int(nanos / (time.Hour * 24))
	nanos %= time.Hour * 24
	hours := int(nanos / time.Hour)
	nanos %= time.Hour
	minutes := int(nanos / time.Minute)
	nanos %= time.Minute
	seconds := int(nanos / time.Second)

	s := ""
	if days > 0 {
		s += fmt.Sprintf("%d %s ", days, maybePluralize("day", days))
	}
	if hours > 0 {
		s += fmt.Sprintf("%d %s ", hours, maybePluralize("hour", hours))
	}
	if minutes > 0 {
		s += fmt.Sprintf("%d %s ", minutes, maybePluralize("minute", minutes))
	}
	// Always include seconds (the original guarded on `seconds >= 0`, which is
	// always true) and avoid the trailing space it appended.
	s += fmt.Sprintf("%d %s", seconds, maybePluralize("second", seconds))
	return s
}
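// Usage sketch (added for illustration): each nonzero unit is printed and
// pluralized, and a seconds component always appears.
func humanDurationExample() {
	fmt.Println(HumanDuration(25*time.Hour + time.Minute)) // "1 day 1 hour 1 minute 0 seconds"
}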
// dur2secs converts a duration to fractional seconds. Note that Hours,
// Minutes, Seconds, and Nanoseconds each report the *total* duration in that
// unit, so the original sum of all four returned roughly four times the true
// value; a single conversion is both correct and simpler.
func dur2secs(dur time.Duration) (secs float32) {
	return float32(dur.Seconds())
}
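// Check (added for illustration): after the fix, dur2secs agrees with
// time.Duration.Seconds, e.g. 90s -> 90 and 1.5s -> 1.5.
func dur2secsExample() (float32, float32) {
	return dur2secs(90 * time.Second), dur2secs(1500 * time.Millisecond) // 90, 1.5
}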