func TestHistogramRotate(t *testing.T) {
	defer TestingSetNow(nil)()
	setNow(0)
	h := NewHistogram(histWrapNum*time.Second, 1000+10*histWrapNum, 3)
	var now time.Duration
	for i := 0; i < 3*histWrapNum; i++ {
		v := int64(10 * i)
		h.RecordValue(v)
		now += time.Second
		setNow(now)
		cur := h.Current()
		// When i == histWrapNum-1, we expect the entry from i==0 to move out
		// of the window (since we rotated for the histWrapNum'th time).
		expMin := int64((1 + i - (histWrapNum - 1)) * 10)
		if expMin < 0 {
			expMin = 0
		}
		if min := cur.Min(); min != expMin {
			t.Fatalf("%d: unexpected minimum %d, expected %d", i, min, expMin)
		}
		if max, expMax := cur.Max(), v; max != expMax {
			t.Fatalf("%d: unexpected maximum %d, expected %d", i, max, expMax)
		}
	}
}
func client(wg *sync.WaitGroup, host string, port, len int, duration time.Duration, chStat chan int) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("error: %s", r)
		}
		log.Printf("disconnected")
		wg.Done()
	}()
	log.Printf("connecting to %s:%d, len %d, duration %s", host, port, len, duration.String())
	conn, err := utp.DialTimeout(fmt.Sprintf("%s:%d", host, port), time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	log.Printf("connected")
	buf := bytes.Repeat([]byte("H"), len)
	ts := time.Now()
	for time.Since(ts) < duration {
		n, err := conn.Write(buf)
		if err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		chStat <- n
	}
}
func timerLoop(count *uint64, ticker *time.Ticker) {
	lastTime := time.Now().UTC()
	lastCount := *count
	zeroes := int8(0)
	var (
		msgsSent, newCount uint64
		elapsedTime        time.Duration
		now                time.Time
		rate               float64
	)
	for {
		<-ticker.C
		newCount = *count
		now = time.Now()
		msgsSent = newCount - lastCount
		lastCount = newCount
		elapsedTime = now.Sub(lastTime)
		lastTime = now
		rate = float64(msgsSent) / elapsedTime.Seconds()
		if msgsSent == 0 {
			if newCount == 0 || zeroes == 3 {
				continue
			}
			zeroes++
		} else {
			zeroes = 0
		}
		log.Printf("Sent %d messages. %0.2f msg/sec\n", newCount, rate)
	}
}
// Attack reads its Targets from the passed Targeter and attacks them at
// the rate specified for duration time. Results are put into the returned
// channel as soon as they arrive.
func (a *Attacker) Attack(tr Targeter, rate uint64, du time.Duration) chan *Result {
	resc := make(chan *Result)
	throttle := time.NewTicker(time.Duration(1e9 / rate))
	hits := rate * uint64(du.Seconds())
	wrk := a.workers
	if wrk == 0 || wrk > hits {
		wrk = hits
	}
	share := hits / wrk

	var wg sync.WaitGroup
	for i := uint64(0); i < wrk; i++ {
		wg.Add(1)
		go func(share uint64) {
			defer wg.Done()
			for j := uint64(0); j < share; j++ {
				select {
				case tm := <-throttle.C:
					resc <- a.hit(tr, tm)
				case <-a.stop:
					return
				}
			}
		}(share)
	}

	go func() {
		wg.Wait()
		close(resc)
		throttle.Stop()
	}()

	return resc
}
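// A minimal, self-contained sketch (not part of the original package) of the
// throttling pattern Attack relies on: a single ticker paces the request rate
// while a fixed pool of workers shares the total number of hits. The name
// doHit is a placeholder used for illustration only.
func throttledWorkersSketch(rate uint64, du time.Duration, workers uint64, doHit func(time.Time)) {
	throttle := time.NewTicker(time.Duration(1e9 / rate)) // one tick per request slot
	defer throttle.Stop()
	hits := rate * uint64(du.Seconds())
	share := hits / workers // hits handled by each worker

	var wg sync.WaitGroup
	for i := uint64(0); i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := uint64(0); j < share; j++ {
				doHit(<-throttle.C) // block until the next tick grants a slot
			}
		}()
	}
	wg.Wait()
}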
func (f Filter) Update(placement *Placement, sensors Sensors, dt time.Duration) {
	if f.AccGain+f.GyroGain != 1 {
		panic("Gains must add up to 1")
	}
	var (
		dts    = dt.Seconds()
		accDeg = PRY{
			Pitch: degAngle(sensors.Acc.Roll, sensors.Acc.Yaw),
			Roll:  degAngle(sensors.Acc.Pitch, sensors.Acc.Yaw),
		}
		gyroDeg = PRY{
			Pitch: placement.Pitch + (sensors.Gyro.Pitch * dts),
			Roll:  placement.Roll + (sensors.Gyro.Roll * dts),
			Yaw:   placement.Yaw + (sensors.Gyro.Yaw * dts),
		}
	)
	// Implements a simple complementary filter.
	// See http://www.pieter-jan.com/node/11
	placement.Pitch = gyroDeg.Pitch*f.GyroGain + accDeg.Pitch*f.AccGain
	placement.Roll = gyroDeg.Roll*f.GyroGain + accDeg.Roll*f.AccGain
	// @TODO Integrate gyro yaw with magnetometer yaw.
	placement.Yaw = gyroDeg.Yaw
	// The sonar sometimes reads very high values when on the ground. Ignoring
	// the sonar above a certain altitude solves the problem.
	// @TODO Use barometer above SonarMax.
	if sensors.Sonar < f.SonarMax {
		placement.Altitude += (sensors.Sonar - placement.Altitude) * f.SonarGain
	}
}
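// A hedged, self-contained illustration of the complementary-filter blend used
// above, with hypothetical numbers that are not taken from the original
// project: the gyro path integrates angular rate over dt, the accelerometer
// path anchors the long-term angle, and the two are mixed by gains summing to 1.
func complementaryBlendExample() float64 {
	const (
		gyroGain = 0.98
		accGain  = 0.02
	)
	prevPitch := 10.0 // degrees, previous estimate
	gyroRate := 2.0   // degrees per second reported by the gyro
	dt := (20 * time.Millisecond).Seconds()
	accPitch := 9.5 // degrees derived from the accelerometer

	gyroPitch := prevPitch + gyroRate*dt
	return gyroPitch*gyroGain + accPitch*accGain // ≈ 10.03 degrees
}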
// doShareUploadURL uploads files to the target.
func doShareUploadURL(objectURL string, isRecursive bool, expiry time.Duration, contentType string) *probe.Error {
	clnt, err := newClient(objectURL)
	if err != nil {
		return err.Trace(objectURL)
	}

	// Generate pre-signed access info.
	uploadInfo, err := clnt.ShareUpload(isRecursive, expiry, contentType)
	if err != nil {
		return err.Trace(objectURL, "expiry="+expiry.String(), "contentType="+contentType)
	}

	// Generate curl command.
	curlCmd := makeCurlCmd(objectURL, isRecursive, uploadInfo)
	printMsg(shareMesssage{
		ObjectURL:   objectURL,
		ShareURL:    curlCmd,
		TimeLeft:    expiry,
		ContentType: contentType,
	})

	// Save the shared URL to disk.
	return saveSharedURL(objectURL, curlCmd, expiry, contentType)
}
// fetchExcessSharedDuration returns a slice of shared-task durations (in
// seconds). Given a distro and a map of DistroScheduleData, it traverses the
// map looking for alternate distros that are unable to meet the
// maxDurationPerHost (turnaround) requirement for shared tasks.
func fetchExcessSharedDuration(distroScheduleData map[string]DistroScheduleData,
	distro string,
	maxDurationPerHost time.Duration) (sharedTasksDurationTimes []float64) {

	distroData := distroScheduleData[distro]

	// If we have more tasks to run than we have existing hosts and at least one
	// alternate distro cannot run all its shared scheduled and running tasks
	// within the maxDurationPerHost period, we need more hosts for this distro.
	for sharedDistro, sharedDuration := range distroData.sharedTasksDuration {
		if distro == sharedDistro {
			continue
		}

		alternateDistroScheduleData := distroScheduleData[sharedDistro]

		durationPerHost := alternateDistroScheduleData.totalTasksDuration /
			(float64(alternateDistroScheduleData.numExistingHosts) +
				float64(alternateDistroScheduleData.nominalNumNewHosts))

		// If an alternate distro is unable to meet the turnaround requirement,
		// append its shared tasks' duration to the returned slice.
		if durationPerHost > maxDurationPerHost.Seconds() {
			sharedTasksDurationTimes = append(sharedTasksDurationTimes, sharedDuration)
		}
	}
	return
}
// NewIngestionProcess creates a Process for ingesting data.
func NewIngestionProcess(git *gitinfo.GitInfo, tileDir, datasetName string, ri ingester.ResultIngester, config map[string]string, every time.Duration, nCommits int, minDuration time.Duration, statusDir, metricName string) ProcessStarter {
	return func() {
		i, err := ingester.NewIngester(git, tileDir, datasetName, ri, nCommits, minDuration, config, statusDir, metricName)
		if err != nil {
			glog.Fatalf("Failed to create Ingester: %s", err)
		}

		glog.Infof("Starting %s ingester. Run every %s.", datasetName, every.String())

		// oneStep is a single round of ingestion.
		oneStep := func() {
			glog.Infof("Running ingester: %s", datasetName)
			err := i.Update()
			if err != nil {
				glog.Error(err)
			}
			glog.Infof("Finished running ingester: %s", datasetName)
		}

		// Start the ingester.
		go func() {
			oneStep()
			for range time.Tick(every) {
				oneStep()
			}
		}()
	}
}
func TimerL(name string, duration time.Duration, rate float64) {
	if rand.Float64() > rate {
		return
	}
	HistogramL(name, float64(duration.Nanoseconds()/1000000), rate)
}
func run(name string) {
	var t, total time.Duration

	test, ok := tests[name]
	if !ok {
		fmt.Fprintf(os.Stderr, "test: `%s` does not exist\n", name)
		os.Exit(1)
	}

	fmt.Printf("%s:\n", strings.ToUpper(name))
	for i := 0; i < *R; i++ {
		if *mock {
			t = BenchmarkMock(test)
		} else {
			t = BenchmarkRedis(test)
		}
		total += t
		prints(t)
	}

	avg := time.Duration(total.Nanoseconds() / int64(*R))
	print("AVG ")
	printsA(avg, total)
	println()
}
// newRestartFrequency returns an initialized restart frequency.
func newRestartFrequency(intensity int, period time.Duration) *restartFrequency {
	return &restartFrequency{
		intensity: intensity,
		period:    period.Nanoseconds(),
		restarts:  make([]int64, 0),
	}
}
// getOneTimeResourceUsageOnNode queries the node's /stats/container endpoint
// and returns the resource usage of targetContainers for the past cpuInterval.
// The acceptable range of the interval is 2s~120s. Be warned that as the
// interval (and #containers) increases, the size of kubelet's response could
// be significant. E.g., the 60s interval stats for ~20 containers is ~1.5MB.
// Don't hammer the node with frequent, heavy requests.
//
// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
// stats points to compute the cpu usage over the interval. Assuming cadvisor
// polls every second, we'd need to get N stats points for an N-second interval.
// Note that this is an approximation and may not be accurate, hence we also
// write the actual interval used for calculation (based on the timestamps of
// the stats points) in containerResourceUsage.CPUInterval.
func getOneTimeResourceUsageOnNode(c *client.Client, nodeName string, cpuInterval time.Duration) (map[string]*containerResourceUsage, error) {
	numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
	if numStats < 2 || numStats > maxNumStatsToRequest {
		return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest)
	}
	// Get information of all containers on the node.
	containerInfos, err := getContainerInfo(c, nodeName, &kubelet.StatsRequest{
		ContainerName: "/",
		NumStats:      numStats,
		Subcontainers: true,
	})
	if err != nil {
		return nil, err
	}
	// Process container infos that are relevant to us.
	usageMap := make(map[string]*containerResourceUsage, len(targetContainers))
	for _, name := range targetContainers {
		info, ok := containerInfos[name]
		if !ok {
			return nil, fmt.Errorf("missing info for container %q on node %q", name, nodeName)
		}
		first := info.Stats[0]
		last := info.Stats[len(info.Stats)-1]
		usageMap[name] = computeContainerResourceUsage(name, first, last)
	}
	return usageMap, nil
}
func StartHeartbeats(localIp string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) (stopChan chan (chan bool)) {
	if len(config.EtcdUrls) == 0 {
		return
	}

	if storeAdapter == nil {
		panic("store adapter is nil")
	}

	logger.Debugf("Starting Health Status Updates to Store: /healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index)
	status, stopChan, err := storeAdapter.MaintainNode(storeadapter.StoreNode{
		Key:   fmt.Sprintf("/healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index),
		Value: []byte(localIp),
		TTL:   uint64(ttl.Seconds()),
	})
	if err != nil {
		panic(err)
	}

	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
// isHealthyOffsetInterval returns true if the ClusterOffsetInterval indicates
// that the node's offset is within maxOffset, else false. For example, if the
// offset interval is [-20, -11] and the maxOffset is 10 nanoseconds, then the
// clock offset must be too great, because no point in the interval is within
// the maxOffset.
func isHealthyOffsetInterval(i ClusterOffsetInterval, maxOffset time.Duration) bool {
	return i.Lowerbound <= maxOffset.Nanoseconds() &&
		i.Upperbound >= -maxOffset.Nanoseconds()
}
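// A hedged sketch of a table-driven test for isHealthyOffsetInterval. It
// assumes ClusterOffsetInterval exposes the Lowerbound/Upperbound fields used
// above and that the test lives in the same package; it is illustrative only.
func TestIsHealthyOffsetIntervalSketch(t *testing.T) {
	maxOffset := 10 * time.Nanosecond
	cases := []struct {
		interval ClusterOffsetInterval
		want     bool
	}{
		{ClusterOffsetInterval{Lowerbound: -20, Upperbound: -11}, false}, // entirely below -maxOffset
		{ClusterOffsetInterval{Lowerbound: -5, Upperbound: 5}, true},     // straddles zero
		{ClusterOffsetInterval{Lowerbound: 11, Upperbound: 20}, false},   // entirely above maxOffset
	}
	for _, c := range cases {
		if got := isHealthyOffsetInterval(c.interval, maxOffset); got != c.want {
			t.Errorf("interval %+v: got %v, want %v", c.interval, got, c.want)
		}
	}
}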
func (s *Server) DoBatchRecording(name string, past time.Duration) (id string, err error) {
	v := url.Values{}
	v.Add("type", "batch")
	v.Add("name", name)
	v.Add("past", past.String())
	r, err := http.Post(s.URL()+"/record?"+v.Encode(), "", nil)
	if err != nil {
		return
	}
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		err = fmt.Errorf("unexpected status code got %d exp %d", r.StatusCode, http.StatusOK)
		return
	}

	// Decode valid response.
	type resp struct {
		RecordingID string `json:"RecordingID"`
		Error       string `json:"Error"`
	}

	rp := resp{}
	d := json.NewDecoder(r.Body)
	if err = d.Decode(&rp); err != nil {
		return
	}
	if rp.Error != "" {
		err = errors.New(rp.Error)
		return
	}
	id = rp.RecordingID

	v = url.Values{}
	v.Add("id", id)
	_, err = s.HTTPGet(s.URL() + "/record?" + v.Encode())
	return
}
func (s *Statistics) printHumanReadable() {
	delta := s.Delta
	nreq := SUCCESS + FAIL
	rps := float64(nreq) / float64(delta.Seconds())
	fmt.Printf("\nrequest count:%d, concurrency:%d, time:%.5f[s], %f[req/s]\n", nreq, s.MaxRequest, delta.Seconds(), rps)
	fmt.Printf("SUCCESS %d\n", SUCCESS)
	fmt.Printf("FAILED %d\n", FAIL)

	var avgTimeByPath map[string]float64 = map[string]float64{}
	var totalCount uint32
	var totalTime time.Duration

	for path, cnt := range PathCount {
		totalTime += PathTime[path]
		totalCount += cnt
		avgTimeByPath[path] += PathTime[path].Seconds() / float64(cnt)
	}
	fmt.Printf("Average response time[ms]: %v\n", 1000.*totalTime.Seconds()/float64(totalCount))

	if s.Config.ShowReport {
		var stats AvarageTimeStats = []AvarageTimeByPath{}

		fmt.Printf("Average response time for each path (order by longest) [ms]:\n")
		for path, time := range avgTimeByPath {
			stats = append(stats, AvarageTimeByPath{Path: path, Time: time})
		}
		sort.Sort(sort.Reverse(stats))

		for i := 0; i < len(stats); i++ {
			fmt.Printf("%.3f : %s\n", stats[i].Time*1000., stats[i].Path)
		}
	}
}
func (p *PushProcessor) retryRequest(req *Request, retryAfter time.Duration, subscriber string, psp *PushServiceProvider, dp *DeliveryPoint) {
	if req.nrRetries >= p.max_nr_retry {
		return
	}
	newreq := new(Request)
	newreq.nrRetries = req.nrRetries + 1
	newreq.PreviousTry = req
	newreq.ID = req.ID
	newreq.Action = ACTION_PUSH
	newreq.PushServiceProvider = psp
	newreq.DeliveryPoint = dp
	newreq.RequestSenderAddr = req.RequestSenderAddr
	newreq.Notification = req.Notification

	newreq.Service = req.Service
	newreq.Subscribers = make([]string, 1)
	newreq.Subscribers[0] = subscriber
	newreq.PunchTimestamp()

	if req.nrRetries == 0 || req.backoffTime == 0 {
		newreq.backoffTime = init_backoff_time
	} else {
		newreq.backoffTime = req.backoffTime << 1
	}

	waitTime := newreq.backoffTime
	if retryAfter > 0*time.Second {
		waitTime = int64(retryAfter.Seconds())
	}

	duration := time.Duration(waitTime) * time.Second
	time.Sleep(duration)
	p.backendch <- newreq
}
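// A self-contained sketch of the exponential backoff schedule implied above:
// the wait time starts at an initial value and doubles on every retry, unless
// the remote side supplies an explicit retry-after hint. All names here are
// illustrative and are not part of the original push processor.
func backoffSchedule(initial time.Duration, maxRetries int) []time.Duration {
	waits := make([]time.Duration, 0, maxRetries)
	wait := initial
	for i := 0; i < maxRetries; i++ {
		waits = append(waits, wait)
		wait <<= 1 // double the backoff for the next attempt
	}
	return waits // e.g. initial=10s, maxRetries=4 -> [10s 20s 40s 1m20s]
}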
func HumanDuration(d time.Duration) string {
	maybePluralize := func(input string, num int) string {
		if num == 1 {
			return input
		}
		return input + "s"
	}
	nanos := time.Duration(d.Nanoseconds())
	days := int(nanos / (time.Hour * 24))
	nanos %= time.Hour * 24
	hours := int(nanos / (time.Hour))
	nanos %= time.Hour
	minutes := int(nanos / time.Minute)
	nanos %= time.Minute
	seconds := int(nanos / time.Second)

	s := ""
	if days > 0 {
		s += fmt.Sprintf("%d %s ", days, maybePluralize("day", days))
	}
	if hours > 0 {
		s += fmt.Sprintf("%d %s ", hours, maybePluralize("hour", hours))
	}
	if minutes > 0 {
		s += fmt.Sprintf("%d %s ", minutes, maybePluralize("minute", minutes))
	}
	if seconds >= 0 {
		s += fmt.Sprintf("%d %s ", seconds, maybePluralize("second", seconds))
	}
	return s
}
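// A hedged usage sketch for HumanDuration, assuming it lives in the same
// package. Note that the seconds component is always appended (the check is
// ">= 0") and every unit is followed by a space, so the result carries a
// trailing space.
func humanDurationExample() {
	d := 26*time.Hour + 1*time.Minute + 1*time.Second
	fmt.Println(HumanDuration(d))               // "1 day 2 hours 1 minute 1 second "
	fmt.Println(HumanDuration(2 * time.Minute)) // "2 minutes 0 seconds "
}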
// computeDurationBasedNumNewHosts returns the number of new hosts needed based
// on a heuristic that utilizes the total duration of currently running and
// scheduled tasks - and based on a maximum duration of a task per distro host -
// a turnaround cap on all outstanding and running tasks in the system.
func computeDurationBasedNumNewHosts(scheduledTasksDuration, runningTasksDuration, numExistingDistroHosts float64, maxDurationPerHost time.Duration) (numNewHosts int) {
	// Total duration of scheduled and currently running tasks.
	totalDistroTasksDuration := scheduledTasksDuration + runningTasksDuration

	// Number of hosts needed to meet the duration-based turnaround requirement.
	numHostsForTurnaroundRequirement := totalDistroTasksDuration / maxDurationPerHost.Seconds()

	// Floating-point number of new hosts needed.
	durationBasedNumNewHostsNeeded := numHostsForTurnaroundRequirement - numExistingDistroHosts

	// Duration-based number of new hosts needed.
	numNewHosts = int(math.Ceil(durationBasedNumNewHostsNeeded))

	// Return 0 if numNewHosts is less than 0.
	if numNewHosts < 0 {
		numNewHosts = 0
	}
	return
}
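// A worked example of the heuristic above, using hypothetical numbers and the
// same-package function: with 7200s of scheduled work, 1800s of running work,
// 2 existing hosts, and a 30-minute turnaround cap per host, 9000s / 1800s = 5
// hosts are needed in total, so 3 new hosts must be started.
func durationHeuristicExample() int {
	return computeDurationBasedNumNewHosts(7200, 1800, 2, 30*time.Minute) // 3
}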
// convertIPPDateToTime converts an RFC 2579 date to a time.Time object.
func convertIPPDateToTime(date *C.ipp_uchar_t) time.Time {
	r := bytes.NewReader(C.GoBytes(unsafe.Pointer(date), 11))
	var year uint16
	var month, day, hour, min, sec, dsec uint8
	binary.Read(r, binary.BigEndian, &year)
	binary.Read(r, binary.BigEndian, &month)
	binary.Read(r, binary.BigEndian, &day)
	binary.Read(r, binary.BigEndian, &hour)
	binary.Read(r, binary.BigEndian, &min)
	binary.Read(r, binary.BigEndian, &sec)
	binary.Read(r, binary.BigEndian, &dsec)

	var utcDirection, utcHour, utcMin uint8
	binary.Read(r, binary.BigEndian, &utcDirection)
	binary.Read(r, binary.BigEndian, &utcHour)
	binary.Read(r, binary.BigEndian, &utcMin)

	var utcOffset time.Duration
	utcOffset += time.Duration(utcHour) * time.Hour
	utcOffset += time.Duration(utcMin) * time.Minute

	var loc *time.Location
	if utcDirection == '-' {
		loc = time.FixedZone("", -int(utcOffset.Seconds()))
	} else {
		loc = time.FixedZone("", int(utcOffset.Seconds()))
	}

	nsec := int(dsec) * 100 * int(time.Millisecond)

	return time.Date(int(year), time.Month(month), int(day), int(hour), int(min), int(sec), nsec, loc)
}
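// A cgo-free sketch of the same RFC 2579 DateAndTime decoding, using a
// hard-coded 11-byte value for illustration: year (2 bytes, big endian),
// month, day, hour, minute, second, deci-seconds, UTC direction, UTC hours,
// UTC minutes.
func decodeRFC2579Example() time.Time {
	raw := []byte{
		0x07, 0xE0, // year 2016
		6, 15, 13, 45, 30, 5, // June 15, 13:45:30.5
		'+', 2, 0, // UTC offset +02:00
	}
	r := bytes.NewReader(raw)
	var year uint16
	var month, day, hour, min, sec, dsec, dir, offH, offM uint8
	binary.Read(r, binary.BigEndian, &year)
	for _, v := range []*uint8{&month, &day, &hour, &min, &sec, &dsec, &dir, &offH, &offM} {
		binary.Read(r, binary.BigEndian, v)
	}
	offset := time.Duration(offH)*time.Hour + time.Duration(offM)*time.Minute
	if dir == '-' {
		offset = -offset
	}
	loc := time.FixedZone("", int(offset.Seconds()))
	return time.Date(int(year), time.Month(month), int(day), int(hour), int(min), int(sec),
		int(dsec)*100*int(time.Millisecond), loc)
}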
// RunTest runs test on the node.
func (n *NodeE2ERemote) RunTest(host, workspace, results, junitFilePrefix, testArgs, ginkgoArgs string, timeout time.Duration) (string, error) {
	// Install the cni plugin.
	if err := installCNI(host, workspace); err != nil {
		return "", err
	}

	// Configure iptables firewall rules.
	if err := configureFirewall(host); err != nil {
		return "", err
	}

	// Kill any running node processes.
	cleanupNodeProcesses(host)

	testArgs, err := updateGCIMounterPath(testArgs, host, workspace)
	if err != nil {
		return "", err
	}

	// Run the tests.
	glog.V(2).Infof("Starting tests on %q", host)
	cmd := getSSHCommand(" && ",
		fmt.Sprintf("cd %s", workspace),
		fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s %s", timeout.Seconds(), ginkgoArgs, host, results, junitFilePrefix, testArgs),
	)
	return SSH(host, "sh", "-c", cmd)
}
/* The "put" command is for any process that wants to insert a job into the queue. It comprises a command line followed by the job body: put <pri> <delay> <ttr> <bytes>\r\n <data>\r\n It inserts a job into the client's currently used tube (see the "use" command below). - <pri> is an integer < 2**32. Jobs with smaller priority values will be scheduled before jobs with larger priorities. The most urgent priority is 0; the least urgent priority is 4,294,967,295. - <delay> is an integer number of seconds to wait before putting the job in the ready queue. The job will be in the "delayed" state during this time. - <ttr> -- time to run -- is an integer number of seconds to allow a worker to run this job. This time is counted from the moment a worker reserves this job. If the worker does not delete, release, or bury the job within <ttr> seconds, the job will time out and the server will release the job. The minimum ttr is 1. If the client sends 0, the server will silently increase the ttr to 1. - <bytes> is an integer indicating the size of the job body, not including the trailing "\r\n". This value must be less than max-job-size (default: 2**16). - <data> is the job body -- a sequence of bytes of length <bytes> from the previous line. After sending the command line and body, the client waits for a reply, which may be: - "INSERTED <id>\r\n" to indicate success. - <id> is the integer id of the new job - "BURIED <id>\r\n" if the server ran out of memory trying to grow the priority queue data structure. - <id> is the integer id of the new job - "EXPECTED_CRLF\r\n" The job body must be followed by a CR-LF pair, that is, "\r\n". These two bytes are not counted in the job size given by the client in the put command line. - "JOB_TOO_BIG\r\n" The client has requested to put a job with a body larger than max-job-size bytes. - "DRAINING\r\n" This means that the server has been put into "drain mode" and is no longer accepting new jobs. The client should try another server or disconnect and try again later. */ func (this *BeanstalkdClient) Put(priority uint32, delay, ttr time.Duration, data []byte) (id uint64, err error) { cmd := fmt.Sprintf("put %d %d %d %d\r\n", priority, uint64(delay.Seconds()), uint64(ttr.Seconds()), len(data)) cmd = cmd + string(data) + string(crnl) _, reply, err := this.sendReply(cmd) if err != nil { return 0, err } switch { case strings.Index(reply, "INSERTED") == 0: var id uint64 _, perr := fmt.Sscanf(reply, "INSERTED %d\r\n", &id) return id, perr case strings.Index(reply, "BURIED") == 0: var id uint64 _, perr := fmt.Sscanf(reply, "BURIED %d\r\n", &id) return id, perr case reply == "EXPECTED_CRLF\r\n": return 0, errExpectedCrlf case reply == "JOB_TOO_BIG\r\n": return 0, errJobTooBig case reply == "DRAINING\r\n": return 0, errDraining default: return 0, this.parseError(reply) } }
// WaitPort waits until the port is available.
func (c *Container) WaitPort(port int, timeout time.Duration) int {
	p := c.ports[port]
	if p == 0 {
		log.Fatalf("port %d is not exposed on %s", port, c.image)
	}
	nw := c.networks[port]
	if nw == "" {
		log.Fatalf("network not described on %s", c.image)
	}
	end := time.Now().Add(timeout)
	for {
		now := time.Now()
		conn, err := net.DialTimeout(nw, c.Addr(port), end.Sub(now))
		if err != nil {
			if time.Now().After(end) {
				log.Fatalf("port %d not available on %s for %f seconds", port, c.image, timeout.Seconds())
			}
			time.Sleep(time.Second)
			continue
		}
		conn.Close()
		break
	}
	return p
}
func (server *server) statistics() serverStats {
	stats := *server.stats
	stats.Uptime = time.Since(server.startedAt).Seconds()
	stats.CurrentTubes = len(server.tubes)
	stats.TotalJobs = len(server.jobs)

	for _, tube := range server.tubes {
		stats.CurrentJobsBuried += tube.buried.Len()
		stats.CurrentJobsDelayed += tube.delayed.Len()
		stats.CurrentJobsReady += tube.ready.Len()
		stats.CurrentJobsReserved += tube.reserved.Len()
	}

	usage := new(syscall.Rusage)
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, usage); err == nil {
		// User CPU time consumed by this process.
		s, ns := usage.Utime.Unix()
		utime := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
		stats.RusageUtime = utime.Seconds()

		// System CPU time consumed by this process.
		s, ns = usage.Stime.Unix()
		stime := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
		stats.RusageStime = stime.Seconds()
	} else {
		pf("failed to get rusage : %v", err)
	}

	return stats
}
// Incr increments the specified key. If the key did not exist, it sets it to 1
// and sets it to expire after the number of seconds specified by window.
//
// It returns the new count value and the number of remaining seconds, or an
// error if the operation fails.
func (r *redisStore) Incr(key string, window time.Duration) (int, int, error) {
	conn := r.pool.Get()
	defer conn.Close()
	if err := selectDB(r.db, conn); err != nil {
		return 0, 0, err
	}
	// Atomically increment and read the TTL.
	conn.Send("MULTI")
	conn.Send("INCR", r.prefix+key)
	conn.Send("TTL", r.prefix+key)
	vals, err := redis.Values(conn.Do("EXEC"))
	if err != nil {
		conn.Do("DISCARD")
		return 0, 0, err
	}
	var cnt, ttl int
	if _, err = redis.Scan(vals, &cnt, &ttl); err != nil {
		return 0, 0, err
	}
	// If there was no TTL set, then this is a newly created key (INCR creates
	// the key if it didn't exist), so set it to expire.
	if ttl == -1 {
		ttl = int(window.Seconds())
		_, err = conn.Do("EXPIRE", r.prefix+key, ttl)
		if err != nil {
			return 0, 0, err
		}
	}
	return cnt, ttl, nil
}
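// A hedged sketch of how Incr supports a fixed-window rate limit: each request
// bumps a per-client counter that expires after the window, and the caller
// rejects requests once the count exceeds the limit. Constructing the store is
// not shown in the snippet above, so it is taken as a parameter here.
func allowRequest(store *redisStore, clientID string, limit int) (bool, error) {
	count, _, err := store.Incr("ratelimit:"+clientID, time.Minute)
	if err != nil {
		return false, err
	}
	return count <= limit, nil
}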
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()
		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}

		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
// End_tick returns the elapsed time since the clock's start tick, in seconds.
func (clock *Clock) End_tick() float64 {
	return time.Since(clock.m_tick).Seconds()
}
// machineName: A unique identifier to identify the host that the current
// cAdvisor instance is running on.
// influxdbHost: The host which runs influxdb.
// percentilesDuration: Time window which will be considered when Percentiles()
// is called.
func New(machineName,
	tablename,
	database,
	username,
	password,
	influxdbHost string,
	isSecure bool,
	percentilesDuration time.Duration,
) (storage.StorageDriver, error) {
	config := &influxdb.ClientConfig{
		Host:     influxdbHost,
		Username: username,
		Password: password,
		Database: database,
		IsSecure: isSecure,
	}
	client, err := influxdb.NewClient(config)
	if err != nil {
		return nil, err
	}
	// TODO(monnand): With go 1.3, we cannot compress data now.
	client.DisableCompression()
	if percentilesDuration.Seconds() < 1.0 {
		percentilesDuration = 5 * time.Minute
	}

	ret := &influxdbStorage{
		client:      client,
		windowLen:   percentilesDuration,
		machineName: machineName,
		tableName:   tablename,
	}
	return ret, nil
}
func (i *RateInterval) GetCost(duration, startSecond time.Duration) float64 {
	price, _, rateUnit := i.GetRateParameters(startSecond)
	price /= rateUnit.Seconds()
	d := duration.Seconds()
	return d * price
}
// NewPingChecker returns a check function that can check if a host answers to an ICMP ping.
func NewPingChecker(host, service, ip string) CheckFunction {
	return func() Event {
		var result = Event{Host: host, Service: service, State: "critical"}

		p := fastping.NewPinger()
		p.MaxRTT = maxPingTime
		ra, err := net.ResolveIPAddr("ip4:icmp", ip)
		if err != nil {
			result.Description = err.Error()
		}

		p.AddIPAddr(ra)
		p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
			result.State = "ok"
			result.Metric = float32(rtt.Nanoseconds() / 1e6) // round-trip time in milliseconds
		}

		err = p.Run()
		if err != nil {
			result.Description = err.Error()
		}
		return result
	}
}