// IndexNear returns the index of the blob created closest to time t. The
// actual blob ref can be retrieved by passing the index to RefAt.
func (ti *TimeIndex) IndexNear(t time.Time) int {
	ti.lock.RLock()
	defer ti.lock.RUnlock()

	down, up := 0, len(ti.entries)-1
	if up < 0 {
		return -1
	}
	pivot, done := split(down, up)
	for !done {
		pivt := ti.entries[pivot].tm
		if t.After(pivt) {
			down, up = pivot, up
		} else {
			down, up = down, pivot
		}
		pivot, done = split(down, up)
	}

	lowt := ti.entries[down].tm
	upt := ti.entries[up].tm
	lowdiff := int64(time.Since(lowt)) - int64(time.Since(t))
	updiff := int64(time.Since(t)) - int64(time.Since(upt))
	if updiff < lowdiff {
		return up
	}
	return down
}
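// A minimal, self-contained sketch of the same "halve the range, then pick the
// nearer endpoint" search used by IndexNear above, written against a plain
// sorted []time.Time. The indexNear name and slice layout are illustrative
// assumptions, not part of the original TimeIndex API.
package main

import (
	"fmt"
	"time"
)

// indexNear returns the index of the entry closest to t, assuming times is
// sorted in ascending order. It returns -1 for an empty slice.
func indexNear(times []time.Time, t time.Time) int {
	if len(times) == 0 {
		return -1
	}
	down, up := 0, len(times)-1
	for up-down > 1 {
		mid := down + (up-down)/2
		if t.After(times[mid]) {
			down = mid
		} else {
			up = mid
		}
	}
	// Pick whichever of the two remaining endpoints lies closer to t.
	if times[up].Sub(t) < t.Sub(times[down]) {
		return up
	}
	return down
}

func main() {
	base := time.Now()
	times := []time.Time{base, base.Add(time.Minute), base.Add(3 * time.Minute)}
	fmt.Println(indexNear(times, base.Add(50*time.Second))) // 1: the entry at base+1m is nearest
}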
func ClientTask(pipe chan<- bool) {
	client, _ := zmq.NewSocket(zmq.DEALER)
	client.Connect("tcp://localhost:5555")

	fmt.Println("Setting up test...")
	time.Sleep(100 * time.Millisecond)

	fmt.Println("Synchronous round-trip test...")
	start := time.Now()
	var requests int
	for requests = 0; requests < 10000; requests++ {
		client.Send("hello", 0)
		client.Recv(0)
	}
	fmt.Println(requests, "calls in", time.Since(start))

	fmt.Println("Asynchronous round-trip test...")
	start = time.Now()
	for requests = 0; requests < 100000; requests++ {
		client.Send("hello", 0)
	}
	for requests = 0; requests < 100000; requests++ {
		client.Recv(0)
	}
	fmt.Println(requests, "calls in", time.Since(start))

	pipe <- true
}
// migRollingUpdatePoll (GCE/GKE-only) polls the progress of the MIG rolling
// update with ID id until it is complete. It returns an error if this takes
// longer than nt times the number of nodes.
func migRollingUpdatePoll(id string, nt time.Duration) error {
	// Two keys and a val.
	status, progress, done := "status", "statusMessage", "ROLLED_OUT"
	start, timeout := time.Now(), nt*time.Duration(testContext.CloudConfig.NumNodes)
	var errLast error
	Logf("Waiting up to %v for MIG rolling update to complete.", timeout)
	// TODO(mbforbes): Refactor this to use cluster_upgrade.go:retryCmd(...)
	if wait.Poll(restartPoll, timeout, func() (bool, error) {
		o, err := exec.Command("gcloud", "preview", "rolling-updates",
			fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
			fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
			"describe", id).CombinedOutput()
		if err != nil {
			errLast = fmt.Errorf("Error calling rolling-updates describe %s: %v", id, err)
			Logf("%v", errLast)
			return false, nil
		}
		output := string(o)

		// The 'describe' call probably succeeded; parse the output and try to
		// find the line that looks like "status: <status>" and see whether it's
		// done.
		Logf("Waiting for MIG rolling update: %s (%v elapsed)",
			parseKVLines(output, progress), time.Since(start))
		if st := parseKVLines(output, status); st == done {
			return true, nil
		}
		return false, nil
	}) != nil {
		return fmt.Errorf("timeout waiting %v for MIG rolling update to complete. Last error: %v", timeout, errLast)
	}
	Logf("MIG rolling update complete after %v", time.Since(start))
	return nil
}
func (cw *streamWriter) run() {
	var msgc chan raftpb.Message
	var heartbeatc <-chan time.Time
	var t streamType
	var enc encoder
	var flusher http.Flusher
	tickc := time.Tick(ConnReadTimeout / 3)

	for {
		select {
		case <-heartbeatc:
			start := time.Now()
			if err := enc.encode(linkHeartbeatMessage); err != nil {
				reportSentFailure(string(t), linkHeartbeatMessage)
				cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())
				cw.close()
				heartbeatc, msgc = nil, nil
				continue
			}
			flusher.Flush()
			reportSentDuration(string(t), linkHeartbeatMessage, time.Since(start))
		case m := <-msgc:
			start := time.Now()
			if err := enc.encode(m); err != nil {
				reportSentFailure(string(t), m)
				cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
				cw.close()
				heartbeatc, msgc = nil, nil
				cw.r.ReportUnreachable(m.To)
				continue
			}
			flusher.Flush()
			reportSentDuration(string(t), m, time.Since(start))
		case conn := <-cw.connc:
			cw.close()
			t = conn.t
			switch conn.t {
			case streamTypeMsgAppV2:
				enc = newMsgAppV2Encoder(conn.Writer, cw.fs)
			case streamTypeMessage:
				enc = &messageEncoder{w: conn.Writer}
			default:
				plog.Panicf("unhandled stream type %s", conn.t)
			}
			flusher = conn.Flusher
			cw.mu.Lock()
			cw.status.activate()
			cw.closer = conn.Closer
			cw.working = true
			cw.mu.Unlock()
			heartbeatc, msgc = tickc, cw.msgc
		case <-cw.stopc:
			cw.close()
			close(cw.done)
			return
		}
	}
}
func (t *Tsdb) Run() {
	for i := 0; i < t.Concurrency; i++ {
		go t.sendData()
	}
	ticker := time.NewTicker(time.Second)
	last := time.Now()
	for {
		select {
		case <-ticker.C:
			if time.Since(last) >= time.Second {
				log.Debug("no flushes in the last second. Flushing now.")
				last = time.Now()
				t.Flush()
				log.Debug("flush took %f seconds", time.Since(last).Seconds())
			}
		case <-t.flushMetrics:
			log.Debug("flush trigger received.")
			last = time.Now()
			t.Flush()
			log.Debug("flush took %f seconds", time.Since(last).Seconds())
		case <-t.flushEvents:
			t.SendEvents()
		case <-t.closeChan:
			close(t.dataChan)
			return
		}
	}
}
func singleping(ip string) (time.Duration, error) {
	addr := net.IPAddr{IP: net.ParseIP(ip)}
	sendid := os.Getpid() & 0xffff
	sendseq := 1
	pingpktlen := 64
	sendpkt := makePingRequest(sendid, sendseq, pingpktlen, []byte("Go Ping"))

	ipconn, err := net.DialIP("ip4:icmp", nil, &addr) // *IPConn (implements the Conn interface)
	if err != nil {
		log.Fatalf(`net.DialIP("ip4:icmp", %v) = %v`, ipconn, err)
	}

	ipconn.SetDeadline(time.Now().Add(time.Second)) // 1-second timeout

	start := time.Now()
	n, err := ipconn.WriteToIP(sendpkt, &addr)
	if err != nil || n != pingpktlen {
		log.Fatalf(`net.WriteToIP(..., %v) = %v, %v`, addr, n, err)
	}

	resp := make([]byte, 1024)
	_, _, pingerr := ipconn.ReadFrom(resp)
	if pingerr != nil {
		fmt.Printf("%s : FAIL\n", ip)
	} else {
		fmt.Printf("%s : %s\n", ip, time.Since(start))
	}
	// log.Printf("%x", resp)
	return time.Since(start), pingerr
}
// Munge is the workhorse that will actually close the PRs
func (CloseStalePR) Munge(obj *github.MungeObject) {
	if !obj.IsPR() {
		return
	}

	if obj.HasLabel(keepOpenLabel) {
		return
	}

	lastModif, err := findLastModificationTime(obj)
	if err != nil {
		glog.Errorf("Failed to find last modification: %v", err)
		return
	}

	closeIn := -time.Since(lastModif.Add(stalePullRequest))
	inactiveFor := time.Since(*lastModif)
	if closeIn <= 0 {
		closePullRequest(obj, inactiveFor)
	} else if closeIn <= startWarning {
		checkAndWarn(obj, inactiveFor, closeIn)
	} else {
		// Pull-request is active. Remove previous potential warning.
		// Ignore potential errors, we just want to remove old comments.
		comment, _ := findLatestWarningComment(obj)
		if comment != nil {
			obj.DeleteComment(comment)
		}
	}
}
func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
	start := time.Now()

	fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
	b := pbutil.MustMarshal(snapshot)
	crc := crc32.Update(0, crcTable, b)
	snap := snappb.Snapshot{Crc: crc, Data: b}
	d, err := snap.Marshal()
	if err != nil {
		return err
	} else {
		marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
	}

	err = pioutil.WriteAndSyncFile(path.Join(s.dir, fname), d, 0666)
	if err == nil {
		saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
	} else {
		err1 := os.Remove(path.Join(s.dir, fname))
		if err1 != nil {
			plog.Errorf("failed to remove broken snapshot file %s", path.Join(s.dir, fname))
		}
	}
	return err
}
// WaitForStateChange blocks until the state changes to something other than the sourceState
// or timeout fires. It returns false if timeout fires and true otherwise.
// TODO(zhaoq): Rewrite for complex Picker.
func (cc *Conn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {
	start := time.Now()
	cc.mu.Lock()
	defer cc.mu.Unlock()
	if sourceState != cc.state {
		return true
	}
	expired := timeout <= time.Since(start)
	if expired {
		return false
	}
	done := make(chan struct{})
	go func() {
		select {
		case <-time.After(timeout - time.Since(start)):
			cc.mu.Lock()
			expired = true
			cc.stateCV.Broadcast()
			cc.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == cc.state {
		cc.stateCV.Wait()
		if expired {
			return false
		}
	}
	return true
}
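// A self-contained sketch of the same "sync.Cond.Wait with a timeout goroutine
// that broadcasts" pattern used by WaitForStateChange above; the state struct,
// waitForChange name, and integer value guarded here are illustrative
// assumptions, not the gRPC types.
package main

import (
	"fmt"
	"sync"
	"time"
)

type state struct {
	mu    sync.Mutex
	cv    *sync.Cond
	value int
}

// waitForChange blocks until value differs from old or timeout elapses.
// It returns false if the timeout fired first, true otherwise.
func (s *state) waitForChange(old int, timeout time.Duration) bool {
	start := time.Now()
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.value != old {
		return true
	}
	expired := false
	done := make(chan struct{})
	go func() {
		select {
		case <-time.After(timeout - time.Since(start)):
			s.mu.Lock()
			expired = true
			s.cv.Broadcast() // wake the waiter so it can observe expiry
			s.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for s.value == old {
		s.cv.Wait()
		if expired {
			return false
		}
	}
	return true
}

func main() {
	s := &state{}
	s.cv = sync.NewCond(&s.mu)
	go func() {
		time.Sleep(50 * time.Millisecond)
		s.mu.Lock()
		s.value = 1
		s.cv.Broadcast()
		s.mu.Unlock()
	}()
	fmt.Println(s.waitForChange(0, time.Second)) // true
}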
func read(dbPath string, mapPop bool) {
	opt := &bolt.Options{Timeout: 5 * time.Minute, ReadOnly: true}
	if mapPop {
		fmt.Println("read with MAP_POPULATE flag...")
		opt = &bolt.Options{Timeout: 5 * time.Minute, ReadOnly: true, MmapFlags: syscall.MAP_POPULATE}
	} else {
		fmt.Println("read without MAP_POPULATE flag...")
	}

	to := time.Now()
	db, err := bolt.Open(dbPath, 0600, opt)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("bolt.Open took", time.Since(to))

	tr := time.Now()
	tx, err := db.Begin(writable)
	if err != nil {
		panic(err)
	}
	defer tx.Rollback()
	bk := tx.Bucket([]byte(bucketName))
	c := bk.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		// fmt.Printf("%s ---> %s.\n", k, v)
		_ = k
		_ = v
	}
	fmt.Println("bolt read took:", time.Since(tr))
}
func client_task(c chan string) {
	context, _ := zmq.NewContext()
	client, _ := context.NewSocket(zmq.DEALER)
	defer context.Close()
	defer client.Close()
	client.SetIdentity("C")
	client.Connect("tcp://localhost:5555")

	fmt.Println("Setting up test...")
	time.Sleep(time.Duration(100) * time.Millisecond)

	fmt.Println("Synchronous round-trip test...")
	start := time.Now()
	requests := 10000
	for i := 0; i < requests; i++ {
		client.Send([]byte("hello"), 0)
		client.Recv(0)
	}
	fmt.Printf("%d calls/second\n", int64(float64(requests)/time.Since(start).Seconds()))

	fmt.Println("Asynchronous round-trip test...")
	start = time.Now()
	for i := 0; i < requests; i++ {
		client.Send([]byte("hello"), 0)
	}
	for i := 0; i < requests; i++ {
		client.Recv(0)
	}
	fmt.Printf("%d calls/second\n", int64(float64(requests)/time.Since(start).Seconds()))

	c <- "done"
}
func (cache *Cache) Clear() {
	for key, entry := range cache.datamap {
		if entry.granted {
			dur := time.Since(entry.leaseTime)
			if dur > entry.leaseDur {
				entry.granted = false
			}
		}
		elem := entry.query.Front()
		for elem != nil {
			tempdur := time.Since(elem.Value.(time.Time))
			if tempdur > time.Duration(storagerpc.QueryCacheSeconds)*time.Second {
				_ = entry.query.Remove(elem)
				elem = entry.query.Front()
			} else {
				break
			}
		}
		if entry.query.Len() == 0 {
			delete(cache.datamap, key)
		}
	}
}
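// A self-contained sketch of the stale-query sweep above: timestamps are kept
// oldest-first in a container/list, so expired entries can be dropped from the
// front until a fresh one is found. The 10-second window stands in for
// storagerpc.QueryCacheSeconds; names here are illustrative.
package main

import (
	"container/list"
	"fmt"
	"time"
)

func pruneStale(queries *list.List, window time.Duration) {
	for e := queries.Front(); e != nil; e = queries.Front() {
		if time.Since(e.Value.(time.Time)) > window {
			queries.Remove(e)
		} else {
			break // entries are ordered oldest-first, so the rest are fresh
		}
	}
}

func main() {
	q := list.New()
	q.PushBack(time.Now().Add(-15 * time.Second)) // stale
	q.PushBack(time.Now().Add(-2 * time.Second))  // fresh
	pruneStale(q, 10*time.Second)
	fmt.Println(q.Len()) // 1
}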
func TestMain(m *testing.M) {
	size = numXi(index)
	pk = []byte{1}
	runtime.GOMAXPROCS(runtime.NumCPU())

	id := flag.Int("index", 1, "graph index")
	flag.Parse()
	index = int64(*id)
	graphDir = fmt.Sprintf("%s%d", graphDir, *id)
	//os.RemoveAll(graphDir)

	now := time.Now()
	prover = NewProver(pk, index, name, graphDir)
	fmt.Printf("%d. Graph gen: %fs\n", index, time.Since(now).Seconds())

	now = time.Now()
	commit := prover.Init()
	fmt.Printf("%d. Graph commit: %fs\n", index, time.Since(now).Seconds())

	root := commit.Commit
	verifier = NewVerifier(pk, index, beta, root)

	os.Exit(m.Run())
}
// DispatchSync sends a body to the destination, and blocks waiting on a response.
func (rpc *AmqpRPCCLient) DispatchSync(method string, body []byte) (response []byte, err error) {
	rpc.stats.Inc(fmt.Sprintf("RPC.Rate.%s", method), 1, 1.0)
	rpc.stats.Inc("RPC.Traffic", int64(len(body)), 1.0)
	rpc.stats.GaugeDelta("RPC.CallsWaiting", 1, 1.0)
	defer rpc.stats.GaugeDelta("RPC.CallsWaiting", -1, 1.0)
	callStarted := time.Now()
	select {
	case jsonResponse := <-rpc.Dispatch(method, body):
		var rpcResponse RPCResponse
		err = json.Unmarshal(jsonResponse, &rpcResponse)
		if err != nil {
			return
		}
		err = unwrapError(rpcResponse.Error)
		if err != nil {
			rpc.stats.Inc(fmt.Sprintf("RPC.Latency.%s.Error", method), 1, 1.0)
			return
		}
		rpc.stats.Inc("RPC.Rate.Success", 1, 1.0)
		rpc.stats.TimingDuration(fmt.Sprintf("RPC.Latency.%s.Success", method), time.Since(callStarted), 1.0)
		response = rpcResponse.ReturnVal
		return
	case <-time.After(rpc.timeout):
		rpc.stats.TimingDuration(fmt.Sprintf("RPC.Latency.%s.Timeout", method), time.Since(callStarted), 1.0)
		rpc.stats.Inc("RPC.Rate.Timeouts", 1, 1.0)
		rpc.log.Warning(fmt.Sprintf(" [c!][%s] AMQP-RPC timeout [%s]", rpc.clientQueue, method))
		err = errors.New("AMQP-RPC timeout")
		return
	}
}
// Gesture is called by the system when the LED matrix receives any kind of
// gesture; in practice it only seems to receive tap gestures ("GestureNone" type).
func (p *DemoPane) Gesture(gesture *gestic.GestureMessage) {
	log.Infof("gesture received - %v, %v", gesture.Touch, gesture.Position)

	// check the second-last touch location, since the most recent one before a tap is usually blank
	lastLocation := p.lastTapLocation
	p.lastTapLocation = gesture.Touch

	if gesture.Tap.Active() && time.Since(p.lastTap) > tapInterval {
		p.lastTap = time.Now()
		log.Infof("Tap! %v", lastLocation)

		// change between images - right or left
		if lastLocation.East {
			p.imageIndex++
			p.imageIndex %= len(stateImageNames)
		} else {
			p.imageIndex--
			if p.imageIndex < 0 {
				p.imageIndex = len(stateImageNames) - 1
			}
		}
		log.Infof("Showing image: %v", stateImageNames[p.imageIndex])
	}

	if gesture.DoubleTap.Active() && time.Since(p.lastDoubleTap) > tapInterval {
		p.lastDoubleTap = time.Now()
		log.Infof("Double Tap!")

		// change between image and text displaying (in Render)
		p.isImageMode = !p.isImageMode
	}
}
func (l *loader) spawnClient(wg *sync.WaitGroup, queue chan []*Task) {
	// this is one out of c clients
	client := &http.Client{}

	// if we're not busy, get a task list from the queue channel
	for tasks := range queue {
		for i, task := range tasks {
			req := task.Request()
			start := time.Now()

			// Send the request
			res, err := client.Do(req)
			if err != nil {
				if err := l.ws.WriteJSON(feedback{Task: i, StatusCode: 0, Duration: time.Since(start)}); err != nil {
					fmt.Println("Couldn't write to the socket")
				}
			} else {
				if err := l.ws.WriteJSON(feedback{Task: i, StatusCode: res.StatusCode, Duration: time.Since(start)}); err != nil {
					fmt.Println("Couldn't write to the socket")
				}
				res.Body.Close()
			}
		}
		// We are done
		wg.Done()
	}
}
func replayMark(active bool) {
	var t string
	if !active {
		t = fmt.Sprintf("PAUSE,%d\n", time.Since(timeStarted).Nanoseconds())
	} else {
		t = fmt.Sprintf("UNPAUSE,%d\n", time.Since(timeStarted).Nanoseconds())
	}

	if uatReplayWriter != nil {
		uatReplayWriter.Write([]byte(t))
	}
	if esReplayWriter != nil {
		esReplayWriter.Write([]byte(t))
	}
	if gpsReplayWriter != nil {
		gpsReplayWriter.Write([]byte(t))
	}
	if ahrsReplayWriter != nil {
		ahrsReplayWriter.Write([]byte(t))
	}
	if dump1090ReplayWriter != nil {
		dump1090ReplayWriter.Write([]byte(t))
	}
}
// runFullThrottle runs the benchmark without a limit on requests per second.
func (c *connectionBenchmark) runFullThrottle() (time.Duration, error) {
	var (
		stop  = time.After(c.duration)
		start = time.Now()
	)
	for {
		select {
		case <-stop:
			return time.Since(start), nil
		default:
		}

		before := time.Now()
		err := c.requester.Request()
		latency := time.Since(before).Nanoseconds()

		if err != nil {
			if err := c.errorHistogram.RecordValue(latency); err != nil {
				return 0, err
			}
			c.errorTotal++
		} else {
			if err := c.successHistogram.RecordValue(latency); err != nil {
				return 0, err
			}
			c.successTotal++
		}
	}
}
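// A minimal sketch of the same "run at full throttle for a fixed duration and
// record per-request latency" loop, with an in-memory slice standing in for
// the success/error histograms. The doWork callback is a hypothetical stand-in
// for requester.Request, not part of the original benchmark API.
package main

import (
	"fmt"
	"time"
)

func runFullThrottle(duration time.Duration, doWork func() error) (elapsed time.Duration, latencies []time.Duration) {
	stop := time.After(duration)
	start := time.Now()
	for {
		select {
		case <-stop:
			return time.Since(start), latencies
		default:
		}
		before := time.Now()
		_ = doWork()
		latencies = append(latencies, time.Since(before))
	}
}

func main() {
	elapsed, lats := runFullThrottle(100*time.Millisecond, func() error {
		time.Sleep(time.Millisecond) // simulated request
		return nil
	})
	fmt.Printf("%d requests in %v\n", len(lats), elapsed)
}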
// process is a local helper that handles a single request for the given site
// name and set of time stamps.
func process(metric, siteName string, tstamps []string, tier, breakdown string, bins []int, ch chan Record) {
	startTime := time.Now()
	// get statistics from popDB for given site and time range
	var popdbRecords []Record
	if BLKINFO {
		popdbRecords = blockStats(siteName, tstamps, tier)
	} else {
		popdbRecords = datasetStats(siteName, tstamps, tier)
	}
	if utils.PROFILE {
		fmt.Println("popDBRecords", time.Since(startTime))
	}
	// sort dataset results from popDB into bins by given metric
	rdict := popdb2Bins(metric, bins, popdbRecords, siteName, tstamps)
	if utils.PROFILE {
		fmt.Println("popdb2Bins", time.Since(startTime))
	}
	// find out size for all bins
	results, bres := bins2size(siteName, rdict, tstamps[0], breakdown)
	if utils.PROFILE {
		fmt.Println("bins2size", time.Since(startTime))
	}
	// create return record and send it back to given channel
	rec := make(Record)
	rec[siteName] = Record{"results": results, "breakdown": bres}
	ch <- rec
}
func (ia *importerAcct) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		ia.serveHTTPPost(w, r)
		return
	}
	ia.mu.Lock()
	defer ia.mu.Unlock()
	body := acctBody{
		Acct:     ia,
		AcctType: fmt.Sprintf("%T", ia.im.impl),
	}
	if run := ia.current; run != nil {
		body.Running = true
		body.StartedAgo = time.Since(ia.lastRunStart)
		run.mu.Lock()
		body.LastStatus = fmt.Sprintf("%+v", run.lastProgress)
		run.mu.Unlock()
	} else if !ia.lastRunDone.IsZero() {
		body.LastAgo = time.Since(ia.lastRunDone)
		if ia.lastRunErr != nil {
			body.LastError = ia.lastRunErr.Error()
		}
	}
	title := fmt.Sprintf("%s account: ", ia.im.name)
	if summary := ia.im.impl.SummarizeAccount(ia.acct); summary != "" {
		title += summary
	} else {
		title += ia.acct.PermanodeRef().String()
	}
	execTemplate(w, r, acctPage{
		Title: title,
		Body:  body,
	})
}
func (l *looper) tick() {
	tickStart := l.clk.Now()
	err := l.tickFunc(l.batchSize)
	l.stats.TimingDuration(fmt.Sprintf("OCSP.%s.TickDuration", l.name), time.Since(tickStart), 1.0)
	l.stats.Inc(fmt.Sprintf("OCSP.%s.Ticks", l.name), 1, 1.0)
	tickEnd := tickStart.Add(time.Since(tickStart))
	expectedTickEnd := tickStart.Add(l.tickDur)
	if tickEnd.After(expectedTickEnd) {
		l.stats.Inc(fmt.Sprintf("OCSP.%s.LongTicks", l.name), 1, 1.0)
	}

	// After we have all the stats stuff out of the way, check whether the tick
	// function failed. If the reason is that the HSM is dead, increase
	// sleepDur using the exponentially increasing duration returned by core.RetryBackoff.
	sleepDur := expectedTickEnd.Sub(tickEnd)
	if err != nil {
		l.stats.Inc(fmt.Sprintf("OCSP.%s.FailedTicks", l.name), 1, 1.0)
		if _, ok := err.(core.ServiceUnavailableError); ok && (l.failureBackoffFactor > 0 && l.failureBackoffMax > 0) {
			l.failures++
			sleepDur = core.RetryBackoff(l.failures, l.tickDur, l.failureBackoffMax, l.failureBackoffFactor)
		}
	} else if l.failures > 0 {
		// If the tick was successful and there were previously failures, reset
		// the counter to 0.
		l.failures = 0
	}

	// Sleep for the remaining tick period or for the backoff time.
	l.clk.Sleep(sleepDur)
}
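// A rough, self-contained sketch of an exponentially increasing, capped
// backoff of the kind referenced above. The real core.RetryBackoff in boulder
// may compute its durations differently (for example by adding jitter); this
// only illustrates base * factor^(failures-1), clamped at max.
package main

import (
	"fmt"
	"math"
	"time"
)

func retryBackoff(failures int, base, max time.Duration, factor float64) time.Duration {
	if failures <= 0 {
		return 0
	}
	backoff := float64(base) * math.Pow(factor, float64(failures-1))
	if backoff > float64(max) {
		return max
	}
	return time.Duration(backoff)
}

func main() {
	for f := 1; f <= 5; f++ {
		fmt.Println(f, retryBackoff(f, time.Second, 30*time.Second, 2)) // 1s, 2s, 4s, 8s, 16s
	}
}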
func TestBenchmark(test *testing.T) {
	t := time.Now()                                       // Get set..
	fmt.Printf("Generating %d×%d image... ", srcW, srcH) // Print dimensions.
	img := GenerateImage(srcW, srcH)                      // Generate image full of noise.
	Δt := time.Since(t)                                   // Take time's delta.
	fmt.Printf("it took %v.\n", Δt)                       // Print it.

	// Set some target sizes.
	size1 := Size{100, 100}
	size2 := Size{900, 900}
	size3 := Size{2000, 2000}

	// Some info and formatting guides.
	fmt.Printf("Resizing to %v, %v and %v with %d rounds!\n\n", size1, size2, size3, rounds)
	fmt.Printf("%17s \t %9s \t %10s \t %10s \t %10s \t %10s \n", "Filter", "Target size", "Min", "Max", "Avg", "Cmp")
	fmt.Println("---------------------------------------------------------------------------------------------------")

	startTime := time.Now()                                 // Get set again...
	results := Benchmark(img, rounds, size1, size2, size3)  // Run the benchmarks.
	totalTime := time.Since(startTime)                      // startTime's delta.

	for name, resultSet := range results { // For each filter...
		for size, r := range resultSet { // For each result...
			// Print 'em.
			cmp := r.Compare(results["NearestNeighbor"][size])
			fmt.Printf("%17s \t %d×%d \t %10v \t %10v \t %10v \t %10.2f \n",
				name, size[0], size[1], r.Min, r.Max, r.Avg, cmp)
		}
	}
	fmt.Printf("\nResizings took %v total.\n", totalTime)
}
func BenchMark() {
	var start = time.Now()
	var roads, total, err = readGraph() // I/O is just slow!!!
	if err != nil {
		fmt.Println("Illegal Input")
		return
	}
	var roads_u = uAdjList(roads)
	var matrix_u = transform(roads_u)
	var matrix = sAdjMatrix(matrix_u)
	var size = len(roads)
	fmt.Printf("Prepare Graph [%d vertexes & %d edges] in %v\n", size, total, time.Since(start))

	start = time.Now()
	for i := 0; i < size; i++ {
		SPFA(roads, i)
	}
	fmt.Println("SPFA: ", time.Since(start))

	start = time.Now()
	for i := 0; i < size; i++ {
		Dijkstra(roads_u, i)
	}
	fmt.Println("Dijkstra: ", time.Since(start))

	start = time.Now()
	for i := 0; i < size; i++ {
		PlainDijkstra(matrix_u, i)
	}
	fmt.Println("Plain Dijkstra:", time.Since(start))

	start = time.Now()
	FloydWarshall(matrix)
	fmt.Println("Floyd-Warshall:", time.Since(start))
}
func removeInvalidCerts(csvFilename string, dbMap *gorp.DbMap, stats metrics.Statter, statsRate float32) {
	file, err := os.Open(csvFilename)
	cmd.FailOnError(err, "Could not open the file for reading")

	csvReader := csv.NewReader(file)
	for {
		record, err := csvReader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("Error:", err)
			return
		}

		identifierData := core.IdentifierData{
			CertSHA1: record[0],
		}
		externalCert := core.ExternalCert{
			SHA1: record[0],
		}

		deleteStart := time.Now()
		_, err = dbMap.Delete(&identifierData)
		stats.TimingDuration("ExistingCert.Domains.DeleteLatency", time.Since(deleteStart), statsRate)
		_, err = dbMap.Delete(&externalCert)
		stats.TimingDuration("ExistingCert.Certs.DeleteLatency", time.Since(deleteStart), statsRate)
		stats.Inc("ExistingCert.Removed", 1, statsRate)
	}
}
func main() {
	pl := new(Pool)
	pl.available = 200
	pl.emptyCond = sync.NewCond(&pl.lock)
	maxrequest := 5000000
	status := make(chan int, 2)
	start := time.Now()
	for i := 1; i <= maxrequest; i++ {
		conn, _ := pl.pull()
		go func(i int, conn *GenConn, pl *Pool) {
			defer pl.push(conn)
			if ret := conn.Call(i); ret == maxrequest {
				status <- 1
			}
		}(i, conn, pl)
	}
	select {
	case <-status:
		fmt.Printf("Executed %v in %v\n", maxrequest, time.Since(start))
	case <-time.After(60 * time.Second):
		fmt.Println("Timeout", time.Since(start))
	}
}
func TestFactorizer(t *testing.T) {
	max := uint64(200000000)
	starttime := time.Now()
	set := NewPrimeSet(max)
	log.Println("prime set initialized after ", time.Since(starttime))
	f := set.Factorizer(max)
	log.Println("factorizer initialized after ", time.Since(starttime))

	it := set.Iterator(0)
	p, ok := it.Next()
	for ok && p <= max {
		if pf, ok := f.LargestFactorOf(p); !ok || pf != p {
			t.Errorf("prime %d incorrectly factorized with factor %d", p, pf)
		}
		p, ok = it.Next()
	}

	if _, fok := f.LargestFactorOf(0); fok {
		t.Error("0 should not have a prime factor")
	}

	testLargestFactor(t, f, 2, 2)
	testLargestFactor(t, f, 1024, 2)
	testLargestFactor(t, f, 3, 3)
	testLargestFactor(t, f, 5, 5)
	testLargestFactor(t, f, 210, 7)
	testLargestFactor(t, f, 37055, 7411)
	testLargestFactor(t, f, 23173, 23173)
	testLargestFactor(t, f, 1664099, 1291)
	testLargestFactor(t, f, 3750000, 5)
}
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	if _, err := podClient.Create(pod); err != nil {
		Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err := waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
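// A generic, self-contained version of the poll-with-deadline loop above: it
// retries a hypothetical check func every interval until it succeeds or the
// timeout (measured with time.Since) elapses. Names here are illustrative, not
// part of the Kubernetes e2e framework.
package main

import (
	"fmt"
	"time"
)

func pollUntil(check func() bool, interval, timeout time.Duration) error {
	start := time.Now()
	for {
		if check() {
			return nil
		}
		if time.Since(start) >= timeout {
			return fmt.Errorf("gave up after %v", time.Since(start))
		}
		time.Sleep(interval)
	}
}

func main() {
	deadline := time.Now().Add(30 * time.Millisecond)
	err := pollUntil(func() bool { return time.Now().After(deadline) },
		10*time.Millisecond, time.Second)
	fmt.Println(err) // <nil>
}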
func BenchMark() {
	var start = time.Now()
	var edges, size, err = readGraph() // I/O is just slow!!!
	if err != nil {
		fmt.Println("Illegal Input")
		return
	}
	var roads = transform(edges, size)
	fmt.Printf("Prepare Graph [%d vertexes & %d edges] in %v\n", size, len(edges), time.Since(start))

	start = time.Now()
	ret1, err := Kruskal(edges, size)
	var tm1 = time.Since(start)
	if err != nil {
		fmt.Println(err)
	}

	start = time.Now()
	ret2, err := Prim(roads)
	var tm2 = time.Since(start)
	if err != nil {
		fmt.Println(err)
	}

	if ret1 != ret2 {
		fmt.Printf("Kruskal[%d] != Prim[%d]\n", ret1, ret2)
	} else {
		fmt.Println("Kruskal:", tm1)
		fmt.Println("Prim: ", tm2)
	}
}
func TestChunkerWithRandomPolynomial(t *testing.T) {
	// setup data source
	buf := getRandom(23, 32*1024*1024)

	// generate a new random polynomial
	start := time.Now()
	p, err := RandomPolynomial()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("generating random polynomial took %v", time.Since(start))

	start = time.Now()
	ch := New(bytes.NewReader(buf), p)
	t.Logf("creating chunker took %v", time.Since(start))

	// make sure that first chunk is different
	c, err := ch.Next(nil)
	if err != nil {
		t.Fatal(err.Error())
	}

	if c.Cut == chunks1[0].CutFP {
		t.Fatal("Cut point is the same")
	}
	if c.Length == chunks1[0].Length {
		t.Fatal("Length is the same")
	}
	if bytes.Equal(hashData(c.Data), chunks1[0].Digest) {
		t.Fatal("Digest is the same")
	}
}
func main() {
	var sum_sequential, sum_concurrent int
	var max int
	flag.IntVar(&max, "max", 10, "Number of integers to sum")
	flag.Parse()

	start_sequential := time.Now()
	c_seq := make(chan int)
	go sum_range(0, max, c_seq)
	sum_sequential = <-c_seq
	elapsed_sequential := time.Since(start_sequential)
	fmt.Printf("NumProc: %d, Time: %7.5f, Sum: %d\n", 1, float64(elapsed_sequential)/1000000000, sum_sequential)

	start_concurrent := time.Now()
	sum_concurrent = 0
	c1 := make(chan int)
	c2 := make(chan int)
	go sum_range(0, max/2, c1)
	go sum_range(max/2, max, c2)
	sum_concurrent = <-c1
	sum_concurrent += <-c2
	elapsed_concurrent := time.Since(start_concurrent)
	fmt.Printf("NumProc: %d, Time: %7.5f, Sum: %d\n", 2, float64(elapsed_concurrent)/1000000000, sum_concurrent)
}