Example No. 1
func time_exec(num_nodes int, num_msgs int) {
	before := time.Nanoseconds()
	run(num_nodes, num_msgs)
	after := time.Nanoseconds()
	elapsed := after - before
	fmt.Println("Took:", elapsed)
}
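All of these snippets predate Go 1, where time.Nanoseconds() and os.Error were removed. As a point of reference, a minimal sketch of the same timing helper under the current time API (time.Now/time.Since), assuming the run call is abstracted into a callback:

package main

import (
	"fmt"
	"time"
)

// timeExec measures how long fn takes, using time.Now/time.Since
// in place of the pre-Go 1 time.Nanoseconds() calls above.
func timeExec(fn func()) {
	before := time.Now()
	fn()
	fmt.Println("Took:", time.Since(before))
}

func main() {
	timeExec(func() { time.Sleep(10 * time.Millisecond) })
}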
Example No. 2
// LockOrTimeout proceeds as Lock, except that it returns an os.EAGAIN
// error if a lock cannot be obtained within ns nanoseconds.
func (fdl *FDLimiter) LockOrTimeout(ns int64) os.Error {
	waitsofar := int64(0)
	for {
		// Try to get an fd
		fdl.lk.Lock()
		if fdl.count < fdl.limit {
			fdl.count++
			fdl.lk.Unlock()
			return nil
		}
		fdl.lk.Unlock()

		// Or, wait for an fd or timeout
		if waitsofar >= ns {
			return os.EAGAIN
		}
		t0 := time.Nanoseconds()
		alrm := alarmOnce(ns - waitsofar)
		select {
		case <-alrm:
		case <-fdl.ch:
		}
		waitsofar += time.Nanoseconds() - t0
	}
	panic("FDLimiter, unreachable")
}
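The mutex-plus-retry loop above is how an acquire-or-time-out semaphore had to be assembled before Go 1. A minimal sketch of the same idea using a buffered channel as the counter and time.After for the deadline; the type and method names echo the example, but the implementation is an assumption, not the original FDLimiter:

package main

import (
	"errors"
	"time"
)

// FDLimiter bounds concurrent fd use; the buffered channel is the counter.
type FDLimiter struct {
	sem chan struct{}
}

func NewFDLimiter(limit int) *FDLimiter {
	return &FDLimiter{sem: make(chan struct{}, limit)}
}

// LockOrTimeout takes a slot, or gives up once d has elapsed.
func (fdl *FDLimiter) LockOrTimeout(d time.Duration) error {
	select {
	case fdl.sem <- struct{}{}:
		return nil
	case <-time.After(d):
		return errors.New("fd limit: timeout")
	}
}

// Unlock releases a slot taken by LockOrTimeout.
func (fdl *FDLimiter) Unlock() { <-fdl.sem }

func main() {
	l := NewFDLimiter(1)
	if err := l.LockOrTimeout(time.Millisecond); err == nil {
		defer l.Unlock()
	}
}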
Example No. 3
// Read back the work summary of each worker and calculate the
// average response time and total time for the whole request.
func (self *Master) summarize() {
	log.Print("Tasks distributed. Waiting for summaries...")
	self.summary.Start = time.Nanoseconds()
	workers := self.runningTasks
	var avgs float64
	for tSummary := range self.channel {
		// remove the worker from the master
		self.runningTasks -= 1

		avgs += float64(tSummary.Avg)
		self.summary.TotalSuc += tSummary.SucCount
		self.summary.TotalErr += tSummary.ErrCount

		self.summary.Max = Max(self.summary.Max, tSummary.Max)

		self.summary.Min = Min(self.summary.Min, tSummary.Min)
		// if no workers are left
		if self.runningTasks == 0 {
			if self.summary.Min == -1 {
				self.summary.Min = 0
			}
			self.summary.End = time.Nanoseconds()
			self.summary.Elapsed = (self.summary.End - self.summary.Start)
			self.summary.Avg = float64(avgs / float64(workers))
			self.summary.RequestsPerSecond = int64(self.summary.TotalSuc*1000) / (self.summary.Elapsed / 1000000)
			break
		}

	}

	self.ctrlChan <- true
}
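The loop above decrements runningTasks by hand to detect when the last worker has reported. A common alternative is to close the results channel once all workers are done and let range terminate naturally; a minimal sketch with illustrative types (not the Master type from the example):

package main

import (
	"fmt"
	"sync"
	"time"
)

type workSummary struct{ Avg time.Duration }

func main() {
	const workers = 4
	results := make(chan workSummary)

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			results <- workSummary{Avg: 10 * time.Millisecond}
		}()
	}
	// Close the channel once every worker has sent its summary,
	// so the range below ends without a manual countdown.
	go func() {
		wg.Wait()
		close(results)
	}()

	var total time.Duration
	for s := range results {
		total += s.Avg
	}
	fmt.Println("average:", total/workers)
}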
Example No. 4
func main() {
	//runtime.GOMAXPROCS(1)

	const tableRez = 1000

	mat := Eye(2)
	mat.Scale(.58)
	mat.Set(1, 0, .2)

	const transCount = 3
	trans := make([]*affine.Affine, transCount)
	trans[0] = affine.FromOrigin2(mat, 0, 0)
	trans[1] = affine.FromOrigin2(mat, .5, 1)
	trans[2] = affine.FromOrigin2(mat, 1, 0)

	x1, y1, x2, y2 := fit(trans)
	//print (x1,x2,y1,y2,"\n")

	shift := Zeros(2, 1)
	shift.Set(0, 0, -x1)
	shift.Set(1, 0, -y1)
	scale := (tableRez - 4) / math.Fmax(x2-x1, y2-y1)
	shift.Scale(scale)
	shift.AddDense(Scaled(Ones(2, 1), 2))

	//print (scale," "+shift.String(),"\n")
	for i, t := range trans {
		origin := Scaled(t.GetOrigin(), scale)
		origin.AddDense(shift)
		trans[i] = affine.FromOrigin(t.GetMat(), origin)
	}

	x1, y1, x2, y2 = fit(trans)
	ix1 := int(x1)
	ix2 := int(x2)
	iy1 := int(y1)
	iy2 := int(y2)
	//print (int(x1)," ",int(x2)," ",int(y1)," ",int(y2),"\n")

	rezx := ix2 + 2
	rezy := iy2 + 2

	ft := floatTable.NewFloatTable(rezx, rezy, channelCount)
	ft2 := floatTable.NewFloatTable(rezx, rezy, channelCount)

	ft.Fill(fill)

	t := time.Nanoseconds()
	Render(ix1, ix2, iy1, iy2, ft, ft2, trans)

	t = time.Nanoseconds() - t
	print("Time", "\n")
	print(t/1000000, "\n")

	println("Saving image")
	f, err := os.Open("testFile.png", os.O_WRONLY|os.O_CREAT, 0666)
	if err != nil {
		println(err.String())
		return
	}
	MakeImage(f, ft, MakeColorizer(ft))

}
Example No. 5
func main() {
	flag.Parse()

	t0 := time.Nanoseconds()

	maxDepth := *n
	if minDepth+2 > *n {
		maxDepth = minDepth + 2
	}
	stretchDepth := maxDepth + 1

	check := bottomUpTree(0, stretchDepth).itemCheck()
	fmt.Printf("stretch tree of depth %d\t check: %d\n", stretchDepth, check)

	longLivedTree := bottomUpTree(0, maxDepth)

	for depth := minDepth; depth <= maxDepth; depth += 2 {
		iterations := 1 << uint(maxDepth-depth+minDepth)
		check = 0

		for i := 1; i <= iterations; i++ {
			check += bottomUpTree(i, depth).itemCheck()
			check += bottomUpTree(-i, depth).itemCheck()
		}
		fmt.Printf("%d\t trees of depth %d\t check: %d\n", iterations*2, depth, check)
	}
	fmt.Printf("long lived tree of depth %d\t check: %d\n", maxDepth, longLivedTree.itemCheck())

	t1 := time.Nanoseconds()

	// Standard gotest benchmark output, collected by build dashboard.
	gcstats("BenchmarkTree", *n, t1-t0)
}
Example No. 6
func Log(level int, message string, v ...interface{}) {
	if level > MaxLevel {
		return
	}

	var curtime int64
	if Differential {
		if StartTime == -1 {
			StartTime = time.Nanoseconds()
		}
		curtime = time.Nanoseconds() - StartTime
	} else {
		curtime = time.Nanoseconds()
	}

	// Milliseconds
	message = fmt.Sprintf("%dms - %s", curtime/1000000, message)

	switch level {
	case L_Fatal:
		s := fmt.Sprintf(message, v...)
		fatal.Output(2, s)
		panic(s)
	case L_Error:
		error.Output(2, fmt.Sprintf(message, v...))
	case L_Warning:
		warning.Output(2, fmt.Sprintf(message, v...))
	case L_Info:
		info.Output(2, fmt.Sprintf(message, v...))
	case L_Debug:
		debug.Output(2, fmt.Sprintf(message, v...))
	}
}
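The curtime/1000000 arithmetic converts raw nanoseconds to milliseconds by hand; under Go 1 the differential-timestamp branch collapses to time.Since. A minimal sketch of just that part (the leveled loggers are omitted):

package main

import (
	"fmt"
	"time"
)

var startTime = time.Now()

// stamp prefixes a message with milliseconds elapsed since startup,
// the Go 1 form of the Differential branch above.
func stamp(message string) string {
	return fmt.Sprintf("%dms - %s", time.Since(startTime).Milliseconds(), message)
}

func main() {
	fmt.Println(stamp("ready"))
}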
Example No. 7
// Use a single redis.AsyncClient with specified number
// of workers to bench concurrent load on the async client
func benchTask(taskspec taskSpec, iterations int, workers int, printReport bool) (delta int64, err os.Error) {
	signal := make(chan int, workers) // Buffering optional but sensible.
	spec := redis.DefaultSpec().Db(13).Password("go-redis")
	client, e := redis.NewAsynchClientWithSpec(spec)
	if e != nil {
		log.Println("Error creating client for worker: ", e)
		return -1, e
	}
	//    defer client.Quit()        // will be deprecated soon
	defer client.RedisClient().Quit()

	t0 := time.Nanoseconds()
	for i := 0; i < workers; i++ {
		id := fmt.Sprintf("%d", i)
		go taskspec.task(id, signal, client, iterations)
	}
	for i := 0; i < workers; i++ {
		<-signal
	}
	delta = time.Nanoseconds() - t0
	//	for i := 0; i < workers; i++ {
	//		clients[i].Quit()
	//	}
	//
	if printReport {
		report("concurrent "+taskspec.name, delta, iterations*workers)
	}

	return
}
Example No. 8
func benchTask(taskspec taskSpec, iterations int, workers int, printReport bool) (delta int64, err os.Error) {
	signal := make(chan int, workers) // Buffering optional but sensible.
	clients, e := makeConcurrentClients(workers)
	if e != nil {
		return 0, e
	}
	t0 := time.Nanoseconds()
	for i := 0; i < workers; i++ {
		id := fmt.Sprintf("%d", i)
		go taskspec.task(id, signal, clients[i], iterations)
	}
	for i := 0; i < workers; i++ {
		<-signal
	}
	delta = time.Nanoseconds() - t0
	for i := 0; i < workers; i++ {
		clients[i].Quit()
	}

	if printReport {
		report("concurrent "+taskspec.name, delta, iterations*workers)
	}

	return
}
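Examples No. 7 and No. 8 share a fan-out/fan-in skeleton: start N workers, wait for N completion signals, and time the whole batch. A minimal sketch of that skeleton with sync.WaitGroup in place of the counting channel; the task body is a stand-in:

package main

import (
	"fmt"
	"sync"
	"time"
)

// benchWorkers runs the fan-out/fan-in skeleton shared by the two
// benchTask variants and returns the elapsed wall-clock time.
func benchWorkers(workers, iterations int) time.Duration {
	var wg sync.WaitGroup
	t0 := time.Now()
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				// stand-in for taskspec.task(id, ...)
			}
		}(i)
	}
	wg.Wait() // fan-in: every worker has finished
	return time.Since(t0)
}

func main() {
	fmt.Println(benchWorkers(4, 1000))
}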
Example No. 9
// buildExternal downloads and builds external packages, and
// reports their build status to the dashboard.
// It rebuilds all packages after pkgBuildInterval nanoseconds have
// elapsed or when a new release tag is found.
func (b *Builder) buildExternal() {
	var prevTag string
	var nextBuild int64
	for {
		time.Sleep(waitInterval)
		err := run(nil, goroot, "hg", "pull", "-u")
		if err != nil {
			log.Println("hg pull failed:", err)
			continue
		}
		hash, tag, err := firstTag(releaseRe)
		if err != nil {
			log.Println(err)
			continue
		}
		if *verbose {
			log.Println("latest release:", tag)
		}
		// don't rebuild if there's no new release
		// and it's been less than pkgBuildInterval
		// nanoseconds since the last build.
		if tag == prevTag && time.Nanoseconds() < nextBuild {
			continue
		}
		// build will also build the packages
		if err := b.buildHash(hash); err != nil {
			log.Println(err)
			continue
		}
		prevTag = tag
		nextBuild = time.Nanoseconds() + pkgBuildInterval
	}
}
Example No. 10
//DoTurn is where you should do your bot's actual work.
func (m *Map) DoTurn() {

	strategies := []struct {
		fn   func()
		name string
	}{
		{func() { m.closeCombat() }, "closeCombat"},
		{func() { m.defend() }, "defend"},
		{func() { m.reinforce() }, "reinforce"},
		{func() { m.forage() }, "forage"},
		{func() { m.attackEnemyHill() }, "enemyHill"},
		{func() { m.scout() }, "scout"},
	}
	times := make([]string, 0, len(strategies))

	for _, s := range strategies {
		if m.deadlineExpired() {
			break
		}
		start := time.Nanoseconds()
		s.fn()
		delta_ms := float64(time.Nanoseconds()-start) / 1e6
		if delta_ms > 100 {
			times = append(times, fmt.Sprintf("%s %.2f", s.name, delta_ms))
		}
	}
	m.moveAll()

	if len(times) > 0 {
		log.Printf("timings: %s", strings.Join(times, ", "))
	}
}
Example No. 11
func receiver(pipe chan int, done_pipe chan int) {

	prevTime := time.Nanoseconds()
	var newTime int64

	msg_count := 0

	stats := NewMovingAverage(10)

	j := 0

	for running := true; running; {
		val := <-pipe
		msg_count++
		if val == -1 {
			running = false
		}

		if msg_count == 100000 {
			newTime = time.Nanoseconds()

			stats.SetNextT(newTime - prevTime)
			//fmt.Printf("%d %d\n", j, stats.CurrentAverage())
			fmt.Printf("%d %d\n", j, newTime-prevTime)

			j++
			msg_count = 0
			prevTime = newTime
		}
	}
	done_pipe <- 1
}
Example No. 12
// dispatch input from the out channel as a \r\n-terminated line to the peer,
// flood controlled with Hybrid's algorithm unless conn.Flood is set
func (conn *Conn) send() {
	lastsent := time.Nanoseconds()
	var badness, linetime, second int64 = 0, 0, 1000000000
	for line := range conn.out {
		// Hybrid's algorithm allows for 2 seconds per line and an additional
		// 1/120 of a second per character on that line.
		linetime = 2*second + int64(len(line))*second/120
		if !conn.Flood && conn.connected {
			// No point in tallying up flood protection stuff until connected
			if badness += linetime + lastsent - time.Nanoseconds(); badness < 0 {
				// negative badness times are badness...
				badness = int64(0)
			}
		}
		lastsent = time.Nanoseconds()

		// If we've sent more than 10 second's worth of lines according to the
		// calculation above, then we're at risk of "Excess Flood".
		if badness > 10*second && !conn.Flood {
			// so sleep for the current line's time value before sending it
			time.Sleep(linetime)
		}
		if _, err := conn.io.WriteString(line + "\r\n"); err != nil {
			conn.error("irc.send(): %s", err.String())
			conn.shutdown()
			break
		}
		conn.io.Flush()
		if conn.Debug {
			fmt.Println(conn.Timestamp().Format(conn.TSFormat) + " -> " + line)
		}
	}
}
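Hybrid's algorithm here is a leaky bucket: each line adds its cost to badness, elapsed time drains the bucket, and the sender sleeps once the bucket holds more than ten seconds. A stripped-down sketch of the same bookkeeping with time.Duration arithmetic (the helper and its signature are assumptions for illustration):

package main

import (
	"fmt"
	"time"
)

// floodDelay mirrors the badness bookkeeping above: each line costs
// 2s plus 1/120s per byte, time elapsed since the last send drains the
// bucket, and a full bucket (>10s) means the caller should sleep for
// the line's cost before sending.
func floodDelay(badness *time.Duration, lastsent *time.Time, line string) time.Duration {
	linetime := 2*time.Second + time.Duration(len(line))*time.Second/120
	*badness += linetime - time.Since(*lastsent)
	if *badness < 0 {
		*badness = 0
	}
	*lastsent = time.Now()
	if *badness > 10*time.Second {
		return linetime
	}
	return 0
}

func main() {
	badness, last := time.Duration(0), time.Now()
	fmt.Println(floodDelay(&badness, &last, "PRIVMSG #go :hello"))
}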
Example No. 13
func (u *Upstream) FilterRequest(request *falcore.Request) (res *http.Response) {
	var err os.Error
	req := request.HttpRequest

	// Force the upstream to use http
	if u.ForceHttp || req.URL.Scheme == "" {
		req.URL.Scheme = "http"
		req.URL.Host = req.Host
	}
	before := time.Nanoseconds()
	req.Header.Set("Connection", "Keep-Alive")
	res, err = u.transport.RoundTrip(req)
	diff := falcore.TimeDiff(before, time.Nanoseconds())
	if err != nil {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			falcore.Error("%s Upstream Timeout error: %v", request.ID, err)
			res = falcore.SimpleResponse(req, 504, nil, "Gateway Timeout\n")
			request.CurrentStage.Status = 2 // Fail
		} else {
			falcore.Error("%s Upstream error: %v", request.ID, err)
			res = falcore.SimpleResponse(req, 502, nil, "Bad Gateway\n")
			request.CurrentStage.Status = 2 // Fail
		}
	}
	falcore.Debug("%s [%s] [%s%s] s=%d Time=%.4f", request.ID, req.Method, u.host, req.RawURL, res.StatusCode, diff)
	return
}
Example No. 14
func testTimeout(t *testing.T, network, addr string, readFrom bool) {
	fd, err := Dial(network, addr)
	if err != nil {
		t.Errorf("dial %s %s failed: %v", network, addr, err)
		return
	}
	defer fd.Close()
	t0 := time.Nanoseconds()
	fd.SetReadTimeout(1e8) // 100ms
	var b [100]byte
	var n int
	var err1 os.Error
	if readFrom {
		n, _, err1 = fd.(PacketConn).ReadFrom(b[0:])
	} else {
		n, err1 = fd.Read(b[0:])
	}
	t1 := time.Nanoseconds()
	what := "Read"
	if readFrom {
		what = "ReadFrom"
	}
	if n != 0 || err1 == nil || !err1.(Error).Timeout() {
		t.Errorf("fd.%s on %s %s did not return 0, timeout: %v, %v", what, network, addr, n, err1)
	}
	if t1-t0 < 0.5e8 || t1-t0 > 1.5e8 {
		t.Errorf("fd.%s on %s %s took %f seconds, expected 0.1", what, network, addr, float64(t1-t0)/1e9)
	}
}
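SetReadTimeout and its raw-nanosecond argument were replaced in Go 1 by SetReadDeadline, which takes an absolute time.Time. A minimal sketch of the same 100ms read-timeout check against a generic connection (the host and port are placeholders):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	// Go 1 form of fd.SetReadTimeout(1e8): an absolute deadline.
	conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))

	var b [100]byte
	if _, err := conn.Read(b[:]); err != nil {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			fmt.Println("read timed out, as expected")
		}
	}
}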
Example No. 15
// See the comment for Exporter.Drain.
func (cs *clientSet) drain(timeout int64) error {
	startTime := time.Nanoseconds()
	for {
		pending := false
		cs.mu.Lock()
		// Any messages waiting for a client?
		for _, chDir := range cs.names {
			if chDir.ch.Len() > 0 {
				pending = true
			}
		}
		// Any unacknowledged messages?
		for client := range cs.clients {
			n := client.unackedCount()
			if n > 0 { // Check for > rather than != just to be safe.
				pending = true
				break
			}
		}
		cs.mu.Unlock()
		if !pending {
			break
		}
		if timeout > 0 && time.Nanoseconds()-startTime >= timeout {
			return errors.New("timeout")
		}
		time.Sleep(100 * 1e6) // 100 milliseconds
	}
	return nil
}
Example No. 16
// See the comment for Exporter.Sync.
func (cs *clientSet) sync(timeout int64) error {
	startTime := time.Nanoseconds()
	// seq remembers the clients and their seqNum at point of entry.
	seq := make(map[unackedCounter]int64)
	for client := range cs.clients {
		seq[client] = client.seq()
	}
	for {
		pending := false
		cs.mu.Lock()
		// Any unacknowledged messages?  Look only at clients that existed
		// when we started and are still in this client set.
		for client := range seq {
			if _, ok := cs.clients[client]; ok {
				if client.ack() < seq[client] {
					pending = true
					break
				}
			}
		}
		cs.mu.Unlock()
		if !pending {
			break
		}
		if timeout > 0 && time.Nanoseconds()-startTime >= timeout {
			return errors.New("timeout")
		}
		time.Sleep(100 * 1e6) // 100 milliseconds
	}
	return nil
}
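Both drain and sync poll every 100ms until the pending work clears or the timeout elapses. With a time.Duration timeout the deadline arithmetic is usually phrased against an absolute time.Time; a minimal sketch of the polling skeleton, with pending() standing in for the checks above:

package main

import (
	"errors"
	"time"
)

// pollUntil re-checks pending every 100ms, giving up once the timeout
// has elapsed; a timeout of 0 means wait forever, as in drain and sync.
func pollUntil(pending func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for pending() {
		if timeout > 0 && time.Now().After(deadline) {
			return errors.New("timeout")
		}
		time.Sleep(100 * time.Millisecond)
	}
	return nil
}

func main() {
	n := 3
	_ = pollUntil(func() bool { n--; return n > 0 }, time.Second)
}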
Example No. 17
//var lst,_ = ioutil.ReadDir(".")
func BenchmarkAsyncRead(b *testing.B) {
	if len(lst) < 1 {
		lstFileInfo, _ := ioutil.ReadDir(*dirtouse)
		lst = make([]string, len(lstFileInfo))
		for cnt, i := range lstFileInfo {
			lst[cnt] = *dirtouse + "/" + i.Name
		}
	}

	fmt.Println("Count of files in " + *dirtouse + " " + strconv.Itoa(len(lst)))
	if len(lst) < 1 {
		fmt.Println("Provide another directory in -dirtoread. Can't read from " + *dirtouse)
		os.Exit(2)
	}

	start := time.Nanoseconds()
	c := make(chan int)
	for _, i := range lst {
		go myreadfile(i, c)
	}

	for i := 0; i < len(lst); i++ {
		<-c
	}
	fmt.Print((time.Nanoseconds() - start) / 1e6)
	fmt.Println("ms")
}
Example No. 18
func TestPoolSize(t *testing.T) {
	c1 := New("", 0, "")
	c2 := New("", 0, "")
	expected := MaxClientConn*2 + connCount

	if r := SendStr(c1, "SET", "foo", "foo"); r.Err != nil {
		t.Fatalf("'%s': %s", "SET", r.Err)
	}

	SendStr(c2, "SET", "bar", "bar")

	start := time.Nanoseconds()

	for i := 0; i < 1000; i++ {
		r1 := SendStr(c1, "GET", "foo")
		r2 := SendStr(c2, "GET", "bar")

		if r1.Elem.String() != "foo" && r2.Elem.String() != "bar" {
			t.Error(r1, r2)
			t.FailNow()
		}
	}

	stop := time.Nanoseconds() - start
	t.Logf("time: %.3f\n", float32(stop/1.0e+6)/1000.0)

	if expected != connCount {
		t.Errorf("connCount: expected %d got %d ", expected, connCount)
	}
}
Example No. 19
func indexer() {
	for {
		if !indexUpToDate() {
			// index possibly out of date - make a new one
			if *verbose {
				log.Printf("updating index...")
			}
			start := time.Nanoseconds()
			index := NewIndex(fsDirnames(), *maxResults > 0, *indexThrottle)
			stop := time.Nanoseconds()
			searchIndex.set(index)
			if *verbose {
				secs := float64((stop-start)/1e6) / 1e3
				stats := index.Stats()
				log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)",
					secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots)
			}
			log.Printf("before GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
			runtime.GC()
			log.Printf("after  GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
		}
		var delay int64 = 60 * 1e9 // by default, try every 60s
		if *testDir != "" {
			// in test mode, try once a second for fast startup
			delay = 1 * 1e9
		}
		time.Sleep(delay)
	}
}
Example No. 20
func main() {
	startTime := time.Nanoseconds()
	flag.Parse()
	if *waldoDir == "" || *targetDir == "" {
		fmt.Println("You need to specify waldo and target directories!")
		fmt.Println("See", os.Args[0], "--help for more information.")
		return
	}

	runtime.GOMAXPROCS(1)

	// Read Waldo Directory
	waldoImages, err := ReadDirectory(*waldoDir)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Read Target Directory
	targetImages, err := ReadDirectory(*targetDir)
	if err != nil {
		fmt.Println(err)
		return
	}

	for i := 0; i < len(targetImages); i++ {
		targetImages[i].FindImages(waldoImages)
	}

	fmt.Printf("Completed in %f seconds!\n", float64(time.Nanoseconds()-startTime)/1000000000.0)
}
Example No. 21
func (s *S) TestReloading(c *C) {
	rlName := "reloading.neste"
	rlPath := path.Join(baseDir, rlName)
	data := "foo"
	st := []byte("starting template: {@}\n")
	mt := []byte("modified template: {@}\n")
	sExpected := "starting template: foo\n"
	mExpected := "modified template: foo\n"

	ioutil.WriteFile(rlPath, st, 0644)
	tm := New(baseDir, nil)
	c.Assert(tm.reloading, Equals, false)
	t := tm.MustAddFile(rlName)

	output, err := t.Render(data)
	c.Assert(err, IsNil)
	c.Assert(output, Equals, sExpected)

	// Write changes
	ioutil.WriteFile(rlPath, mt, 0644)
	tm.SetReloading(true)

	// Attempt to force mtime to change.
	err = os.Chtimes(rlPath, time.Nanoseconds(), time.Nanoseconds())
	c.Assert(err, IsNil)

	output, err = t.Render(data)
	c.Assert(err, IsNil)
	c.Assert(output, Equals, mExpected)
}
Example No. 22
func main() {
	runtime.GOMAXPROCS(2) //Dual core
	flag.Parse() // flag.Args() is empty unless the flags are parsed first
	args := flag.Args()

	if len(args) < 4 {
		fmt.Println("Usage: CallGenerator <numberOfAgents> <numberOfCalls> <maxCallDuration> <maxCallInterval>")
		return
	}
	num_agents, _ := strconv.Atoi(args[0])
	num_calls, _ := strconv.Atoi(args[1])
	max_dur, _ := strconv.Atoi(args[2])
	max_int, _ := strconv.Atoi(args[3])

	queue := make(chan *Call, 10000)
	clock_in := make(chan bool)
	clock_out := make(chan bool)

	call_center := &CallCenter{num_agents, queue, clock_in, clock_out}

	begin := time.Nanoseconds()

	call_center.Open()

	for i := 0; i < num_calls; i++ {
		time.Sleep(int64(max_int))
		queue <- &Call{i, rand.Int() * max_dur, time.Nanoseconds()}
	}

	call_center.Close()

	end := time.Nanoseconds()

	fmt.Printf("Simulation elapsed time: %d seconds\n", (end-begin)/1000000000)
}
Example No. 23
func ConcurrencySpec(c nanospec.Context) {
	r := NewRunner()
	r.AddSpec(VerySlowDummySpec)

	start := time.Nanoseconds()
	r.Run()
	end := time.Nanoseconds()
	totalTime := end - start

	// If the spec is executed single-threadedly, then it would take
	// at least 4*DELAY to execute. If executed multi-threadedly, it
	// would take at least 2*DELAY to execute, because the first spec
	// needs to be executed fully before the other specs are found, but
	// after that the other specs can be executed in parallel.
	expectedMaxTime := int64(math.Floor(2.9 * DELAY))

	if totalTime > expectedMaxTime {
		c.Errorf("Expected the run to take less than %v ms but it took %v ms",
			expectedMaxTime/MILLISECOND, totalTime/MILLISECOND)
	}

	runCounts := countSpecNames(r.executed)
	c.Expect(runCounts["Child A"]).Equals(1)
	c.Expect(runCounts["Child B"]).Equals(1)
	c.Expect(runCounts["Child C"]).Equals(1)
	c.Expect(runCounts["Child D"]).Equals(1)
}
Example No. 24
func main() {
	runtime.GOMAXPROCS(4)
	go func() {}()
	go func() {}()
	go func() {}()
	st := &runtime.MemStats
	packages = append(packages, packages...)
	packages = append(packages, packages...)
	n := flag.Int("n", 4, "iterations")
	p := flag.Int("p", len(packages), "# of packages to keep in memory")
	flag.BoolVar(&st.DebugGC, "d", st.DebugGC, "print GC debugging info (pause times)")
	flag.Parse()

	var lastParsed []map[string]*ast.Package
	var t0 int64
	pkgroot := runtime.GOROOT() + "/src/pkg/"
	for pass := 0; pass < 2; pass++ {
		// Once the heap is grown to full size, reset counters.
		// This hides the start-up pauses, which are much smaller
		// than the normal pauses and would otherwise make
		// the average look much better than it actually is.
		st.NumGC = 0
		st.PauseTotalNs = 0
		t0 = time.Nanoseconds()

		for i := 0; i < *n; i++ {
			parsed := make([]map[string]*ast.Package, *p)
			for j := range parsed {
				parsed[j] = parseDir(pkgroot + packages[j%len(packages)])
			}
			if i+1 == *n && *serve != "" {
				lastParsed = parsed
			}
		}
		runtime.GC()
		runtime.GC()
	}
	t1 := time.Nanoseconds()

	fmt.Printf("Alloc=%d/%d Heap=%d Mallocs=%d PauseTime=%.3f/%d = %.3f\n",
		st.Alloc, st.TotalAlloc,
		st.Sys,
		st.Mallocs, float64(st.PauseTotalNs)/1e9,
		st.NumGC, float64(st.PauseTotalNs)/1e9/float64(st.NumGC))

	/*
		fmt.Printf("%10s %10s %10s\n", "size", "#alloc", "#free")
		for _, s := range st.BySize {
			fmt.Printf("%10d %10d %10d\n", s.Size, s.Mallocs, s.Frees)
		}
	*/
	// Standard gotest benchmark output, collected by build dashboard.
	gcstats("BenchmarkParser", *n, t1-t0)

	if *serve != "" {
		log.Fatal(http.ListenAndServe(*serve, nil))
		println(lastParsed)
	}
}
Example No. 25
func (me *TimingFileSystem) startTimer(name string, arg string) (closure func()) {
	start := time.Nanoseconds()

	return func() {
		dt := (time.Nanoseconds() - start) / 1e6
		me.LatencyMap.Add(name, arg, dt)
	}
}
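The closure-returning timer pairs naturally with defer. A minimal Go 1 sketch of the same trick, with a plain Printf standing in for the example's LatencyMap:

package main

import (
	"fmt"
	"time"
)

// startTimer returns a closure that reports the elapsed time when run;
// invoking it via defer times the enclosing function body.
func startTimer(name string) func() {
	start := time.Now()
	return func() {
		fmt.Printf("%s took %dms\n", name, time.Since(start).Milliseconds())
	}
}

func slowOp() {
	defer startTimer("slowOp")() // note the trailing (): the timer starts now
	time.Sleep(20 * time.Millisecond)
}

func main() { slowOp() }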
Example No. 26
// Test timer
func TestTimeModule(t *testing.T) {
	ns := -time.Nanoseconds()

	time.Sleep(1000000000) // 1 second

	ns += time.Nanoseconds()
	fmt.Printf("Duration: %.2f seconds\n", float64(ns)/1e9)
}
Example No. 27
func main() {
	model := train("big.txt")
	startTime := time.Nanoseconds()
	for i := 0; i < 1; i++ {
		fmt.Println(correct("korrecter", model))
	}
	fmt.Printf("Time : %v\n", float64(time.Nanoseconds()-startTime)/float64(1e9))
}
Example No. 28
func set(cl *doozer.Conn, id, iter int, path string, value []byte, f cb) {
	for i := 0; i < iter; i++ {
		s := time.Nanoseconds()
		rev, err := cl.Set(path, math.MaxInt64, value)
		e := time.Nanoseconds()
		f(id, i, rev, s, e, err)
	}
}
Example No. 29
func (g *Game) Run(input chan Msg) {
	g.input = input
	g.waitOnServiceStart(input)
	InitFunc(g, g.svc)

	// List of up to date entities
	updated := make(map[chan Msg]bool, len(g.ents))
	remove_list := []chan Msg{}
	tick_msg := MsgTick{input}

	for {
		tick_start := time.Nanoseconds()

		// Tell all the entities that a new tick has started
		ent_num := len(g.ents) // Store ent count for *this* tick
		for ent := range g.ents {
			Send(g, ent, tick_msg)
		}

		// Listen for any service messages
		// Break out of loop once all entities have updated
		for {
			msg := g.GetMsg(input)
			switch m := msg.(type) {
			case MsgTick:
				updated[m.Origin] = true // bool value doesn't matter
				if len(updated) == ent_num {
					// Clear out list, use current number of entities for next tick
					updated = make(map[chan Msg]bool, len(g.ents))
					goto update_end
				}
			case MsgEntityRemoved: // TODO: Counts as imperative here, fix?
				remove_list = append(remove_list, m.Entity.Chan)
			case MsgListEntities:
				Send(g, m.Reply, g.makeEntityList())
			case MsgSpawnEntity:
				g.spawnEntity(m)
			}
		}
	update_end:
		Send(g, g.svc.Comm, tick_msg)

		// Remove all entities that reported themselves to be removed
		for _, ent := range remove_list {
			g.RemoveEntity(g.ents[ent])
		}
		if len(remove_list) > 0 { // Clear out list if needed
			remove_list = []chan Msg{}
		}

		sleep_ns := (tick_start + skip_ns) - time.Nanoseconds()
		if sleep_ns > 0 {
			time.Sleep(sleep_ns)
		} else {
			log.Println("game: behind by", sleep_ns/1e6*-1, "ms")
		}
	}
}
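The arithmetic at the bottom of the loop (sleep for whatever remains of the tick budget, log when negative) is the manual form of a fixed-rate loop; with the Go 1 API this is commonly a time.Ticker. A minimal sketch in which an assumed tickPeriod constant plays the role of skip_ns:

package main

import (
	"fmt"
	"time"
)

func main() {
	const tickPeriod = 50 * time.Millisecond // plays the role of skip_ns
	ticker := time.NewTicker(tickPeriod)
	defer ticker.Stop()

	for i := 0; i < 3; i++ {
		start := time.Now()
		// ... per-tick work goes here ...
		if over := time.Since(start) - tickPeriod; over > 0 {
			fmt.Println("game: behind by", over)
		}
		<-ticker.C // blocks until the next tick; no manual sleep arithmetic
	}
}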
Example No. 30
func main() {
	// Parse args
	flag.IntVar(&p, "p", 1, "Max. number of Go processes")
	flag.IntVar(&b, "b", 1, "Channel buffer size")
	flag.Parse()

	if flag.NArg() != 1 {
		fmt.Printf("No input file name specified\n")
		return
	}
	fname := flag.Arg(0)

	// Go MAXPROCS tweak
	runtime.GOMAXPROCS(p)

	// Set up timer
	runTime := time.Nanoseconds()

	// Initialize channels and run reducer
	partCounts = make(chan int, b)
	sumOut = make(chan int, b)
	waitGrp = new(sync.WaitGroup)

	go reduce()

	// Open file in buffered mode
	file, err := os.Open(fname, os.O_RDONLY, 0644)
	if err != nil {
		fmt.Printf("Input error: %s\n", err)
		return
	}

	fileReader := bufio.NewReader(file)

	// Read data line-by-line and send each line to a separate goroutine
	var line string
	for err == nil {
		line, err = fileReader.ReadString('\n')
		if len(line) > 0 && line != "\n" {
			waitGrp.Add(1)
			go countWords(line)
		}
	}

	file.Close()

	// Wait for all goroutines to finish
	waitGrp.Wait()

	// Terminate reducer and count sum
	partCounts <- -1
	count := <-sumOut

	// Stop timer and print results
	stopTime := time.Nanoseconds()
	runElapsed := float64(stopTime-runTime) / 1000000000
	fmt.Printf("Done: %d words in %f seconds\n", count, runElapsed)
}
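One last note: the ReadString-until-error loop above is usually written today with bufio.Scanner, which folds the final unterminated line and the error handling into the Scan condition. A minimal sketch of the same line fan-out, with a word count standing in for countWords and a hypothetical input file name:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
	"sync"
)

func main() {
	file, err := os.Open("input.txt") // hypothetical input file
	if err != nil {
		fmt.Printf("Input error: %s\n", err)
		return
	}
	defer file.Close()

	counts := make(chan int, 16)
	done := make(chan struct{})
	go func() { // reducer: sum the per-line word counts
		total := 0
		for n := range counts {
			total += n
		}
		fmt.Println("Done:", total, "words")
		close(done)
	}()

	var wg sync.WaitGroup
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		wg.Add(1)
		go func() { // stand-in for countWords
			defer wg.Done()
			counts <- len(strings.Fields(line))
		}()
	}
	wg.Wait()
	close(counts)
	<-done
}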