Example #1
func (t *Timer) durationUnit(dur time.Duration) float64 {
	sec := dur.Seconds()
	if t.TimeUnit == "ms" {
		return sec * 1000
	}
	return sec
}
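Because the listing shows only the method, here is a minimal standalone sketch of the same conversion; the reduced Timer type and the sample duration are assumptions for illustration, not code from the original project.

package main

import (
	"fmt"
	"time"
)

// Timer is reduced to the single field the example above uses.
type Timer struct {
	TimeUnit string
}

func (t *Timer) durationUnit(dur time.Duration) float64 {
	sec := dur.Seconds()
	if t.TimeUnit == "ms" {
		return sec * 1000
	}
	return sec
}

func main() {
	d := 1500 * time.Millisecond
	fmt.Println((&Timer{TimeUnit: "ms"}).durationUnit(d)) // 1500
	fmt.Println((&Timer{TimeUnit: "s"}).durationUnit(d))  // 1.5
}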
Example #2
File: print.go Project: kkbankol/boom
func (b *Boom) Print() {
	total := b.end.Sub(b.start)
	var avgTotal float64
	var fastest, slowest time.Duration

	for {
		select {
		case r := <-b.results:
			latencies = append(latencies, r.duration.Seconds())
			statusCodeDist[r.statusCode]++

			avgTotal += r.duration.Seconds()
			if fastest.Nanoseconds() == 0 || r.duration.Nanoseconds() < fastest.Nanoseconds() {
				fastest = r.duration
			}
			if r.duration.Nanoseconds() > slowest.Nanoseconds() {
				slowest = r.duration
			}
		default:
			rps := float64(b.N) / total.Seconds()
			fmt.Printf("\nSummary:\n")
			fmt.Printf("  Total:\t%4.4f secs.\n", total.Seconds())
			fmt.Printf("  Slowest:\t%4.4f secs.\n", slowest.Seconds())
			fmt.Printf("  Fastest:\t%4.4f secs.\n", fastest.Seconds())
			fmt.Printf("  Average:\t%4.4f secs.\n", avgTotal/float64(b.N))
			fmt.Printf("  Requests/sec:\t%4.4f\n", rps)
			fmt.Printf("  Speed index:\t%v\n", speedIndex(rps))
			sort.Float64s(latencies)
			b.printHistogram()
			b.printLatencies()
			b.printStatusCodes()
			return
		}
	}
}
Example #3
File: main.go Project: pombredanne/heka
func timerLoop(count *uint64, ticker *time.Ticker) {
	lastTime := time.Now().UTC()
	lastCount := *count
	zeroes := int8(0)
	var (
		msgsSent, newCount uint64
		elapsedTime        time.Duration
		now                time.Time
		rate               float64
	)
	for {
		_ = <-ticker.C
		newCount = *count
		now = time.Now()
		msgsSent = newCount - lastCount
		lastCount = newCount
		elapsedTime = now.Sub(lastTime)
		lastTime = now
		rate = float64(msgsSent) / elapsedTime.Seconds()
		if msgsSent == 0 {
			if newCount == 0 || zeroes == 3 {
				continue
			}
			zeroes++
		} else {
			zeroes = 0
		}
		log.Printf("Sent %d messages. %0.2f msg/sec\n", newCount, rate)
	}
}
Example #4
File: attack.go Project: cwinters/vegeta
// Attack reads its Targets from the passed Targeter and attacks them at the
// specified rate for the given duration. Results are put into the returned
// channel as soon as they arrive.
func (a *Attacker) Attack(tr Targeter, rate uint64, du time.Duration) chan *Result {
	resc := make(chan *Result)
	throttle := time.NewTicker(time.Duration(1e9 / rate))
	hits := rate * uint64(du.Seconds())
	wrk := a.workers
	if wrk == 0 || wrk > hits {
		wrk = hits
	}
	share := hits / wrk

	var wg sync.WaitGroup
	for i := uint64(0); i < wrk; i++ {
		wg.Add(1)
		go func(share uint64) {
			defer wg.Done()
			for j := uint64(0); j < share; j++ {
				select {
				case tm := <-throttle.C:
					resc <- a.hit(tr, tm)
				case <-a.stop:
					return
				}
			}
		}(share)
	}

	go func() {
		wg.Wait()
		close(resc)
		throttle.Stop()
	}()

	return resc
}
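The throttle interval and total hit count above reduce to simple arithmetic; this small sketch just replays that math with assumed inputs (rate and duration are example values, not from vegeta).

package main

import (
	"fmt"
	"time"
)

func main() {
	rate := uint64(50)    // requests per second (assumed)
	du := 5 * time.Second // attack duration (assumed)

	interval := 1e9 / rate              // nanoseconds between ticks
	hits := rate * uint64(du.Seconds()) // total requests to send

	fmt.Println(time.Duration(interval)) // 20ms
	fmt.Println(hits)                    // 250
}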
Example #5
File: filter.go Project: vjeko/godrone
func (f Filter) Update(placement *Placement, sensors Sensors, dt time.Duration) {
	if f.AccGain+f.GyroGain != 1 {
		panic("Gains must add up to 1")
	}
	var (
		dts    = dt.Seconds()
		accDeg = PRY{
			Pitch: degAngle(sensors.Acc.Roll, sensors.Acc.Yaw),
			Roll:  degAngle(sensors.Acc.Pitch, sensors.Acc.Yaw),
		}
		gyroDeg = PRY{
			Pitch: placement.Pitch + (sensors.Gyro.Pitch * dts),
			Roll:  placement.Roll + (sensors.Gyro.Roll * dts),
			Yaw:   placement.Yaw + (sensors.Gyro.Yaw * dts),
		}
	)
	// Implements a simple complementary filter.
	// see http://www.pieter-jan.com/node/11
	placement.Pitch = gyroDeg.Pitch*f.GyroGain + accDeg.Pitch*f.AccGain
	placement.Roll = gyroDeg.Roll*f.GyroGain + accDeg.Roll*f.AccGain
	// @TODO Integrate gyro yaw with magnetometer yaw
	placement.Yaw = gyroDeg.Yaw
	// The sonar sometimes reads very high values when on the ground. Ignoring
	// the sonar above a certain altitude solves the problem.
	// @TODO Use barometer above SonarMax
	if sensors.Sonar < f.SonarMax {
		placement.Altitude += (sensors.Sonar - placement.Altitude) * f.SonarGain
	}
}
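The comment above points at the complementary-filter idea; the following self-contained sketch shows the same blend on a single axis. The gains, rates, and time step are made-up values, not godrone code.

package main

import (
	"fmt"
	"time"
)

// complementary blends an integrated gyro estimate with an absolute
// accelerometer angle: the gyro dominates short-term, the accelerometer
// slowly corrects its drift.
func complementary(prevDeg, gyroRateDegPerSec, accDeg float64, dt time.Duration, gyroGain, accGain float64) float64 {
	gyroDeg := prevDeg + gyroRateDegPerSec*dt.Seconds()
	return gyroDeg*gyroGain + accDeg*accGain
}

func main() {
	angle := 0.0
	dt := 5 * time.Millisecond
	for i := 0; i < 3; i++ {
		// Pretend the gyro reads 10 deg/s and the accelerometer reads 1 deg.
		angle = complementary(angle, 10, 1, dt, 0.98, 0.02)
		fmt.Printf("step %d: %.4f deg\n", i, angle)
	}
}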
Example #6
func (p *PushProcessor) retryRequest(req *Request, retryAfter time.Duration, subscriber string, psp *PushServiceProvider, dp *DeliveryPoint) {
	if req.nrRetries >= p.max_nr_retry {
		return
	}
	newreq := new(Request)
	newreq.nrRetries = req.nrRetries + 1
	newreq.PreviousTry = req
	newreq.ID = req.ID
	newreq.Action = ACTION_PUSH
	newreq.PushServiceProvider = psp
	newreq.DeliveryPoint = dp
	newreq.RequestSenderAddr = req.RequestSenderAddr
	newreq.Notification = req.Notification

	newreq.Service = req.Service
	newreq.Subscribers = make([]string, 1)
	newreq.Subscribers[0] = subscriber
	newreq.PunchTimestamp()

	if req.nrRetries == 0 || req.backoffTime == 0 {
		newreq.backoffTime = init_backoff_time
	} else {
		newreq.backoffTime = req.backoffTime << 1
	}

	waitTime := newreq.backoffTime
	if retryAfter > 0*time.Second {
		waitTime = int64(retryAfter.Seconds())
	}

	duration := time.Duration(waitTime) * time.Second
	time.Sleep(duration)
	p.backendch <- newreq
}
Example #7
// fetchExcessSharedDuration returns a slice of duration times (as seconds).
// Given a distro and a map of DistroScheduleData, it traverses the map looking
// for alternate distros that are unable to meet the maxDurationPerHost
// (turnaround) specification for shared tasks.
func fetchExcessSharedDuration(distroScheduleData map[string]DistroScheduleData,
	distro string,
	maxDurationPerHost time.Duration) (sharedTasksDurationTimes []float64) {

	distroData := distroScheduleData[distro]

	// if we have more tasks to run than we have existing hosts and at least one
	// other alternate distro can not run all its shared scheduled and running tasks
	// within the maxDurationPerHost period, we need some more hosts for this distro
	for sharedDistro, sharedDuration := range distroData.sharedTasksDuration {
		if distro == sharedDistro {
			continue
		}

		alternateDistroScheduleData := distroScheduleData[sharedDistro]

		durationPerHost := alternateDistroScheduleData.totalTasksDuration /
			(float64(alternateDistroScheduleData.numExistingHosts) +
				float64(alternateDistroScheduleData.nominalNumNewHosts))

		// if no other alternate distro is able to meet the host requirements,
		// append its shared tasks duration time to the returned slice
		if durationPerHost > maxDurationPerHost.Seconds() {
			sharedTasksDurationTimes = append(sharedTasksDurationTimes,
				sharedDuration)
		}
	}
	return
}
Example #8
File: sandbox.go Project: fluter01/lotsawa
func runContainerTimed(name string,
	args []string,
	wd string,
	stdin io.Reader,
	stdout io.Writer,
	stderr io.Writer,
	timeout time.Duration) error {

	sec := fmt.Sprintf("%d", int(timeout.Seconds()))
	args = append([]string{"-k", "1", sec, name}, args...)
	start := time.Now()
	err := runContainer("timeout",
		args,
		wd,
		stdin,
		stdout,
		stderr)

	if err != nil {
		if ee, ok := err.(*exec.ExitError); ok {
			pst := ee.ProcessState
			if st, ok := pst.Sys().(syscall.WaitStatus); ok {
				if st.ExitStatus() == 124 {
					err = fmt.Errorf("program killed after %s",
						time.Now().Sub(start).String())
				}
			}
		}
		return err
	}

	return nil
}
Example #9
File: main.go Project: JimmyMa/loggregator
func StartHeartbeats(localIp string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) (stopChan chan (chan bool)) {
	if len(config.EtcdUrls) == 0 {
		return
	}

	if storeAdapter == nil {
		panic("store adapter is nil")
	}

	logger.Debugf("Starting Health Status Updates to Store: /healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index)
	status, stopChan, err := storeAdapter.MaintainNode(storeadapter.StoreNode{
		Key:   fmt.Sprintf("/healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index),
		Value: []byte(localIp),
		TTL:   uint64(ttl.Seconds()),
	})

	if err != nil {
		panic(err)
	}

	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
Example #10
// Attack reads its Targets from the passed Targeter and attacks them at the
// specified rate for the given duration. Results are put into the returned
// channel as soon as they arrive.
func (a *Attacker) Attack(tr Targeter, rate uint64, du time.Duration) <-chan *Result {
	workers := &sync.WaitGroup{}
	results := make(chan *Result)
	ticks := make(chan time.Time)
	for i := uint64(0); i < a.workers; i++ {
		go a.attack(tr, workers, ticks, results)
	}

	go func() {
		defer close(results)
		defer workers.Wait()
		defer close(ticks)
		interval := 1e9 / rate
		hits := rate * uint64(du.Seconds())
		for began, done := time.Now(), uint64(0); done < hits; done++ {
			now, next := time.Now(), began.Add(time.Duration(done*interval))
			time.Sleep(next.Sub(now))
			select {
			case ticks <- max(next, now):
			case <-a.stopch:
				return
			default: // all workers are blocked. start one more and try again
				go a.attack(tr, workers, ticks, results)
				done--
			}
		}
	}()

	return results
}
Example #11
File: main.go Project: orangemi/heka
func timerLoop(count, bytes *uint64, ticker *time.Ticker) {
	lastTime := time.Now().UTC()
	lastCount := *count
	lastBytes := *bytes
	zeroes := int8(0)
	var (
		msgsSent, newCount, bytesSent, newBytes uint64
		elapsedTime                             time.Duration
		now                                     time.Time
		msgRate, bitRate                        float64
	)
	for {
		_ = <-ticker.C
		newCount = *count
		newBytes = *bytes
		now = time.Now()
		msgsSent = newCount - lastCount
		lastCount = newCount
		bytesSent = newBytes - lastBytes
		lastBytes = newBytes
		elapsedTime = now.Sub(lastTime)
		lastTime = now
		msgRate = float64(msgsSent) / elapsedTime.Seconds()
		bitRate = float64(bytesSent*8.0) / 1e6 / elapsedTime.Seconds()
		if msgsSent == 0 {
			if newCount == 0 || zeroes == 3 {
				continue
			}
			zeroes++
		} else {
			zeroes = 0
		}
		client.LogInfo.Printf("Sent %d messages. %0.2f msg/sec %0.2f Mbit/sec\n", newCount, msgRate, bitRate)
	}
}
Example #12
File: publisher.go Project: gpxl/deis
// publishContainer publishes the docker container to etcd.
func (s *Server) publishContainer(container *docker.APIContainers, ttl time.Duration) {
	r := regexp.MustCompile(appNameRegex)
	for _, name := range container.Names {
		// HACK: remove slash from container name
		// see https://github.com/docker/docker/issues/7519
		containerName := name[1:]
		match := r.FindStringSubmatch(containerName)
		if match == nil {
			continue
		}
		appName := match[1]
		appPath := fmt.Sprintf("%s/%s", appName, containerName)
		keyPath := fmt.Sprintf("/deis/services/%s", appPath)
		for _, p := range container.Ports {
			// lowest port wins (docker sorts the ports)
			// TODO (bacongobbler): support multiple exposed ports
			port := strconv.Itoa(int(p.PublicPort))
			hostAndPort := s.host + ":" + port
			if s.IsPublishableApp(containerName) && s.IsPortOpen(hostAndPort) {
				s.setEtcd(keyPath, hostAndPort, uint64(ttl.Seconds()))
				safeMap.Lock()
				safeMap.data[container.ID] = appPath
				safeMap.Unlock()
			}
			break
		}
	}
}
Example #13
File: eng.go Project: skyview059/vu
// update polls user input, runs physics, calls application update, and
// finally refreshes all models resulting in updated transforms.
// The transform hierarchy is now ready to generate a render frame.
func (eng *engine) update(app App, dt time.Duration, ut uint64) {

	// Fetch input from the device thread. Essentially a sequential call.
	eng.machine <- eng.data // blocks until processed by the server.
	<-eng.data.reply        // blocks until processing is finished.
	input := eng.data.input // User input has been refreshed.
	state := eng.data.state // Engine state has been refreshed.
	dts := dt.Seconds()     // delta time as float.

	// Run physics on all the bodies; adjusting location and orientation.
	eng.bods = eng.bods[:0] // reset keeping capacity.
	for _, bod := range eng.solids {
		eng.bods = append(eng.bods, bod)
	}
	eng.physics.Step(eng.bods, dts)

	// Have the application adjust any or all state before rendering.
	input.Dt = dts                // how long to get back to here.
	input.Ut = ut                 // update ticks.
	app.Update(eng, input, state) // application updates its own state.

	// update assets that the application changed or which need
	// per tick processing. Per-ticks include animated models,
	// particle effects, surfaces, phrases, ...
	if eng.alive {
		eng.updateModels(dts)                // load and bind updated data.
		eng.placeModels(eng.root(), lin.M4I) // update all transforms.
		eng.updateSoundListener()            // reposition sound listener.
	}
}
Example #14
func (db *SQLDB) UpdateExpiresAtOnContainer(handle string, ttl time.Duration) error {
	tx, err := db.conn.Begin()
	if err != nil {
		return err
	}

	defer tx.Rollback()

	interval := fmt.Sprintf("%d second", int(ttl.Seconds()))

	_, err = tx.Exec(`
		UPDATE containers SET expires_at = NOW() + $2::INTERVAL, ttl = $3
		WHERE handle = $1
		`,
		handle,
		interval,
		ttl,
	)

	if err != nil {
		return err
	}

	return tx.Commit()
}
Example #15
File: ping.go Project: eminence/go-ipfs
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()
		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}
		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
Example #16
// getOneTimeResourceUsageOnNode queries the node's /stats/container endpoint
// and returns the resource usage of targetContainers for the past
// cpuInterval.
// The acceptable range of the interval is 2s~120s. Be warned that as the
// interval (and #containers) increases, the size of kubelet's response
// could be significant. E.g., the 60s interval stats for ~20 containers is
// ~1.5MB. Don't hammer the node with frequent, heavy requests.
//
// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
// stats points to compute the cpu usage over the interval. Assuming cadvisor
// polls every second, we'd need to get N stats points for N-second interval.
// Note that this is an approximation and may not be accurate, hence we also
// write the actual interval used for calculation (based on the timestamps of
// the stats points) in containerResourceUsage.CPUInterval.
func getOneTimeResourceUsageOnNode(c *client.Client, nodeName string, cpuInterval time.Duration) (map[string]*containerResourceUsage, error) {
	numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
	if numStats < 2 || numStats > maxNumStatsToRequest {
		return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest)
	}
	// Get information of all containers on the node.
	containerInfos, err := getContainerInfo(c, nodeName, &kubelet.StatsRequest{
		ContainerName: "/",
		NumStats:      numStats,
		Subcontainers: true,
	})
	if err != nil {
		return nil, err
	}
	// Process container infos that are relevant to us.
	usageMap := make(map[string]*containerResourceUsage, len(targetContainers))
	for _, name := range targetContainers {
		info, ok := containerInfos[name]
		if !ok {
			return nil, fmt.Errorf("missing info for container %q on node %q", name, nodeName)
		}
		first := info.Stats[0]
		last := info.Stats[len(info.Stats)-1]
		usageMap[name] = computeContainerResourceUsage(name, first, last)
	}
	return usageMap, nil
}
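The numStats value above is just the requested window divided by the assumed cadvisor polling period; a tiny sketch with an assumed 1-second polling interval (the constant and window are illustrative, not Kubernetes code):

package main

import (
	"fmt"
	"time"
)

func main() {
	const pollingIntervalSeconds = 1.0 // assumed cadvisor polling period
	cpuInterval := 30 * time.Second    // requested measurement window

	numStats := int(cpuInterval.Seconds() / pollingIntervalSeconds)
	fmt.Println(numStats) // 30 stats points for a 30s window
}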
Example #17
// machineName: A unique identifier for the host that the current cAdvisor
// instance is running on.
// influxdbHost: The host which runs influxdb.
// percentilesDuration: Time window which will be considered when calling Percentiles()
func New(machineName,
	tablename,
	database,
	username,
	password,
	influxdbHost string,
	isSecure bool,
	percentilesDuration time.Duration,
) (storage.StorageDriver, error) {
	config := &influxdb.ClientConfig{
		Host:     influxdbHost,
		Username: username,
		Password: password,
		Database: database,
		IsSecure: isSecure,
	}
	client, err := influxdb.NewClient(config)
	if err != nil {
		return nil, err
	}
	// TODO(monnand): With go 1.3, we cannot compress data now.
	client.DisableCompression()
	if percentilesDuration.Seconds() < 1.0 {
		percentilesDuration = 5 * time.Minute
	}

	ret := &influxdbStorage{
		client:      client,
		windowLen:   percentilesDuration,
		machineName: machineName,
		tableName:   tablename,
	}
	return ret, nil
}
Example #18
func (this *ProgressBarSimple) renderFixedSizeDuration(dur time.Duration) string {
	h := dur.Hours()
	m := dur.Minutes()
	if h > pbYear*10 {
		y := int(h / pbYear)
		h -= float64(y) * pbYear
		w := h / pbWeek
		return fmt.Sprintf("%02dy%02dw", y, int(w))
	} else if h > pbWeek*10 {
		return fmt.Sprintf("%05dw", int(h/pbWeek))
	} else if h > pbDay*2 {
		d := int(h / pbDay)
		h -= pbDay * float64(d)
		return fmt.Sprintf("%02dd%02dh", d, int(h))
	} else if h > 1 {
		o := int(h)
		i := m - float64(o)*60
		return fmt.Sprintf("%02dh%02dm", o, int(i))
	} else if dur.Seconds() < 0 {
		return "00m00s"
	} else {
		i := int(m)
		s := dur.Seconds() - float64(i)*60
		return fmt.Sprintf("%02dm%02ds", i, int(s))
	}
}
Example #19
// computeDurationBasedNumNewHosts returns the number of new hosts needed based
// on a heuristic that uses the total duration of currently running and
// scheduled tasks, and a maximum duration of a task per distro host -
// a turnaround cap on all outstanding and running tasks in the system.
func computeDurationBasedNumNewHosts(scheduledTasksDuration,
	runningTasksDuration, numExistingDistroHosts float64,
	maxDurationPerHost time.Duration) (numNewHosts int) {

	// total duration of scheduled and currently running tasks
	totalDistroTasksDuration := scheduledTasksDuration +
		runningTasksDuration

	// number of hosts needed to meet the duration based turnaround requirement
	numHostsForTurnaroundRequirement := totalDistroTasksDuration /
		maxDurationPerHost.Seconds()

	// floating point precision number of new hosts needed
	durationBasedNumNewHostsNeeded := numHostsForTurnaroundRequirement -
		numExistingDistroHosts

	// duration based number of new hosts needed
	numNewHosts = int(math.Ceil(durationBasedNumNewHostsNeeded))

	// return 0 if numNewHosts is less than 0
	if numNewHosts < 0 {
		numNewHosts = 0
	}
	return
}
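A worked instance of the heuristic above, using made-up numbers rather than real scheduler data:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	scheduled := 3600.0                    // seconds of scheduled task work (assumed)
	running := 1800.0                      // seconds of running task work (assumed)
	existingHosts := 2.0                   // hosts already up (assumed)
	maxDurationPerHost := 30 * time.Minute // turnaround cap per host (assumed)

	// 5400s of work at 1800s per host means 3 hosts of capacity are needed.
	needed := (scheduled + running) / maxDurationPerHost.Seconds()
	newHosts := int(math.Ceil(needed - existingHosts))
	if newHosts < 0 {
		newHosts = 0
	}
	fmt.Println(newHosts) // 1
}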
Example #20
func RenderFixedSizeDuration(dur time.Duration) string {
	h := dur.Hours()
	m := dur.Minutes()
	s := dur.Seconds()
	if h > pbYear*10 {
		y := int(h / pbYear)
		h -= float64(y) * pbYear
		w := h / pbWeek
		return fmt.Sprintf("%02dy%02dw", y, int(w))
	} else if h > pbWeek*10 {
		return fmt.Sprintf("%05dw", int(h/pbWeek))
	} else if h > pbDay*2 {
		d := int(h / pbDay)
		h -= pbDay * float64(d)
		return fmt.Sprintf("%02dd%02dh", d, int(h))
	} else if h > 1 {
		o := int(h)
		i := m - float64(o)*60
		return fmt.Sprintf("%02dh%02dm", o, int(i))
	} else if s > 99 {
		i := int(m)
		o := s - float64(i)*60
		return fmt.Sprintf("%02dm%02ds", i, int(o))
	} else if s < 1 {
		return "00m00s"
	} else {
		ms := (s - float64(int(s))) * 1000
		return fmt.Sprintf("%02ds%03d", int(s), int(ms))
	}
}
Example #21
// RunTest runs test on the node.
func (n *NodeE2ERemote) RunTest(host, workspace, results, junitFilePrefix, testArgs, ginkgoArgs string, timeout time.Duration) (string, error) {
	// Install the cni plugin.
	if err := installCNI(host, workspace); err != nil {
		return "", err
	}

	// Configure iptables firewall rules
	if err := configureFirewall(host); err != nil {
		return "", err
	}

	// Kill any running node processes
	cleanupNodeProcesses(host)

	testArgs, err := updateGCIMounterPath(testArgs, host, workspace)
	if err != nil {
		return "", err
	}

	// Run the tests
	glog.V(2).Infof("Starting tests on %q", host)
	cmd := getSSHCommand(" && ",
		fmt.Sprintf("cd %s", workspace),
		fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s %s",
			timeout.Seconds(), ginkgoArgs, host, results, junitFilePrefix, testArgs),
	)
	return SSH(host, "sh", "-c", cmd)
}
Example #22
File: statistics.go Project: KLab/gohakai
func (s *Statistics) printHumanReadable() {
	delta := s.Delta
	nreq := SUCCESS + FAIL
	rps := float64(nreq) / float64(delta.Seconds())

	fmt.Printf("\nrequest count:%d, concurrency:%d, time:%.5f[s], %f[req/s]\n",
		nreq, s.MaxRequest, delta.Seconds(), rps)
	fmt.Printf("SUCCESS %d\n", SUCCESS)
	fmt.Printf("FAILED %d\n", FAIL)

	var avgTimeByPath map[string]float64 = map[string]float64{}
	var totalCount uint32
	var totalTime time.Duration
	for path, cnt := range PathCount {
		totalTime += PathTime[path]
		totalCount += cnt
		avgTimeByPath[path] += PathTime[path].Seconds() / float64(cnt)
	}
	fmt.Printf("Average response time[ms]: %v\n",
		1000.*totalTime.Seconds()/float64(totalCount))

	if s.Config.ShowReport {
		var stats AvarageTimeStats = []AvarageTimeByPath{}

		fmt.Printf("Average response time for each path (order by longest) [ms]:\n")
		for path, time := range avgTimeByPath {
			stats = append(stats, AvarageTimeByPath{Path: path, Time: time})
		}
		sort.Sort(sort.Reverse(stats))
		for i := 0; i < len(stats); i++ {
			fmt.Printf("%.3f : %s\n", stats[i].Time*1000., stats[i].Path)
		}
	}
}
Example #23
File: docker.go Project: yorito/dockertest
// WaitPort waits until the port is available.
func (c *Container) WaitPort(port int, timeout time.Duration) int {
	// wait until port available
	p := c.ports[port]
	if p == 0 {
		log.Fatalf("port %d is not exposed on %s", port, c.image)
	}

	nw := c.networks[port]
	if nw == "" {
		log.Fatalf("network not described on %s", c.image)
	}

	end := time.Now().Add(timeout)
	for {
		now := time.Now()
		_, err := net.DialTimeout(nw, c.Addr(port), end.Sub(now))
		if err != nil {
			if time.Now().After(end) {
				log.Fatalf("port %d not available on %s for %f seconds", port, c.image, timeout.Seconds())
			}
			time.Sleep(time.Second)
			continue
		}
		break
	}
	return p
}
Example #24
// convertIPPDateToTime converts an RFC 2579 date to a time.Time object.
func convertIPPDateToTime(date *C.ipp_uchar_t) time.Time {
	r := bytes.NewReader(C.GoBytes(unsafe.Pointer(date), 11))
	var year uint16
	var month, day, hour, min, sec, dsec uint8
	binary.Read(r, binary.BigEndian, &year)
	binary.Read(r, binary.BigEndian, &month)
	binary.Read(r, binary.BigEndian, &day)
	binary.Read(r, binary.BigEndian, &hour)
	binary.Read(r, binary.BigEndian, &min)
	binary.Read(r, binary.BigEndian, &sec)
	binary.Read(r, binary.BigEndian, &dsec)

	var utcDirection, utcHour, utcMin uint8
	binary.Read(r, binary.BigEndian, &utcDirection)
	binary.Read(r, binary.BigEndian, &utcHour)
	binary.Read(r, binary.BigEndian, &utcMin)

	var utcOffset time.Duration
	utcOffset += time.Duration(utcHour) * time.Hour
	utcOffset += time.Duration(utcMin) * time.Minute
	var loc *time.Location
	if utcDirection == '-' {
		loc = time.FixedZone("", -int(utcOffset.Seconds()))
	} else {
		loc = time.FixedZone("", int(utcOffset.Seconds()))
	}

	nsec := int(dsec) * 100 * int(time.Millisecond)

	return time.Date(int(year), time.Month(month), int(day), int(hour), int(min), int(sec), nsec, loc)
}
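The same 11-byte RFC 2579 DateAndTime layout can be decoded without cgo; this sketch mirrors the field order used above (year, month, day, hour, minute, second, deciseconds, UTC direction, UTC hours, UTC minutes). The sample bytes are invented.

package main

import (
	"fmt"
	"time"
)

// decodeRFC2579 interprets an 11-byte RFC 2579 DateAndTime value.
func decodeRFC2579(b [11]byte) time.Time {
	year := int(b[0])<<8 | int(b[1])
	offset := int(b[9])*3600 + int(b[10])*60 // UTC offset in seconds
	if b[8] == '-' {
		offset = -offset
	}
	loc := time.FixedZone("", offset)
	nsec := int(b[7]) * 100 * int(time.Millisecond) // deciseconds to nanoseconds
	return time.Date(year, time.Month(b[2]), int(b[3]), int(b[4]), int(b[5]), int(b[6]), nsec, loc)
}

func main() {
	// 2016-05-04 12:30:15.5 at UTC+09:00
	fmt.Println(decodeRFC2579([11]byte{0x07, 0xE0, 5, 4, 12, 30, 15, 5, '+', 9, 0}))
}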
Example #25
// Incr increments the specified key. If the key did not exist, it sets it to 1
// and sets it to expire after the number of seconds specified by window.
//
// It returns the new count value and the number of remaining seconds, or an error
// if the operation fails.
func (r *redisStore) Incr(key string, window time.Duration) (int, int, error) {
	conn := r.pool.Get()
	defer conn.Close()
	if err := selectDB(r.db, conn); err != nil {
		return 0, 0, err
	}
	// Atomically increment and read the TTL.
	conn.Send("MULTI")
	conn.Send("INCR", r.prefix+key)
	conn.Send("TTL", r.prefix+key)
	vals, err := redis.Values(conn.Do("EXEC"))
	if err != nil {
		conn.Do("DISCARD")
		return 0, 0, err
	}
	var cnt, ttl int
	if _, err = redis.Scan(vals, &cnt, &ttl); err != nil {
		return 0, 0, err
	}
	// If there was no TTL set, then this is a newly created key (INCR creates the key
	// if it didn't exist), so set it to expire.
	if ttl == -1 {
		ttl = int(window.Seconds())
		_, err = conn.Do("EXPIRE", r.prefix+key, ttl)
		if err != nil {
			return 0, 0, err
		}
	}
	return cnt, ttl, nil
}
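The count/TTL pair returned by Incr maps naturally onto a fixed-window rate limit. A caller-side sketch against a small interface; the allow helper, the interface, and the limit are assumptions for illustration, not part of this package.

package incrsketch

import "time"

// counter is the subset of the store that this sketch relies on.
type counter interface {
	Incr(key string, window time.Duration) (count int, ttlSeconds int, err error)
}

// allow permits up to limit calls per key within the window and reports
// how long remains until the counter resets.
func allow(c counter, key string, limit int, window time.Duration) (ok bool, resetIn time.Duration, err error) {
	count, ttl, err := c.Incr(key, window)
	if err != nil {
		return false, 0, err
	}
	return count <= limit, time.Duration(ttl) * time.Second, nil
}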
Example #26
File: client.go Project: mpdroog/smtpw
/*
The "put" command is for any process that wants to insert a job into the queue.
It comprises a command line followed by the job body:

put <pri> <delay> <ttr> <bytes>\r\n
<data>\r\n

It inserts a job into the client's currently used tube (see the "use" command
below).

 - <pri> is an integer < 2**32. Jobs with smaller priority values will be
   scheduled before jobs with larger priorities. The most urgent priority is 0;
   the least urgent priority is 4,294,967,295.

 - <delay> is an integer number of seconds to wait before putting the job in
   the ready queue. The job will be in the "delayed" state during this time.

 - <ttr> -- time to run -- is an integer number of seconds to allow a worker
   to run this job. This time is counted from the moment a worker reserves
   this job. If the worker does not delete, release, or bury the job within
   <ttr> seconds, the job will time out and the server will release the job.
   The minimum ttr is 1. If the client sends 0, the server will silently
   increase the ttr to 1.

 - <bytes> is an integer indicating the size of the job body, not including the
   trailing "\r\n". This value must be less than max-job-size (default: 2**16).

 - <data> is the job body -- a sequence of bytes of length <bytes> from the
   previous line.

After sending the command line and body, the client waits for a reply, which
may be:

 - "INSERTED <id>\r\n" to indicate success.

   - <id> is the integer id of the new job

 - "BURIED <id>\r\n" if the server ran out of memory trying to grow the
   priority queue data structure.

   - <id> is the integer id of the new job

 - "EXPECTED_CRLF\r\n" The job body must be followed by a CR-LF pair, that is,
   "\r\n". These two bytes are not counted in the job size given by the client
   in the put command line.

 - "JOB_TOO_BIG\r\n" The client has requested to put a job with a body larger
   than max-job-size bytes.

 - "DRAINING\r\n" This means that the server has been put into "drain mode"
   and is no longer accepting new jobs. The client should try another server
   or disconnect and try again later.
*/
func (this *BeanstalkdClient) Put(priority uint32, delay, ttr time.Duration, data []byte) (id uint64, err error) {
	cmd := fmt.Sprintf("put %d %d %d %d\r\n", priority, uint64(delay.Seconds()), uint64(ttr.Seconds()), len(data))
	cmd = cmd + string(data) + string(crnl)

	_, reply, err := this.sendReply(cmd)

	if err != nil {
		return 0, err
	}

	switch {
	case strings.Index(reply, "INSERTED") == 0:
		var id uint64
		_, perr := fmt.Sscanf(reply, "INSERTED %d\r\n", &id)
		return id, perr
	case strings.Index(reply, "BURIED") == 0:
		var id uint64
		_, perr := fmt.Sscanf(reply, "BURIED %d\r\n", &id)
		return id, perr
	case reply == "EXPECTED_CRLF\r\n":
		return 0, errExpectedCrlf
	case reply == "JOB_TOO_BIG\r\n":
		return 0, errJobTooBig
	case reply == "DRAINING\r\n":
		return 0, errDraining
	default:
		return 0, this.parseError(reply)
	}

}
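To make the wire format described above concrete, this standalone sketch prints the exact command line the Put method builds for a sample job; the priority, delay, ttr, and body are made-up values.

package main

import (
	"fmt"
	"time"
)

func main() {
	priority := uint32(1024)
	delay := 5 * time.Second
	ttr := 30 * time.Second
	data := []byte("hello")

	// Same formatting as Put: durations are sent as whole seconds.
	cmd := fmt.Sprintf("put %d %d %d %d\r\n", priority, uint64(delay.Seconds()), uint64(ttr.Seconds()), len(data))
	cmd += string(data) + "\r\n"
	fmt.Printf("%q\n", cmd) // "put 1024 5 30 5\r\nhello\r\n"
}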
Example #27
// Returns the elapsed time in seconds.
func (clock *Clock) End_tick() float64 {
	now_tick := time.Now()
	var dur_time time.Duration = now_tick.Sub(clock.m_tick)

	var elapsed_sec float64 = dur_time.Seconds()
	return elapsed_sec
}
Example #28
File: stats.go Project: JalfResi/gostalk
func (server *server) statistics() serverStats {
	stats := *server.stats
	stats.Uptime = time.Since(server.startedAt).Seconds()
	stats.CurrentTubes = len(server.tubes)
	stats.TotalJobs = len(server.jobs)

	for _, tube := range server.tubes {
		stats.CurrentJobsBuried += tube.buried.Len()
		stats.CurrentJobsDelayed += tube.delayed.Len()
		stats.CurrentJobsReady += tube.ready.Len()
		stats.CurrentJobsReserved += tube.reserved.Len()
	}

	var duration time.Duration
	usage := new(syscall.Rusage)
	err := syscall.Getrusage(syscall.RUSAGE_SELF, usage)
	if err == nil {
		s, ns := usage.Utime.Unix()
		duration, err = time.ParseDuration(fmt.Sprintf("%d.%ds", s, ns))
		stats.RusageUtime = duration.Seconds()

		s, ns = usage.Stime.Unix()
		duration, err = time.ParseDuration(fmt.Sprintf("%d.%ds", s, ns))
		stats.RusageStime = duration.Seconds()
	} else {
		pf("failed to get rusage : %v", err)
	}

	return stats
}
Example #29
func (i *RateInterval) GetCost(duration, startSecond time.Duration) float64 {
	price, _, rateUnit := i.
		GetRateParameters(startSecond)
	price /= rateUnit.Seconds()
	d := duration.Seconds()
	return d * price
}
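A quick numeric check of the formula above, with assumed rating parameters (0.60 per 60-second rate unit, a 90-second call); these values are illustrative only.

package main

import (
	"fmt"
	"time"
)

func main() {
	price := 0.60                // price per rate unit (assumed)
	rateUnit := 60 * time.Second // rate unit (assumed)
	duration := 90 * time.Second // billed duration (assumed)

	perSecond := price / rateUnit.Seconds()
	cost := duration.Seconds() * perSecond
	fmt.Printf("%.2f\n", cost) // 0.90
}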
Example #30
File: eng.go Project: toophy/vu
// runEngine calls Create once and Update continuously and regularly.
// The application loop generates device polling and render requests
// for the machine.
func runEngine(app App, wx, wy, ww, wh int, machine chan msg, stop chan bool) {
	defer catchErrors()
	eng := newEngine(machine)
	eng.stop = stop
	eng.data.state.setScreen(wx, wy, ww, wh)
	app.Create(eng, eng.data.state)
	ut := uint64(0)         // kick off initial update...
	eng.update(app, dt, ut) // first update queues the load asset requests.

	// Initialize timers and kick off the main control loop.
	var loopStart time.Time = time.Now()
	var updateStart time.Time
	var timeUsed time.Duration
	var updateTimer time.Duration // Track when to trigger an update.
	var renderTimer time.Duration // Track when to trigger a render.
	var frame []render.Draw       // New render frame, nil if no updated frame.
	for eng.alive {
		timeUsed = time.Since(loopStart) // Count previous loop.
		eng.times.Elapsed += timeUsed    // Track total time.
		if timeUsed > capTime {          // Avoid slow update death.
			timeUsed = capTime
		}
		loopStart = time.Now()

		// Trigger update based on current elapsed time.
		// This advances state at a constant rate (dt).
		updateTimer += timeUsed
		for updateTimer >= dt {
			updateStart = time.Now() // Time the update.
			ut += 1                  // Track the total update ticks.
			updateTimer -= dt        // Remove delta time used.

			// Perform the update.
			eng.update(app, dt, ut)   // Update state, physics, etc.
			frame = eng.frames[ut%3]  // Cycle between three render frames.
			frame = eng.render(frame) // ... update the render frame.
			eng.frames[ut%3] = frame  // ... remember the updated frame.

			// Reset and start counting times for the next update.
			eng.times.Zero()
			eng.times.Update += time.Since(updateStart)
		}

		// Interpolation is the fraction of unused delta time between 0 and 1.
	// i.e.: state = currentState*interpolation + previousState*(1.0 - interpolation)
		interpolation := updateTimer.Seconds() / dt.Seconds()

		// A render frame request is sent to the machine. Redraw everything, using
		// interpolation when there is no new frame. Ignore excess render time.
		renderTimer += timeUsed
		if renderTimer >= rt {
			eng.times.Renders += 1
			eng.machine <- &renderFrame{frame: frame, interp: interpolation, ut: ut}
			frame = nil                    // mark frame as rendered.
			renderTimer = renderTimer % rt // drop extra render time.
		}
		eng.communicate() // process go-routine messages.
	}
	// Exiting state update.
}