func (p *Process) Stop() {
    p.lock.Lock()
    p.stopped = true
    if proc := p.process; proc != nil {
        var timer *time.Timer
        p.shutdown()
        if p.stopTime > 0 {
            timer = time.AfterFunc(p.stopTime, func() {
                p.logger.Printf("Graceful shutdown timed out")
                p.lock.Lock()
                p.kill()
                p.lock.Unlock()
            })
        }
        p.lock.Unlock()
        p.waiter.Wait()
        p.lock.Lock()
        if timer != nil {
            timer.Stop()
        }
    }
    p.process = nil
    p.lock.Unlock()
}
// Reset a timer - doesn't work properly under go < 1.1.
//
// This is quite hard to do properly under go < 1.1 so we do a crude
// approximation and hope that everyone upgrades to go 1.1 quickly.
func resetTimer(t *time.Timer, d time.Duration) {
    t.Stop()
    // Very likely this doesn't actually work if we are already
    // selecting on t.C. However we've stopped the original timer
    // so won't break transfers but may not time them out :-(
    *t = *time.NewTimer(d)
}
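// For go >= 1.1 the standard library supports this directly. A minimal
// sketch (not from the original source) of the usual Stop/drain/Reset
// sequence; note it is only correct when no other goroutine is
// concurrently receiving from t.C:
func resetTimerSafely(t *time.Timer, d time.Duration) {
    if !t.Stop() {
        // The timer already fired: drain any pending value so the
        // channel is empty before re-arming.
        select {
        case <-t.C:
        default:
        }
    }
    t.Reset(d)
}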
// Dequeue is used to perform a blocking dequeue
func (b *EvalBroker) Dequeue(schedulers []string, timeout time.Duration) (*structs.Evaluation, string, error) {
    var timeoutTimer *time.Timer
    var timeoutCh <-chan time.Time
SCAN:
    // Scan for work
    eval, token, err := b.scanForSchedulers(schedulers)
    if err != nil {
        if timeoutTimer != nil {
            timeoutTimer.Stop()
        }
        return nil, "", err
    }

    // Check if we have something
    if eval != nil {
        if timeoutTimer != nil {
            timeoutTimer.Stop()
        }
        return eval, token, nil
    }

    // Setup the timeout channel the first time around
    if timeoutTimer == nil && timeout != 0 {
        timeoutTimer = time.NewTimer(timeout)
        timeoutCh = timeoutTimer.C
    }

    // Block until we get work
    scan := b.waitForSchedulers(schedulers, timeoutCh)
    if scan {
        goto SCAN
    }
    return nil, "", nil
}
// watchSerial monitors for a change in a specific serial number. It returns
// the new serial number when it changes. If the serial number has not
// changed in the given duration then the old value is returned. A poll
// can be done by supplying 0 for the expiration.
func (m *Manager) watchSerial(old int64, src *int64, expire time.Duration) int64 {
    expired := false
    cv := sync.NewCond(&m.mx)
    var timer *time.Timer
    var rv int64

    // Schedule timeout
    if expire > 0 {
        timer = time.AfterFunc(expire, func() {
            m.lock()
            expired = true
            cv.Broadcast()
            m.unlock()
        })
    } else {
        expired = true
    }

    m.lock()
    m.cvs[cv] = true
    for {
        rv = *src
        if rv != old || expired {
            break
        }
        cv.Wait()
    }
    delete(m.cvs, cv)
    m.unlock()

    if timer != nil {
        timer.Stop()
    }
    return rv
}
func (h *FSMHandler) established() bgp.FSMState {
    fsm := h.fsm
    h.conn = fsm.conn
    h.t.Go(h.sendMessageloop)
    h.msgCh = h.incoming
    h.t.Go(h.recvMessageloop)

    var holdTimer *time.Timer
    if fsm.negotiatedHoldTime == 0 {
        holdTimer = &time.Timer{}
    } else {
        holdTimer = time.NewTimer(time.Second * time.Duration(fsm.negotiatedHoldTime))
    }

    for {
        select {
        case <-h.t.Dying():
            return 0
        case conn, ok := <-fsm.connCh:
            if !ok {
                break
            }
            conn.Close()
            log.WithFields(log.Fields{
                "Topic": "Peer",
                "Key":   fsm.peerConfig.NeighborAddress,
            }).Warn("Closed an accepted connection")
        case <-h.errorCh:
            h.conn.Close()
            h.t.Kill(nil)
            h.reason = "Peer closed the session"
            return bgp.BGP_FSM_IDLE
        case <-holdTimer.C:
            log.WithFields(log.Fields{
                "Topic": "Peer",
                "Key":   fsm.peerConfig.NeighborAddress,
                "data":  bgp.BGP_FSM_ESTABLISHED,
            }).Warn("hold timer expired")
            m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil)
            h.outgoing <- m
            h.reason = "HoldTimer expired"
            return bgp.BGP_FSM_IDLE
        case <-h.holdTimerResetCh:
            if fsm.negotiatedHoldTime != 0 {
                holdTimer.Reset(time.Second * time.Duration(fsm.negotiatedHoldTime))
            }
        case s := <-fsm.adminStateCh:
            err := h.changeAdminState(s)
            if err == nil {
                switch s {
                case ADMIN_STATE_DOWN:
                    m := bgp.NewBGPNotificationMessage(
                        bgp.BGP_ERROR_CEASE,
                        bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, nil)
                    h.outgoing <- m
                }
            }
        }
    }
    return 0
}
// execute some action in the context of the current process. Actions
// executed via this func are to be executed in a concurrency-safe manner:
// no two actions should execute at the same time. Invocations of this func
// should not block for very long, unless the action backlog is full or the
// process is terminating.
// Returns errProcessTerminated if the process already ended.
func (self *procImpl) doLater(deferredAction Action) (err <-chan error) {
    a := Action(func() {
        self.wg.Add(1)
        defer self.wg.Done()
        deferredAction()
    })

    scheduled := false
    self.writeLock.Lock()
    defer self.writeLock.Unlock()

    var timer *time.Timer
    for err == nil && !scheduled {
        switch s := self.state.get(); s {
        case stateRunning:
            select {
            case self.backlog <- a:
                scheduled = true
            default:
                if timer == nil {
                    timer = time.AfterFunc(self.maxRescheduleWait, self.changed.Broadcast)
                } else {
                    timer.Reset(self.maxRescheduleWait)
                }
                self.changed.Wait()
                timer.Stop()
            }
        case stateTerminal:
            err = ErrorChan(errProcessTerminated)
        default:
            err = ErrorChan(errIllegalState)
        }
    }
    return
}
func process_action(ac chan action, sv *server) {
    var a action
    var timer *time.Timer
    ch := make(chan bool, 2)
    defer func() {
        if timer != nil {
            timer.Stop()
        }
        sv.lock.Lock()
        delete(sv.kt, a.key)
        close(ac)
        sv.lock.Unlock()
        close(ch)
    }()
    for {
        select {
        case a = <-ac:
            if timer != nil {
                timer.Stop()
            }
            timer = time.AfterFunc(a.exptime, func() {
                sv.s.db.Delete([]byte(a.key), sv.s.wo)
                ch <- true
            })
        case <-ch:
            relog.Info("delete succeeded")
            return
        }
    }
}
func internalPubAsync(clientTimer *time.Timer, msgBody *bytes.Buffer, topic *nsqd.Topic) error {
    if topic.Exiting() {
        return nsqd.ErrExiting
    }
    info := &nsqd.PubInfo{
        Done:     make(chan struct{}),
        MsgBody:  msgBody,
        StartPub: time.Now(),
    }
    if clientTimer == nil {
        clientTimer = time.NewTimer(time.Second * 5)
    } else {
        clientTimer.Reset(time.Second * 5)
    }
    select {
    case topic.GetWaitChan() <- info:
    default:
        select {
        case topic.GetWaitChan() <- info:
        case <-topic.QuitChan():
            nsqd.NsqLogger().Infof("topic %v put messages failed at exiting", topic.GetFullName())
            return nsqd.ErrExiting
        case <-clientTimer.C:
            nsqd.NsqLogger().Infof("topic %v put messages timeout", topic.GetFullName())
            return ErrPubToWaitTimeout
        }
    }
    <-info.Done
    return info.Err
}
// fillBatch coalesces individual log lines into batches. Delivery of the
// batch happens on timeout after at least one message is received
// or when the batch is full.
func (batcher *Batcher) fillBatch(batch *Batch) (chanOpen bool) {
    timeout := new(time.Timer) // gives us a nil channel and no timeout to start with
    chanOpen = true            // assume the channel is open
    for {
        select {
        case <-timeout.C:
            return !chanOpen

        case line, chanOpen := <-batcher.inLogs: // note: := shadows the named return chanOpen
            if !chanOpen {
                return !chanOpen
            }

            // We have a line now, so set a timeout
            if timeout.C == nil {
                defer func(t time.Time) {
                    batcher.stats <- NewNamedValue("batch.fill.time", time.Since(t).Seconds())
                }(time.Now())
                timeout = time.NewTimer(batcher.timeout)
                defer timeout.Stop() // ensure timer is stopped when done
            }

            batch.Write(line)
            if batch.Full() {
                return !chanOpen
            }
        }
    }
}
func (r repository) clone(schema string) error {
    cmd := exec.Command(
        "git",
        "clone",
        "--depth=1",
        "-b", r.params.version,
        r.cloneURL(schema),
    )
    cmd.Dir = r.dir

    var stdout bytes.Buffer
    var stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr

    err := cmd.Start()
    if err != nil {
        r.logger.WithFields(log.Fields{"command": cmd.Path, "args": cmd.Args, "stdout": stdout.String(), "stderr": stderr.String()}).Warnf("git clone error: %v", err)
        return err
    }

    // kill the process if the token is invalid (git is waiting for a password)
    timer := time.AfterFunc(30*time.Second, func() {
        cmd.Process.Kill()
    })
    defer timer.Stop() // stop the timer on the error path as well as on success

    err = cmd.Wait()
    if err != nil {
        r.logger.WithFields(log.Fields{"command": cmd.Path, "args": cmd.Args, "stdout": stdout.String(), "stderr": stderr.String()}).Warnf("git clone error: %v", err)
        return err
    }

    r.logger.WithFields(log.Fields{"command": cmd.Path, "args": cmd.Args, "stdout": stdout.String(), "stderr": stderr.String()}).Info("git clone succeeded")
    return nil
}
func main() {
    broker := NewBroker()
    port, err := strconv.Atoi(os.Args[1])
    if err != nil {
        fmt.Println(err.Error())
        os.Exit(1)
    }

    if len(os.Args) > 3 { // both the registry URL and our own URL are required
        registryURL := os.Args[2]
        myURL := os.Args[3]
        err := announceBroker(registryURL, myURL)
        if err != nil {
            fmt.Println(err.Error())
            os.Exit(1)
        }

        var timer *time.Timer
        timer = time.AfterFunc(RegistryAnnouncePeriod, func() {
            err := announceBroker(registryURL, myURL)
            if err != nil {
                fmt.Println("Error reannouncing to broker!")
            }
            timer.Reset(RegistryAnnouncePeriod)
        })
    }

    http.Handle("/", broker)
    http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}
func (r repository) listRemote() error {
    cmd := exec.Command(
        "git",
        "ls-remote",
        r.cloneURL("https"),
        "HEAD",
    )
    cmd.Dir = r.dir

    err := cmd.Start()
    if err != nil {
        return err
    }

    // kill the process if the token is invalid (git is waiting for a password)
    timer := time.AfterFunc(5*time.Second, func() {
        cmd.Process.Kill()
    })
    defer timer.Stop() // stop the timer on the error path as well as on success

    return cmd.Wait()
}
// Read is used to read from the stream
func (s *Stream) Read(b []byte) (n int, err error) {
    var bufsiz int
    defer asyncNotify(s.recvNotifyCh)
START:
    s.stateLock.Lock()
    switch s.state {
    case streamLocalClose:
        fallthrough
    case streamRemoteClose:
        fallthrough
    case streamClosed:
        s.recvLock.Lock()
        if s.recvBuf == nil || s.recvBuf.Len() == 0 {
            s.recvLock.Unlock()
            s.stateLock.Unlock()
            return 0, io.EOF
        }
        s.recvLock.Unlock()
    case streamReset:
        s.stateLock.Unlock()
        return 0, ErrConnectionReset
    }
    s.stateLock.Unlock()

    // If there is no data available, block
    s.recvLock.Lock()
    if s.recvBuf == nil || s.recvBuf.Len() == 0 {
        s.recvLock.Unlock()
        goto WAIT
    }

    // Read any bytes
    n, _ = s.recvBuf.Read(b)
    bufsiz = s.recvBuf.Len()
    s.recvLock.Unlock()

    // Send a window update potentially
    if uint32(bufsiz)+atomic.LoadUint32(&s.recvWindow) < s.session.config.MaxStreamWindowSize {
        err = s.sendWindowUpdate()
    }
    return n, err

WAIT:
    var timeout <-chan time.Time
    var timer *time.Timer
    if !s.readDeadline.IsZero() {
        delay := s.readDeadline.Sub(time.Now())
        timer = time.NewTimer(delay)
        timeout = timer.C
    }
    select {
    case <-s.recvNotifyCh:
        if timer != nil {
            timer.Stop()
        }
        goto START
    case <-timeout:
        return 0, ErrTimeout
    }
}
func process(cmd *Command) {
    var timer *time.Timer
    var err error

    log.Print(cmd.Command)
    sp := exec.Command("sh", "-c", cmd.Command)
    sp.Stdout = os.Stdout
    sp.Stderr = os.Stderr
    if err = sp.Start(); err != nil {
        log.Printf("%s failed: %s", err.Error(), cmd.Command)
        return
    }

    if cmd.Timeout > 0 {
        timer = time.AfterFunc(time.Duration(cmd.Timeout)*time.Second, func() {
            // Note: timer is written here and read below without
            // synchronization, and ProcessState is polled while Wait may
            // be running; both accesses are racy.
            timer = nil
            if sp.ProcessState == nil {
                sp.Process.Kill()
            }
        })
    }

    err = sp.Wait()
    if timer != nil {
        timer.Stop()
    }
    if err != nil {
        log.Printf("%s failed: %s", err.Error(), cmd.Command)
    }
}
func (s *Scheme) runReader(reader factory.Reader, timer *time.Timer) {
    if s.warmupMessagesPerRun > 0 {
        logs.Logger.Info("Reading %d warmup messages", s.warmupMessagesPerRun)
    }

    var startTime, lastMessageTime time.Time
    buffer := make([]byte, s.bytesPerMessage*2)

    for i := 0; i < s.warmupMessagesPerRun; i++ {
        reader.Read(buffer)
    }

    logs.Logger.Info("Starting reading %d messages...", s.messagesPerRun)
    for {
        count, err := reader.Read(buffer)
        if err == io.EOF {
            break
        }
        lastMessageTime = time.Now()
        if startTime.IsZero() {
            startTime = lastMessageTime
        }
        timer.Reset(s.waitForLastMessage)
        s.countMessage(count, err)
    }

    logs.Logger.Info("Finished.")
    s.runTime = lastMessageTime.Sub(startTime)
}
func (log *Log) Watch(last int64, expire time.Duration) int64 {
    expired := false
    var timer *time.Timer
    cv := sync.NewCond(&log.mx)

    if expire > 0 {
        timer = time.AfterFunc(expire, func() {
            log.lock()
            expired = true
            cv.Broadcast()
            log.unlock()
        })
    } else {
        expired = true
    }

    log.lock()
    log.cvs[cv] = true
    for {
        if log.id != last || expired {
            break
        }
        cv.Wait()
    }
    delete(log.cvs, cv)
    if log.id != last {
        last = log.id
    }
    log.unlock()

    if timer != nil {
        timer.Stop()
    }
    return last
}
// fillBatch coalesces individual log lines into batches. Delivery of the
// batch happens on timeout after at least one message is received
// or when the batch is full.
// Returns the channel status and the completed batch.
func (batcher Batcher) fillBatch() (bool, Batch) {
    batch := NewBatch(batcher.batchSize) // make a batch
    timeout := new(time.Timer)           // gives us a nil channel and no timeout to start with
    chanOpen := true                     // assume the channel is open
    count := 0
    for {
        select {
        case <-timeout.C:
            return !chanOpen, batch

        case line, chanOpen := <-batcher.inLogs: // note: := shadows the outer chanOpen
            if !chanOpen {
                return !chanOpen, batch
            }

            // We have a line now, so set a timeout
            if timeout.C == nil {
                defer func(t time.Time) {
                    batcher.stats <- NewNamedValue("batch.fill.time", time.Since(t).Seconds())
                }(time.Now())
                timeout = time.NewTimer(batcher.timeout)
                defer timeout.Stop() // ensure timer is stopped when done
            }

            batch.Add(line)
            count++
            if count >= batcher.batchSize {
                return !chanOpen, batch
            }
        }
    }
}
func releaseTimer(t *time.Timer, wasRead bool) {
    stopped := t.Stop()
    if !wasRead && !stopped {
        // The timer fired but nobody read t.C: drain the value so the
        // timer can be safely reset after it comes back out of the pool.
        <-t.C
    }
    timerPool.Put(t)
}
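// A sketch of the acquire side this helper presumably pairs with. The
// sync.Pool definition and the name acquireTimer are assumptions for
// illustration; only releaseTimer and timerPool appear in the snippet above.
var timerPool sync.Pool

func acquireTimer(timeout time.Duration) *time.Timer {
    v := timerPool.Get()
    if v == nil {
        return time.NewTimer(timeout)
    }
    t := v.(*time.Timer)
    if t.Reset(timeout) {
        // A correctly released timer is always stopped and drained,
        // so Reset must report it as inactive.
        panic("BUG: active timer returned by the pool")
    }
    return t
}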
func appMain(driver gxui.Driver) {
    theme := dark.CreateTheme(driver)

    label := theme.CreateLabel()
    label.SetText("This is a progress bar:")

    progressBar := theme.CreateProgressBar()
    progressBar.SetDesiredSize(math.Size{W: 400, H: 20})
    progressBar.SetTarget(100)

    layout := theme.CreateLinearLayout()
    layout.AddChild(label)
    layout.AddChild(progressBar)
    layout.SetHorizontalAlignment(gxui.AlignCenter)

    window := theme.CreateWindow(800, 600, "Progress bar")
    window.SetScale(flags.DefaultScaleFactor)
    window.AddChild(layout)
    window.OnClose(driver.Terminate)

    progress := 0
    pause := time.Millisecond * 500
    var timer *time.Timer
    timer = time.AfterFunc(pause, func() {
        driver.Call(func() {
            progress = (progress + 3) % progressBar.Target()
            progressBar.SetProgress(progress)
            timer.Reset(pause)
        })
    })
}
// blockingRPC is used for queries that need to wait for a
// minimum index. This is used to block and wait for changes.
func (s *Server) blockingRPC(opts *blockingOptions) error {
    var timeout *time.Timer
    var notifyCh chan struct{}
    var state *state.StateStore

    // Fast path non-blocking
    if opts.queryOpts.MinQueryIndex == 0 {
        goto RUN_QUERY
    }

    // Restrict the max query time, and ensure there is always one
    if opts.queryOpts.MaxQueryTime > maxQueryTime {
        opts.queryOpts.MaxQueryTime = maxQueryTime
    } else if opts.queryOpts.MaxQueryTime <= 0 {
        opts.queryOpts.MaxQueryTime = defaultQueryTime
    }

    // Apply a small amount of jitter to the request
    opts.queryOpts.MaxQueryTime += randomStagger(opts.queryOpts.MaxQueryTime / jitterFraction)

    // Setup a query timeout
    timeout = time.NewTimer(opts.queryOpts.MaxQueryTime)

    // Setup the notify channel
    notifyCh = make(chan struct{}, 1)

    // Ensure we tear down any watchers on return
    state = s.fsm.State()
    defer func() {
        timeout.Stop()
        if opts.allocWatch != "" {
            state.StopWatchAllocs(opts.allocWatch, notifyCh)
        }
    }()

REGISTER_NOTIFY:
    // Register the notification channel. This may be done
    // multiple times if we have not reached the target wait index.
    if opts.allocWatch != "" {
        state.WatchAllocs(opts.allocWatch, notifyCh)
    }

RUN_QUERY:
    // Update the query meta data
    s.setQueryMeta(opts.queryMeta)

    // Run the query function
    metrics.IncrCounter([]string{"nomad", "rpc", "query"}, 1)
    err := opts.run()

    // Check for minimum query time
    if err == nil && opts.queryMeta.Index > 0 && opts.queryMeta.Index <= opts.queryOpts.MinQueryIndex {
        select {
        case <-notifyCh:
            goto REGISTER_NOTIFY
        case <-timeout.C:
        }
    }
    return err
}
// timerLoop loops indefinitely to query the given API, until "wait" times
// out, using the "tick" timer to delay the API queries. It returns the
// final result.
func timerLoop(api APIClient, requestedId string, wait, tick *time.Timer) (params.ActionResult, error) {
    var (
        result params.ActionResult
        err    error
    )

    // Loop over results until we get "failed" or "completed". Wait for
    // timer, and reset it each time.
    for {
        result, err = fetchResult(api, requestedId)
        if err != nil {
            return result, err
        }

        // Whether or not we're waiting for a result, if a completed
        // result arrives, we're done.
        switch result.Status {
        case params.ActionRunning, params.ActionPending:
        default:
            return result, nil
        }

        // Block until a tick happens, or the timeout arrives.
        select {
        case <-wait.C:
            return result, nil
        case <-tick.C:
            tick.Reset(2 * time.Second)
        }
    }
}
// GetDuplicates returns all the duplicate evaluations and blocks until the
// passed timeout.
func (b *BlockedEvals) GetDuplicates(timeout time.Duration) []*structs.Evaluation {
    var timeoutTimer *time.Timer
    var timeoutCh <-chan time.Time
SCAN:
    b.l.Lock()
    if len(b.duplicates) != 0 {
        dups := b.duplicates
        b.duplicates = nil
        b.l.Unlock()
        return dups
    }
    b.l.Unlock()

    // Create the timer
    if timeoutTimer == nil && timeout != 0 {
        timeoutTimer = time.NewTimer(timeout)
        timeoutCh = timeoutTimer.C
        defer timeoutTimer.Stop()
    }

    select {
    case <-b.stopCh:
        return nil
    case <-timeoutCh:
        return nil
    case <-b.duplicateCh:
        goto SCAN
    }
}
// fillBatch coalesces individual log lines into batches. Delivery of the
// batch happens on timeout after at least one message is received
// or when the batch is full.
// Returns the channel status and the completed batch.
func (b Batcher) fillBatch() (bool, Batch) {
    batch := NewBatch(b.batchSize)
    timeout := new(time.Timer) // start with a nil channel and no timeout

    for {
        select {
        case <-timeout.C:
            return false, batch

        case line, chanOpen := <-b.inLogs:
            // If the channel is closed, then line will be a zero value Line,
            // so just return the batch and signal shutdown.
            if !chanOpen {
                return true, batch
            }

            // Set a timeout if we don't have one
            if timeout.C == nil {
                defer func(t time.Time) {
                    b.fillTime.UpdateSince(t)
                }(time.Now())
                timeout = time.NewTimer(b.timeout)
                defer timeout.Stop() // ensure timer is stopped when done
            }

            if full := batch.Add(line); full {
                return false, batch
            }
        }
    }
}
// timedDecoder returns a decorated decoder that generates the given error if no events
// are decoded for some number of sequential timeouts. The returned Decoder is not safe
// to share across goroutines.
// TODO(jdef) this probably isn't the right place for all of this logic (and it's not
// just monitoring the heartbeat messages, it's counting all of them..). Heartbeat monitoring
// has specific requirements. Get rid of this and implement something better elsewhere.
func timedDecoder(dec records.Decoder, dur time.Duration, timeouts int, err error) records.Decoder {
    var t *time.Timer
    return records.DecoderFunc(func(v interface{}) error {
        if t == nil {
            t = time.NewTimer(dur)
        } else {
            t.Reset(dur)
        }
        defer t.Stop()

        errCh := make(chan error, 1)
        go func() {
            // there's no way to abort this so someone else will have
            // to make sure that it dies (and it should if the response
            // body is closed)
            errCh <- dec.Decode(v)
        }()

        for x := 0; x < timeouts; x++ {
            select {
            case <-t.C:
                // check for a tie
                select {
                case e := <-errCh:
                    return e
                default:
                    // noop, continue
                }
                // re-arm the timer for the next interval; a fired timer
                // will never deliver on t.C again without a Reset
                t.Reset(dur)
            case e := <-errCh:
                return e
            }
        }
        return err
    })
}
func (t *Transport) tries(req *http.Request, try uint) (*http.Response, error) {
    startTime := time.Now()
    var timer *time.Timer
    rCanceler := make(chan struct{})
    req.Cancel = rCanceler
    if t.RequestTimeout != 0 {
        timer = time.AfterFunc(t.RequestTimeout, func() {
            //t.CancelRequest(req)
            t.startOnce.Do(t.start)
            if bc, ok := req.Body.(*bodyCloser); ok {
                bc.timer.Stop()
            }
            close(rCanceler)
            t.transport.CancelRequest(req)
        })
    }

    res, err := t.transport.RoundTrip(req)
    headerTime := time.Now()
    if err != nil {
        if timer != nil {
            timer.Stop()
        }

        var stats *Stats
        if t.Stats != nil {
            stats = &Stats{
                Request:  req,
                Response: res,
                Error:    err,
            }
            stats.Duration.Header = headerTime.Sub(startTime)
            stats.Retry.Count = try
        }

        if try < t.MaxTries && req.Method == "GET" && t.shouldRetryError(err) {
            if t.Stats != nil {
                stats.Retry.Pending = true
                t.Stats(stats)
            }
            return t.tries(req, try+1)
        }

        if t.Stats != nil {
            t.Stats(stats)
        }
        return nil, err
    }

    res.Body = &bodyCloser{
        ReadCloser: res.Body,
        timer:      timer,
        res:        res,
        transport:  t,
        startTime:  startTime,
        headerTime: headerTime,
    }
    return res, nil
}
func (s *Ssdp) advertiseTimer(ads *AdvertisableServer, d time.Duration, age int) *time.Timer {
    var timer *time.Timer
    timer = time.AfterFunc(d, func() {
        s.advertiseServer(ads, true)
        timer.Reset(d + time.Duration(age)*time.Second)
    })
    return timer
}
func setTimer(timer *time.Timer, d time.Duration) *time.Timer {
    if timer == nil {
        return time.NewTimer(d)
    }
    timer.Reset(d)
    return timer
}
func (m *Monitor) Run() {
    // catch interrupt signal
    userInterrupt := make(chan os.Signal, 1)
    signal.Notify(userInterrupt, os.Interrupt)

    stats := &Stats{}
    stats.responseTimeData = make([]time.Duration, 0, m.c.config.requests)

    // Leave timelimit as the zero Timer (nil C, blocks forever in the
    // select) unless a time limit was configured; Stop would panic on an
    // uninitialized Timer, so only defer it when the timer was created.
    var timelimit time.Timer
    if m.c.config.timelimit > 0 {
        timelimit = *time.NewTimer(time.Duration(m.c.config.timelimit) * time.Second)
        defer timelimit.Stop()
    }

    // wait for all of the http workers to start
    m.c.start.Wait()
    fmt.Printf("Benchmarking %s (be patient)\n", m.c.config.host)

    sw := &StopWatch{}
    sw.Start()

loop:
    for {
        select {
        case record := <-m.collector:
            updateStats(stats, record)

            if record.Error != nil && !ContinueOnError {
                break loop
            }

            if stats.totalRequests >= 10 && stats.totalRequests%(m.c.config.requests/10) == 0 {
                fmt.Printf("Completed %d requests\n", stats.totalRequests)
            }

            if stats.totalRequests == m.c.config.requests {
                fmt.Printf("Finished %d requests\n", stats.totalRequests)
                break loop
            }
        case <-timelimit.C:
            break loop
        case <-userInterrupt:
            break loop
        }
    }

    sw.Stop()
    stats.totalExecutionTime = sw.Elapsed

    // shut down the benchmark and all of the http workers
    close(m.c.stop)
    signal.Stop(userInterrupt)
    m.output <- stats
}
func initTimer(t *time.Timer, timeout time.Duration) *time.Timer {
    if t == nil {
        return time.NewTimer(timeout)
    }
    if t.Reset(timeout) {
        // Reset reported the timer as still active: the caller passed in
        // a timer that was neither stopped nor drained first.
        panic("BUG: active timer trapped into initTimer()")
    }
    return t
}
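// A minimal usage sketch for the helper above (the channel, duration, and
// function name are assumptions, not from the source): one timer is lazily
// created on the first call and reused across waits, with the mandatory
// stop-and-drain before it is handed back to initTimer again.
func waitWithTimeout(ch <-chan struct{}, t *time.Timer, d time.Duration) (*time.Timer, bool) {
    t = initTimer(t, d)
    select {
    case <-ch:
        if !t.Stop() {
            <-t.C // the timer fired concurrently: drain it before reuse
        }
        return t, true
    case <-t.C:
        // The timer fired and its value was consumed, so it is already
        // inactive and drained for the next initTimer call.
        return t, false
    }
}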
func main() {
    start := time.Now()
    var t *time.Timer
    t = time.AfterFunc(randomDuration(), func() {
        fmt.Println(time.Now().Sub(start))
        // Note: this read of t races with the assignment in main if the
        // first duration is short enough for the callback to fire before
        // time.AfterFunc returns.
        t.Reset(randomDuration())
    })
    time.Sleep(5 * time.Second)
}
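// A sketch of one race-free variant of the function above: the callback
// only signals over a channel, and the single main goroutine both creates
// and resets the timer, so t is never accessed concurrently.
// randomDuration is assumed from the snippet above.
func main() {
    start := time.Now()
    reset := make(chan bool)
    t := time.AfterFunc(randomDuration(), func() {
        fmt.Println(time.Now().Sub(start))
        reset <- true
    })
    for time.Since(start) < 5*time.Second {
        <-reset
        t.Reset(randomDuration())
    }
}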