// setDefaults sets default values for any Worker fields that are
// uninitialized.
func (w *Worker) setDefaults() {
	if w.WorkerID == "" {
		// May as well use a UUID here, "it's what we've always done"
		w.WorkerID = uuid.NewV4().String()
	}
	if w.Concurrency == 0 {
		w.Concurrency = runtime.NumCPU()
	}
	if w.PollInterval == time.Duration(0) {
		w.PollInterval = time.Duration(1) * time.Second
	}
	if w.HeartbeatInterval == time.Duration(0) {
		w.HeartbeatInterval = time.Duration(15) * time.Second
	}
	if w.MaxAttempts == 0 {
		w.MaxAttempts = 100
	}
	if w.Clock == nil {
		w.Clock = clock.New()
	}
}
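// A minimal test sketch for setDefaults, assuming only the Worker fields used
// above. Because Clock is an injectable field, a test that needs to control
// time could assign clock.NewMock() (from the same clock package) before
// calling setDefaults instead of letting it default to the real clock.
func TestWorker_SetDefaults(t *testing.T) {
	w := &Worker{}
	w.setDefaults()

	if w.WorkerID == "" {
		t.Error("expected a generated WorkerID")
	}
	if w.Concurrency != runtime.NumCPU() {
		t.Errorf("Concurrency = %d, want %d", w.Concurrency, runtime.NumCPU())
	}
	if w.PollInterval != time.Second {
		t.Errorf("PollInterval = %s, want 1s", w.PollInterval)
	}
	if w.Clock == nil {
		t.Error("expected a non-nil Clock")
	}
}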
// Ensure that the clock's time matches the standard library.
func TestClock_Now(t *testing.T) {
	a := time.Now().Round(time.Second)
	b := clock.New().Now().Round(time.Second)
	if !a.Equal(b) {
		t.Errorf("not equal: %s != %s", a, b)
	}
}
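// A mock-clock counterpart to the test above, as a sketch. clock.NewMock()
// comes from the same github.com/benbjohnson/clock package: the mock starts
// at the Unix epoch and only moves when Add is called, which avoids the
// rounding needed when comparing against the real clock.
func TestMockClock_Now(t *testing.T) {
	mock := clock.NewMock()
	mock.Add(100 * time.Second) // advance the mock deterministically
	if got := mock.Now(); !got.Equal(time.Unix(100, 0)) {
		t.Errorf("unexpected time: %s", got)
	}
}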
func newRateLimiter(perSec int, maxBurst time.Duration) *rateLimiter {
	maxPerBatch := int64(perSec / int(time.Second/maxBurst))
	return &rateLimiter{
		limitPerSec: perSec,
		resolution:  maxBurst,
		time:        clock.New(),
		maxPerBatch: maxPerBatch,
	}
}
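// A worked example of the maxPerBatch arithmetic above, as a sketch: with
// perSec = 100 and a maxBurst resolution of 100ms, there are
// time.Second/maxBurst = 10 batches per second, so each batch is capped at
// 100/10 = 10 events. Note the integer division truncates when maxBurst does
// not evenly divide one second.
func exampleMaxPerBatch() int64 {
	perSec := 100
	maxBurst := 100 * time.Millisecond
	return int64(perSec / int(time.Second/maxBurst)) // 10
}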
func NewLWWSetWithBias(bias BiasType) (*LWWSet, error) {
	if bias != BiasAdd && bias != BiasRemove {
		return nil, ErrNoSuchBias
	}
	return &LWWSet{
		addMap: make(map[interface{}]time.Time),
		rmMap:  make(map[interface{}]time.Time),
		bias:   bias,
		clock:  clock.New(),
	}, nil
}
func NewEngine() *Engine {
	e := &Engine{
		ticker:        NewTicker(time.Now(), time.Second*0, clock.New()),
		execQueue:     make(chan *Job, 1000),
		scheduler:     NewScheduler(),
		evalHandler:   NewEvalHandler(),
		ruleReader:    NewRuleReader(),
		log:           log.New("alerting.engine"),
		resultHandler: NewResultHandler(),
	}
	return e
}
// NewZoneDb creates a new zone database.
func NewZoneDb(config ZoneConfig) (zone *ZoneDb, err error) {
	zone = &ZoneDb{
		domain:           config.Domain,
		idents:           make(identRecordSet),
		mdnsCli:          config.MDNSClient,
		mdnsSrv:          config.MDNSServer,
		iface:            config.Iface,
		clock:            config.Clock,
		relevantLimit:    time.Duration(DefaultRelevantTime) * time.Second,
		refreshCloseChan: make(chan bool),
	}

	// fix the default configuration parameters
	if zone.clock == nil {
		zone.clock = clock.New()
	}
	if len(zone.domain) == 0 {
		zone.domain = DefaultLocalDomain
	}
	if config.RefreshInterval > 0 {
		zone.refreshInterval = time.Duration(config.RefreshInterval) * time.Second
	}
	if config.RelevantTime > 0 {
		zone.relevantLimit = time.Duration(config.RelevantTime) * time.Second
	}
	if zone.refreshInterval > 0 {
		zone.refreshScheds = NewSchedQueue(zone.clock)
	}

	// Create the mDNS client and server
	if zone.mdnsCli == nil {
		if zone.mdnsCli, err = NewMDNSClient(); err != nil {
			return
		}
	}
	if zone.mdnsSrv == nil {
		mdnsTTL := DefaultLocalTTL
		if config.LocalTTL > 0 {
			mdnsTTL = config.LocalTTL
		}
		if zone.mdnsSrv, err = NewMDNSServer(zone, false, mdnsTTL); err != nil {
			return
		}
	}

	return
}
// Spawn initializes the limiter.
func (l *Limiter) Spawn(id int) utils.Composer {
	l.keepSending = make(chan struct{}, l.Config.Burst)

	if l.clk == nil {
		l.clk = clock.New()
	}

	// Refill one send token per second, up to the configured Burst capacity.
	go func() {
		for {
			<-l.clk.Timer(1 * time.Second).C
			l.keepSending <- struct{}{}
		}
	}()

	return l
}
// Ensure that the clock's timer can be stopped.
func TestClock_Timer_Stop(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()

	timer := clock.New().Timer(20 * time.Millisecond)
	timer.Stop()
	select {
	case <-timer.C:
		t.Fatal("unexpected send")
	case <-time.After(30 * time.Millisecond):
	}
}
// Ensure that the clock sleeps for the appropriate amount of time.
func TestClock_Sleep(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(30 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	clock.New().Sleep(20 * time.Millisecond)
	if !ok {
		t.Fatal("too early")
	}
}
// NewCache creates a cache of the given capacity.
func NewCache(capacity int, clk clock.Clock) (*Cache, error) {
	if capacity <= 0 {
		return nil, errInvalidCapacity
	}
	c := &Cache{
		capacity: capacity,
		entries:  make(entries, capacity),
		clock:    clk,
	}
	if c.clock == nil {
		c.clock = clock.New()
	}
	heap.Init(&c.entriesH)
	return c, nil
}
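// A construction sketch for NewCache, assuming only the constructor above and
// the github.com/benbjohnson/clock package. Passing nil selects the real
// clock; a test can pass clock.NewMock() instead and advance it manually to
// exercise time-based behavior.
func exampleNewCache() (*Cache, error) {
	mock := clock.NewMock()
	c, err := NewCache(128, mock)
	if err != nil {
		return nil, err
	}
	mock.Add(5 * time.Minute) // the cache now sees a time 5 minutes later
	return c, nil
}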
// Ensure that the clock's ticker ticks correctly.
func TestClock_Ticker(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(100 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(200 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	ticker := clock.New().Ticker(50 * time.Millisecond)
	<-ticker.C
	<-ticker.C
	if !ok {
		t.Fatal("too early")
	}
}
func defaultOptions() *Options {
	opts := &Options{
		SuspicionTimeout:    5000 * time.Millisecond,
		MinProtocolPeriod:   200 * time.Millisecond,
		JoinTimeout:         1000 * time.Millisecond,
		PingTimeout:         1500 * time.Millisecond,
		PingRequestTimeout:  5000 * time.Millisecond,
		PingRequestSize:     3,
		RollupFlushInterval: 5000 * time.Millisecond,
		RollupMaxUpdates:    250,
		Clock:               clock.New(),
	}
	return opts
}
// Ensure that the clock ticks correctly.
func TestClock_Tick(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(50 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	c := clock.New().Tick(20 * time.Millisecond)
	<-c
	<-c
	if !ok {
		t.Fatal("too early")
	}
}
// Run starts a statsdaemon instance with standard network daemon behaviors.
func (s *StatsDaemon) Run(listen_addr, admin_addr, graphite_addr string) {
	s.listen_addr = listen_addr
	s.admin_addr = admin_addr
	s.graphite_addr = graphite_addr
	s.submitFunc = s.GraphiteQueue
	s.Clock = clock.New()
	s.graphiteQueue = make(chan []byte, 1000)

	log.Printf("statsdaemon instance '%s' starting\n", s.instance)

	output := &common.Output{
		Metrics:       s.Metrics,
		MetricAmounts: s.metricAmounts,
		Valid_lines:   s.valid_lines,
		Invalid_lines: s.Invalid_lines,
	}
	go udp.StatsListener(s.listen_addr, s.prefix, output) // set up udp listener that writes messages to output's channels (i.e. s's channels)
	go s.adminListener()                                  // tcp admin_addr to handle requests
	go s.metricStatsMonitor()                             // handles requests fired by telnet api
	go s.graphiteWriter()                                 // writes to graphite in the background
	s.metricsMonitor()                                    // takes data from s.Metrics and puts it in the gauge/timers/etc. objects. pointers guarded by select. also listens for signals.
}
// Ensure that the clock's AfterFunc executes at the correct time.
func TestClock_AfterFunc(t *testing.T) {
	var ok bool
	go func() {
		time.Sleep(10 * time.Millisecond)
		ok = true
	}()
	go func() {
		time.Sleep(30 * time.Millisecond)
		t.Fatal("too late")
	}()
	gosched()

	var wg sync.WaitGroup
	wg.Add(1)
	clock.New().AfterFunc(20*time.Millisecond, func() {
		wg.Done()
	})
	wg.Wait()
	if !ok {
		t.Fatal("too early")
	}
}
// ServeCBORRPC runs a CBOR-RPC server on the specified local address.
// This serves connections forever, and probably wants to be run in a
// goroutine. Panics on any error in the initial setup or in accepting
// connections.
func ServeCBORRPC(
	coord coordinate.Coordinate,
	gConfig map[string]interface{},
	network, laddr string,
	reqLogger *logrus.Logger,
) {
	var (
		cbor      *codec.CborHandle
		err       error
		namespace coordinate.Namespace
		ln        net.Listener
		conn      net.Conn
		jobd      *jobserver.JobServer
	)

	cbor = new(codec.CborHandle)
	if err == nil {
		err = cborrpc.SetExts(cbor)
	}
	if err == nil {
		namespace, err = coord.Namespace("")
	}
	if err == nil {
		jobd = &jobserver.JobServer{
			Namespace:    namespace,
			GlobalConfig: gConfig,
			Clock:        clock.New(),
		}
		ln, err = net.Listen(network, laddr)
	}
	for err == nil {
		conn, err = ln.Accept()
		if err == nil {
			go handleConnection(conn, jobd, cbor, reqLogger)
		}
	}
	panic(err)
}
// Dispatcher dispatches, every second, all jobs that should run for that second.
// Every job has an id so that you can run multiple dispatchers (for HA) while
// still only processing each job once (provided jobs get consistently routed
// to executors).
func Dispatcher(jobQueue JobQueue) {
	go dispatchJobs(jobQueue)

	offset := time.Duration(LoadOrSetOffset()) * time.Second
	// no need to try resuming where we left off in the past.
	// see https://github.com/raintank/grafana/issues/266
	lastProcessed := time.Now().Truncate(time.Second).Add(-offset)
	cl := clock.New()
	ticker := NewTicker(lastProcessed, offset, cl)

	go func() {
		offsetReadTicker := cl.Ticker(time.Duration(1) * time.Second)
		for range offsetReadTicker.C {
			offset := time.Duration(LoadOrSetOffset()) * time.Second
			ticker.updateOffset(offset)
		}
	}()

	for {
		select {
		case tick := <-ticker.C:
			tickQueueItems.Value(int64(len(tickQueue)))
			tickQueueSize.Value(int64(setting.TickQueueSize))
			// let's say jobs with freq 60 and offset 7 trigger at 7, 67, 127, ...
			// and offset was 30 seconds, so we query for data with last point at 37, 97, 157, ...
			// so we should find the checks where ts-30 % frequency == offset
			// and then ts-30 was a ts of the last point we should query for
			select {
			case tickQueue <- tick:
			default:
				dispatcherTicksSkippedDueToSlowTickQueue.Inc(1)
			}
			tickQueueItems.Value(int64(len(tickQueue)))
			tickQueueSize.Value(int64(setting.TickQueueSize))
		}
	}
}
func newCounter() *rateCounter {
	return &rateCounter{
		time: clock.New(),
	}
}
// Profile will wrap a writer and reader pair and profile where
// time is spent: writing or reading. The result is returned when
// the `done` func is called. The `done` func can be called multiple
// times.
//
// There is a small performance overhead of ~1µs per Read/Write call.
// This is negligible in most I/O workloads. If the overhead is too
// much for your needs, use the `ProfileSample` call.
func Profile(w io.Writer, r io.Reader) (pw io.Writer, pr io.Reader, done func() TimeProfile) {
	return profile(clock.New(), w, r)
}
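// A usage sketch for Profile, relying only on the signature above. All reads
// and writes must go through the wrapped pw/pr pair for the profile to see
// them; done() can be called repeatedly to take intermediate readings.
func exampleProfile(w io.Writer, r io.Reader) (TimeProfile, error) {
	pw, pr, done := Profile(w, r)
	_, err := io.Copy(pw, pr) // I/O through the wrapped pair is measured
	return done(), err
}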
// New creates a new Coordinate interface that operates purely in
// memory.
func New() coordinate.Coordinate {
	clk := clock.New()
	return NewWithClock(clk)
}
// defaultClock sets the ringpop clock interface to use the system clock.
func defaultClock(r *Ringpop) error {
	return Clock(clock.New())(r)
}
// ProfileSample will wrap a writer and reader pair and collect
// samples of where time is spent: writing or reading. The result
// is an approximation that is returned when the `done` func is
// called. The `done` func can be called *only once*.
//
// This call is not as precise as the `Profile` call, but the
// performance overhead is much reduced.
func ProfileSample(w io.Writer, r io.Reader, res time.Duration) (pw io.Writer, pr io.Reader, done func() SamplingProfile) {
	return profileSample(clock.New(), w, r, res)
}
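// The sampling variant follows the same pattern, with the sampling resolution
// as the extra argument; per the comment above, its done() may be called only
// once. The 1ms resolution here is an arbitrary illustration.
func exampleProfileSample(w io.Writer, r io.Reader) (SamplingProfile, error) {
	pw, pr, done := ProfileSample(w, r, time.Millisecond)
	_, err := io.Copy(pw, pr)
	return done(), err // must not be called a second time
}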
// New creates a new coordinate.Coordinate connection object using
// the provided PostgreSQL connection string. The connection string
// may be an expanded PostgreSQL string, a "postgres:" URL, or a URL
// without a scheme. These are all equivalent:
//
//	"host=localhost user=postgres password=postgres dbname=postgres"
//	"postgres://*****:*****@localhost/postgres"
//	"//postgres:postgres@localhost/postgres"
//
// See http://godoc.org/github.com/lib/pq for more details. If
// parameters are missing from this string (or if you pass an empty
// string) they can be filled in from environment variables as well;
// see
// http://www.postgresql.org/docs/current/static/libpq-envars.html.
//
// The returned Coordinate object carries around a connection pool
// with it. It can (and should) be shared across the application.
// This New() function should be called sparingly, ideally exactly once.
func New(connectionString string) (coordinate.Coordinate, error) {
	clk := clock.New()
	return NewWithClock(connectionString, clk)
}
// NewDNSServer creates a new DNS server.
func NewDNSServer(config DNSServerConfig) (s *DNSServer, err error) {
	s = &DNSServer{
		Zone:          config.Zone,
		Domain:        DefaultLocalDomain,
		ListenAddr:    fmt.Sprintf(":%d", config.Port),
		listenersWg:   new(sync.WaitGroup),
		timeout:       DefaultTimeout * time.Millisecond,
		readTimeout:   DefaultTimeout * time.Millisecond,
		cacheDisabled: false,
		maxAnswers:    DefaultMaxAnswers,
		localTTL:      DefaultLocalTTL,
		clock:         config.Clock,
	}

	// check some basic parameters are valid
	if s.Zone == nil {
		return nil, fmt.Errorf("No valid Zone provided in server initialization")
	}
	if len(s.Domain) == 0 {
		return nil, fmt.Errorf("No valid Domain provided in server initialization")
	}
	if s.clock == nil {
		s.clock = clock.New()
	}

	// fill empty parameters with defaults...
	if config.UpstreamCfg != nil {
		s.Upstream = config.UpstreamCfg
	} else {
		cfgFile := DefaultCLICfgFile
		if len(config.UpstreamCfgFile) > 0 {
			cfgFile = config.UpstreamCfgFile
		}
		if s.Upstream, err = dns.ClientConfigFromFile(cfgFile); err != nil {
			return nil, err
		}
	}
	if config.Timeout > 0 {
		s.timeout = time.Duration(config.Timeout) * time.Millisecond
	}
	if config.ListenReadTimeout > 0 {
		s.readTimeout = time.Duration(config.ListenReadTimeout) * time.Millisecond
	}
	if config.UDPBufLen > 0 {
		s.udpBuf = config.UDPBufLen
	}
	if config.MaxAnswers > 0 {
		s.maxAnswers = config.MaxAnswers
	}
	if config.LocalTTL > 0 {
		s.localTTL = config.LocalTTL
	}
	if config.CacheNegLocalTTL > 0 {
		s.negLocalTTL = config.CacheNegLocalTTL
	} else {
		s.negLocalTTL = s.localTTL
	}
	if config.CacheDisabled {
		s.cacheDisabled = true
	}
	if !s.cacheDisabled {
		if config.Cache != nil {
			s.cache = config.Cache
		} else {
			cacheLen := DefaultCacheLen
			if config.CacheLen > 0 {
				cacheLen = config.CacheLen
			}
			if s.cache, err = NewCache(cacheLen, s.clock); err != nil {
				return
			}
		}
	}

	return
}
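// A construction sketch for NewDNSServer, using only config fields referenced
// above; the zone argument is assumed to satisfy the Zone field's type and to
// come from NewZoneDb earlier in this section. A nil Clock falls back to
// clock.New(); Timeout is in milliseconds, per the conversion above. The port
// and cache size here are arbitrary illustrations.
func exampleNewDNSServer(zone *ZoneDb) (*DNSServer, error) {
	return NewDNSServer(DNSServerConfig{
		Zone:     zone,
		Port:     5353,
		Timeout:  2000,
		CacheLen: 256,
	})
}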
// Spawn starts a goroutine that can receive:
//   - New messages that will be added to an existing or new batch of messages
//   - A batch of messages that is ready to send (i.e., the batch timeout has expired)
func (batcher *Batcher) Spawn(id int) utils.Composer {
	var wg sync.WaitGroup

	b := *batcher
	b.id = id
	b.batches = cmap.New()
	b.readyBatches = make(chan *Batch)
	b.finished = make(chan struct{})
	b.incoming = make(chan struct {
		m    *utils.Message
		done utils.Done
	})

	if b.clk == nil {
		b.clk = clock.New()
	}

	wg.Add(1)
	go func() {
		wg.Done()
		for {
			select {
			case message := <-b.incoming:
				if !message.m.Opts.Has("batch_group") {
					// Messages without a batch group are passed through immediately.
					message.done(message.m, 0, "")
				} else {
					tmp, _ := message.m.Opts.Get("batch_group")
					group := tmp.(string)

					if tmp, exists := b.batches.Get(group); exists {
						batch := tmp.(*Batch)
						batch.Add(message.m)

						if batch.MessageCount >= b.Config.Limit && !batch.Sent {
							batch.Send(func() {
								b.batches.Remove(group)
								batch.Done(batch.Message, 0, "limit")
								batch.Sent = true
							})
						}
					} else {
						b.batches.Set(group, NewBatch(message.m, group, b.Config.Deflate,
							message.done, b.clk, b.Config.TimeoutMillis, b.readyBatches))
					}
				}

				b.finished <- struct{}{}

			case batch := <-b.readyBatches:
				if !batch.Sent {
					batch.Send(func() {
						b.batches.Remove(batch.Group)
						batch.Done(batch.Message, 0, "timeout")
						batch.Sent = true
					})
				}
			}
		}
	}()
	wg.Wait()

	return &b
}