Example 1
package main

import (
	"fmt"
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

func main() {

	// state will be a map
	var state = make(map[int]int)

	// mutex will synchronize access to state
	var mutex = &sync.Mutex{}

	// ops will count how many operations we perform against the state
	var ops int64 = 0

	// start 100 goroutines to execute repeated reads against the state
	for r := 0; r < 100; r++ {
		go func() {
			total := 0
			// for each read pick a key to access
			// Lock() the mutex to ensure exclusive access to state
			// read value at chosen key
			// Unlock() mutex
			// increment ops count
			for {
				key := rand.Intn(5)
				mutex.Lock()
				total += state[key]
				mutex.Unlock()
				atomic.AddInt64(&ops, 1)
				// explicitly yield after each operation
				// so the goroutine doesn't starve the scheduler
				runtime.Gosched()
			}
		}()
	}

	// start 10 goroutines to simulate writes
	for w := 0; w < 10; w++ {
		go func() {
			for {
				key := rand.Intn(5)
				val := rand.Intn(100)
				mutex.Lock()
				state[key] = val
				mutex.Unlock()
				atomic.AddInt64(&ops, 1)
				runtime.Gosched()
			}
		}()
	}

	// allow the goroutines to work on state and mutex for 1 second
	time.Sleep(time.Second)

	// take and report final ops count
	opsFinal := atomic.LoadInt64(&ops)
	fmt.Println("ops:", opsFinal)

	// final lock state, show ending point
	mutex.Lock()
	fmt.Println("state:", state)
	mutex.Unlock()
}
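
Because the state above is read-mostly (100 readers to 10 writers), a sync.RWMutex is a natural variation: readers share the lock while writers still get exclusive access. A minimal sketch of that variation (not part of the original example):

package main

import (
	"fmt"
	"sync"
)

func main() {
	state := make(map[int]int)
	var rw sync.RWMutex
	var wg sync.WaitGroup

	wg.Add(2)
	go func() { // writer takes the exclusive lock
		defer wg.Done()
		rw.Lock()
		state[1] = 42
		rw.Unlock()
	}()
	go func() { // reader takes the shared lock
		defer wg.Done()
		rw.RLock()
		_ = state[1]
		rw.RUnlock()
	}()
	wg.Wait()
	fmt.Println("state:", state)
}
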
Example 2
func (s *Service) serve() {
	// From https://collectd.org/wiki/index.php/Binary_protocol
	//   1024 bytes (payload only, not including UDP / IP headers)
	//   In versions 4.0 through 4.7, the receive buffer has a fixed size
	//   of 1024 bytes. When longer packets are received, the trailing data
	//   is simply ignored. Since version 4.8, the buffer size can be
	//   configured. Version 5.0 will increase the default buffer size to
	//   1452 bytes (the maximum payload size when using UDP/IPv6 over
	//   Ethernet).
	buffer := make([]byte, 1452)

	for {
		select {
		case <-s.done:
			// We closed the connection, time to go.
			return
		default:
			// Keep processing.
		}

		n, _, err := s.conn.ReadFromUDP(buffer)
		if err != nil {
			atomic.AddInt64(&s.stats.ReadFail, 1)
			s.Logger.Printf("collectd ReadFromUDP error: %s", err)
			continue
		}
		if n > 0 {
			atomic.AddInt64(&s.stats.BytesReceived, int64(n))
			s.handleMessage(buffer[:n])
		}
	}
}
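
The select with an empty default is Go's non-blocking poll: the loop notices a closed done channel between reads without ever blocking on it. A standalone sketch of the same shutdown idiom (the sleep stands in for the blocking UDP read):

package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}()
	for {
		select {
		case <-done:
			fmt.Println("stopped")
			return
		default:
			// keep processing
		}
		time.Sleep(10 * time.Millisecond) // stands in for the blocking read
	}
}
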
Example 3
func (o *S3SplitFileOutput) writeMessage(fi *SplitFileInfo, msgBytes []byte) (rotate bool, err error) {
	rotate = false
	atomic.AddInt64(&o.processMessageCount, 1)

	file, e := o.openCurrent(fi)
	if e != nil {
		atomic.AddInt64(&o.processMessageFailures, 1)
		return rotate, fmt.Errorf("Error getting open file %s: %s", fi.name, e)
	}

	n, e := file.Write(msgBytes)

	atomic.AddInt64(&o.processMessageBytes, int64(n))

	// Note that if these files are being written to elsewhere, the size-based
	// rotation will not work as expected. A more robust approach would be to
	// use something like `file.Seek(0, os.SEEK_CUR)` to get the current
	// offset into the file.
	fi.size += uint32(n)

	if e != nil {
		atomic.AddInt64(&o.processMessageFailures, 1)
		return rotate, fmt.Errorf("Can't write to %s: %s", fi.name, e)
	} else if n != len(msgBytes) {
		return rotate, fmt.Errorf("Truncated output for %s", fi.name)
	} else {
		if fi.size >= o.MaxFileSize {
			rotate = true
		}
	}
	return
}
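
The comment above suggests asking the file itself for its offset instead of summing write counts by hand. A minimal sketch of that approach, using io.SeekCurrent (the modern spelling of os.SEEK_CUR):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "rotate")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// the file's current offset is its authoritative size after a write
	offset, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		panic(err)
	}
	fmt.Println("current offset:", offset) // 5
}
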
Example 4
func (v *VBucket) setVBMeta(newMeta *VBMeta) (err error) {
	// This should only be called when holding the bucketstore
	// service/apply "lock", to ensure a Flush between changes stream
	// update and COLL_VBMETA update is atomic.
	var j []byte
	j, err = json.Marshal(newMeta)
	if err != nil {
		return err
	}
	k := []byte(fmt.Sprintf("%d", v.vbid))
	i := &item{
		key:  nil, // A nil key means it's a VBMeta change.
		cas:  newMeta.MetaCas,
		data: j,
	}

	deltaItemBytes, err := v.ps.set(i, nil)
	if err != nil {
		return err
	}
	if err = v.bs.collMeta(COLL_VBMETA).Set(k, j); err != nil {
		return err
	}
	atomic.StorePointer(&v.meta, unsafe.Pointer(newMeta))

	atomic.AddInt64(&v.stats.ItemBytes, deltaItemBytes)
	atomic.AddInt64(v.bucketItemBytes, deltaItemBytes)

	return nil
}
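
atomic.StorePointer publishes the new metadata, but it is only half the pattern: readers must pair it with atomic.LoadPointer to observe the swap safely. A self-contained sketch of both sides, with a hypothetical holder type standing in for VBucket:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type VBMeta struct{ MetaCas uint64 }

// holder is a hypothetical stand-in for the VBucket's meta field.
type holder struct{ meta unsafe.Pointer } // effectively *VBMeta

func (h *holder) set(m *VBMeta) { atomic.StorePointer(&h.meta, unsafe.Pointer(m)) }
func (h *holder) get() *VBMeta  { return (*VBMeta)(atomic.LoadPointer(&h.meta)) }

func main() {
	var h holder
	h.set(&VBMeta{MetaCas: 7})
	fmt.Println(h.get().MetaCas)
}
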
Example 5
// writeToShard writes points to a shard.
func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) error {
	atomic.AddInt64(&w.stats.PointWriteReqLocal, int64(len(points)))

	err := w.TSDBStore.WriteToShard(shard.ID, points)
	if err == nil {
		atomic.AddInt64(&w.stats.WriteOK, 1)
		return nil
	}

	// If we've written to a shard that should exist on the current node, but the store has
	// not actually created this shard, tell it to create it and retry the write.
	if err == tsdb.ErrShardNotFound {
		err = w.TSDBStore.CreateShard(database, retentionPolicy, shard.ID, true)
		if err != nil {
			w.Logger.Printf("write failed for shard %d: %v", shard.ID, err)

			atomic.AddInt64(&w.stats.WriteErr, 1)
			return err
		}
	}
	err = w.TSDBStore.WriteToShard(shard.ID, points)
	if err != nil {
		w.Logger.Printf("write failed for shard %d: %v", shard.ID, err)
		atomic.AddInt64(&w.stats.WriteErr, 1)
		return err
	}

	atomic.AddInt64(&w.stats.WriteOK, 1)
	return nil
}
Example 6
func (s *Storage) Fetch(ref blob.Ref) (file io.ReadCloser, size uint32, err error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.lru != nil {
		s.lru.Get(ref.String()) // force to head
	}
	if s.m == nil {
		err = os.ErrNotExist
		return
	}
	b, ok := s.m[ref]
	if !ok {
		err = os.ErrNotExist
		return
	}
	size = uint32(len(b))
	atomic.AddInt64(&s.blobsFetched, 1)
	atomic.AddInt64(&s.bytesFetched, int64(len(b)))

	return struct {
		*io.SectionReader
		io.Closer
	}{
		io.NewSectionReader(bytes.NewReader(b), 0, int64(size)),
		types.NopCloser,
	}, size, nil
}
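
The anonymous struct combines a *io.SectionReader with a no-op io.Closer to satisfy io.ReadCloser. The standard library's io.NopCloser plays the same role as the types.NopCloser used here; a minimal sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	b := []byte("payload")
	// wrap a section of the in-memory blob with a no-op Close
	var rc io.ReadCloser = io.NopCloser(
		io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))))
	out, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	rc.Close()
	fmt.Printf("%s\n", out)
}
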
Example 7
// Processes the given |entry| in the specified log.
func (s *Scanner) processEntry(entry ct.LogEntry, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry)) {
	atomic.AddInt64(&s.certsProcessed, 1)
	switch entry.Leaf.TimestampedEntry.EntryType {
	case ct.X509LogEntryType:
		if s.opts.PrecertOnly {
			// Only interested in precerts and this is an X.509 cert, early-out.
			return
		}
		cert, err := x509.ParseCertificate(entry.Leaf.TimestampedEntry.X509Entry)
		if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil {
			// We hit an unparseable entry, already logged inside handleParseEntryError()
			return
		}
		if s.opts.Matcher.CertificateMatches(cert) {
			entry.X509Cert = cert
			foundCert(&entry)
		}
	case ct.PrecertLogEntryType:
		c, err := x509.ParseTBSCertificate(entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate)
		if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil {
			// We hit an unparseable entry, already logged inside handleParseEntryError()
			return
		}
		precert := &ct.Precertificate{
			Raw:            entry.Chain[0],
			TBSCertificate: *c,
			IssuerKeyHash:  entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash}
		if s.opts.Matcher.PrecertificateMatches(precert) {
			entry.Precert = precert
			foundPrecert(&entry)
		}
		atomic.AddInt64(&s.precertsSeen, 1)
	}
}
Example 8
func (p *Pumper) bgRead(ctx context.Context) {
	defer p.brEnding()

	for q := false; !q; {
		id, m := p.readMsg()
		p.pushRMsg(id, m)
		atomic.AddInt64(&p.stat.InN, 1)

		// If the receive queue has grown past imax, pause until it
		// drains or the context is cancelled. Note the inner q shadows
		// the outer loop variable; cancellation is re-checked below.
		if p.imax > 0 && p.rQ.Len() > p.imax {
			for q := false; !q && p.rQ.Len() > p.imax; {
				atomic.AddInt64(&p.stat.PauseN, 1)
				select {
				case <-ctx.Done():
					q = true
				case <-time.After(readPauseTime):
				}
			}
		}

		// Non-blocking check for cancellation between reads.
		select {
		case <-ctx.Done():
			q = true
		default:
		}
	}
}
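
The inner loop is a bounded-queue pause: reading stops in readPauseTime slices until the queue drains below imax or the context is cancelled. A standalone sketch of the same backpressure idiom (the names here are mine, not the package's):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForRoom blocks in short slices until queueLen drops to max or
// the context is cancelled.
func waitForRoom(ctx context.Context, queueLen func() int, max int, pause time.Duration) {
	for queueLen() > max {
		select {
		case <-ctx.Done():
			return
		case <-time.After(pause):
		}
	}
}

func main() {
	n := 5
	// the closure simulates a queue draining by one item per check
	waitForRoom(context.Background(), func() int { n--; return n }, 2, time.Millisecond)
	fmt.Println("resumed with queue length", n)
}
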
Example 9
// Invoked when the caller believes the item has expired.  We double
// check here in case some concurrent race has mutated the item.
func (v *VBucket) expire(key []byte, now time.Time) (err error) {
	var deltaItemBytes int64
	var expireCas uint64

	v.Apply(func() {
		var i *item
		i, err = v.ps.get(key)
		if err != nil || i == nil {
			return
		}
		if i.isExpired(now) {
			expireCas = atomic.AddUint64(&v.Meta().LastCas, 1)
			deltaItemBytes, err = v.ps.del(key, expireCas, i)
		}
	})

	atomic.AddInt64(&v.stats.ItemBytes, deltaItemBytes)
	atomic.AddInt64(v.bucketItemBytes, deltaItemBytes)

	if err == nil && expireCas != 0 {
		v.markStale()
		v.observer.Submit(mutation{v.vbid, key, expireCas, true})
	}

	return err
}
Example 10
func (l *leaseUpdater) waitForProgress(item ovfFileItem) {
	var pos, total int64

	total = item.item.Size

	for {
		select {
		case <-l.done:
			return
		case p, ok := <-item.ch:
			// Return in case of error
			if ok && p.Error() != nil {
				return
			}

			if !ok {
				// Last element on the channel, add to total
				atomic.AddInt64(&l.pos, total-pos)
				return
			}

			// Approximate progress in number of bytes
			x := int64(float32(total) * (p.Percentage() / 100.0))
			atomic.AddInt64(&l.pos, x-pos)
			pos = x
		}
	}
}
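
Adding the delta x-pos, rather than storing x, lets several concurrent uploads fold their progress into one shared counter without clobbering each other. A small sketch showing that the deltas sum to the final total:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var aggregate int64 // shared across uploads in the real code
	var pos int64       // this upload's last reported position
	for _, x := range []int64{10, 35, 80, 100} { // absolute progress
		atomic.AddInt64(&aggregate, x-pos)
		pos = x
	}
	fmt.Println(atomic.LoadInt64(&aggregate)) // 100
}
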
Example 11
func (c *Client) Get(ctx context.Context, url string) (r *http.Response, err error) {
	ctx = trace.Enter(ctx, "HTTP")

	if maximum := c.MaximumInFlight; maximum != 0 {
		defer atomic.AddInt64(&c.inflight, -1)

		// too many in-flight?
		if n := atomic.AddInt64(&c.inflight, 1); n >= maximum {
			trace.Leave(ctx, "Errors.TooManyInFlight")
			return
		}
	}

	r, err = defaults.Client(c.Client).Get(url)
	if err != nil {
		atomic.AddInt64(&c.count, -1)
		trace.Error(ctx, "Errors.Fail", err)
		return
	}

	if r.StatusCode != http.StatusOK && r.StatusCode != http.StatusNoContent {
		atomic.AddInt64(&c.count, -1)
		trace.Error(ctx, "Errors.Status", fmt.Errorf("%s", r.Status))
		return
	}

	trace.Leave(ctx, "Check")
	return
}
Example 12
func SendMessages(s servernode) {
	for {
		select {
		case msg, valid := <-s.Outbox():
			if !valid {
				return
			} else {
				targetID := msg.Pid
				if targetID == BROADCAST {
					for i := 0; i < len(Peers); i++ {
						msg.Pid = Peers[i]
						// claim the next message id atomically (safe even with concurrent senders)
						msg.MsgId = atomic.AddInt64(&s.MessageId, 1) - 1
						_, err := s.Peersockets[peerIdIndex[msg.Pid]].SendMessage(msg)
						check_err(err)
						//fmt.Println(n, "and", len(msg.Msg))
						//fmt.Println("SENDING: (Src,Dst) --> (",s.ServerId,",",msg.Pid,") (Message:",*msg,")")
					}
				} else {
					targetID = peerIdIndex[msg.Pid]
					// claim the next message id atomically (safe even with concurrent senders)
					msg.MsgId = atomic.AddInt64(&s.MessageId, 1) - 1
					_, err := s.Peersockets[targetID].SendMessage(msg)
					//fmt.Println(n, "and", msg)
					check_err(err)
					//fmt.Println("SENDING: (Src,Dst) --> (",s.ServerId,",",msg.Pid,") (Message:",*msg,")")
				}
			}
		}
	}
}
Example 13
func TestDoWithPanic(t *testing.T) {
	var counter int64 = 0
	cm := New(func() {})
	tests := []Test{
		// No panic
		func(sem *Semaphore) {
			defer atomic.AddInt64(&counter, 1)
			sem.Ready()
		},
		// Panic after sem.Ready()
		func(sem *Semaphore) {
			defer atomic.AddInt64(&counter, 1)
			sem.Ready()
			panic("Panic after calling sem.Ready()")
		},
		// Panic before sem.Ready()
		func(sem *Semaphore) {
			defer atomic.AddInt64(&counter, 1)
			panic("Panic before calling sem.Ready()")
		},
	}
	for _, test := range tests {
		cm.Register(test)
	}
	cm.Do()
	// Check that all funcs in tests were called.
	if int(counter) != len(tests) {
		t.Errorf("Expected counter to be %v, but it was %v", len(tests), counter)
	}
}
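
The test relies on deferred calls running while a panic unwinds, which is why the counter reaches 3 even for the panicking cases (the surrounding machinery presumably recovers). A minimal sketch of that property:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var counter int64
	func() {
		defer atomic.AddInt64(&counter, 1) // still runs during the unwind
		defer func() { recover() }()       // stands in for the framework's recovery
		panic("boom")
	}()
	fmt.Println(counter) // 1
}
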
Example 14
// WriteMulti writes the map of keys and associated values to the cache. This function is goroutine-safe.
// It returns an error if the cache has exceeded its max size.
func (c *Cache) WriteMulti(values map[string][]Value) error {
	totalSz := 0
	for _, v := range values {
		totalSz += Values(v).Size()
	}

	// Enough room in the cache?
	c.mu.RLock()
	newSize := c.size + uint64(totalSz)
	if c.maxSize > 0 && newSize+c.snapshotSize > c.maxSize {
		c.mu.RUnlock()
		atomic.AddInt64(&c.stats.WriteErr, 1)
		return ErrCacheMemoryExceeded
	}
	c.mu.RUnlock()

	for k, v := range values {
		c.entry(k).add(v)
	}
	c.mu.Lock()
	c.size += uint64(totalSz)
	c.mu.Unlock()

	// Update the memory size stat
	c.updateMemSize(int64(totalSz))
	atomic.AddInt64(&c.stats.WriteOK, 1)

	return nil
}
Example 15
// WritePoints will write the raw data points and any new metadata to the index in the shard
func (s *Shard) WritePoints(points []models.Point) error {
	if err := s.ready(); err != nil {
		return err
	}

	s.mu.RLock()
	defer s.mu.RUnlock()

	atomic.AddInt64(&s.stats.WriteReq, 1)

	fieldsToCreate, err := s.validateSeriesAndFields(points)
	if err != nil {
		return err
	}
	atomic.AddInt64(&s.stats.FieldsCreated, int64(len(fieldsToCreate)))

	// add any new fields and keep track of what needs to be saved
	if err := s.createFieldsAndMeasurements(fieldsToCreate); err != nil {
		return err
	}

	// Write to the engine.
	if err := s.engine.WritePoints(points); err != nil {
		atomic.AddInt64(&s.stats.WritePointsFail, 1)
		return fmt.Errorf("engine: %s", err)
	}
	atomic.AddInt64(&s.stats.WritePointsOK, int64(len(points)))

	return nil
}
Example 16
// TODO: handle "no such file"
func (input *S3SplitFileInput) readS3File(runner pipeline.InputRunner, d *pipeline.Deliverer, sr *pipeline.SplitterRunner, s3Key string) (err error) {
	runner.LogMessage(fmt.Sprintf("Preparing to read: %s", s3Key))
	if input.bucket == nil {
		runner.LogMessage(fmt.Sprintf("Dude, where's my bucket: %s", s3Key))
		return
	}

	var lastGoodOffset uint64
	var attempt uint32

RetryS3:
	for attempt = 1; attempt <= input.S3Retries; attempt++ {
		for r := range S3FileIterator(input.bucket, s3Key, lastGoodOffset) {
			record := r.Record
			err := r.Err

			if err != nil && err != io.EOF {
				runner.LogError(fmt.Errorf("Error in attempt %d reading %s at offset %d: %s", attempt, s3Key, lastGoodOffset, err))
				atomic.AddInt64(&input.processMessageFailures, 1)
				continue RetryS3
			}
			if len(record) > 0 {
				lastGoodOffset += uint64(r.BytesRead)
				atomic.AddInt64(&input.processMessageCount, 1)
				atomic.AddInt64(&input.processMessageBytes, int64(len(record)))
				(*sr).DeliverRecord(record, *d)
			}
		}
		break
	}

	return
}
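
continue RetryS3 targets the labeled outer loop, so a mid-stream read error restarts the whole fetch from the last good offset rather than just the inner range. A minimal sketch of labeled continue:

package main

import "fmt"

func main() {
Retry:
	for attempt := 1; attempt <= 3; attempt++ {
		for i := 0; i < 5; i++ {
			if attempt < 3 && i == 2 {
				fmt.Println("error on attempt", attempt)
				continue Retry // restart the outer retry loop
			}
		}
		fmt.Println("succeeded on attempt", attempt)
		break
	}
}
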
Example 17
File: work.go Project: vonwenm/work
// Run waits for the goroutine pool to take the work
// to be executed.
func (p *Pool) Run(work Worker) {
	atomic.AddInt64(&p.pending, 1)
	{
		p.tasks <- work
	}
	atomic.AddInt64(&p.pending, -1)
}
Example 18
func (s *session) proxy(c1, c2 net.Conn) error {
	if debug {
		log.Println("Proxy", c1.RemoteAddr(), "->", c2.RemoteAddr())
	}

	atomic.AddInt64(&numProxies, 1)
	defer atomic.AddInt64(&numProxies, -1)

	buf := make([]byte, 65536)
	for {
		c1.SetReadDeadline(time.Now().Add(networkTimeout))
		n, err := c1.Read(buf)
		if err != nil {
			return err
		}

		atomic.AddInt64(&bytesProxied, int64(n))

		if debug {
			log.Printf("%d bytes from %s to %s", n, c1.RemoteAddr(), c2.RemoteAddr())
		}

		if s.rateLimit != nil {
			s.rateLimit(int64(n))
		}

		c2.SetWriteDeadline(time.Now().Add(networkTimeout))
		_, err = c2.Write(buf[:n])
		if err != nil {
			return err
		}
	}
}
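
The manual read/write loop exists so each read gets a fresh deadline and the rate limiter sees every chunk. When neither is needed, the byte accounting can live in a small counting writer driven by io.Copy; a sketch (countingWriter is my name, not this project's):

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync/atomic"
)

// countingWriter forwards writes and tallies the bytes atomically.
type countingWriter struct {
	w io.Writer
	n int64
}

func (c *countingWriter) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	atomic.AddInt64(&c.n, int64(n))
	return n, err
}

func main() {
	var dst bytes.Buffer
	cw := &countingWriter{w: &dst}
	if _, err := io.Copy(cw, bytes.NewReader([]byte("hello proxy"))); err != nil {
		panic(err)
	}
	fmt.Println(atomic.LoadInt64(&cw.n), "bytes proxied")
}
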
Example 19
func TestLockFile(t *testing.T) {
	d, err := ioutil.TempDir("", "cookiejar_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(d)
	filename := filepath.Join(d, "lockfile")
	concurrentCount := int64(0)
	var wg sync.WaitGroup
	locker := func() {
		defer wg.Done()
		closer, err := lockFile(filename)
		if err != nil {
			t.Errorf("cannot obtain lock: %v", err)
			return
		}
		x := atomic.AddInt64(&concurrentCount, 1)
		if x > 1 {
			t.Errorf("multiple locks held at one time")
		}
		defer closer.Close()
		time.Sleep(10 * time.Millisecond)
		atomic.AddInt64(&concurrentCount, -1)
	}
	wg.Add(4)
	for i := 0; i < 4; i++ {
		go locker()
	}
	wg.Wait()
	if concurrentCount != 0 {
		t.Errorf("expected no running goroutines left")
	}
}
Example 20
// Message sending function for buffered plugins using the old-style API.
func (foRunner foRunner) SendRecord(pack *PipelinePack) error {
	select {
	case foRunner.inChan <- pack:
		// Wait until pack is delivered.
		select {
		case err := <-pack.DelivErrChan:
			if err == nil {
				atomic.AddInt64(&foRunner.processMessageCount, 1)
				pack.recycle()
			} else {
				if _, ok := err.(RetryMessageError); !ok {
					foRunner.LogError(fmt.Errorf("can't send record: %s", err))
					atomic.AddInt64(&foRunner.dropMessageCount, 1)
					pack.recycle()
					err = nil // Swallow the error so there's no retry.
				}
			}
			return err
		case <-foRunner.stopChan:
			pack.recycle()
			return ErrStopping
		}
	case <-foRunner.stopChan:
		pack.recycle()
		return ErrStopping
	}
}
Example 21
func (self *SourceServer) innerSend(events []*flume.ThriftFlumeEvent) {

	for i := 0; i < 3; i++ {
		pool := self.getFlumeClientPool()
		flumeclient, err := pool.Get(5 * time.Second)
		if nil != err || nil == flumeclient {
			log.Printf("LOG_SOURCE|GET FLUMECLIENT|FAIL|%s|%s|TRY:%d\n", self.business, err, i)
			continue
		}

		err = flumeclient.AppendBatch(events)
		defer func() {
			if err := recover(); nil != err {
				// reclaim this broken connection
				pool.ReleaseBroken(flumeclient)
			} else {
				pool.Release(flumeclient)
			}
		}()

		if nil != err {
			atomic.AddInt64(&self.monitorCount.currFailValue, int64(1*self.batchSize))
			log.Printf("LOG_SOURCE|SEND FLUME|FAIL|%s|%s|TRY:%d\n", self.business, err.Error(), i)

		} else {
			atomic.AddInt64(&self.monitorCount.currSuccValue, int64(1*self.batchSize))
			if rand.Int()%10000 == 0 {
				log.Printf("trace|send 2 flume succ|%s|%d\n", flumeclient.HostPort(), len(events))
			}
			break
		}

	}
}
Example 22
// Count counts the objects and their sizes in the Fs
func Count(f Fs) (objects int64, size int64, err error) {
	err = ListFn(f, func(o Object) {
		atomic.AddInt64(&objects, 1)
		atomic.AddInt64(&size, o.Size())
	})
	return
}
Example 23
func vbGet(v *VBucket, w io.Writer, req *gomemcached.MCRequest) (res *gomemcached.MCResponse) {
	atomic.AddInt64(&v.stats.Gets, 1)

	i, err := v.getUnexpired(req.Key, time.Now())
	if err != nil {
		return &gomemcached.MCResponse{
			Status: gomemcached.TMPFAIL,
			Body:   []byte(fmt.Sprintf("Store get error %v", err)),
		}
	}
	if i == nil {
		atomic.AddInt64(&v.stats.GetMisses, 1)
		if req.Opcode.IsQuiet() {
			return nil
		}
		return &gomemcached.MCResponse{Status: gomemcached.KEY_ENOENT}
	}

	res = &gomemcached.MCResponse{
		Cas:    i.cas,
		Extras: make([]byte, 4),
		Body:   i.data,
	}
	binary.BigEndian.PutUint32(res.Extras, i.flag)
	wantsKey := (req.Opcode == gomemcached.GETK || req.Opcode == gomemcached.GETKQ)
	if wantsKey {
		res.Key = req.Key
	}

	atomic.AddInt64(&v.stats.OutgoingValueBytes, int64(len(i.data)))

	return res
}
Example 24
// ServeHTTP responds to HTTP request to the handler.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	atomic.AddInt64(&h.stats.Requests, 1)
	atomic.AddInt64(&h.stats.ActiveRequests, 1)
	defer atomic.AddInt64(&h.stats.ActiveRequests, -1)
	start := time.Now()

	// Add version header to all InfluxDB requests.
	w.Header().Add("X-Influxdb-Version", h.Version)

	// FIXME(benbjohnson): Add pprof enabled flag.
	if strings.HasPrefix(r.URL.Path, "/debug/pprof") {
		switch r.URL.Path {
		case "/debug/pprof/cmdline":
			pprof.Cmdline(w, r)
		case "/debug/pprof/profile":
			pprof.Profile(w, r)
		case "/debug/pprof/symbol":
			pprof.Symbol(w, r)
		default:
			pprof.Index(w, r)
		}
	} else if strings.HasPrefix(r.URL.Path, "/debug/vars") {
		h.serveExpvar(w, r)
	} else {
		h.mux.ServeHTTP(w, r)
	}

	atomic.AddInt64(&h.stats.RequestDuration, time.Since(start).Nanoseconds())
}
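
The paired AddInt64(+1) and deferred AddInt64(-1) implement a gauge: ActiveRequests tracks what is in flight right now, while Requests only ever grows. A minimal standalone sketch of the gauge pattern:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var active int64

func handle() {
	atomic.AddInt64(&active, 1)        // entering: gauge up
	defer atomic.AddInt64(&active, -1) // leaving: gauge down
	time.Sleep(10 * time.Millisecond)
	fmt.Println("in flight:", atomic.LoadInt64(&active))
}

func main() {
	handle()
	fmt.Println("after:", atomic.LoadInt64(&active)) // back to 0
}
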
Example 25
// compactCache continually checks if the WAL cache should be written to disk
func (e *Engine) compactCache(quit <-chan struct{}) {
	t := time.NewTimer(time.Second)
	defer t.Stop()
	for {
		select {
		case <-quit:
			return

		case <-t.C:
			e.Cache.UpdateAge()
			if e.ShouldCompactCache(e.WAL.LastWriteTime()) {
				start := time.Now()
				e.traceLogger.Info(fmt.Sprintf("Compacting cache for %s", e.path))
				err := e.WriteSnapshot()
				if err != nil && err != errCompactionsDisabled {
					e.logger.Info(fmt.Sprintf("error writing snapshot: %v", err))
					atomic.AddInt64(&e.stats.CacheCompactionErrors, 1)
				} else {
					atomic.AddInt64(&e.stats.CacheCompactions, 1)
				}
				atomic.AddInt64(&e.stats.CacheCompactionDuration, time.Since(start).Nanoseconds())
			}
		}
		t.Reset(time.Second)
	}
}
Example 26
func (t *Throttler) FilterRequest(req *falcore.Request) *http.Response {
	req.CurrentStage.Status = 0

	if t.Condition != nil && !t.Condition(req) {
		return nil
	}

	t.tickerM.RLock()
	tt := t.ticker
	t.tickerM.RUnlock()

	if tt != nil {
		req.CurrentStage.Status = 1
		atomic.AddInt64(&t.count, 1)
	TICK:
		for {
			select {
			case <-tt.C:
				break TICK
			case <-t.tickerClose:
				// Get new ticker
				t.tickerM.RLock()
				tt = t.ticker
				t.tickerM.RUnlock()

				// If throttling has been disabled, stop waiting and let the request through.
				if t.ticker == nil {
					break TICK
				}
			}
		}
		atomic.AddInt64(&t.count, -1)
	}
	return nil
}
Example 27
// handleTCPConnection services an individual TCP connection for the Graphite input.
func (s *Service) handleTCPConnection(conn net.Conn) {
	defer s.wg.Done()
	defer conn.Close()
	defer atomic.AddInt64(&s.stats.ActiveConnections, -1)
	defer s.untrackConnection(conn)
	atomic.AddInt64(&s.stats.ActiveConnections, 1)
	atomic.AddInt64(&s.stats.HandledConnections, 1)
	s.trackConnection(conn)

	reader := bufio.NewReader(conn)

	for {
		// Read up to the next newline.
		buf, err := reader.ReadBytes('\n')
		if err != nil {
			return
		}

		// Trim the buffer, even though there should be no padding
		line := strings.TrimSpace(string(buf))

		atomic.AddInt64(&s.stats.PointsReceived, 1)
		atomic.AddInt64(&s.stats.BytesReceived, int64(len(buf)))
		s.handleLine(line)
	}
}
Example 28
func (th *teeHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	t0 := time.Now()
	if *mimeType != "" {
		w.Header().Set("Content-Type", *mimeType)
	} else if *mp3only {
		w.Header().Set("Content-Type", "audio/mpeg")
	}
	cw := &countingWriter{Writer: w}
	sc := &signalCloser{Writer: cw, Closed: make(chan struct{})}
	log.Printf("%d +%+q", atomic.AddInt64(&th.clients, 1), req.RemoteAddr)
	th.Add(sc)

	errs := make(chan error)
	go func() {
		if w, ok := w.(http.CloseNotifier); ok {
			<-w.CloseNotify()
		} else {
			<-sc.Closed
		}
		errs <- th.RemoveAndClose(sc)
	}()
	err := <-errs
	errStr := ""
	if err != nil {
		errStr = err.Error()
	}

	t := time.Since(t0)
	log.Printf("%d -%+q %s %d =%dB/s %+q", atomic.AddInt64(&th.clients, -1), req.RemoteAddr, t, cw.Count(), int64(float64(cw.Count())/t.Seconds()), errStr)
}
Example 29
func (o *HTTPOutput) Worker() {
	client := NewHTTPClient(o.address, &HTTPClientConfig{
		FollowRedirects: o.config.redirectLimit,
		Debug:           o.config.Debug,
	})

	death_count := 0

	atomic.AddInt64(&o.activeWorkers, 1)

	for {
		select {
		case data := <-o.queue:
			o.sendRequest(client, data)
			death_count = 0
		case <-time.After(time.Millisecond * 100):
			// When dynamic scaling is enabled, workers die after 2s of inactivity
			if o.config.workers == 0 {
				death_count += 1
			} else {
				continue
			}

			if death_count > 20 {
				workersCount := atomic.LoadInt64(&o.activeWorkers)

				// At least 1 worker should be alive
				if workersCount != 1 {
					atomic.AddInt64(&o.activeWorkers, -1)
					return
				}
			}
		}
	}
}
Example 30
func (manager CacheManager) AddTask(key KeyType, req *http.Request) (CacheTask, bool) {
	manager.tasksMutex.Lock()
	defer manager.tasksMutex.Unlock()

	cacheTask, ok := manager.tasks[key]
	if !ok {
		atomic.AddInt64(&manager.stat.cacheMisses, 1)

		cacheTask = CacheTask{
			chItem: make(chan CacheItem),
			chQuit: make(chan bool),

			statMutex: &sync.Mutex{},
			stat:      &CacheTaskStat{},
		}

		// record the task creation time atomically, since readers may
		// inspect stat.cacheTime concurrently
		cacheTime := time.Now().UnixNano()
		atomic.StoreInt64(&cacheTask.stat.cacheTime, cacheTime)

		manager.tasks[key] = cacheTask

		go manager.fetchItem(key, req, cacheTask)
	} else {
		atomic.AddInt64(&manager.stat.cacheHits, 1)
	}

	return cacheTask, ok
}
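
AddTask coalesces concurrent requests: the first caller for a key starts the fetch and later callers receive the already-running task. For comparison only (this code does not use it), golang.org/x/sync/singleflight packages the same idea:

package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	// concurrent Do calls with the same key share one execution
	v, err, shared := g.Do("key", func() (interface{}, error) {
		return "fetched once", nil
	})
	fmt.Println(v, err, shared)
}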