Example #1
func CountAnAssetRequest(original http.Handler) http.Handler {
	return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
		atomic.AddUint64(&totalRequestCount, 1)
		atomic.AddUint64(&assetRequestCount, 1)
		original.ServeHTTP(response, request)
	})
}
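The two counters above are plain package-level uint64 values updated with sync/atomic. Below is a minimal, self-contained sketch of how such a middleware is typically wired up and read back; the /assets/ route, the /stats endpoint, the port, and the counter declarations are assumptions for illustration, not part of the original project.

package main

import (
	"fmt"
	"log"
	"net/http"
	"sync/atomic"
)

// Assumed package-level counters; the original only shows that they exist.
var (
	totalRequestCount uint64
	assetRequestCount uint64
)

func CountAnAssetRequest(original http.Handler) http.Handler {
	return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
		atomic.AddUint64(&totalRequestCount, 1)
		atomic.AddUint64(&assetRequestCount, 1)
		original.ServeHTTP(response, request)
	})
}

func main() {
	mux := http.NewServeMux()
	// Every hit on the asset handler bumps both counters before serving the file.
	assets := http.StripPrefix("/assets/", http.FileServer(http.Dir("./assets")))
	mux.Handle("/assets/", CountAnAssetRequest(assets))
	// Reads pair atomic.LoadUint64 with the AddUint64 calls in the middleware.
	mux.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "total=%d assets=%d\n",
			atomic.LoadUint64(&totalRequestCount),
			atomic.LoadUint64(&assetRequestCount))
	})
	log.Fatal(http.ListenAndServe(":8080", mux))
}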
Example #2
func (pool *bufferPool) GetOutBuffer() (out *OutBuffer) {
	var ptr unsafe.Pointer
	for {
		ptr = atomic.LoadPointer(&pool.out)
		if ptr == nil {
			break
		}
		if atomic.CompareAndSwapPointer(&pool.out, ptr, ((*OutBuffer)(ptr)).next) {
			break
		}
	}

	atomic.AddUint64(&pool.outGet, 1)
	if ptr == nil {
		atomic.AddUint64(&pool.outNew, 1)
		out = &OutBuffer{Data: make([]byte, 0, pool.bufferInitSize), pool: pool}
	} else {
		out = (*OutBuffer)(ptr)
		atomic.AddInt64(&pool.size, -int64(cap(out.Data)))
	}

	out.isFreed = false
	out.isBroadcast = false
	out.refCount = 0
	return out
}
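GetOutBuffer pops a buffer off a lock-free free list: load the head pointer, then CompareAndSwapPointer it to the node's next field, retrying until the swap wins or the list is empty. The matching push side is not shown above; the following is a generic, self-contained sketch of the same pattern using a hypothetical node type, not this pool's actual API.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// node is a hypothetical free-list entry; OutBuffer plays this role above.
type node struct {
	next unsafe.Pointer // *node
	id   int
}

// push links n in front of the current head, retrying if another goroutine
// changed the head between the load and the CAS.
func push(head *unsafe.Pointer, n *node) {
	for {
		old := atomic.LoadPointer(head)
		n.next = old
		if atomic.CompareAndSwapPointer(head, old, unsafe.Pointer(n)) {
			return
		}
	}
}

// pop mirrors the loop in GetOutBuffer: a nil result means the list is empty.
func pop(head *unsafe.Pointer) *node {
	for {
		old := atomic.LoadPointer(head)
		if old == nil {
			return nil
		}
		if atomic.CompareAndSwapPointer(head, old, (*node)(old).next) {
			return (*node)(old)
		}
	}
}

func main() {
	var head unsafe.Pointer
	push(&head, &node{id: 1})
	push(&head, &node{id: 2})
	fmt.Println(pop(&head).id, pop(&head).id, pop(&head) == nil) // 2 1 true
}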
Example #3
func (c *countingReader) Read(bs []byte) (int, error) {
	n, err := c.Reader.Read(bs)
	atomic.AddUint64(&c.tot, uint64(n))
	atomic.AddUint64(&totalIncoming, uint64(n))
	atomic.StoreInt64(&c.last, time.Now().UnixNano())
	return n, err
}
Example #4
File: ugen.go Project: mkb218/ugen
// RecycleBuf will put a used buffer into the recycling queue for the given BufferSize. It can block, so you should always call it from its own goroutine.
func RecycleBuf(b []float32, op OutputParams) {
	// pc, filename, line, ok := runtime.Caller(1)
	// var logstr string
	// if ok {
	// 	f := runtime.FuncForPC(pc)
	// 	logstr = fmt.Sprintf("%s:%d:%s", filename, line, f.Name())
	// } else {
	// 	logstr = "???"
	// }
	if len(b) == op.BufferSize && cap(b) == op.BufferSize {
		recyclers.RLock()
		var ok bool
		if _, ok = recyclers.m[op.BufferSize]; !ok {
			recyclers.RUnlock()
			MakeRecycleChannel(op)
			recyclers.RLock()
		}
		select {
		case recyclers.m[op.BufferSize] <- b:
			// logger.Println("sending buffer to recycler for",logstr)
			atomic.AddUint64(&RecycleStats.Recycled, 1)
		default:
			// logger.Println("dropping buffer from",logstr)
			atomic.AddUint64(&RecycleStats.Lost, 1)
			// logger.Println("recycling channel full", len(recyclers.m[op.BufferSize]))
		}
		recyclers.RUnlock()
	} else {
		// logger.Println("no recycler for buffer from",logstr)
		atomic.AddUint64(&RecycleStats.Lost, 1)
	}
}
Example #5
// Do generates load using the given function.
func (gen *Generator) Do(f func() error) error {
	ticker := time.NewTicker(gen.period)
	defer ticker.Stop()

	timeout := time.After(gen.duration + gen.warmup)
	warmed := time.Now().Add(gen.warmup)

	for {
		select {
		case start := <-ticker.C:
			err := f()
			if start.After(warmed) {
				if err == nil {
					// record success
					elapsed := us(time.Now().Sub(start))
					if err := gen.hist.RecordCorrectedValue(elapsed, us(gen.period)); err != nil {
						log.Println(err)
					}
					atomic.AddUint64(gen.success, 1)
				} else {
					// record failure
					atomic.AddUint64(gen.failure, 1)
				}
			}
		case <-timeout:
			return nil
		}
	}
}
Example #6
func (s *state) handleNQuads(wg *sync.WaitGroup) {
	for nq := range s.cnq {
		edge, err := nq.ToEdge(s.instanceIdx, s.numInstances)
		for err != nil {
			// Just put in a retry loop to tackle temporary errors.
			if err == posting.E_TMP_ERROR {
				time.Sleep(time.Microsecond)

			} else {
				glog.WithError(err).WithField("nq", nq).
					Error("While converting to edge")
				return
			}
			edge, err = nq.ToEdge(s.instanceIdx, s.numInstances)
		}

		// Only handle this edge if the attribute satisfies the modulo rule
		if farm.Fingerprint64([]byte(edge.Attribute))%s.numInstances ==
			s.instanceIdx {
			key := posting.Key(edge.Entity, edge.Attribute)
			plist := posting.GetOrCreate(key, dataStore)
			plist.AddMutation(edge, posting.Set)
			atomic.AddUint64(&s.ctr.processed, 1)
		} else {
			atomic.AddUint64(&s.ctr.ignored, 1)
		}

	}
	wg.Done()
}
Example #7
func newSmolderingCouchTermFieldReader(indexReader *IndexReader, term []byte, field uint16, includeFreq, includeNorm, includeTermVectors bool) (*SmolderingCouchTermFieldReader, error) {
	dictionaryRow := NewDictionaryRow(term, field, 0)
	val, err := indexReader.kvreader.Get(dictionaryRow.Key())
	if err != nil {
		return nil, err
	}
	if val == nil {
		atomic.AddUint64(&indexReader.index.stats.termSearchersStarted, uint64(1))
		return &SmolderingCouchTermFieldReader{
			count:   0,
			term:    term,
			tfrNext: &TermFrequencyRow{},
			field:   field,
		}, nil
	}

	err = dictionaryRow.parseDictionaryV(val)
	if err != nil {
		return nil, err
	}

	tfrk := TermFrequencyRowStart(term, field, []byte{})
	it := indexReader.kvreader.PrefixIterator(tfrk)

	atomic.AddUint64(&indexReader.index.stats.termSearchersStarted, uint64(1))
	return &SmolderingCouchTermFieldReader{
		indexReader: indexReader,
		iterator:    it,
		count:       dictionaryRow.count,
		term:        term,
		tfrNext:     &TermFrequencyRow{},
		field:       field,
	}, nil
}
Example #8
func (s *httpServer) mgetHandler(w http.ResponseWriter, req *http.Request) {
	req.ParseForm()
	if len(req.Form["key"]) == 0 {
		http.Error(w, "MISSING_ARG_KEY", 400)
		return
	}
	startTime := time.Now()
	atomic.AddUint64(&s.Requests, 1)
	atomic.AddUint64(&s.MgetRequests, 1)

	w.Header().Set("Content-Type", "text/plain")
	var numFound int
	for _, key := range req.Form["key"] {
		needle := append([]byte(key), s.ctx.db.RecordSeparator)
		line := s.ctx.db.Search(needle)
		if len(line) != 0 {
			numFound += 1
			w.Write(line)
			w.Write([]byte{s.ctx.db.LineEnding})
		}
	}
	if numFound == 0 {
		atomic.AddUint64(&s.MgetMisses, 1)
		w.WriteHeader(200)
	} else {
		atomic.AddUint64(&s.MgetHits, 1)
	}
	s.MgetMetrics.Status(startTime)
}
Example #9
func (s *httpServer) getHandler(w http.ResponseWriter, req *http.Request) {
	key := req.FormValue("key")
	if key == "" {
		http.Error(w, "MISSING_ARG_KEY", 400)
		return
	}
	startTime := time.Now()
	atomic.AddUint64(&s.Requests, 1)
	atomic.AddUint64(&s.GetRequests, 1)

	needle := append([]byte(key), s.ctx.db.RecordSeparator)
	line := s.ctx.db.Search(needle)

	if len(line) == 0 {
		atomic.AddUint64(&s.GetMisses, 1)
		http.Error(w, "NOT_FOUND", 404)
	} else {
		// we only output the 'value'
		line = line[len(needle):]
		atomic.AddUint64(&s.GetHits, 1)
		w.Header().Set("Content-Type", "text/plain")
		w.Header().Set("Content-Length", strconv.Itoa(len(line)+1))
		w.Write(line)
		w.Write([]byte{s.ctx.db.LineEnding})
	}
	s.GetMetrics.Status(startTime)
}
Example #10
func (pipeline *mk_itemPipeline) Send(item base.MKItem) []error {
	atomic.AddUint64(&pipeline.processingNumber, 1)
	defer atomic.AddUint64(&pipeline.processingNumber, ^uint64(0))

	atomic.AddUint64(&pipeline.sent, 1)
	errs := make([]error, 0)
	if item == nil {
		errs = append(errs, errors.New("无效条目!")) // "invalid item!"
		return errs
	}

	atomic.AddUint64(&pipeline.accepted, 1)

	var currentItem base.MKItem = item
	for _, itemProcessor := range pipeline.itemProcessors {
		processedItem, err := itemProcessor(currentItem)
		if err != nil {
			errs = append(errs, err)
			if pipeline.failFast {
				break
			}
		}

		if processedItem != nil {
			currentItem = processedItem
		}
	}

	atomic.AddUint64(&pipeline.processed, 1)

	return errs
}
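The deferred atomic.AddUint64(&pipeline.processingNumber, ^uint64(0)) at the top of Send is the standard sync/atomic idiom for decrementing an unsigned counter: adding the two's complement of 1 wraps around and subtracts 1, and the package documentation gives AddUint64(&x, ^uint64(c-1)) as the way to subtract a positive constant c. A standalone illustration:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uint64 = 3
	atomic.AddUint64(&n, 1)            // increment: n == 4
	atomic.AddUint64(&n, ^uint64(0))   // decrement by 1: n == 3
	atomic.AddUint64(&n, ^uint64(2-1)) // subtract 2: n == 1
	fmt.Println(n)                     // prints 1
}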
Example #11
func (z *zeroSum) monkey(tableID uint32, d time.Duration) {
	r := newRand()
	zipf := z.accountDistribution(r)

	for {
		time.Sleep(time.Duration(rand.Float64() * float64(d)))

		key := keys.MakeTablePrefix(tableID)
		key = encoding.EncodeVarintAscending(key, int64(zipf.Uint64()))
		key = keys.MakeRowSentinelKey(key)

		switch r.Intn(2) {
		case 0:
			if err := z.Split(z.RandNode(r.Intn), key); err != nil {
				if strings.Contains(err.Error(), "range is already split at key") ||
					strings.Contains(err.Error(), storage.ErrMsgConflictUpdatingRangeDesc) {
					continue
				}
				z.maybeLogError(err)
			} else {
				atomic.AddUint64(&z.stats.splits, 1)
			}
		case 1:
			if transferred, err := z.TransferLease(z.RandNode(r.Intn), r, key); err != nil {
				z.maybeLogError(err)
			} else if transferred {
				atomic.AddUint64(&z.stats.transfers, 1)
			}
		}
	}
}
Example #12
func (ptree *ptree) applyNode(n *node, adds, deletes []*keyBundle) {
	for _, kb := range deletes {
		if n.keys.len() == 0 {
			break
		}

		deleted := n.keys.delete(kb.key)
		if deleted != nil {
			atomic.AddUint64(&ptree.number, ^uint64(0))
		}
	}

	for _, kb := range adds {
		if n.keys.len() == 0 {
			oldKey, _ := n.keys.insert(kb.key)
			if n.isLeaf && oldKey == nil {
				atomic.AddUint64(&ptree.number, 1)
			}
			if kb.left != nil {
				n.nodes.push(kb.left)
				n.nodes.push(kb.right)
			}
			continue
		}

		oldKey, index := n.keys.insert(kb.key)
		if n.isLeaf && oldKey == nil {
			atomic.AddUint64(&ptree.number, 1)
		}
		if kb.left != nil {
			n.nodes.replaceAt(index, kb.left)
			n.nodes.insertAt(index+1, kb.right)
		}
	}
}
Example #13
// A worker connects to one data manager server.
func (d *bucketDataSource) workerStart(server string, workerCh chan []uint16) {
	backoffFactor := d.options.DataManagerBackoffFactor
	if backoffFactor <= 0.0 {
		backoffFactor = DefaultBucketDataSourceOptions.DataManagerBackoffFactor
	}
	sleepInitMS := d.options.DataManagerSleepInitMS
	if sleepInitMS <= 0 {
		sleepInitMS = DefaultBucketDataSourceOptions.DataManagerSleepInitMS
	}
	sleepMaxMS := d.options.DataManagerSleepMaxMS
	if sleepMaxMS <= 0 {
		sleepMaxMS = DefaultBucketDataSourceOptions.DataManagerSleepMaxMS
	}

	// Use exponential backoff loop to handle reconnect retries to the server.
	go func() {
		atomic.AddUint64(&d.stats.TotWorkerStart, 1)

		ExponentialBackoffLoop("cbdatasource.worker-"+server,
			func() int { return d.worker(server, workerCh) },
			sleepInitMS, backoffFactor, sleepMaxMS)

		atomic.AddUint64(&d.stats.TotWorkerDone, 1)
	}()
}
Example #14
// managedLogError will take an error and log it to the host, depending on the
// type of error and whether or not the DEBUG flag has been set.
func (h *Host) managedLogError(err error) {
	// Determine the type of error and the number of times that this error has
	// been logged.
	var num uint64
	var probability int // Error will be logged with 1/probability chance.
	switch err.(type) {
	case ErrorCommunication:
		num = atomic.LoadUint64(&h.atomicCommunicationErrors)
		probability = errorCommunicationProbability
	case ErrorConnection:
		num = atomic.LoadUint64(&h.atomicConnectionErrors)
		probability = errorConnectionProbability
	case ErrorConsensus:
		num = atomic.LoadUint64(&h.atomicConsensusErrors)
		probability = errorConsensusProbability
	case ErrorInternal:
		num = atomic.LoadUint64(&h.atomicInternalErrors)
		probability = errorInternalProbability
	default:
		num = atomic.LoadUint64(&h.atomicNormalErrors)
		probability = errorNormalProbability
	}

	// If num > logFewLimit, substantially decrease the probability that the error
	// gets logged.
	if num > logFewLimit {
		probability = probability * 25
	}

	// If we've seen fewer than logAllLimit errors of this type before, log
	// the error as a normal logging statement. Otherwise, probabilistically
	// log the statement. In debugging mode, log all statements.
	logged := false
	rand, randErr := crypto.RandIntn(probability + 1)
	if randErr != nil {
		h.log.Critical("random number generation failed")
	}
	if num < logAllLimit || rand == probability {
		logged = true
		h.log.Println(err)
	} else {
		h.log.Debugln(err)
	}

	// If the error was logged, increment the log counter.
	if logged {
		switch err.(type) {
		case ErrorCommunication:
			atomic.AddUint64(&h.atomicCommunicationErrors, 1)
		case ErrorConnection:
			atomic.AddUint64(&h.atomicConnectionErrors, 1)
		case ErrorConsensus:
			atomic.AddUint64(&h.atomicConsensusErrors, 1)
		case ErrorInternal:
			atomic.AddUint64(&h.atomicInternalErrors, 1)
		default:
			atomic.AddUint64(&h.atomicNormalErrors, 1)
		}
	}
}
Example #15
// Send does the actual send of a buffer that has already been formatted
// into bytes of ES-formatted bulk data.
func (b *BulkIndexer) Send(buf *bytes.Buffer) error {
	type responseStruct struct {
		Took   int64                    `json:"took"`
		Errors bool                     `json:"errors"`
		Items  []map[string]interface{} `json:"items"`
	}

	response := responseStruct{}

	body, err := b.conn.DoCommand("POST", fmt.Sprintf("/_bulk?refresh=%t", b.Refresh), nil, buf)

	if err != nil {
		atomic.AddUint64(&b.numErrors, 1)
		return err
	}
	// check for response errors, bulk insert will give 200 OK but then include errors in response
	jsonErr := json.Unmarshal(body, &response)
	if jsonErr == nil {
		if response.Errors {
			atomic.AddUint64(&b.numErrors, uint64(len(response.Items)))
			return fmt.Errorf("Bulk Insertion Error. Failed item count [%d]", len(response.Items))
		}
	}
	return nil
}
Example #16
func (this *GatewayClient) Transport() {
	defer this.Close(false)

	var (
		link  = this.owner
		front = link.owner
		pack  = front.pack
		msg   []byte
	)

	for {
		msg = this.conn.ReadInto(msg)

		if msg == nil {
			break
		}

		setUint(msg, pack, len(msg)-pack)

		setUint32(msg[pack:], this.id)

		if front.counterOn {
			atomic.AddUint64(&front.inPack, uint64(1))
			atomic.AddUint64(&front.inByte, uint64(len(msg)))
		}

		link.SendToBackend(msg)
	}
}
Example #17
// Write adds a new needle. If the key already exists, the needle is appended
// to the super block and the needle cache offset is updated to the new offset.
// Write is intended for adding multiple needles, following this pattern:
// Get Needle
// Lock
// for {
//   Write
// }
// Unlock
// Free Needle
func (v *Volume) Write(n *Needle) (err error) {
	var (
		ok              bool
		nc              int64
		offset, ooffset uint32
		now             = time.Now().UnixNano()
	)
	offset = v.Block.Offset
	if err = v.Block.Write(n); err == nil {
		if err = v.Indexer.Add(n.Key, offset, n.TotalSize); err == nil {
			nc, ok = v.needles[n.Key]
			v.needles[n.Key] = NeedleCache(offset, n.TotalSize)
		}
	}
	if err != nil {
		return
	}
	if log.V(1) {
		log.Infof("add needle, offset: %d, size: %d", offset, n.TotalSize)
	}
	if ok {
		ooffset, _ = NeedleCacheValue(nc)
		log.Warningf("same key: %d, old offset: %d, new offset: %d", n.Key,
			ooffset, offset)
		err = v.asyncDel(ooffset)
	}
	atomic.AddUint64(&v.Stats.TotalWriteProcessed, 1)
	atomic.AddUint64(&v.Stats.TotalWriteBytes, uint64(n.TotalSize))
	atomic.AddUint64(&v.Stats.TotalWriteDelay, uint64(time.Now().UnixNano()-
		now))
	return
}
Example #18
// startHarvester starts a new harvester with the given offset
// In case the HarvesterLimit is reached, an error is returned
func (p *Prospector) startHarvester(state file.State, offset int64) error {

	if p.config.HarvesterLimit > 0 && atomic.LoadUint64(&p.harvesterCounter) >= p.config.HarvesterLimit {
		harvesterSkipped.Add(1)
		return fmt.Errorf("Harvester limit reached.")
	}

	state.Offset = offset
	// Create harvester with state
	h, err := p.createHarvester(state)
	if err != nil {
		return err
	}

	p.wg.Add(1)
	// startHarvester is not run concurrently, but atomic operations are needed for decrementing the counter
	// inside the following goroutine.
	atomic.AddUint64(&p.harvesterCounter, 1)
	go func() {
		defer func() {
			atomic.AddUint64(&p.harvesterCounter, ^uint64(0))
			p.wg.Done()
		}()
		// Starts the harvester and picks the right type. In case the type is not set, it defaults to log.
		h.Harvest()
	}()

	return nil
}
Example #19
func (sink *WebsocketSink) Run(inputChan <-chan *logmessage.Message) {
	sink.logger.Debugf("Websocket Sink %s: Created for appId [%s]", sink.clientAddress, sink.appId)

	keepAliveFailure := sink.keepAliveFailureChannel()

	buffer := RunTruncatingBuffer(inputChan, sink.wsMessageBufferSize, sink.logger)
	for {
		sink.logger.Debugf("Websocket Sink %s: Waiting for activity", sink.clientAddress)
		select {
		case <-keepAliveFailure:
			sink.logger.Debugf("Websocket Sink %s: No keep-alive received. Requesting close.", sink.clientAddress)
			return
		case message, ok := <-buffer.GetOutputChannel():
			if !ok {
				sink.logger.Debugf("Websocket Sink %s: Closed listener channel detected. Closing websocket", sink.clientAddress)
				return
			}
			sink.logger.Debugf("Websocket Sink %s: Got %d bytes. Sending data", sink.clientAddress, message.GetRawMessageLength())
			err := sink.ws.WriteMessage(websocket.BinaryMessage, message.GetRawMessage())
			if err != nil {
				sink.logger.Debugf("Websocket Sink %s: Error when trying to send data to sink. Requesting close. Err: %v", sink.clientAddress, err)
				return
			}

			sink.logger.Debugf("Websocket Sink %s: Successfully sent data", sink.clientAddress)
			atomic.AddUint64(&sink.sentMessageCount, 1)
			atomic.AddUint64(&sink.sentByteCount, uint64(message.GetRawMessageLength()))
		}
	}
}
Example #20
// observe sends an observation to every observer.
func (r *Raft) observe(o interface{}) {
	// In general observers should not block. But in any case this isn't
	// disastrous as we only hold a read lock, which merely prevents
	// registration / deregistration of observers.
	r.observersLock.RLock()
	defer r.observersLock.RUnlock()
	for _, or := range r.observers {
		// It's wasteful to do this in the loop, but for the common case
		// where there are no observers we won't create any objects.
		ob := Observation{Raft: r, Data: o}
		if or.filter != nil && !or.filter(&ob) {
			continue
		}
		if or.channel == nil {
			continue
		}
		if or.blocking {
			or.channel <- ob
			atomic.AddUint64(&or.numObserved, 1)
		} else {
			select {
			case or.channel <- ob:
				atomic.AddUint64(&or.numObserved, 1)
			default:
				atomic.AddUint64(&or.numDropped, 1)
			}
		}
	}
}
Example #21
File: ugen.go Project: mkb218/ugen
// GetNewBuf either returns a recycled buffer or newly allocated buffer with the desired buffer size.
// This should never block.
func GetNewBuf(op OutputParams) (b []float32) {
	recyclers.RLock()
	defer recyclers.RUnlock()
	// pc, filename, line, ok := runtime.Caller(1)
	// var logstr string
	// if ok {
	// f := runtime.FuncForPC(pc)
	// logstr = fmt.Sprintf("%s:%d:%s", filename, line, f.Name())
	// } else {
	// logstr = "???"
	// }
	if c, ok := recyclers.m[op.BufferSize]; ok {
		select {
		case b = <-c:
			// logger.Println("recycled a buf for ",logstr)
		default:
			// logger.Println("alloced new for", logstr)
			atomic.AddUint64(&RecycleStats.Alloced, 1)
			b = make([]float32, op.BufferSize)
		}
	} else {
		// logger.Println("alloced new for", logstr)
		atomic.AddUint64(&RecycleStats.Alloced, 1)
		b = make([]float32, op.BufferSize)
	}
	atomic.AddUint64(&RecycleStats.Issued, 1)
	return
}
Example #22
// Get returns an existing connection from the pool or creates a new one.
func (p *connPool) Get() (cn *conn, isNew bool, err error) {
	if p.closed() {
		err = errClosed
		return
	}

	atomic.AddUint64(&p.stats.Requests, 1)

	// Fetch first non-idle connection, if available.
	if cn = p.First(); cn != nil {
		return
	}

	// Try to create a new one.
	if p.conns.Reserve() {
		cn, err = p.new()
		if err != nil {
			p.conns.Remove(nil)
			return
		}
		p.conns.Add(cn)
		isNew = true
		return
	}

	// Otherwise, wait for the available connection.
	atomic.AddUint64(&p.stats.Waits, 1)
	if cn = p.wait(); cn != nil {
		return
	}

	atomic.AddUint64(&p.stats.Timeouts, 1)
	err = errPoolTimeout
	return
}
Example #23
func (udc *UpsideDownCouch) Delete(id string) error {
	indexStart := time.Now()
	// start a writer for this delete
	kvwriter := udc.store.Writer()
	defer kvwriter.Close()

	// lookup the back index row
	backIndexRow, err := udc.backIndexRowForDoc(kvwriter, id)
	if err != nil {
		atomic.AddUint64(&udc.stats.errors, 1)
		return err
	}
	if backIndexRow == nil {
		atomic.AddUint64(&udc.stats.deletes, 1)
		return nil
	}

	deleteRows := make([]UpsideDownCouchRow, 0)
	deleteRows = udc.deleteSingle(id, backIndexRow, deleteRows)

	err = udc.batchRows(kvwriter, nil, nil, deleteRows)
	if err == nil {
		udc.docCount--
	}
	atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart)))
	if err == nil {
		atomic.AddUint64(&udc.stats.deletes, 1)
	} else {
		atomic.AddUint64(&udc.stats.errors, 1)
	}
	return err
}
Example #24
func (downloader *bucketDownloader) downloadKey(key *s3.Key) {
	path := pathForKey(downloader.targetPath, key)

	// Make the dir if it doesn't exist
	dirPath := filepath.Dir(path)
	err := os.MkdirAll(dirPath, 0777)
	if err != nil {
		log.Fatal(err)
	}

	fileWriter, err := os.Create(path)
	if err != nil {
		log.Fatal(err)
	}
	defer fileWriter.Close()

	bucketReader, err := downloader.bucket.GetReader(key.Key)
	if err != nil {
		log.Fatal(err)
	}
	defer bucketReader.Close()

	bytes, err := io.Copy(fileWriter, bucketReader)
	if err != nil {
		log.Fatal(err)
	}

	atomic.AddUint64(&downloader.syncedBytes, uint64(bytes))
	atomic.AddUint64(&downloader.syncedFiles, 1)

	log.Printf("Fetched %v (%d bytes)", key.Key, bytes)
}
Example #25
func (base *BaseHandler) emitAndTime(
	metrics []metric.Metric,
	emitFunc func([]metric.Metric) bool,
	callbackChannel chan<- emissionTiming,
) {
	numMetrics := len(metrics)
	beforeEmission := time.Now()
	result := emitFunc(metrics)
	afterEmission := time.Now()

	emissionDuration := afterEmission.Sub(beforeEmission)
	timing := emissionTiming{
		timestamp:   time.Now(),
		duration:    emissionDuration,
		metricsSent: numMetrics,
	}
	base.log.Info(
		fmt.Sprintf("POST of %d metrics to %s took %f seconds",
			numMetrics,
			base.name,
			emissionDuration.Seconds(),
		),
	)
	callbackChannel <- timing

	if result {
		atomic.AddUint64(&base.metricsSent, uint64(numMetrics))
	} else {
		atomic.AddUint64(&base.metricsDropped, uint64(numMetrics))
	}
}
Example #26
func (c *L2Cache) GetItem(key string, fback api.GetterFunc) (interface{}, error) {
	var value interface{}
	if value, found := c.sm.Find(key); found {
		atomic.AddUint64(&c.cntHit, 1)
		return value, nil
	}
	var err error
	c.sm.AtomicWait(func(m api.Mapper) {
		atomic.AddUint64(&c.cntHit, 1)
		m.SetKey(key)
		value = m.Value()
		if value != nil {
			return
		}
		c.cntMissed++
		value, err = fback(key)
		if err != nil {
			return
		}
		if len(c.order) == c.maxItems {
			c.cntGcHit++
			m.SetKey(c.order[0])
			m.Delete()
			c.order = c.order[1:]
			m.SetKey(key)
		}
		c.order = append(c.order, key)
		m.Update(value)
		c.cntInsert++
	})
	if err != nil {
		return nil, err
	}
	return value, nil
}
Example #27
func (nr *NetworkReader) Start() {
	connection, err := net.ListenPacket("udp4", nr.host)
	if err != nil {
		nr.logger.Fatalf("Failed to listen on port. %s", err)
	}
	nr.logger.Infof("Listening on port %s", nr.host)
	nr.lock.Lock()
	nr.connection = connection
	nr.lock.Unlock()

	readBuffer := make([]byte, 65535) //buffer with size = max theoretical UDP size
	for {
		readCount, senderAddr, err := connection.ReadFrom(readBuffer)
		if err != nil {
			nr.logger.Debugf("Error while reading. %s", err)
			return
		}
		nr.logger.Debugf("NetworkReader: Read %d bytes from address %s", readCount, senderAddr)
		readData := make([]byte, readCount) //pass on buffer in size only of read data
		copy(readData, readBuffer[:readCount])

		atomic.AddUint64(&nr.receivedMessageCount, 1)
		atomic.AddUint64(&nr.receivedByteCount, uint64(readCount))
		metrics.BatchIncrementCounter(nr.contextName + ".receivedMessageCount")
		metrics.BatchAddCounter(nr.contextName+".receivedByteCount", uint64(readCount))
		nr.writer.Write(readData)
	}
}
Example #28
// Handles incoming requests.
func handleRequest(conn net.Conn) {
	defer func() {
		conn.Close()
		atomic.AddInt32(&clients, -1)
	}()

	atomic.AddInt32(&clients, 1)

	// Make a buffer to hold incoming data.
	buf := memPool.Get().([]byte)
	for {
		// Read the incoming connection into the buffer.
		n, err := conn.Read(buf)
		if err != nil {
			if err != io.EOF {
				log.Println("Error reading:", err.Error())
			}

			return
		}
		atomic.AddUint64(&bytesRecved, uint64(n))

		// Send a response back to person contacting us.
		n, _ = conn.Write([]byte("Message received."))
		atomic.AddUint64(&bytesSent, uint64(n))
	}

}
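handleRequest assumes one goroutine per connection, which is why clients, bytesRecved, and bytesSent are updated with sync/atomic. Below is a minimal sketch of the accept loop that would drive it; the listen address, pool buffer size, and counter declarations are assumptions, and it relies on handleRequest as defined above.

package main

import (
	"log"
	"net"
	"sync"
)

// Assumed package-level state shared with handleRequest above.
var (
	clients     int32
	bytesRecved uint64
	bytesSent   uint64
	memPool     = sync.Pool{New: func() interface{} { return make([]byte, 4096) }}
)

func main() {
	ln, err := net.Listen("tcp", ":3333")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Println("Error accepting:", err)
			continue
		}
		// One goroutine per connection; the shared counters above are
		// therefore only ever touched through sync/atomic.
		go handleRequest(conn)
	}
}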
Example #29
func (c *countingWriter) Write(bs []byte) (int, error) {
	n, err := c.Writer.Write(bs)
	atomic.AddUint64(&c.tot, uint64(n))
	atomic.AddUint64(&totalOutgoing, uint64(n))
	atomic.StoreInt64(&c.last, time.Now().UnixNano())
	return n, err
}
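Examples #3 and #29 show only the Read and Write methods of the counting wrappers; the types themselves are not included. Here is a self-contained sketch of the reader side, with the struct fields inferred from the method and everything else (the aggregate counter, the demo in main) assumed for illustration; the writer side is symmetric.

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync/atomic"
	"time"
)

// Sketch of the wrapper type implied by the Read method in Example #3;
// only the fields that method touches are known, the rest is assumed.
type countingReader struct {
	io.Reader
	tot  uint64 // total bytes read, updated atomically
	last int64  // unix nanos of the last read
}

var totalIncoming uint64 // assumed package-level aggregate counter

func (c *countingReader) Read(bs []byte) (int, error) {
	n, err := c.Reader.Read(bs)
	atomic.AddUint64(&c.tot, uint64(n))
	atomic.AddUint64(&totalIncoming, uint64(n))
	atomic.StoreInt64(&c.last, time.Now().UnixNano())
	return n, err
}

func main() {
	// Wrap any io.Reader (here a bytes.Reader; a net.Conn works the same way).
	cr := &countingReader{Reader: bytes.NewReader([]byte("hello"))}
	io.Copy(io.Discard, cr)
	fmt.Println(atomic.LoadUint64(&cr.tot)) // 5
}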
Example #30
func getNote(response http.ResponseWriter, request *http.Request) {
	parts := notePathRegexp.FindStringSubmatch(request.URL.Path)

	id := parts[1]

	buf := make([]byte, mainStore.MaxSecretSize)
	defer zeroBuffer(buf)

	nRead, code, err := mainStore.Retrieve(id, buf)

	if err == store.SecretAlreadyAccessed {
		atomic.AddUint64(&noteAlreadyOpenedRequestCount, 1)
		response.WriteHeader(http.StatusForbidden) // 403
		return
	} else if err == store.SecretExpired {
		atomic.AddUint64(&noteExpiredRequestCount, 1)
		response.WriteHeader(http.StatusGone) // 410
		return
	} else if err == store.SecretNotFound {
		atomic.AddUint64(&noteNotFoundCount, 1)
		response.WriteHeader(http.StatusNotFound) // 404
		return
	} else if err != nil {
		response.WriteHeader(http.StatusInternalServerError) // 500
		log.Print("Returning 500:", err)
		return
	}

	atomic.AddUint64(&notesOpenedCount, 1)
	response.Header().Set("Content-Type", "application/octet-stream")
	response.Header().Set("X-Note-Code", code)
	response.WriteHeader(http.StatusOK) // 200
	response.Write(buf[:nRead])
	zeroResponseBuffer(response)
}