Example No. 1
// Updates the age statistic
func (c *Cache) UpdateAge() {
	c.mu.RLock()
	defer c.mu.RUnlock()
	ageStat := new(expvar.Int)
	ageStat.Set(int64(time.Since(c.lastSnapshot) / time.Millisecond))
	c.statMap.Set(statCacheAgeMs, ageStat)
}
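Most of the snippets on this page follow the same pattern: build a fresh *expvar.Int, Set it, and store it in an expvar.Map under a stat key. A minimal self-contained sketch of that pattern (the map and key names here are illustrative, not taken from the code above):

package main

import (
	"expvar"
	"fmt"
)

func main() {
	// Hypothetical stat map and key, mirroring the pattern in the examples.
	statMap := expvar.NewMap("cache")

	ageStat := new(expvar.Int)
	ageStat.Set(1500) // e.g. cache age in milliseconds
	statMap.Set("cacheAgeMs", ageStat)

	// expvar.Map renders as JSON; this is also what the /debug/vars endpoint serves.
	fmt.Println(statMap) // {"cacheAgeMs": 1500}
}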
Example No. 2
func (l *WAL) Remove(files []string) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, fn := range files {
		os.RemoveAll(fn)
	}

	// Refresh the on-disk size stats
	segments, err := segmentFileNames(l.path)
	if err != nil {
		return err
	}

	var totalOldDiskSize int64
	for _, seg := range segments {
		stat, err := os.Stat(seg)
		if err != nil {
			return err
		}

		totalOldDiskSize += stat.Size()
	}
	sizeStat := new(expvar.Int)
	sizeStat.Set(totalOldDiskSize)
	l.statMap.Set(statWALOldBytes, sizeStat)

	return nil
}
Example No. 3
func determineProperties(sizes []int) *expvar.Map {
	var sum int
	props := new(expvar.Map).Init()
	for _, n := range sizes {
		sum += n
	}
	// Determine properties
	sort.Ints(sizes)
	if len(sizes) > 0 {
		summary := map[string]int{
			"min":    sizes[0],
			"max":    sizes[len(sizes)-1],
			"length": len(sizes),
			"sum":    sum,
			"95e":    sizes[int(float64(len(sizes))*0.95)],
		}
		mean := float64(sum) / float64(summary["length"])

		// Pack them into an expvar Map
		for k, v := range summary {
			n := new(expvar.Int)
			n.Set(int64(v))
			props.Set(k, n)
		}
		avge := new(expvar.Float)
		avge.Set(mean)
		props.Set("avg", avge)
	}

	return props
}
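A quick usage sketch for determineProperties (assuming the function is in scope and "fmt" is imported); since expvar.Map serializes to JSON with sorted keys, printing the result is an easy way to inspect it:

func demoDetermineProperties() {
	props := determineProperties([]int{4, 8, 15, 16, 23, 42})
	fmt.Println(props) // {"95e": 42, "avg": 18, "length": 6, "max": 42, "min": 4, "sum": 108}
}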
Example No. 4
func (g *goroutineTracker) recordSnapshot() {

	g.mutex.Lock()
	defer g.mutex.Unlock()

	// get number of goroutines
	numGoroutines := uint64(runtime.NumGoroutine())

	// bump the high water mark
	if numGoroutines > g.HighWaterMark {
		g.HighWaterMark = numGoroutines

		varInt := expvar.Int{}
		varInt.Set(int64(numGoroutines))
		base.StatsExpvars.Set("goroutines_highWaterMark", &varInt)
	}

	// append to history
	g.Snapshots = append(g.Snapshots, numGoroutines)

	// drop the oldest one if we've gone over the max
	if len(g.Snapshots) > kMaxGoroutineSnapshots {
		g.Snapshots = g.Snapshots[1:]
	}

}
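Only the method is shown above; the struct it belongs to is not. A guess at the minimal shape of goroutineTracker implied by recordSnapshot (kMaxGoroutineSnapshots and base.StatsExpvars are assumed to be defined elsewhere in the package):

// Sketch of the fields recordSnapshot relies on; the real definition is not shown here.
type goroutineTracker struct {
	mutex         sync.Mutex
	HighWaterMark uint64
	Snapshots     []uint64
}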
Example No. 5
// Open opens and initializes the Log. It will recover from a previous unclosed shutdown.
func (l *WAL) Open() error {
	l.mu.Lock()
	defer l.mu.Unlock()

	if l.LoggingEnabled {
		l.logger.Printf("tsm1 WAL starting with %d segment size\n", l.SegmentSize)
		l.logger.Printf("tsm1 WAL writing to %s\n", l.path)
	}
	if err := os.MkdirAll(l.path, 0777); err != nil {
		return err
	}

	segments, err := segmentFileNames(l.path)
	if err != nil {
		return err
	}

	if len(segments) > 0 {
		lastSegment := segments[len(segments)-1]
		id, err := idFromFileName(lastSegment)
		if err != nil {
			return err
		}

		l.currentSegmentID = id
		stat, err := os.Stat(lastSegment)
		if err != nil {
			return err
		}

		if stat.Size() == 0 {
			os.Remove(lastSegment)
			segments = segments[:len(segments)-1]
		}
		if err := l.newSegmentFile(); err != nil {
			return err
		}
	}

	var totalOldDiskSize int64
	for _, seg := range segments {
		stat, err := os.Stat(seg)
		if err != nil {
			return err
		}

		totalOldDiskSize += stat.Size()
	}
	sizeStat := new(expvar.Int)
	sizeStat.Set(totalOldDiskSize)
	l.statMap.Set(statWALOldBytes, sizeStat)

	l.closing = make(chan struct{})

	l.lastWriteTime = time.Now()

	return nil
}
Example No. 6
// updateSnapshots updates the snapshot and cache disk size statistics
func (c *Cache) updateSnapshots() {
	// Update disk stats
	diskSizeStat := new(expvar.Int)
	diskSizeStat.Set(int64(c.snapshotSize))
	c.statMap.Set(statCacheDiskBytes, diskSizeStat)

	snapshotsStat := new(expvar.Int)
	snapshotsStat.Set(int64(c.snapshotAttempts))
	c.statMap.Set(statSnapshots, snapshotsStat)
}
Example No. 7
func TestDuplicatePublish(t *testing.T) {
	defer reset()
	num1 := new(expvar.Int)
	num1.Set(10)
	Publish("number", num1)
	num2 := new(expvar.Int)
	num2.Set(20)
	Publish("number", num2)
	if g := Get("number").String(); g != "20" {
		t.Errorf("number str = %s, want %s", g, "20")
	}
}
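Note that this test exercises a package with its own Publish, Get, and reset helpers, where republishing a name overwrites the previous value. The standard library's expvar.Publish behaves differently: it panics when a name is reused.

// With the standard library, the second Publish would panic instead of overwriting:
//   expvar.Publish("number", num1)
//   expvar.Publish("number", num2) // panic: Reuse of exported var name: number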
Example No. 8
// AddCount increments the per-minute rate counter for name and exports the current rate via expvar
func AddCount(name string) {
	updLastTime(name, time.Now())

	var counter *ratecounter.RateCounter
	counter, ok := counters[name]
	if !ok {
		counter = ratecounter.NewRateCounter(1 * time.Minute)
		counters[name] = counter
	}

	var request *expvar.Int
	request, ok = requests[name]
	if !ok {
		request = expvar.NewInt(name)
		requests[name] = request
	}
	counter.Incr(1)
	request.Set(counter.Rate())
}
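AddCount reads and writes package-level state that the snippet does not show; something like the declarations below is implied (updLastTime is another unexported helper that is not included). As written, the map accesses are not guarded by a lock, so this is only safe if callers serialize access.

// Assumed package-level registries used by AddCount (not part of the original snippet).
var (
	counters = map[string]*ratecounter.RateCounter{}
	requests = map[string]*expvar.Int{}
)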
Example No. 9
func (l *WAL) writeToLog(entry WALEntry) (int, error) {
	// encode and compress the entry while we're not locked
	bytes := getBuf(walEncodeBufSize)
	defer putBuf(bytes)

	b, err := entry.Encode(bytes)
	if err != nil {
		return -1, err
	}

	encBuf := getBuf(snappy.MaxEncodedLen(len(b)))
	defer putBuf(encBuf)
	compressed := snappy.Encode(encBuf, b)

	l.mu.Lock()
	defer l.mu.Unlock()

	// Make sure the log has not been closed
	select {
	case <-l.closing:
		return -1, ErrWALClosed
	default:
	}

	// roll the segment file if needed
	if err := l.rollSegment(); err != nil {
		return -1, fmt.Errorf("error rolling WAL segment: %v", err)
	}

	// write and sync
	if err := l.currentSegmentWriter.Write(entry.Type(), compressed); err != nil {
		return -1, fmt.Errorf("error writing WAL entry: %v", err)
	}

	// Update stats for current segment size
	curSize := new(expvar.Int)
	curSize.Set(int64(l.currentSegmentWriter.size))
	l.statMap.Set(statWALCurrentBytes, curSize)

	l.lastWriteTime = time.Now()

	return l.currentSegmentID, l.currentSegmentWriter.sync()
}
Example No. 10
func (s *Shard) monitorSize() {
	t := time.NewTicker(monitorStatInterval)
	defer t.Stop()
	for {
		select {
		case <-s.closing:
			return
		case <-t.C:
			size, err := s.DiskSize()
			if err != nil {
				s.logger.Printf("Error collecting shard size: %v", err)
				continue
			}
			sizeStat := new(expvar.Int)
			sizeStat.Set(size)
			s.statMap.Set(statDiskBytes, sizeStat)
		}
	}
}
Example No. 11
// newSegmentFile will close the current segment file and open a new one, updating bookkeeping info on the log
func (l *WAL) newSegmentFile() error {
	l.currentSegmentID++
	if l.currentSegmentWriter != nil {
		if err := l.currentSegmentWriter.close(); err != nil {
			return err
		}
		l.statMap.Add(statWALOldBytes, int64(l.currentSegmentWriter.size))
	}

	fileName := filepath.Join(l.path, fmt.Sprintf("%s%05d.%s", WALFilePrefix, l.currentSegmentID, WALFileExtension))
	fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	l.currentSegmentWriter = NewWALSegmentWriter(fd)

	// Reset the current segment size stat
	curSize := new(expvar.Int)
	curSize.Set(0)
	l.statMap.Set(statWALCurrentBytes, curSize)

	return nil
}
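This function mixes statMap.Add (for statWALOldBytes) with statMap.Set (for statWALCurrentBytes). The difference matters: Add accumulates into whatever Int is already stored under the key, creating it if needed, while Set replaces the value outright. A small standalone illustration (assumes "expvar" and "fmt" are imported):

func demoAddVsSet() {
	m := new(expvar.Map).Init()

	m.Add("bytes", 100) // creates the Int and adds 100
	m.Add("bytes", 50)  // accumulates into the existing Int
	fmt.Println(m)      // {"bytes": 150}

	fresh := new(expvar.Int)
	m.Set("bytes", fresh) // replaces the accumulated value with a zeroed Int
	fmt.Println(m)        // {"bytes": 0}
}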
Example No. 12
func (r *routingTable) reachableNodes() (tbl map[string][]byte) {
	tbl = make(map[string][]byte)
	for addr, node := range r.addresses {
		if addr == "" {
			log.V(3).Infof("reachableNodes: found empty address for node %x.", node.id)
			continue
		}
		if node.reachable && len(node.id) == 20 {
			tbl[addr] = []byte(node.id)
		}
	}
	}

	hexId := fmt.Sprintf("%x", r.nodeId)
	// This creates a new expvar everytime, but the alternative is too
	// bothersome (get the current value, type cast it, ensure it
	// exists..). Also I'm not using NewInt because I don't want to publish
	// the value.
	v := new(expvar.Int)
	v.Set(int64(len(tbl)))
	reachableNodes.Set(hexId, v)
	return

}
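The comment in the middle of this function alludes to the more "bothersome" alternative: fetch the current var, type-assert it, and update it in place. Assuming reachableNodes is an *expvar.Map, that alternative might look roughly like this:

if v := reachableNodes.Get(hexId); v != nil {
	if n, ok := v.(*expvar.Int); ok {
		n.Set(int64(len(tbl)))
	}
} else {
	n := new(expvar.Int)
	n.Set(int64(len(tbl)))
	reachableNodes.Set(hexId, n)
}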
Example No. 13
func (f *FileStore) Replace(oldFiles, newFiles []string) error {
	f.mu.Lock()
	defer f.mu.Unlock()

	f.lastModified = time.Now()

	// Copy the current set of active files while we rename
	// and load the new files.  We copy the pointers here to minimize
	// the time that locks are held as well as to ensure that the replacement
	// is atomic.
	var updated []TSMFile
	for _, t := range f.files {
		updated = append(updated, t)
	}

	// Rename all the new files to make them live on restart
	for _, file := range newFiles {
		var newName = file
		if strings.HasSuffix(file, ".tmp") {
			// The new TSM files have a tmp extension.  First rename them.
			newName = file[:len(file)-4]
			if err := os.Rename(file, newName); err != nil {
				return err
			}
		}

		fd, err := os.Open(newName)
		if err != nil {
			return err
		}

		tsm, err := NewTSMReader(fd)
		if err != nil {
			return err
		}
		updated = append(updated, tsm)
	}

	// We need to prune our set of active files now
	var active []TSMFile
	for _, file := range updated {
		keep := true
		for _, remove := range oldFiles {
			if remove == file.Path() {
				keep = false
				if err := file.Close(); err != nil {
					return err
				}

				if err := file.Remove(); err != nil {
					return err
				}
				break
			}
		}

		if keep {
			active = append(active, file)
		}
	}

	if err := syncDir(f.dir); err != nil {
		return err
	}

	f.files = active
	sort.Sort(tsmReaders(f.files))

	// Recalculate the disk size stat
	var totalSize int64
	for _, file := range f.files {
		totalSize += int64(file.Size())
	}
	sizeStat := new(expvar.Int)
	sizeStat.Set(totalSize)
	f.statMap.Set(statFileStoreBytes, sizeStat)

	return nil
}
Example No. 14
func ExampleExpvarCollector() {
	expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"memstats": prometheus.NewDesc(
			"expvar_memstats",
			"All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
			[]string{"type"}, nil,
		),
		"lone-int": prometheus.NewDesc(
			"expvar_lone_int",
			"Just an expvar int as an example.",
			nil, nil,
		),
		"http-request-map": prometheus.NewDesc(
			"expvar_http_request_total",
			"How many http requests processed, partitioned by status code and http method.",
			[]string{"code", "method"}, nil,
		),
	})
	prometheus.MustRegister(expvarCollector)

	// The Prometheus part is done here. But to show that this example is
	// doing anything, we have to manually export something via expvar.  In
	// real-life use-cases, some library would already have exported via
	// expvar what we want to re-export as Prometheus metrics.
	expvar.NewInt("lone-int").Set(42)
	expvarMap := expvar.NewMap("http-request-map")
	var (
		expvarMap1, expvarMap2                             expvar.Map
		expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
	)
	expvarMap1.Init()
	expvarMap2.Init()
	expvarInt11.Set(3)
	expvarInt12.Set(13)
	expvarInt21.Set(11)
	expvarInt22.Set(212)
	expvarMap1.Set("POST", &expvarInt11)
	expvarMap1.Set("GET", &expvarInt12)
	expvarMap2.Set("POST", &expvarInt21)
	expvarMap2.Set("GET", &expvarInt22)
	expvarMap.Set("404", &expvarMap1)
	expvarMap.Set("200", &expvarMap2)
	// Results in the following expvar map:
	// "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}

	// Let's see what the scrape would yield, but exclude the memstats metrics.
	metricStrings := []string{}
	metric := dto.Metric{}
	metricChan := make(chan prometheus.Metric)
	go func() {
		expvarCollector.Collect(metricChan)
		close(metricChan)
	}()
	for m := range metricChan {
		if !strings.Contains(m.Desc().String(), "expvar_memstats") {
			metric.Reset()
			m.Write(&metric)
			metricStrings = append(metricStrings, metric.String())
		}
	}
	sort.Strings(metricStrings)
	for _, s := range metricStrings {
		fmt.Println(strings.TrimRight(s, " "))
	}
	// Output:
	// label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
	// label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
	// label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
	// label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
	// untyped:<value:42 >
}