Example #1
// GetTotalMemory returns either the total system memory or, if it can be
// determined, the cgroups available memory.
func GetTotalMemory() (int64, error) {
	mem := gosigar.Mem{}
	if err := mem.Get(); err != nil {
		return 0, err
	}
	if mem.Total > math.MaxInt64 {
		return 0, fmt.Errorf("inferred memory size %s exceeds maximum supported memory size %s",
			humanize.IBytes(mem.Total), humanizeutil.IBytes(math.MaxInt64))
	}
	totalMem := int64(mem.Total)
	if runtime.GOOS == "linux" {
		var err error
		var buf []byte
		if buf, err = ioutil.ReadFile(defaultCGroupMemPath); err != nil {
			if log.V(1) {
				log.Infof(context.TODO(), "can't read available memory from cgroups (%s), using system memory %s instead", err,
					humanizeutil.IBytes(totalMem))
			}
			return totalMem, nil
		}
		var cgAvlMem uint64
		if cgAvlMem, err = strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 64); err != nil {
			if log.V(1) {
				log.Infof(context.TODO(), "can't parse available memory from cgroups (%s), using system memory %s instead", err,
					humanizeutil.IBytes(totalMem))
			}
			return totalMem, nil
		}
		if cgAvlMem > math.MaxInt64 {
			if log.V(1) {
				log.Infof(context.TODO(), "available memory from cgroups is too large and unsupported %s using system memory %s instead",
					humanize.IBytes(cgAvlMem), humanizeutil.IBytes(totalMem))

			}
			return totalMem, nil
		}
		if cgAvlMem > mem.Total {
			if log.V(1) {
				log.Infof(context.TODO(), "available memory from cgroups %s exceeds system memory %s, using system memory",
					humanize.IBytes(cgAvlMem), humanizeutil.IBytes(totalMem))
			}
			return totalMem, nil
		}

		return int64(cgAvlMem), nil
	}
	return totalMem, nil
}
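
A minimal usage sketch (the helper below and its 25% policy are my own illustration, not taken from the source): callers typically use the returned total to derive a default memory budget.

// defaultCacheSize is a hypothetical caller that sizes a cache as a quarter
// of whatever GetTotalMemory reports. The 25% policy is assumed for
// illustration only.
func defaultCacheSize() (int64, error) {
	totalMem, err := GetTotalMemory()
	if err != nil {
		return 0, err
	}
	return totalMem / 4, nil
}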
Example #2
// Stop completes a monitoring region.
func (mm *MemoryMonitor) Stop(ctx context.Context) {
	// NB: No need to lock mm.mu here: by the time Stop() is called, the
	// monitor is no longer shared.
	if log.V(1) {
		log.InfofDepth(ctx, 1, "%s, memory usage max %s",
			mm.name,
			humanizeutil.IBytes(mm.mu.maxAllocated))
	}

	if mm.mu.curAllocated != 0 {
		panic(fmt.Sprintf("%s: unexpected leftover memory: %d bytes",
			mm.name,
			mm.mu.curAllocated))
	}

	mm.releaseBudget(ctx)

	if mm.maxBytesHist != nil && mm.mu.maxAllocated > 0 {
		// TODO(knz) We record the logarithm because the UI doesn't know
		// how to do logarithmic y-axes yet. See the explanatory comments
		// in sql/mem_metrics.go.
		val := int64(1000 * math.Log(float64(mm.mu.maxAllocated)) / math.Ln10)
		mm.maxBytesHist.RecordValue(val)
	}

	// Disable the pool for further allocations, so that further
	// uses outside of monitor control get errors.
	mm.pool = nil

	// Release the reserved budget to its original pool, if any.
	mm.reserved.Close()
}
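
To make the logarithmic encoding above concrete (a worked example of my own, not from the source): a peak allocation of 1 MiB (1,048,576 bytes) is recorded as val = int64(1000 * log10(1048576)) ≈ 6020, and the UI can recover the original order of magnitude as 10^(6020/1000) ≈ 1.05e6 bytes.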
Example #3
// String returns a fully parsable version of the store spec.
func (ss StoreSpec) String() string {
	var buffer bytes.Buffer
	if len(ss.Path) != 0 {
		fmt.Fprintf(&buffer, "path=%s,", ss.Path)
	}
	if ss.InMemory {
		fmt.Fprint(&buffer, "type=mem,")
	}
	if ss.SizeInBytes > 0 {
		fmt.Fprintf(&buffer, "size=%s,", humanizeutil.IBytes(ss.SizeInBytes))
	}
	if ss.SizePercent > 0 {
		fmt.Fprintf(&buffer, "size=%s%%,", humanize.Ftoa(ss.SizePercent))
	}
	if len(ss.Attributes.Attrs) > 0 {
		fmt.Fprint(&buffer, "attrs=")
		for i, attr := range ss.Attributes.Attrs {
			if i != 0 {
				fmt.Fprint(&buffer, ":")
			}
			fmt.Fprintf(&buffer, attr)
		}
		fmt.Fprintf(&buffer, ",")
	}
	// Trim the extra comma from the end if it exists.
	if l := buffer.Len(); l > 0 {
		buffer.Truncate(l - 1)
	}
	return buffer.String()
}
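
For illustration (the path, size, and attributes below are invented), a populated spec renders as a single comma-separated value that newStoreSpec (Example #8) can parse back:

// exampleStoreSpecString is a hypothetical caller; the values are chosen
// only to show the output format.
func exampleStoreSpecString() {
	spec := StoreSpec{Path: "/mnt/ssd1", SizeInBytes: 32 << 30}
	spec.Attributes.Attrs = []string{"ssd", "fast"}
	fmt.Println(spec.String()) // path=/mnt/ssd1,size=32 GiB,attrs=ssd:fast
}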
Example #4
// Start begins a monitoring region.
// Arguments:
// - pool is the upstream memory monitor that provisions allocations
//   exceeding the pre-reserved budget. If pool is nil, no upstream
//   allocations are possible and the pre-reserved budget determines the
//   entire capacity of this monitor.
//
// - reserved is the pre-reserved budget (see above).
func (mm *MemoryMonitor) Start(ctx context.Context, pool *MemoryMonitor, reserved BoundAccount) {
	if mm.mu.curAllocated != 0 {
		panic(fmt.Sprintf("%s: started with %d bytes left over", mm.name, mm.mu.curAllocated))
	}
	if mm.pool != nil {
		panic(fmt.Sprintf("%s: already started with pool %s", mm.name, mm.pool.name))
	}
	mm.pool = pool
	mm.mu.curAllocated = 0
	mm.mu.maxAllocated = 0
	mm.mu.curBudget.curAllocated = 0
	mm.reserved = reserved
	if log.V(2) {
		poolname := "(none)"
		if pool != nil {
			poolname = pool.name
		}
		log.InfofDepth(ctx, 1, "%s: starting monitor, reserved %s, pool %s",
			mm.name,
			humanizeutil.IBytes(mm.reserved.curAllocated),
			poolname)
	}
}
Example #5
// reserveMemory declares an allocation to this monitor. An error is
// returned if the allocation is denied.
func (mm *MemoryMonitor) reserveMemory(ctx context.Context, x int64) error {
	mm.mu.Lock()
	defer mm.mu.Unlock()
	if mm.mu.curAllocated > mm.mu.curBudget.curAllocated+mm.reserved.curAllocated-x {
		if err := mm.increaseBudget(ctx, x); err != nil {
			return err
		}
	}
	mm.mu.curAllocated += x
	if mm.curBytesCount != nil {
		mm.curBytesCount.Inc(x)
	}
	if mm.mu.maxAllocated < mm.mu.curAllocated {
		mm.mu.maxAllocated = mm.mu.curAllocated
	}

	// Report "large" queries to the log for further investigation.
	if mm.mu.curAllocated > mm.noteworthyUsageBytes {
		// We only report changes in binary magnitude of the size.  This
		// is to limit the amount of log messages when a size blowup is
		// caused by many small allocations.
		if util.RoundUpPowerOfTwo(mm.mu.curAllocated) != util.RoundUpPowerOfTwo(mm.mu.curAllocated-x) {
			log.Infof(ctx, "%s: memory usage increases to %s (+%d)",
				mm.name,
				humanizeutil.IBytes(mm.mu.curAllocated), x)
		}
	}

	if log.V(2) {
		// We avoid VEventf here because we want to avoid computing the
		// trace string if there is nothing to log.
		log.Infof(ctx, "%s: now at %d bytes (+%d) - %s",
			mm.name, mm.mu.curAllocated, x, util.GetSmallTrace(3))
	}
	return nil
}
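
To make the reporting throttle concrete (numbers are mine and assume usage is already above noteworthyUsageBytes; util.RoundUpPowerOfTwo rounds its argument up to the next power of two): growing from 60 KiB to 68 KiB crosses a binary magnitude (the rounded values are 64 KiB vs. 128 KiB), so a line is logged; a further bump from 68 KiB to 69 KiB stays within the 128 KiB bucket and is silent. A flood of small allocations therefore produces at most one message per doubling of usage.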
Example #6
// CreateEngines creates Engines based on the specs in ctx.Stores.
func (cfg *Config) CreateEngines() (Engines, error) {
	engines := Engines(nil)
	defer engines.Close()

	if cfg.enginesCreated {
		return Engines{}, errors.Errorf("engines already created")
	}
	cfg.enginesCreated = true

	cache := engine.NewRocksDBCache(cfg.CacheSize)
	defer cache.Release()

	var physicalStores int
	for _, spec := range cfg.Stores.Specs {
		if !spec.InMemory {
			physicalStores++
		}
	}
	openFileLimitPerStore, err := setOpenFileLimit(physicalStores)
	if err != nil {
		return Engines{}, err
	}

	skipSizeCheck := cfg.TestingKnobs.Store != nil &&
		cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs).SkipMinSizeCheck
	for _, spec := range cfg.Stores.Specs {
		var sizeInBytes = spec.SizeInBytes
		if spec.InMemory {
			if spec.SizePercent > 0 {
				sysMem, err := GetTotalMemory()
				if err != nil {
					return Engines{}, errors.Errorf("could not retrieve system memory")
				}
				sizeInBytes = int64(float64(sysMem) * spec.SizePercent / 100)
			}
			if sizeInBytes != 0 && !skipSizeCheck && sizeInBytes < base.MinimumStoreSize {
				return Engines{}, errors.Errorf("%f%% of memory is only %s bytes, which is below the minimum requirement of %s",
					spec.SizePercent, humanizeutil.IBytes(sizeInBytes), humanizeutil.IBytes(base.MinimumStoreSize))
			}
			engines = append(engines, engine.NewInMem(spec.Attributes, sizeInBytes))
		} else {
			if spec.SizePercent > 0 {
				fileSystemUsage := gosigar.FileSystemUsage{}
				if err := fileSystemUsage.Get(spec.Path); err != nil {
					return Engines{}, err
				}
				sizeInBytes = int64(float64(fileSystemUsage.Total) * spec.SizePercent / 100)
			}
			if sizeInBytes != 0 && !skipSizeCheck && sizeInBytes < base.MinimumStoreSize {
				return Engines{}, errors.Errorf("%f%% of %s's total free space is only %s bytes, which is below the minimum requirement of %s",
					spec.SizePercent, spec.Path, humanizeutil.IBytes(sizeInBytes), humanizeutil.IBytes(base.MinimumStoreSize))
			}

			eng, err := engine.NewRocksDB(
				spec.Attributes,
				spec.Path,
				cache,
				sizeInBytes,
				openFileLimitPerStore,
			)
			if err != nil {
				return Engines{}, err
			}
			engines = append(engines, eng)
		}
	}

	if len(engines) == 1 {
		log.Infof(context.TODO(), "1 storage engine initialized")
	} else {
		log.Infof(context.TODO(), "%d storage engines initialized", len(engines))
	}
	enginesCopy := engines
	engines = nil
	return enginesCopy, nil
}
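
As a worked example of the percentage sizing (figures are illustrative): size=10% on a 256 GiB filesystem gives sizeInBytes = 256 GiB * 10 / 100 ≈ 25.6 GiB, which is then checked against base.MinimumStoreSize before the RocksDB engine is created; on a very small volume the same percentage could fall below that minimum and return the error above unless the store testing knob skips the check.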
Example #7
// Capacity queries the underlying file system for disk capacity information.
func (r *RocksDB) Capacity() (roachpb.StoreCapacity, error) {
	fileSystemUsage := gosigar.FileSystemUsage{}
	dir := r.dir
	if dir == "" {
		// This is an in-memory instance. Pretend we're empty since we
		// don't know better and only use this for testing. Using any
		// part of the actual file system here can throw off allocator
		// rebalancing in a hard-to-trace manner. See #7050.
		return roachpb.StoreCapacity{
			Capacity:  r.maxSize,
			Available: r.maxSize,
		}, nil
	}
	if err := fileSystemUsage.Get(dir); err != nil {
		return roachpb.StoreCapacity{}, err
	}

	if fileSystemUsage.Total > math.MaxInt64 {
		return roachpb.StoreCapacity{}, fmt.Errorf("unsupported disk size %s, max supported size is %s",
			humanize.IBytes(fileSystemUsage.Total), humanizeutil.IBytes(math.MaxInt64))
	}
	if fileSystemUsage.Avail > math.MaxInt64 {
		return roachpb.StoreCapacity{}, fmt.Errorf("unsupported disk size %s, max supported size is %s",
			humanize.IBytes(fileSystemUsage.Avail), humanizeutil.IBytes(math.MaxInt64))
	}
	fsuTotal := int64(fileSystemUsage.Total)
	fsuAvail := int64(fileSystemUsage.Avail)

	// If no size limitation has been placed on the store or if the
	// limitation is greater than what's available, just return the actual
	// totals.
	if r.maxSize == 0 || r.maxSize >= fsuTotal || r.dir == "" {
		return roachpb.StoreCapacity{
			Capacity:  fsuTotal,
			Available: fsuAvail,
		}, nil
	}

	// Find the total size of all the files in the r.dir and all its
	// subdirectories.
	var totalUsedBytes int64
	if errOuter := filepath.Walk(r.dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		if info.Mode().IsRegular() {
			totalUsedBytes += info.Size()
		}
		return nil
	}); errOuter != nil {
		return roachpb.StoreCapacity{}, errOuter
	}

	available := r.maxSize - totalUsedBytes
	if available > fsuAvail {
		available = fsuAvail
	}
	if available < 0 {
		available = 0
	}

	return roachpb.StoreCapacity{
		Capacity:  r.maxSize,
		Available: available,
	}, nil
}
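
A worked example of the capped computation above (numbers are mine): with r.maxSize = 100 GiB, 30 GiB of store files on disk, and 50 GiB of filesystem space actually free, available = 100 - 30 = 70 GiB is capped to the 50 GiB the filesystem can provide; if the store files already exceeded maxSize, available would be floored at 0.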
Example #8
// newStoreSpec parses the string passed into a --store flag and returns a
// StoreSpec if it is correctly parsed.
// There are four possible fields that can be passed in, comma separated:
// - path=xxx The directory in which the RocksDB instance should be located;
//   required unless using in-memory storage.
// - type=mem This specifies that the store is in-memory storage instead of
//   an on-disk one. mem is currently the only other type available.
// - size=xxx The optional maximum size of the storage. This can be in one of a
//   few different formats.
//   - 10000000000     -> 10000000000 bytes
//   - 20GB            -> 20000000000 bytes
//   - 20GiB           -> 21474836480 bytes
//   - 0.02TiB         -> 21474836480 bytes
//   - 20%             -> 20% of the available space
//   - 0.2             -> 20% of the available space
// - attrs=xxx:yyy:zzz A colon separated list of optional attributes.
// Note that commas are forbidden within any field name or value.
func newStoreSpec(value string) (StoreSpec, error) {
	if len(value) == 0 {
		return StoreSpec{}, fmt.Errorf("no value specified")
	}
	var ss StoreSpec
	used := make(map[string]struct{})
	for _, split := range strings.Split(value, ",") {
		if len(split) == 0 {
			continue
		}
		subSplits := strings.SplitN(split, "=", 2)
		var field string
		var value string
		if len(subSplits) == 1 {
			field = "path"
			value = subSplits[0]
		} else {
			field = strings.ToLower(subSplits[0])
			value = subSplits[1]
		}
		if _, ok := used[field]; ok {
			return StoreSpec{}, fmt.Errorf("%s field was used twice in store definition", field)
		}
		used[field] = struct{}{}

		if len(field) == 0 {
			continue
		}
		if len(value) == 0 {
			return StoreSpec{}, fmt.Errorf("no value specified for %s", field)
		}

		switch field {
		case "path":
			if len(value) == 0 {

			}
			ss.Path = value
		case "size":
			if len(value) == 0 {
				return StoreSpec{}, fmt.Errorf("no size specified")
			}

			if unicode.IsDigit(rune(value[len(value)-1])) &&
				(strings.HasPrefix(value, "0.") || strings.HasPrefix(value, ".")) {
				// Value is a percentage without % sign.
				var err error
				ss.SizePercent, err = strconv.ParseFloat(value, 64)
				if err != nil {
					return StoreSpec{}, fmt.Errorf("could not parse store size (%s) %s", value, err)
				}
				ss.SizePercent *= 100
				if ss.SizePercent > 100 || ss.SizePercent < 1 {
					return StoreSpec{}, fmt.Errorf("store size (%s) must be between 1%% and 100%%", value)
				}
			} else if strings.HasSuffix(value, "%") {
				// Value is a percentage.
				var err error
				ss.SizePercent, err = strconv.ParseFloat(value[:len(value)-1], 64)
				if err != nil {
					return StoreSpec{}, fmt.Errorf("could not parse store size (%s) %s", value, err)
				}
				if ss.SizePercent > 100 || ss.SizePercent < 1 {
					return StoreSpec{}, fmt.Errorf("store size (%s) must be between 1%% and 100%%", value)
				}
			} else {
				var err error
				ss.SizeInBytes, err = humanizeutil.ParseBytes(value)
				if err != nil {
					return StoreSpec{}, fmt.Errorf("could not parse store size (%s) %s", value, err)
				}
				if ss.SizeInBytes < MinimumStoreSize {
					return StoreSpec{}, fmt.Errorf("store size (%s) must be larger than %s", value,
						humanizeutil.IBytes(MinimumStoreSize))
				}
			}
		case "attrs":
			if len(value) == 0 {
				return StoreSpec{}, fmt.Errorf("no attributes specified")
			}
			// Check to make sure there are no duplicate attributes.
			attrMap := make(map[string]struct{})
			for _, attribute := range strings.Split(value, ":") {
				if _, ok := attrMap[attribute]; ok {
					return StoreSpec{}, fmt.Errorf("duplicate attribute given for store: %s", attribute)
				}
				attrMap[attribute] = struct{}{}
			}
			for attribute := range attrMap {
				ss.Attributes.Attrs = append(ss.Attributes.Attrs, attribute)
			}
			sort.Strings(ss.Attributes.Attrs)
		case "type":
			if value == "mem" {
				ss.InMemory = true
			} else {
				return StoreSpec{}, fmt.Errorf("%s is not a valid store type", value)
			}
		default:
			return StoreSpec{}, fmt.Errorf("%s is not a valid store field", field)
		}
	}
	if ss.InMemory {
		// In-memory stores must not specify a path and must specify a size.
		if ss.Path != "" {
			return StoreSpec{}, fmt.Errorf("path specified for in memory store")
		}
		if ss.SizePercent == 0 && ss.SizeInBytes == 0 {
			return StoreSpec{}, fmt.Errorf("size must be specified for an in memory store")
		}
	} else if ss.Path == "" {
		return StoreSpec{}, fmt.Errorf("no path specified")
	}
	return ss, nil
}
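
A hedged usage sketch (the flag value below is invented; the expected fields follow the parsing rules documented above):

// exampleParseStoreSpec is a hypothetical caller that parses a --store value.
func exampleParseStoreSpec() {
	spec, err := newStoreSpec("path=/mnt/ssd1,size=20GiB,attrs=ssd:fast")
	if err != nil {
		panic(err)
	}
	// spec.Path == "/mnt/ssd1"
	// spec.SizeInBytes == 21474836480 (20 GiB)
	// spec.Attributes.Attrs == []string{"fast", "ssd"} (sorted)
	_ = spec
}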
Example #9
// TestBytes verifies both IBytes and ParseBytes.
func TestBytes(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testCases := []struct {
		value       int64
		exp         string
		expNeg      string
		parseExp    int64
		parseErr    string
		parseErrNeg string
	}{
		{0, "0 B", "0 B", 0, "", ""},
		{1024, "1.0 KiB", "-1.0 KiB", 1024, "", ""},
		{1024 << 10, "1.0 MiB", "-1.0 MiB", 1024 << 10, "", ""},
		{1024 << 20, "1.0 GiB", "-1.0 GiB", 1024 << 20, "", ""},
		{1024 << 30, "1.0 TiB", "-1.0 TiB", 1024 << 30, "", ""},
		{1024 << 40, "1.0 PiB", "-1.0 PiB", 1024 << 40, "", ""},
		{1024 << 50, "1.0 EiB", "-1.0 EiB", 1024 << 50, "", ""},
		{int64(math.MaxInt64), "8.0 EiB", "-8.0 EiB", 0, "too large: 8.0 EiB", "too large: -8.0 EiB"},
	}

	for i, testCase := range testCases {
		// Test IBytes.
		if actual := humanizeutil.IBytes(testCase.value); actual != testCase.exp {
			t.Errorf("%d: IBytes(%d) actual:%s does not match expected:%s", i, testCase.value, actual, testCase.exp)
		}
		// Test negative IBytes.
		if actual := humanizeutil.IBytes(-testCase.value); actual != testCase.expNeg {
			t.Errorf("%d: IBytes(%d) actual:%s does not match expected:%s", i, -testCase.value, actual,
				testCase.expNeg)
		}
		// Test ParseBytes.
		if actual, err := humanizeutil.ParseBytes(testCase.exp); err != nil {
			if len(testCase.parseErr) > 0 {
				if testCase.parseErr != err.Error() {
					t.Errorf("%d: ParseBytes(%s) caused an incorrect error actual:%s, expected:%s", i, testCase.exp,
						err, testCase.parseErr)
				}
			} else {
				t.Errorf("%d: ParseBytes(%s) caused an unexpected error:%s", i, testCase.exp, err)
			}
		} else if actual != testCase.parseExp {
			t.Errorf("%d: ParseBytes(%s) actual:%d does not match expected:%d", i, testCase.exp, actual,
				testCase.parseExp)
		}
		// Test negative ParseBytes.
		if actual, err := humanizeutil.ParseBytes(testCase.expNeg); err != nil {
			if len(testCase.parseErrNeg) > 0 {
				if testCase.parseErrNeg != err.Error() {
					t.Errorf("%d: ParseBytes(%s) caused an incorrect error actual:%s, expected:%s", i, testCase.expNeg,
						err, testCase.parseErrNeg)
				}
			} else {
				t.Errorf("%d: ParseBytes(%s) caused an unexpected error:%s", i, testCase.expNeg, err)
			}
		} else if actual != -testCase.parseExp {
			t.Errorf("%d: ParseBytes(%s) actual:%d does not match expected:%d", i, testCase.expNeg, actual,
				-testCase.parseExp)
		}
	}

	// Some extra error cases for good measure.
	testFailCases := []struct {
		value    string
		expected string
	}{
		{"", "parsing \"\": invalid syntax"},   // our error
		{"1 ZB", "unhandled size name: zb"},    // humanize's error
		{"-1 ZB", "unhandled size name: zb"},   // humanize's error
		{"1 ZiB", "unhandled size name: zib"},  // humanize's error
		{"-1 ZiB", "unhandled size name: zib"}, // humanize's error
		{"100 EiB", "too large: 100 EiB"},      // humanize's error
		{"-100 EiB", "too large: 100 EiB"},     // humanize's error
		{"10 EiB", "too large: 10 EiB"},        // our error
		{"-10 EiB", "too large: -10 EiB"},      // our error
	}
	for i, testCase := range testFailCases {
		if _, err := humanizeutil.ParseBytes(testCase.value); err.Error() != testCase.expected {
			t.Errorf("%d: ParseBytes(%s) caused an incorrect error actual:%s, expected:%s", i, testCase.value, err,
				testCase.expected)
		}
	}
}
Example #10
func (b *bytesValue) String() string {
	// This uses the MiB, GiB, etc. suffixes. humanize.Bytes() would instead use
	// the MB, GB, etc. suffixes, with the conversion done in multiples of 1000
	// rather than 1024.
	return humanizeutil.IBytes(*b.val)
}
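
To make the IEC-vs-SI distinction in that comment concrete, here is a standalone sketch using the dustin/go-humanize package directly (the printed strings reflect my understanding of that library's formatting):

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	const n uint64 = 20 << 30           // 21474836480 bytes
	fmt.Println(humanize.IBytes(n))     // expected "20 GiB": multiples of 1024
	fmt.Println(humanize.Bytes(n))      // expected "21 GB": multiples of 1000
}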