Example #1
func TestEncoding_BoolBlock_Basic(t *testing.T) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		v := true
		if i%2 == 0 {
			v = false
		}
		values[i] = tsm1.NewValue(t, v)
	}

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	decodedValues, err = tsm1.DecodeBlock(b, decodedValues)
	if err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if !reflect.DeepEqual(decodedValues, values) {
		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
	}
}
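
The encoding tests and benchmarks on this page call a getTimes test helper that is not shown here. A minimal sketch of what such a helper might look like, assuming it simply returns n timestamps spaced a multiple of the given interval apart (the body below is a reconstruction, not the original implementation):

// getTimes is a hypothetical reconstruction of the helper used by the
// encoding tests above: it returns n timestamps starting from a rounded
// "now", each step*interval apart, matching the getTimes(valueCount, 60,
// time.Second) call sites.
func getTimes(n, step int, interval time.Duration) []time.Time {
	base := time.Now().Round(interval)
	a := make([]time.Time, n)
	for i := 0; i < n; i++ {
		a[i] = base.Add(time.Duration(i*step) * interval)
	}
	return a
}

It only depends on the standard time package, so it can live alongside the tests.
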
Example #2
func TestEncoding_IntBlock_Basic(t *testing.T) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		values[i] = tsm1.NewValue(t, int64(i))
	}

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	decodedValues, err = tsm1.DecodeBlock(b, decodedValues)
	if err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if len(decodedValues) != len(values) {
		t.Fatalf("unexpected results length:\n\tgot: %v\n\texp: %v\n", len(decodedValues), len(values))
	}

	for i := 0; i < len(decodedValues); i++ {

		if decodedValues[i].Time() != values[i].Time() {
			t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues[i].Time(), values[i].Time())
		}

		if decodedValues[i].Value() != values[i].Value() {
			t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues[i].Value(), values[i].Value())
		}
	}
}
Example #3
func TestEncoding_IntBlock_Negatives(t *testing.T) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		v := int64(i)
		if i%2 == 0 {
			v = -v
		}
		values[i] = tsm1.NewValue(t, v)
	}

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if !reflect.DeepEqual(decodedValues, values) {
		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
	}
}
Example #4
// Tests that duplicate point values are merged. There is only one case
// where this can happen: a compaction completed, we replaced the old TSM file
// with a new one, and we crashed just before deleting the old file. No data
// is lost, but the same point time/value would exist in two files until
// compaction corrects it.
func TestTSMKeyIterator_Duplicate(t *testing.T) {
	dir := MustTempDir()
	defer os.RemoveAll(dir)

	v1 := tsm1.NewValue(time.Unix(1, 0), int64(1))
	v2 := tsm1.NewValue(time.Unix(1, 0), int64(2))

	writes1 := map[string][]tsm1.Value{
		"cpu,host=A#!~#value": []tsm1.Value{v1},
	}

	r1 := MustTSMReader(dir, 1, writes1)

	writes2 := map[string][]tsm1.Value{
		"cpu,host=A#!~#value": []tsm1.Value{v2},
	}

	r2 := MustTSMReader(dir, 2, writes2)

	iter, err := tsm1.NewTSMKeyIterator(1, false, r1, r2)
	if err != nil {
		t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
	}

	var readValues bool
	for iter.Next() {
		key, _, _, block, err := iter.Read()
		if err != nil {
			t.Fatalf("unexpected error read: %v", err)
		}

		values, err := tsm1.DecodeBlock(block, nil)
		if err != nil {
			t.Fatalf("unexpected error decode: %v", err)
		}

		if got, exp := key, "cpu,host=A#!~#value"; got != exp {
			t.Fatalf("key mismatch: got %v, exp %v", got, exp)
		}

		if got, exp := len(values), 1; got != exp {
			t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
		}

		readValues = true
		assertValueEqual(t, values[0], v2)
	}

	if !readValues {
		t.Fatalf("failed to read any values")
	}
}
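
The TSM iterator tests also rely on unexported helpers (MustTempDir, MustTSMReader, assertValueEqual) whose bodies are not shown on this page. MustTSMReader wraps the TSM writer and reader setup and is omitted here because its construction depends on the tsm1 writer API of the version in question; the two simpler helpers could plausibly look like the following sketch (assumed implementations, not the originals):

// MustTempDir creates a temporary directory for a test TSM file and panics
// if it cannot (assumed behaviour, mirroring the Must* naming convention).
func MustTempDir() string {
	dir, err := ioutil.TempDir("", "tsm1-test")
	if err != nil {
		panic(fmt.Sprintf("failed to create temp dir: %v", err))
	}
	return dir
}

// assertValueEqual fails the test if the decoded value differs from the
// expected one in either timestamp or value.
func assertValueEqual(t *testing.T, got, exp tsm1.Value) {
	if got.Time() != exp.Time() {
		t.Fatalf("time mismatch: got %v, exp %v", got.Time(), exp.Time())
	}
	if got.Value() != exp.Value() {
		t.Fatalf("value mismatch: got %v, exp %v", got.Value(), exp.Value())
	}
}
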
Example #5
// Tests that a single TSM file can be read and iterated over
func TestTSMKeyIterator_Chunked(t *testing.T) {
	t.Skip("fixme")
	dir := MustTempDir()
	defer os.RemoveAll(dir)

	v0 := tsm1.NewValue(time.Unix(1, 0), 1.1)
	v1 := tsm1.NewValue(time.Unix(2, 0), 2.1)
	writes := map[string][]tsm1.Value{
		"cpu,host=A#!~#value": []tsm1.Value{v0, v1},
	}

	r := MustTSMReader(dir, 1, writes)

	iter, err := tsm1.NewTSMKeyIterator(1, false, r)
	if err != nil {
		t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
	}

	var readValues bool
	var chunk int
	for iter.Next() {
		key, _, _, block, err := iter.Read()
		if err != nil {
			t.Fatalf("unexpected error read: %v", err)
		}

		values, err := tsm1.DecodeBlock(block, nil)
		if err != nil {
			t.Fatalf("unexpected error decode: %v", err)
		}

		if got, exp := key, "cpu,host=A#!~#value"; got != exp {
			t.Fatalf("key mismatch: got %v, exp %v", got, exp)
		}

		if got, exp := len(values), len(writes); got != exp {
			t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
		}

		for _, v := range values {
			readValues = true
			assertValueEqual(t, v, writes["cpu,host=A#!~#value"][chunk])
		}
		chunk++
	}

	if !readValues {
		t.Fatalf("failed to read any values")
	}
}
Example #6
func TestCacheKeyIterator_Chunked(t *testing.T) {
	v0 := tsm1.NewValue(time.Unix(1, 0).UTC(), 1.0)
	v1 := tsm1.NewValue(time.Unix(2, 0).UTC(), 2.0)

	writes := map[string][]tsm1.Value{
		"cpu,host=A#!~#value": []tsm1.Value{v0, v1},
	}

	c := tsm1.NewCache(0)

	for k, v := range writes {
		if err := c.Write(k, v); err != nil {
			t.Fatalf("failed to write key foo to cache: %s", err.Error())
		}
	}

	iter := tsm1.NewCacheKeyIterator(c, 1)
	var readValues bool
	var chunk int
	for iter.Next() {
		key, _, _, block, err := iter.Read()
		if err != nil {
			t.Fatalf("unexpected error read: %v", err)
		}

		values, err := tsm1.DecodeBlock(block, nil)
		if err != nil {
			t.Fatalf("unexpected error decode: %v", err)
		}

		if got, exp := key, "cpu,host=A#!~#value"; got != exp {
			t.Fatalf("key mismatch: got %v, exp %v", got, exp)
		}

		if got, exp := len(values), 1; got != exp {
			t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
		}

		for _, v := range values {
			readValues = true
			assertValueEqual(t, v, writes["cpu,host=A#!~#value"][chunk])
		}
		chunk++
	}

	if !readValues {
		t.Fatalf("failed to read any values")
	}
}
Example #7
func TestEncoding_FloatBlock_ZeroTime(t *testing.T) {
	values := make([]tsm1.Value, 3)
	for i := 0; i < 3; i++ {
		values[i] = tsm1.NewValue(time.Unix(0, 0), float64(i))
	}

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if !reflect.DeepEqual(decodedValues, values) {
		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
	}
}
Example #8
func BenchmarkDecodeBlock_String_EqualSize(b *testing.B) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i))
	}

	bytes, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		b.Fatalf("unexpected error: %v", err)
	}

	decodedValues := make([]tsm1.Value, len(values))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err = tsm1.DecodeBlock(bytes, decodedValues)
		if err != nil {
			b.Fatalf("unexpected error decoding block: %v", err)
		}
	}
}
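
This DecodeBlock benchmark, like the Bool variant in the next example, can be run with Go's standard tooling, for instance go test -bench=DecodeBlock -benchmem from the package directory that holds these tests; the exact package path depends on the InfluxDB source layout of the version being examined.
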
Example #9
func BenchmarkDecodeBlock_Bool_Empty(b *testing.B) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		values[i] = tsm1.NewValue(t, true)
	}

	bytes, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		b.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err = tsm1.DecodeBlock(bytes, decodedValues)
		if err != nil {
			b.Fatalf("unexpected error decoding block: %v", err)
		}
	}
}
Example #10
func TestEncoding_FloatBlock_SimilarFloats(t *testing.T) {
	values := make([]tsm1.Value, 5)
	values[0] = tsm1.NewValue(time.Unix(0, 1444238178437870000), 6.00065e+06)
	values[1] = tsm1.NewValue(time.Unix(0, 1444238185286830000), 6.000656e+06)
	values[2] = tsm1.NewValue(time.Unix(0, 1444238188441501000), 6.000657e+06)
	values[3] = tsm1.NewValue(time.Unix(0, 1444238195286811000), 6.000659e+06)
	values[4] = tsm1.NewValue(time.Unix(0, 1444238198439917000), 6.000661e+06)

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if !reflect.DeepEqual(decodedValues, values) {
		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
	}
}
Example #11
func TestEncoding_StringBlock_Basic(t *testing.T) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i))
	}

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if !reflect.DeepEqual(decodedValues, values) {
		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
	}
}
Example #12
func TestEncoding_FloatBlock(t *testing.T) {
	valueCount := 1000
	times := getTimes(valueCount, 60, time.Second)
	values := make([]tsm1.Value, len(times))
	for i, t := range times {
		values[i] = tsm1.NewValue(t, float64(i))
	}

	b, err := tsm1.Values(values).Encode(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var decodedValues []tsm1.Value
	if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
		t.Fatalf("unexpected error decoding block: %v", err)
	}

	if !reflect.DeepEqual(decodedValues, values) {
		t.Fatalf("unexpected results:\n\tgot: %s\n\texp: %s\n", spew.Sdump(decodedValues), spew.Sdump(values))
	}
}
Example #13
// Tests that deleted keys are not seen during iteration with
// TSM files.
func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
	dir := MustTempDir()
	defer os.RemoveAll(dir)

	v1 := tsm1.NewValue(time.Unix(2, 0), int64(1))
	points1 := map[string][]tsm1.Value{
		"cpu,host=A#!~#value": []tsm1.Value{v1},
	}

	r1 := MustTSMReader(dir, 1, points1)
	if e := r1.Delete([]string{"cpu,host=A#!~#value"}); nil != e {
		t.Fatal(e)
	}

	v2 := tsm1.NewValue(time.Unix(1, 0), float64(1))
	v3 := tsm1.NewValue(time.Unix(1, 0), float64(1))

	points2 := map[string][]tsm1.Value{
		"cpu,host=A#!~#count": []tsm1.Value{v2},
		"cpu,host=B#!~#value": []tsm1.Value{v3},
	}

	r2 := MustTSMReader(dir, 2, points2)
	if e := r2.Delete([]string{"cpu,host=A#!~#count"}); nil != e {
		t.Fatal(e)
	}

	iter, err := tsm1.NewTSMKeyIterator(1, false, r1, r2)
	if err != nil {
		t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
	}

	var readValues bool
	var data = []struct {
		key   string
		value tsm1.Value
	}{
		{"cpu,host=B#!~#value", v3},
	}

	for iter.Next() {
		key, _, _, block, err := iter.Read()
		if err != nil {
			t.Fatalf("unexpected error read: %v", err)
		}

		values, err := tsm1.DecodeBlock(block, nil)
		if err != nil {
			t.Fatalf("unexpected error decode: %v", err)
		}

		if got, exp := key, data[0].key; got != exp {
			t.Fatalf("key mismatch: got %v, exp %v", got, exp)
		}

		if got, exp := len(values), 1; got != exp {
			t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
		}
		readValues = true

		assertValueEqual(t, values[0], data[0].value)
		data = data[1:]
	}

	if !readValues {
		t.Fatalf("failed to read any values")
	}
}
Example #14
func cmdDumpTsm1(opts *tsdmDumpOpts) {
	var errors []error

	f, err := os.Open(opts.path)
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	// Get the file size
	stat, err := f.Stat()
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	b := make([]byte, 8)
	f.Read(b[:4])

	// Verify magic number
	if binary.BigEndian.Uint32(b[:4]) != 0x16D116D1 {
		println("Not a tsm1 file.")
		os.Exit(1)
	}

	ids, err := readIds(filepath.Dir(opts.path))
	if err != nil {
		println("Failed to read series:", err.Error())
		os.Exit(1)
	}

	invIds := map[uint64]string{}
	for k, v := range ids {
		invIds[v] = k
	}

	index, err := readIndex(f)
	if err != nil {
		println("Failed to readIndex:", err.Error())

		// Create a stubbed out index so we can still try to read the block data
		// directly without panicking.
		index = &tsmIndex{
			minTime: time.Unix(0, 0),
			maxTime: time.Unix(0, 0),
			offset:  stat.Size(),
		}
	}

	blockStats := &blockStats{}

	println("Summary:")
	fmt.Printf("  File: %s\n", opts.path)
	fmt.Printf("  Time Range: %s - %s\n",
		index.minTime.UTC().Format(time.RFC3339Nano),
		index.maxTime.UTC().Format(time.RFC3339Nano),
	)
	fmt.Printf("  Duration: %s ", index.maxTime.Sub(index.minTime))
	fmt.Printf("  Series: %d ", index.series)
	fmt.Printf("  File Size: %d\n", stat.Size())
	println()

	tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Pos", "ID", "Ofs", "Key", "Field"}, "\t"))
	for i, block := range index.blocks {
		key := invIds[block.id]
		split := strings.Split(key, "#!~#")

		// We don't know if we have fields, so use an informative default
		var measurement, field string = "UNKNOWN", "UNKNOWN"

		// We read some IDs from the ids file
		if len(invIds) > 0 {
			// Change the default to error until we know we have a valid key
			measurement = "ERR"
			field = "ERR"

			// Possible corruption? Try to read as much as we can and point to the problem.
			if key == "" {
				errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id.", i, block.id))
			} else if len(split) < 2 {
				errors = append(errors, fmt.Errorf("index pos %d, field id: %d, key corrupt: got '%v'", i, block.id, key))
			} else {
				measurement = split[0]
				field = split[1]
			}
		}

		if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) {
			continue
		}
		fmt.Fprintln(tw, "  "+strings.Join([]string{
			strconv.FormatInt(int64(i), 10),
			strconv.FormatUint(block.id, 10),
			strconv.FormatInt(int64(block.offset), 10),
			measurement,
			field,
		}, "\t"))
	}

	if opts.dumpIndex {
		println("Index:")
		tw.Flush()
		println()
	}

	tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Blk", "Ofs", "Len", "ID", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t"))

	// Starting at 4 because the magic number is 4 bytes
	i := int64(4)
	var blockCount, pointCount, blockSize int64
	indexSize := stat.Size() - index.offset

	// Start at the beginning and read every block
	for i < index.offset {
		f.Seek(int64(i), 0)

		f.Read(b)
		id := btou64(b)
		f.Read(b[:4])
		length := binary.BigEndian.Uint32(b[:4])
		buf := make([]byte, length)
		f.Read(buf)

		blockSize += int64(len(buf)) + 12

		startTime := time.Unix(0, int64(btou64(buf[:8])))
		blockType := buf[8]

		encoded := buf[9:]

		var v []tsm1.Value
		err := tsm1.DecodeBlock(buf, &v)
		if err != nil {
			fmt.Printf("error: %v\n", err.Error())
			os.Exit(1)
		}

		pointCount += int64(len(v))

		// Length of the timestamp block
		tsLen, j := binary.Uvarint(encoded)

		// Unpack the timestamp bytes
		ts := encoded[int(j) : int(j)+int(tsLen)]

		// Unpack the value bytes
		values := encoded[int(j)+int(tsLen):]

		tsEncoding := timeEnc[int(ts[0]>>4)]
		vEncoding := encDescs[int(blockType+1)][values[0]>>4]

		typeDesc := blockTypes[blockType]

		blockStats.inc(0, ts[0]>>4)
		blockStats.inc(int(blockType+1), values[0]>>4)
		blockStats.size(len(buf))

		if opts.filterKey != "" && !strings.Contains(invIds[id], opts.filterKey) {
			i += (12 + int64(length))
			blockCount += 1
			continue
		}

		fmt.Fprintln(tw, "  "+strings.Join([]string{
			strconv.FormatInt(blockCount, 10),
			strconv.FormatInt(i, 10),
			strconv.FormatInt(int64(len(buf)), 10),
			strconv.FormatUint(id, 10),
			typeDesc,
			startTime.UTC().Format(time.RFC3339Nano),
			strconv.FormatInt(int64(len(v)), 10),
			fmt.Sprintf("%s/%s", tsEncoding, vEncoding),
			fmt.Sprintf("%d/%d", len(ts), len(values)),
		}, "\t"))

		i += (12 + int64(length))
		blockCount += 1
	}
	if opts.dumpBlocks {
		println("Blocks:")
		tw.Flush()
		println()
	}

	fmt.Printf("Statistics\n")
	fmt.Printf("  Blocks:\n")
	fmt.Printf("    Total: %d Size: %d Min: %d Max: %d Avg: %d\n",
		blockCount, blockSize, blockStats.min, blockStats.max, blockSize/blockCount)
	fmt.Printf("  Index:\n")
	fmt.Printf("    Total: %d Size: %d\n", len(index.blocks), indexSize)
	fmt.Printf("  Points:\n")
	fmt.Printf("    Total: %d", pointCount)
	println()

	println("  Encoding:")
	for i, counts := range blockStats.counts {
		if len(counts) == 0 {
			continue
		}
		fmt.Printf("    %s: ", strings.Title(fieldType[i]))
		for j, v := range counts {
			fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100))
		}
		println()
	}
	fmt.Printf("  Compression:\n")
	fmt.Printf("    Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount))
	fmt.Printf("    Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount))

	if len(errors) > 0 {
		println()
		fmt.Printf("Errors (%d):\n", len(errors))
		for _, err := range errors {
			fmt.Printf("  * %v\n", err)
		}
		println()
	}
}
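
cmdDumpTsm1 (and dumpTsm1 in the last example) also lean on small byte-decoding helpers that are not reproduced on this page. Assuming they are thin wrappers over encoding/binary, consistent with the big-endian reads used for the magic number and block length, they would look like:

// btou64 and btou32 are assumed to be thin big-endian decoding helpers,
// matching the binary.BigEndian calls elsewhere in the dump code.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }
func btou32(b []byte) uint32 { return binary.BigEndian.Uint32(b) }
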
Example #15
func cmdDumpTsm1dev(opts *tsdmDumpOpts) {
	var errors []error

	f, err := os.Open(opts.path)
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	// Get the file size
	stat, err := f.Stat()
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}
	b := make([]byte, 8)

	r, err := tsm1.NewTSMReaderWithOptions(tsm1.TSMReaderOptions{
		MMAPFile: f,
	})
	if err != nil {
		println("Error opening TSM files: ", err.Error())
	}
	defer r.Close()

	minTime, maxTime := r.TimeRange()
	keys := r.Keys()

	blockStats := &blockStats{}

	println("Summary:")
	fmt.Printf("  File: %s\n", opts.path)
	fmt.Printf("  Time Range: %s - %s\n",
		minTime.UTC().Format(time.RFC3339Nano),
		maxTime.UTC().Format(time.RFC3339Nano),
	)
	fmt.Printf("  Duration: %s ", maxTime.Sub(minTime))
	fmt.Printf("  Series: %d ", len(keys))
	fmt.Printf("  File Size: %d\n", stat.Size())
	println()

	tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t"))
	var pos int
	for _, key := range keys {
		for _, e := range r.Entries(key) {
			pos++
			split := strings.Split(key, "#!~#")

			// We don't know if we have fields, so use an informative default
			var measurement, field string = "UNKNOWN", "UNKNOWN"

			// Possible corruption? Only use the key if it split cleanly; otherwise
			// keep the defaults so we can still dump the rest of the index.
			if len(split) >= 2 {
				measurement = split[0]
				field = split[1]
			}

			if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) {
				continue
			}
			fmt.Fprintln(tw, "  "+strings.Join([]string{
				strconv.FormatInt(int64(pos), 10),
				e.MinTime.UTC().Format(time.RFC3339Nano),
				e.MaxTime.UTC().Format(time.RFC3339Nano),
				strconv.FormatInt(int64(e.Offset), 10),
				strconv.FormatInt(int64(e.Size), 10),
				measurement,
				field,
			}, "\t"))
		}
	}

	if opts.dumpIndex {
		println("Index:")
		tw.Flush()
		println()
	}

	tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t"))

	// Starting at 5 because the magic number is 4 bytes + 1 byte version
	i := int64(5)
	var blockCount, pointCount, blockSize int64
	indexSize := r.IndexSize()

	// Start at the beginning and read every block
	for _, key := range keys {
		for _, e := range r.Entries(key) {

			f.Seek(int64(e.Offset), 0)
			f.Read(b[:4])

			chksum := btou32(b[:4])

			buf := make([]byte, e.Size-4)
			f.Read(buf)

			blockSize += int64(len(buf)) + 4

			blockType := buf[0]

			encoded := buf[1:]

			var v []tsm1.Value
			v, err := tsm1.DecodeBlock(buf, v)
			if err != nil {
				fmt.Printf("error: %v\n", err.Error())
				os.Exit(1)
			}
			startTime := v[0].Time()

			pointCount += int64(len(v))

			// Length of the timestamp block
			tsLen, j := binary.Uvarint(encoded)

			// Unpack the timestamp bytes
			ts := encoded[int(j) : int(j)+int(tsLen)]

			// Unpack the value bytes
			values := encoded[int(j)+int(tsLen):]

			tsEncoding := timeEnc[int(ts[0]>>4)]
			vEncoding := encDescs[int(blockType+1)][values[0]>>4]

			typeDesc := blockTypes[blockType]

			blockStats.inc(0, ts[0]>>4)
			blockStats.inc(int(blockType+1), values[0]>>4)
			blockStats.size(len(buf))

			if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) {
				i += (4 + int64(e.Size))
				blockCount++
				continue
			}

			fmt.Fprintln(tw, "  "+strings.Join([]string{
				strconv.FormatInt(blockCount, 10),
				strconv.FormatUint(uint64(chksum), 10),
				strconv.FormatInt(i, 10),
				strconv.FormatInt(int64(len(buf)), 10),
				typeDesc,
				startTime.UTC().Format(time.RFC3339Nano),
				strconv.FormatInt(int64(len(v)), 10),
				fmt.Sprintf("%s/%s", tsEncoding, vEncoding),
				fmt.Sprintf("%d/%d", len(ts), len(values)),
			}, "\t"))

			i += (4 + int64(e.Size))
			blockCount++
		}
	}

	if opts.dumpBlocks {
		println("Blocks:")
		tw.Flush()
		println()
	}

	var blockSizeAvg int64
	if blockCount > 0 {
		blockSizeAvg = blockSize / blockCount
	}
	fmt.Printf("Statistics\n")
	fmt.Printf("  Blocks:\n")
	fmt.Printf("    Total: %d Size: %d Min: %d Max: %d Avg: %d\n",
		blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg)
	fmt.Printf("  Index:\n")
	fmt.Printf("    Total: %d Size: %d\n", blockCount, indexSize)
	fmt.Printf("  Points:\n")
	fmt.Printf("    Total: %d", pointCount)
	println()

	println("  Encoding:")
	for i, counts := range blockStats.counts {
		if len(counts) == 0 {
			continue
		}
		fmt.Printf("    %s: ", strings.Title(fieldType[i]))
		for j, v := range counts {
			fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100))
		}
		println()
	}
	fmt.Printf("  Compression:\n")
	fmt.Printf("    Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount))
	fmt.Printf("    Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount))

	if len(errors) > 0 {
		println()
		fmt.Printf("Errors (%d):\n", len(errors))
		for _, err := range errors {
			fmt.Printf("  * %v\n", err)
		}
		println()
	}
}
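
Read off the code in cmdDumpTsm1dev, each index entry's Offset/Size points at a data block laid out roughly as follows; the summary is inferred from the reads above, not quoted from a format specification:

// One tsm1dev data block, as walked by cmdDumpTsm1dev (names are
// descriptive, not taken from the tsm1 package):
//
//   4 bytes    checksum (btou32), counted as part of the entry's Size
//   1 byte     block type
//   remainder  encoded payload: a uvarint giving the timestamp-block length,
//              the timestamp bytes, then the value bytes
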
Example #16
func dumpTsm1(path string) {
	f, err := os.Open(path)
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	// Get the file size
	stat, err := f.Stat()
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	b := make([]byte, 8)
	f.Read(b[:4])

	// Verify magic number
	if binary.BigEndian.Uint32(b[:4]) != 0x16D116D1 {
		println("Not a tsm1 file.")
		os.Exit(1)
	}

	ids, err := readIds(filepath.Dir(path))
	if err != nil {
		println("Failed to read series:", err.Error())
		os.Exit(1)
	}

	invIds := map[uint64]string{}
	for k, v := range ids {
		invIds[v] = k
	}

	index := readIndex(f)
	blockStats := &blockStats{}

	println("Summary:")
	fmt.Printf("  File: %s\n", path)
	fmt.Printf("  Time Range: %s - %s\n",
		index.minTime.UTC().Format(time.RFC3339Nano),
		index.maxTime.UTC().Format(time.RFC3339Nano),
	)
	fmt.Printf("  Duration: %s ", index.maxTime.Sub(index.minTime))
	fmt.Printf("  Series: %d ", index.series)
	fmt.Printf("  File Size: %d\n", stat.Size())
	println()

	println("Index:")
	tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Pos", "ID", "Ofs", "Key", "Field"}, "\t"))
	for i, block := range index.blocks {
		key := invIds[block.id]
		split := strings.Split(key, "#!~#")

		fmt.Fprintln(tw, "  "+strings.Join([]string{
			strconv.FormatInt(int64(i), 10),
			strconv.FormatUint(block.id, 10),
			strconv.FormatInt(int64(block.offset), 10),
			split[0],
			split[1],
		}, "\t"))

	}
	tw.Flush()
	println()
	println("Blocks:")

	tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Blk", "Ofs", "Len", "ID", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t"))

	// Starting at 4 because the magic number is 4 bytes
	i := int64(4)
	var blockCount, pointCount, blockSize int64
	indexSize := stat.Size() - index.offset

	// Start at the beginning and read every block
	for i < index.offset {
		f.Seek(int64(i), 0)

		f.Read(b)
		id := btou64(b)
		f.Read(b[:4])
		length := binary.BigEndian.Uint32(b[:4])
		buf := make([]byte, length)
		f.Read(buf)

		blockSize += int64(len(buf)) + 12

		startTime := time.Unix(0, int64(btou64(buf[:8])))
		blockType := buf[8]

		encoded := buf[9:]

		v, err := tsm1.DecodeBlock(buf)
		if err != nil {
			fmt.Printf("error: %v\n", err.Error())
			os.Exit(1)
		}

		pointCount += int64(len(v))

		// Length of the timestamp block
		tsLen, j := binary.Uvarint(encoded)

		// Unpack the timestamp bytes
		ts := encoded[int(j) : int(j)+int(tsLen)]

		// Unpack the value bytes
		values := encoded[int(j)+int(tsLen):]

		tsEncoding := timeEnc[int(ts[0]>>4)]
		vEncoding := encDescs[int(blockType+1)][values[0]>>4]

		typeDesc := blockTypes[blockType]

		blockStats.inc(0, ts[0]>>4)
		blockStats.inc(int(blockType+1), values[0]>>4)
		blockStats.size(len(buf))

		fmt.Fprintln(tw, "  "+strings.Join([]string{
			strconv.FormatInt(blockCount, 10),
			strconv.FormatInt(i, 10),
			strconv.FormatInt(int64(len(buf)), 10),
			strconv.FormatUint(id, 10),
			typeDesc,
			startTime.UTC().Format(time.RFC3339Nano),
			strconv.FormatInt(int64(len(v)), 10),
			fmt.Sprintf("%s/%s", tsEncoding, vEncoding),
			fmt.Sprintf("%d/%d", len(ts), len(values)),
		}, "\t"))

		i += (12 + int64(length))
		blockCount += 1
	}

	tw.Flush()
	println()

	fmt.Printf("Statistics\n")
	fmt.Printf("  Blocks:\n")
	fmt.Printf("    Total: %d Size: %d Min: %d Max: %d Avg: %d\n",
		blockCount, blockSize, blockStats.min, blockStats.max, blockSize/blockCount)
	fmt.Printf("  Index:\n")
	fmt.Printf("    Total: %d Size: %d\n", len(index.blocks), indexSize)
	fmt.Printf("  Points:\n")
	fmt.Printf("    Total: %d", pointCount)
	println()

	println("  Encoding:")
	for i, counts := range blockStats.counts {
		if len(counts) == 0 {
			continue
		}
		fmt.Printf("    %s: ", strings.Title(fieldType[i]))
		for j, v := range counts {
			fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100))
		}
		println()
	}
	fmt.Printf("  Compression:\n")
	fmt.Printf("    Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount))
	fmt.Printf("    Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount))

	println()
}
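
For comparison, the older file format walked by cmdDumpTsm1 and dumpTsm1 carries no per-block checksum; as the reads in those functions show, each record on disk consists of:

// One legacy tsm1 data block, as walked by cmdDumpTsm1 / dumpTsm1 (field
// names are descriptive, not taken from the tsm1 package):
//
//   8 bytes   series/field ID (btou64)
//   4 bytes   block length, big-endian
//   length bytes of payload:
//     8 bytes    minimum timestamp, Unix nanoseconds
//     1 byte     block type
//     remainder  encoded data: uvarint timestamp-block length, the timestamp
//                bytes, then the value bytes
//
// hence the i += 12 + int64(length) advance per block.
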