Example #1
// benchmarkWritePoints benchmarks writing new series to a shard.
// mCnt - measurement count
// tkCnt - tag key count
// tvCnt - tag value count (values per tag)
// pntCnt - points per series.  # of series = mCnt * (tvCnt ^ tkCnt)
func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)
	// Create index for the shard to use.
	index := tsdb.NewDatabaseIndex("db")
	// Generate point data to write to the shard.
	points := []models.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := models.MustNewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}

	// Stop & reset timers and mem-stats before the main benchmark loop.
	b.StopTimer()
	b.ResetTimer()

	// Run the benchmark loop.
	for n := 0; n < b.N; n++ {
		tmpDir, _ := ioutil.TempDir("", "shard_test")
		tmpShard := path.Join(tmpDir, "shard")
		tmpWal := path.Join(tmpDir, "wal")
		shard := tsdb.NewShard(1, index, tmpShard, tmpWal, tsdb.NewEngineOptions())
		shard.Open()

		b.StartTimer()
		// Call the function being benchmarked.
		chunkedWrite(shard, points)

		b.StopTimer()
		shard.Close()
		os.RemoveAll(tmpDir)
	}
}
Example #2
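// TestWriteTimeTag ensures that a field named "time" is dropped and logged rather
// than written, and that the measurement is only created once a point carries at
// least one other field.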
func TestWriteTimeTag(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{}),
		map[string]interface{}{"time": 1.0},
		time.Unix(1, 2),
	)

	buf := bytes.NewBuffer(nil)
	sh.SetLogOutput(buf)
	if err := sh.WritePoints([]models.Point{pt}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if got, exp := buf.String(), "dropping field 'time'"; !strings.Contains(got, exp) {
		t.Fatalf("unexpected log message: %s", strings.TrimSpace(got))
	}

	m := index.Measurement("cpu")
	if m != nil {
		t.Fatal("unexpected cpu measurement")
	}

	pt = models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{}),
		map[string]interface{}{"value": 1.0, "time": 1.0},
		time.Unix(1, 2),
	)

	buf = bytes.NewBuffer(nil)
	sh.SetLogOutput(buf)
	if err := sh.WritePoints([]models.Point{pt}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if got, exp := buf.String(), "dropping field 'time'"; !strings.Contains(got, exp) {
		t.Fatalf("unexpected log message: %s", strings.TrimSpace(got))
	}

	m = index.Measurement("cpu")
	if m == nil {
		t.Fatal("expected cpu measurement")
	}

	if got, exp := len(m.FieldNames()), 1; got != exp {
		t.Fatalf("invalid number of field names: got=%v exp=%v", got, exp)
	}
}
Example #3
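// TestShardWriteAddNewField ensures that writing a point with an additional field
// updates the field names stored in the measurement index.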
func TestShardWriteAddNewField(t *testing.T) {
	t.Skip("pending tsm1 iterator impl")

	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex()
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error openeing shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt = models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0, "value2": 2.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if index.SeriesN() != 1 {
		t.Fatalf("series wasn't in index")
	}
	seriesTags := index.Series(string(pt.Key())).Tags
	if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
		t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
	}
	if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
		t.Fatalf("tag key wasn't saved to measurement index")
	}

	if len(index.Measurement("cpu").FieldNames()) != 2 {
		t.Fatalf("field names wasn't saved to measurement index")
	}
}
Example #4
// MustOpenEngine returns a new, open instance of Engine.
func MustOpenEngine() *Engine {
	e := NewEngine()
	if err := e.Open(); err != nil {
		panic(err)
	}
	if err := e.LoadMetadataIndex(nil, tsdb.NewDatabaseIndex("db")); err != nil {
		panic(err)
	}
	return e
}
Example #5
// MustOpenEngine returns a new, open instance of Engine.
func MustOpenEngine() *Engine {
	e := NewEngine()
	if err := e.Open(); err != nil {
		panic(err)
	}
	if err := e.LoadMetadataIndex(nil, tsdb.NewDatabaseIndex("db"), make(map[string]*tsdb.MeasurementFields)); err != nil {
		panic(err)
	}
	return e
}
Example #6
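// TestShard_MaxTagValuesLimit ensures that writes beyond the configured
// max-values-per-tag limit are rejected with a descriptive error.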
func TestShard_MaxTagValuesLimit(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "db", "rp", "1")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")
	opts.Config.MaxValuesPerTag = 1000

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)

	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	// Writing 1K series should succeed.
	points := []models.Point{}

	for i := 0; i < 1000; i++ {
		pt := models.MustNewPoint(
			"cpu",
			models.Tags{{Key: []byte("host"), Value: []byte(fmt.Sprintf("server%d", i))}},
			map[string]interface{}{"value": 1.0},
			time.Unix(1, 2),
		)
		points = append(points, pt)
	}

	err := sh.WritePoints(points)
	if err != nil {
		t.Fatalf(err.Error())
	}

	// Writing one more series should exceed the max-values-per-tag limit.
	pt := models.MustNewPoint(
		"cpu",
		models.Tags{{Key: []byte("host"), Value: []byte("server9999")}},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]models.Point{pt})
	if err == nil {
		t.Fatal("expected error")
	} else if exp, got := `max-values-per-tag limit exceeded (1000/1000): measurement="cpu" tag="host" value="server9999" dropped=1`, err.Error(); exp != got {
		t.Fatalf("unexpected error message:\n\texp = %s\n\tgot = %s", exp, got)
	}

	sh.Close()
}
Example #7
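// TestMaxSeriesLimit ensures that writes beyond the configured
// MaxSeriesPerDatabase limit are rejected with a descriptive error.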
func TestMaxSeriesLimit(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")
	opts.Config.MaxSeriesPerDatabase = 1000

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)

	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	// Writing 1K series should succeed.
	points := []models.Point{}

	for i := 0; i < 1000; i++ {
		pt := models.MustNewPoint(
			"cpu",
			map[string]string{"host": fmt.Sprintf("server%d", i)},
			map[string]interface{}{"value": 1.0},
			time.Unix(1, 2),
		)
		points = append(points, pt)
	}

	err := sh.WritePoints(points)
	if err != nil {
		t.Fatalf(err.Error())
	}

	// Writing one more series should exceed the series limit.
	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server9999"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]models.Point{pt})
	if err == nil {
		t.Fatal("expected error")
	} else if err.Error() != "max series per database exceeded: cpu,host=server9999" {
		t.Fatalf("unexpected error messag:\n\texp = max series per database exceeded: cpu,host=server9999\n\tgot = %s", err.Error())
	}

	sh.Close()
}
Example #8
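// benchmarkCreateSeriesIndex benchmarks adding the given series to a fresh
// DatabaseIndex. The indexes are allocated before the timer is reset so that
// only CreateSeriesIndexIfNotExists is measured.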
func benchmarkCreateSeriesIndex(b *testing.B, series []*TestSeries) {
	idxs := make([]*tsdb.DatabaseIndex, 0, b.N)
	for i := 0; i < b.N; i++ {
		idxs = append(idxs, tsdb.NewDatabaseIndex(fmt.Sprintf("db%d", i)))
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		idx := idxs[n]
		for _, s := range series {
			idx.CreateSeriesIndexIfNotExists(s.Measurement, s.Series)
		}
	}
}
Example #9
// NewShard returns a new instance of Shard with temp paths.
func NewShard() *Shard {
	// Create temporary path for data and WAL.
	path, err := ioutil.TempDir("", "influxdb-tsdb-")
	if err != nil {
		panic(err)
	}

	// Build engine options.
	opt := tsdb.NewEngineOptions()
	opt.Config.WALDir = filepath.Join(path, "wal")

	return &Shard{
		Shard: tsdb.NewShard(0,
			tsdb.NewDatabaseIndex("db"),
			filepath.Join(path, "data"),
			filepath.Join(path, "wal"),
			opt,
		),
		path: path,
	}
}
Example #10
// MustOpenShard returns a temporary, opened shard.
func MustOpenShard(id uint64) *Shard {
	path, err := ioutil.TempDir("", "copier-")
	if err != nil {
		panic(err)
	}

	sh := &Shard{
		Shard: tsdb.NewShard(id,
			tsdb.NewDatabaseIndex("db"),
			filepath.Join(path, "data"),
			filepath.Join(path, "wal"),
			tsdb.NewEngineOptions(),
		),
		path: path,
	}
	if err := sh.Open(); err != nil {
		sh.Close()
		panic(err)
	}

	return sh
}
Example #11
// Ensures that when a shard is closed, it removes any series meta-data
// from the index.
func TestShard_Close_RemoveIndex(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)

	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if got, exp := index.SeriesN(), 1; got != exp {
		t.Fatalf("series count mismatch: got %v, exp %v", got, exp)
	}

	// Ensure closing the shard removes the series from the index.
	sh.Close()

	if got, exp := index.SeriesN(), 0; got != exp {
		t.Fatalf("series count mismatch: got %v, exp %v", got, exp)
	}
}
Example #12
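// TestWriteTimeField ensures that a tag named "time" is dropped and logged, and
// that the series is still created without it.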
func TestWriteTimeField(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{"time": "now"}),
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	buf := bytes.NewBuffer(nil)
	sh.SetLogOutput(buf)
	if err := sh.WritePoints([]models.Point{pt}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if got, exp := buf.String(), "dropping tag 'time'"; !strings.Contains(got, exp) {
		t.Fatalf("unexpected log message: %s", strings.TrimSpace(got))
	}

	key := models.MakeKey([]byte("cpu"), nil)
	series := index.Series(string(key))
	if series == nil {
		t.Fatal("expected series")
	} else if len(series.Tags) != 0 {
		t.Fatalf("unexpected number of tags: got=%v exp=%v", len(series.Tags), 0)
	}
}
Example #13
// Ensure engine can load the metadata index after reopening.
func TestEngine_LoadMetadataIndex(t *testing.T) {
	e := MustOpenEngine()
	defer e.Close()

	if err := e.WritePointsString(`cpu,host=A value=1.1 1000000000`); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	// Ensure we can close and load index from the WAL
	if err := e.Reopen(); err != nil {
		t.Fatal(err)
	}

	// Load metadata index.
	index := tsdb.NewDatabaseIndex("db")
	if err := e.LoadMetadataIndex(nil, index); err != nil {
		t.Fatal(err)
	}

	// Verify index is correct.
	if m := index.Measurement("cpu"); m == nil {
		t.Fatal("measurement not found")
	} else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags, map[string]string{"host": "A"}) {
		t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags)
	}

	// write the snapshot, ensure we can close and load index from TSM
	if err := e.WriteSnapshot(); err != nil {
		t.Fatalf("error writing snapshot: %s", err.Error())
	}

	// Ensure we can close and load index from the TSM files
	if err := e.Reopen(); err != nil {
		t.Fatal(err)
	}

	// Load metadata index.
	index = tsdb.NewDatabaseIndex("db")
	if err := e.LoadMetadataIndex(nil, index); err != nil {
		t.Fatal(err)
	}

	// Verify index is correct.
	if m := index.Measurement("cpu"); m == nil {
		t.Fatal("measurement not found")
	} else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags, map[string]string{"host": "A"}) {
		t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags)
	}

	// Write a new point and ensure we can close and load index from TSM and WAL
	if err := e.WritePoints([]models.Point{
		MustParsePointString("cpu,host=B value=1.2 2000000000"),
	}); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	// Ensure we can close and load index from the TSM & WAL
	if err := e.Reopen(); err != nil {
		t.Fatal(err)
	}

	// Load metadata index.
	index = tsdb.NewDatabaseIndex("db")
	if err := e.LoadMetadataIndex(nil, index); err != nil {
		t.Fatal(err)
	}

	// Verify index is correct.
	if m := index.Measurement("cpu"); m == nil {
		t.Fatal("measurement not found")
	} else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags, map[string]string{"host": "A"}) {
		t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags)
	} else if s := m.SeriesByID(2); s.Key != "cpu,host=B" || !reflect.DeepEqual(s.Tags, map[string]string{"host": "B"}) {
		t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags)
	}
}
Example #14
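// TestShardWriteAndIndex ensures that writes populate the database index, that the
// index is rebuilt when the shard is reopened, and that writing to a shard whose
// engine is not open returns ErrEngineClosed.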
func TestShardWriteAndIndex(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)

	// Calling WritePoints when the engine is not open will return
	// ErrEngineClosed.
	if got, exp := sh.WritePoints(nil), tsdb.ErrEngineClosed; got != exp {
		t.Fatalf("got %v, expected %v", got, exp)
	}

	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt.SetTime(time.Unix(2, 3))
	err = sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	validateIndex := func() {
		if index.SeriesN() != 1 {
			t.Fatalf("series wasn't in index")
		}

		seriesTags := index.Series(string(pt.Key())).Tags
		if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
			t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
		}
		if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
			t.Fatalf("tag key wasn't saved to measurement index")
		}
	}

	validateIndex()

	// ensure the index gets loaded after closing and opening the shard
	sh.Close()

	index = tsdb.NewDatabaseIndex("db")
	sh = tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	validateIndex()

	// and ensure that we can still write data
	pt.SetTime(time.Unix(2, 6))
	err = sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}
}
Example #15
// Tests concurrently writing to the same shard with different field types which
// can trigger a panic when the shard is snapshotted to TSM files.
func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	points := make([]models.Point, 0, 1000)
	for i := 0; i < cap(points); i++ {
		if i < 500 {
			points = append(points, models.MustNewPoint(
				"cpu",
				models.NewTags(map[string]string{"host": "server"}),
				map[string]interface{}{"value": 1.0},
				time.Unix(int64(i), 0),
			))
		} else {
			points = append(points, models.MustNewPoint(
				"cpu",
				models.NewTags(map[string]string{"host": "server"}),
				map[string]interface{}{"value": int64(1)},
				time.Unix(int64(i), 0),
			))
		}
	}

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			if err := sh.DeleteMeasurement("cpu", []string{"cpu,host=server"}); err != nil {
				t.Fatalf(err.Error())
			}

			_ = sh.WritePoints(points[:500])
			if f, err := sh.CreateSnapshot(); err == nil {
				os.RemoveAll(f)
			}

		}
	}()

	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			if err := sh.DeleteMeasurement("cpu", []string{"cpu,host=server"}); err != nil {
				t.Fatalf(err.Error())
			}

			_ = sh.WritePoints(points[500:])
			if f, err := sh.CreateSnapshot(); err == nil {
				os.RemoveAll(f)
			}
		}
	}()

	wg.Wait()
}