Example #1
func TestParsePointEscapedStringsAndCommas(t *testing.T) {
	// non-escaped comma and quotes
	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": `{Hello"{,}" World}`,
			},
			time.Unix(1, 0)),
	)

	// escaped comma and quotes
	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": `{Hello"{\,}" World}`,
			},
			time.Unix(1, 0)),
	)

}
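
The test helper used throughout these parser examples is not shown in this excerpt. Below is a minimal sketch of what it presumably does, assuming the same testing and tsdb imports as the surrounding tests and that comparing String() output is an adequate equality check; the real helper in the InfluxDB suite may compare points differently.

// test is a hypothetical reconstruction of the helper: parse the line-protocol
// input and compare the single resulting point against the expected point.
func test(t *testing.T, line string, exp tsdb.Point) {
	pts, err := tsdb.ParsePoints([]byte(line))
	if err != nil {
		t.Fatalf("ParsePoints(%q) failed: %s", line, err)
	}
	if len(pts) != 1 {
		t.Fatalf("expected 1 point, got %d", len(pts))
	}
	if got, want := pts[0].String(), exp.String(); got != want {
		t.Errorf("parsed point mismatch:\n got %s\n exp %s", got, want)
	}
}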
Example #2
func TestParsePointWithStringWithCommas(t *testing.T) {
	// escaped comma
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   `foo\,bar`, // commas in string value
			},
			time.Unix(1, 0)),
	)

	// non-escaped comma
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   "foo,bar", // commas in string value
			},
			time.Unix(1, 0)),
	)

}
Example #3
func TestParsePointWithStringField(t *testing.T) {
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`,
		tsdb.NewPoint("cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   "foo",
				"str2":  "bar",
			},
			time.Unix(1, 0)),
	)

	test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`,
		tsdb.NewPoint("cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"str": `foo " bar`,
			},
			time.Unix(1, 0)),
	)

}
Example #4
func TestDropMeasurementStatement(t *testing.T) {
	store, executor := testStoreAndExecutor()
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)
	pt2 := tsdb.NewPoint(
		"memory",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt, pt2}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("show series", executor)
	expected := `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]},{"name":"memory","columns":["_key","host"],"values":[["memory,host=server","server"]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop measurement memory", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	validateDrop := func() {
		got = executeAndGetJSON("show series", executor)
		expected = `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("show measurements", executor)
		expected = `[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("select * from memory", executor)
		expected = `[{"error":"measurement not found: \"foo\".\"foo\".memory"}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
	}

	validateDrop()
	store.Close()
	store = tsdb.NewStore(store.Path())
	store.Open()
	executor.Store = store
	validateDrop()
}
Example #5
func TestShardWriteAddNewField(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")

	index := tsdb.NewDatabaseIndex()
	sh := tsdb.NewShard(index, tmpShard, tsdb.NewEngineOptions())
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt = tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0, "value2": 2.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if !reflect.DeepEqual(index.Names(), []string{"cpu"}) {
		t.Fatalf("measurement names in shard didn't match")
	}
	if index.SeriesN() != 1 {
		t.Fatalf("series wasn't in index")
	}
	seriesTags := index.Series(string(pt.Key())).Tags
	if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
		t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
	}
	if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
		t.Fatalf("tag key wasn't saved to measurement index")
	}

	if len(index.Measurement("cpu").FieldNames()) != 2 {
		t.Fatalf("field names weren't saved to measurement index")
	}

}
Example #6
func TestShardMapper_WriteAndSingleMapperRawQueryMultiValue(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	shard := mustCreateShard(tmpDir)

	pt1time := time.Unix(1, 0).UTC()
	pt1 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverA", "region": "us-east"},
		map[string]interface{}{"foo": 42, "bar": 43},
		pt1time,
	)
	pt2time := time.Unix(2, 0).UTC()
	pt2 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverB", "region": "us-east"},
		map[string]interface{}{"foo": 60, "bar": 61},
		pt2time,
	)
	err := shard.WritePoints([]tsdb.Point{pt1, pt2})
	if err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		stmt      string
		chunkSize int
		expected  []string
	}{
		{
			stmt:     `SELECT foo FROM cpu`,
			expected: []string{`{"name":"cpu","values":[{"time":1000000000,"value":42},{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:     `SELECT foo,bar FROM cpu`,
			expected: []string{`{"name":"cpu","values":[{"time":1000000000,"value":{"bar":43,"foo":42}},{"time":2000000000,"value":{"bar":61,"foo":60}}]}`, `null`},
		},
	}

	for _, tt := range tests {
		stmt := mustParseSelectStatement(tt.stmt)
		mapper := openRawMapperOrFail(t, shard, stmt, tt.chunkSize)

		for _, s := range tt.expected {
			got := nextRawChunkAsJson(t, mapper)
			if got != s {
				t.Errorf("test '%s'\n\tgot      %s\n\texpected %s", tt.stmt, got, s)
				break
			}
		}
	}
}
Example #7
// benchmarkWritePoints benchmarks writing new series to a shard.
// mCnt - measurement count
// tkCnt - tag key count
// tvCnt - tag value count (values per tag)
// pntCnt - points per series.  # of series = mCnt * (tvCnt ^ tkCnt)
func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)
	// Create index for the shard to use.
	index := tsdb.NewDatabaseIndex()
	// Generate point data to write to the shard.
	points := []tsdb.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}

	// Stop & reset timers and mem-stats before the main benchmark loop.
	b.StopTimer()
	b.ResetTimer()

	// Run the benchmark loop.
	for n := 0; n < b.N; n++ {
		tmpDir, _ := ioutil.TempDir("", "shard_test")
		tmpShard := path.Join(tmpDir, "shard")
		shard := tsdb.NewShard(index, tmpShard, tsdb.NewEngineOptions())
		shard.Open()

		b.StartTimer()
		// Call the function being benchmarked.
		chunkedWrite(shard, points)

		b.StopTimer()
		shard.Close()
		os.RemoveAll(tmpDir)
	}
}
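
As a usage sketch only (the wrapper names and parameter choices below are illustrative, not taken from the original suite), standard Go benchmarks would drive benchmarkWritePoints like this; per the formula in the comment above, 100 measurements with 2 tag keys and 3 values per key gives 100 * 3^2 = 900 series.

// Hypothetical benchmark wrappers for benchmarkWritePoints.
func BenchmarkWritePoints_900Series_1PointEach(b *testing.B) {
	benchmarkWritePoints(b, 100, 2, 3, 1) // 900 series, 1 point per series
}

func BenchmarkWritePoints_900Series_100PointsEach(b *testing.B) {
	benchmarkWritePoints(b, 100, 2, 3, 100) // 900 series, 100 points per series
}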
Example #8
func TestParsePointWithTags(t *testing.T) {
	test(t,
		"cpu,host=serverA,region=us-east value=1.0 1000000000",
		tsdb.NewPoint("cpu",
			tsdb.Tags{"host": "serverA", "region": "us-east"},
			tsdb.Fields{"value": 1.0}, time.Unix(1, 0)))
}
Example #9
func TestParsePointToString(t *testing.T) {
	line := `cpu,host=serverA,region=us-east bool=false,float=11.0,float2=12.123,int=10,str="string val" 1000000000`
	pts, err := tsdb.ParsePoints([]byte(line))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	got := pt.String()
	if line != got {
		t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
	}

	pt = tsdb.NewPoint("cpu", tsdb.Tags{"host": "serverA", "region": "us-east"},
		tsdb.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
		time.Unix(1, 0))

	got = pt.String()
	if line != got {
		t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
	}
}
Example #10
func TestDropDatabase(t *testing.T) {
	store, executor := testStoreAndExecutor()
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("select * from cpu", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	var name string
	me := &metaExec{fn: func(stmt influxql.Statement) *influxql.Result {
		name = stmt.(*influxql.DropDatabaseStatement).Name
		return &influxql.Result{}
	}}
	executor.MetaStatementExecutor = me

	// verify the database is there on disk
	dbPath := filepath.Join(store.Path(), "foo")
	if _, err := os.Stat(dbPath); err != nil {
		t.Fatalf("expected database dir %s to exist", dbPath)
	}

	got = executeAndGetJSON("drop database foo", executor)
	expected = `[{}]`
	if got != expected {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	if name != "foo" {
		t.Fatalf("expected the MetaStatementExecutor to be called with database name foo, but got %s", name)
	}

	if _, err := os.Stat(dbPath); !os.IsNotExist(err) {
		t.Fatalf("expected database dir %s to be gone", dbPath)
	}

	store.Close()
	store = tsdb.NewStore(store.Path())
	store.Open()
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err == nil || err.Error() != "shard not found" {
		t.Fatalf("expected shard to not be found")
	}
}
Example #11
func TestNewPointUnhandledType(t *testing.T) {
	// nil value
	pt := tsdb.NewPoint("cpu", nil, tsdb.Fields{"value": nil}, time.Unix(0, 0))
	if exp := `cpu value= 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// unsupported type gets stored as string
	now := time.Unix(0, 0).UTC()
	pt = tsdb.NewPoint("cpu", nil, tsdb.Fields{"value": now}, time.Unix(0, 0))
	if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	if exp := "1970-01-01 00:00:00 +0000 UTC"; pt.Fields()["value"] != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}
}
Example #12
func TestNewPointEscaped(t *testing.T) {
	// commas
	pt := tsdb.NewPoint("cpu,main", tsdb.Tags{"tag,bar": "value"}, tsdb.Fields{"name,bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\,main,tag\,bar=value name\,bar=1.0 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// spaces
	pt = tsdb.NewPoint("cpu main", tsdb.Tags{"tag bar": "value"}, tsdb.Fields{"name bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\ main,tag\ bar=value name\ bar=1.0 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// equals
	pt = tsdb.NewPoint("cpu=main", tsdb.Tags{"tag=bar": "value=foo"}, tsdb.Fields{"name=bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\=main,tag\=bar=value\=foo name\=bar=1.0 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}
}
Example #13
func TestWritePointsAndExecuteQuery(t *testing.T) {
	store, executor := testStoreAndExecutor()
	defer os.RemoveAll(store.Path())

	// Write first point.
	if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	// Write second point.
	if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(2, 3),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	got := executeAndGetJSON("select * from cpu", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1],["1970-01-01T00:00:02.000000003Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	store.Close()
	store = tsdb.NewStore(store.Path())
	if err := store.Open(); err != nil {
		t.Fatalf(err.Error())
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	got = executeAndGetJSON("select * from cpu", executor)
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}
}
Example #14
func TestNewPointNegativeFloat(t *testing.T) {
	test(t, `cpu value=-0.64 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": -0.64,
			},
			time.Unix(1, 0)),
	)
}
Example #15
func TestNewPointFloatScientific(t *testing.T) {
	test(t, `cpu value=6.632243e+06 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": float64(6632243),
			},
			time.Unix(1, 0)),
	)
}
Example #16
func TestNewPointFloatNoDecimal(t *testing.T) {
	test(t, `cpu value=1. 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": 1.0,
			},
			time.Unix(1, 0)),
	)
}
Example #17
func TestNewPointLargeInteger(t *testing.T) {
	test(t, `cpu value=6632243 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": 6632243, // if incorrectly encoded as a float, it would show up as 6.632243e+06
			},
			time.Unix(1, 0)),
	)
}
Example #18
func TestDropSeriesStatement(t *testing.T) {
	store, executor := testStoreAndExecutor()
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := store.WriteToShard(shardID, []tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	got := executeAndGetJSON("select * from cpu", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop series from cpu", executor)

	got = executeAndGetJSON("select * from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	store.Close()
	store = tsdb.NewStore(store.Path())
	store.Open()
	executor.Store = store

	got = executeAndGetJSON("select * from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}
}
Example #19
func TestNewPointNaN(t *testing.T) {
	test(t, `cpu value=NaN 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(1, 0)),
	)

	test(t, `cpu value=nAn 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(1, 0)),
	)

}
Example #20
func TestParsePointUnicodeString(t *testing.T) {
	test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": "wè",
			},
			time.Unix(1, 0)),
	)
}
Example #21
func TestParsePointWithStringWithEquals(t *testing.T) {
	test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   "foo=bar", // equals sign in string value
			},
			time.Unix(1, 0)),
	)
}
Example #22
func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)
	// Generate point data to write to the shards.
	points := []tsdb.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}
	// Create a temporary directory for the test data.
	dir, _ := ioutil.TempDir("", "store_test")
	// Create the store.
	store := tsdb.NewStore(dir)
	// Open the store.
	if err := store.Open(); err != nil {
		b.Fatalf("benchmarkStoreOpen: %s", err)
	}
	// Create requested number of shards in the store & write points.
	for shardID := 0; shardID < shardCnt; shardID++ {
		if err := store.CreateShard("mydb", "myrp", uint64(shardID)); err != nil {
			b.Fatalf("benchmarkStoreOpen: %s", err)
		}
		// Write points to the shard.
		chunkedWriteStoreShard(store, shardID, points)
	}
	// Close the store.
	if err := store.Close(); err != nil {
		b.Fatalf("benchmarkStoreOpen: %s", err)
	}

	// Run the benchmark loop.
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		store := tsdb.NewStore(dir)
		if err := store.Open(); err != nil {
			b.Fatalf("benchmarkStoreOpen: %s", err)
		}

		b.StopTimer()
		store.Close()
		b.StartTimer()
	}
}
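
A hypothetical wrapper for benchmarkStoreOpen in the same spirit, assuming the same series formula as benchmarkWritePoints; the final argument is the number of shards the reopened store must load, which is what the timed loop measures.

// Hypothetical benchmark wrapper: 100 measurements x 2 values of 1 tag key
// = 200 series, 100 points per series, spread across 10 shards.
func BenchmarkStoreOpen_200Series_100PointsEach_10Shards(b *testing.B) {
	benchmarkStoreOpen(b, 100, 1, 2, 100, 10)
}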
Example #23
func TestParsePointWithBoolField(t *testing.T) {
	test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"t":     true,
				"T":     true,
				"true":  true,
				"True":  true,
				"TRUE":  true,
				"f":     false,
				"F":     false,
				"false": false,
				"False": false,
				"FALSE": false,
			},
			time.Unix(1, 0)),
	)
}
Example #24
// Ensure the shard will automatically flush the WAL after a threshold has been reached.
func TestShard_Autoflush_FlushInterval(t *testing.T) {
	path, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(path)

	// Open shard with a high size threshold, small time threshold.
	sh := tsdb.NewShard(tsdb.NewDatabaseIndex(), filepath.Join(path, "shard"), tsdb.EngineOptions{
		MaxWALSize:             10 * 1024 * 1024, // 10MB
		WALFlushInterval:       100 * time.Millisecond,
		WALPartitionFlushDelay: 1 * time.Millisecond,
	})
	if err := sh.Open(); err != nil {
		t.Fatal(err)
	}
	defer sh.Close()

	// Write some points.
	for i := 0; i < 100; i++ {
		if err := sh.WritePoints([]tsdb.Point{tsdb.NewPoint(
			fmt.Sprintf("cpu%d", i),
			map[string]string{"host": "server"},
			map[string]interface{}{"value": 1.0},
			time.Unix(1, 2),
		)}); err != nil {
			t.Fatal(err)
		}
	}

	// Wait for time-based flush.
	time.Sleep(100 * time.Millisecond)

	// Make sure we have series buckets created outside the WAL.
	if n, err := sh.SeriesCount(); err != nil {
		t.Fatal(err)
	} else if n < 10 {
		t.Fatalf("not enough series, expected at least 10, got %d", n)
	}
}
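
The complementary configuration is size-based flushing. A hedged sketch of how it could be set up inside a test like the one above (only the option fields already shown are assumed; the exact thresholds are illustrative):

	// Hypothetical counterpart: a tiny size threshold and a long flush interval,
	// so the volume of writes, not the timer, triggers the WAL flush.
	sizeOpts := tsdb.EngineOptions{
		MaxWALSize:             1024, // 1KB, reached quickly by the writes above
		WALFlushInterval:       time.Hour,
		WALPartitionFlushDelay: 1 * time.Millisecond,
	}
	sizeShard := tsdb.NewShard(tsdb.NewDatabaseIndex(), filepath.Join(path, "shard_size"), sizeOpts)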
Example #25
func TestShardMapper_WriteAndSingleMapperAggregateQuery(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	shard := mustCreateShard(tmpDir)

	pt1time := time.Unix(10, 0).UTC()
	pt1 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverA", "region": "us-east"},
		map[string]interface{}{"value": 1},
		pt1time,
	)
	pt2time := time.Unix(20, 0).UTC()
	pt2 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverB", "region": "us-east"},
		map[string]interface{}{"value": 60},
		pt2time,
	)
	err := shard.WritePoints([]tsdb.Point{pt1, pt2})
	if err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		stmt     string
		expected []string
	}{
		{
			stmt:     `SELECT sum(value) FROM cpu`,
			expected: []string{`{"name":"cpu","values":[{"value":[61]}]}`, `null`},
		},
		{
			stmt:     `SELECT sum(value),mean(value) FROM cpu`,
			expected: []string{`{"name":"cpu","values":[{"value":[61,{"Count":2,"Mean":30.5,"ResultType":1}]}]}`, `null`},
		},
		{
			stmt: `SELECT sum(value) FROM cpu GROUP BY host`,
			expected: []string{
				`{"name":"cpu","tags":{"host":"serverA"},"values":[{"value":[1]}]}`,
				`{"name":"cpu","tags":{"host":"serverB"},"values":[{"value":[60]}]}`,
				`null`},
		},
		{
			stmt: `SELECT sum(value) FROM cpu GROUP BY region`,
			expected: []string{
				`{"name":"cpu","tags":{"region":"us-east"},"values":[{"value":[61]}]}`,
				`null`},
		},
		{
			stmt: `SELECT sum(value) FROM cpu GROUP BY region,host`,
			expected: []string{
				`{"name":"cpu","tags":{"host":"serverA","region":"us-east"},"values":[{"value":[1]}]}`,
				`{"name":"cpu","tags":{"host":"serverB","region":"us-east"},"values":[{"value":[60]}]}`,
				`null`},
		},
		{
			stmt: `SELECT sum(value) FROM cpu WHERE host='serverB'`,
			expected: []string{
				`{"name":"cpu","values":[{"value":[60]}]}`,
				`null`},
		},
		{
			stmt: fmt.Sprintf(`SELECT sum(value) FROM cpu WHERE time = '%s'`, pt1time.Format(influxql.DateTimeFormat)),
			expected: []string{
				`{"name":"cpu","values":[{"time":10000000000,"value":[1]}]}`,
				`null`},
		},
		{
			stmt: fmt.Sprintf(`SELECT sum(value) FROM cpu WHERE time > '%s'`, pt1time.Format(influxql.DateTimeFormat)),
			expected: []string{
				`{"name":"cpu","values":[{"value":[60]}]}`,
				`null`},
		},
		{
			stmt: fmt.Sprintf(`SELECT sum(value) FROM cpu WHERE time > '%s'`, pt2time.Format(influxql.DateTimeFormat)),
			expected: []string{
				`{"name":"cpu","values":[{"value":[null]}]}`,
				`null`},
		},
	}

	for _, tt := range tests {
		stmt := mustParseSelectStatement(tt.stmt)
		mapper := openLocalMapperOrFail(t, shard, stmt)

		for i := range tt.expected {
			got := aggIntervalAsJson(t, mapper)
			if got != tt.expected[i] {
				t.Errorf("test '%s'\n\tgot      %s\n\texpected %s", tt.stmt, got, tt.expected[i])
				break
			}
		}
	}
}
Example #26
func (p *Point) MarshalString() string {
	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
}
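
A brief usage sketch, assuming Point's exported fields mirror the tsdb.NewPoint arguments (Measurement string, Tags map[string]string, Fields map[string]interface{}, Time time.Time); the exact field types are not shown in this excerpt.

// Hypothetical usage: MarshalString renders the point as InfluxDB line protocol.
func ExamplePoint_MarshalString() {
	p := &Point{
		Measurement: "cpu",
		Tags:        map[string]string{"host": "serverA", "region": "us-east"},
		Fields:      map[string]interface{}{"value": 1.0},
		Time:        time.Unix(1, 0),
	}
	fmt.Println(p.MarshalString())
	// Expected line protocol, per the parser examples above:
	// cpu,host=serverA,region=us-east value=1.0 1000000000
}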
Example #27
func TestShardWriteAndIndex(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")

	index := tsdb.NewDatabaseIndex()
	sh := tsdb.NewShard(index, tmpShard, tsdb.NewEngineOptions())
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt.SetTime(time.Unix(2, 3))
	err = sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	validateIndex := func() {
		if !reflect.DeepEqual(index.Names(), []string{"cpu"}) {
			t.Fatalf("measurement names in shard didn't match")
		}
		if index.SeriesN() != 1 {
			t.Fatalf("series wasn't in index")
		}

		seriesTags := index.Series(string(pt.Key())).Tags
		if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
			t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
		}
		if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
			t.Fatalf("tag key wasn't saved to measurement index")
		}
	}

	validateIndex()

	// ensure the index gets loaded after closing and opening the shard
	sh.Close()

	index = tsdb.NewDatabaseIndex()
	sh = tsdb.NewShard(index, tmpShard, tsdb.NewEngineOptions())
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	validateIndex()

	// and ensure that we can still write data
	pt.SetTime(time.Unix(2, 6))
	err = sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}
}
Example #28
func TestShardMapper_LocalMapperTagSets(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	shard := mustCreateShard(tmpDir)

	pt1time := time.Unix(1, 0).UTC()
	pt1 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverA", "region": "us-east"},
		map[string]interface{}{"value": 42},
		pt1time,
	)
	pt2time := time.Unix(2, 0).UTC()
	pt2 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverB", "region": "us-east"},
		map[string]interface{}{"value": 60},
		pt2time,
	)
	err := shard.WritePoints([]tsdb.Point{pt1, pt2})
	if err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		stmt     string
		expected []string
	}{
		{
			stmt:     `SELECT sum(value) FROM cpu`,
			expected: []string{"cpu"},
		},
		{
			stmt:     `SELECT sum(value) FROM cpu GROUP BY host`,
			expected: []string{"cpu|host|serverA", "cpu|host|serverB"},
		},
		{
			stmt:     `SELECT sum(value) FROM cpu GROUP BY region`,
			expected: []string{"cpu|region|us-east"},
		},
		{
			stmt:     `SELECT sum(value) FROM cpu WHERE host='serverA'`,
			expected: []string{"cpu"},
		},
		{
			stmt:     `SELECT sum(value) FROM cpu WHERE host='serverB'`,
			expected: []string{"cpu"},
		},
		{
			stmt:     `SELECT sum(value) FROM cpu WHERE host='serverC'`,
			expected: []string{},
		},
	}

	for _, tt := range tests {
		stmt := mustParseSelectStatement(tt.stmt)
		mapper := openLocalMapperOrFail(t, shard, stmt)
		got := mapper.TagSets()
		if !reflect.DeepEqual(got, tt.expected) {
			t.Errorf("test '%s'\n\tgot      %s\n\texpected %s", tt.stmt, got, tt.expected)
		}
	}

}
Example #29
func TestShardMapper_WriteAndSingleMapperRawQuery(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	shard := mustCreateShard(tmpDir)

	pt1time := time.Unix(1, 0).UTC()
	pt1 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverA", "region": "us-east"},
		map[string]interface{}{"load": 42},
		pt1time,
	)
	pt2time := time.Unix(2, 0).UTC()
	pt2 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverB", "region": "us-east"},
		map[string]interface{}{"load": 60},
		pt2time,
	)
	err := shard.WritePoints([]tsdb.Point{pt1, pt2})
	if err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		stmt      string
		chunkSize int
		expected  []string
	}{
		{
			stmt:     `SELECT load FROM cpu`,
			expected: []string{`{"name":"cpu","values":[{"time":1000000000,"value":42},{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:      `SELECT load FROM cpu`,
			chunkSize: 1,
			expected:  []string{`{"name":"cpu","values":[{"time":1000000000,"value":42}]}`, `{"name":"cpu","values":[{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:      `SELECT load FROM cpu`,
			chunkSize: 2,
			expected:  []string{`{"name":"cpu","values":[{"time":1000000000,"value":42},{"time":2000000000,"value":60}]}`},
		},
		{
			stmt:      `SELECT load FROM cpu`,
			chunkSize: 3,
			expected:  []string{`{"name":"cpu","values":[{"time":1000000000,"value":42},{"time":2000000000,"value":60}]}`},
		},
		{
			stmt:     `SELECT load FROM cpu GROUP BY host`,
			expected: []string{`{"name":"cpu","tags":{"host":"serverA"},"values":[{"time":1000000000,"value":42}]}`, `{"name":"cpu","tags":{"host":"serverB"},"values":[{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:     `SELECT load FROM cpu GROUP BY region`,
			expected: []string{`{"name":"cpu","tags":{"region":"us-east"},"values":[{"time":1000000000,"value":42},{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:     `SELECT load FROM cpu WHERE host='serverA'`,
			expected: []string{`{"name":"cpu","values":[{"time":1000000000,"value":42}]}`, `null`},
		},
		{
			stmt:     `SELECT load FROM cpu WHERE host='serverB'`,
			expected: []string{`{"name":"cpu","values":[{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:     `SELECT load FROM cpu WHERE host='serverC'`,
			expected: []string{`null`},
		},
		{
			stmt:     `SELECT load FROM cpu WHERE load = 60`,
			expected: []string{`{"name":"cpu","values":[{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:     `SELECT load FROM cpu WHERE load != 60`,
			expected: []string{`{"name":"cpu","values":[{"time":1000000000,"value":42}]}`, `null`},
		},
		{
			stmt:     fmt.Sprintf(`SELECT load FROM cpu WHERE time = '%s'`, pt1time.Format(influxql.DateTimeFormat)),
			expected: []string{`{"name":"cpu","values":[{"time":1000000000,"value":42}]}`, `null`},
		},
		{
			stmt:     fmt.Sprintf(`SELECT load FROM cpu WHERE time > '%s'`, pt1time.Format(influxql.DateTimeFormat)),
			expected: []string{`{"name":"cpu","values":[{"time":2000000000,"value":60}]}`, `null`},
		},
		{
			stmt:     fmt.Sprintf(`SELECT load FROM cpu WHERE time > '%s'`, pt2time.Format(influxql.DateTimeFormat)),
			expected: []string{`null`},
		},
	}

	for _, tt := range tests {
		stmt := mustParseSelectStatement(tt.stmt)
		mapper := openRawMapperOrFail(t, shard, stmt, tt.chunkSize)

		for i := range tt.expected {
			got := nextRawChunkAsJson(t, mapper)
			if got != tt.expected[i] {
				t.Errorf("test '%s'\n\tgot      %s\n\texpected %s", tt.stmt, got, tt.expected[i])
				break
			}
		}
	}
}
Example #30
// Test that the executor correctly orders data across shards when the tagsets
// are not presented in alphabetical order across shards.
func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) {
	// Create the mock planner and its metastore
	store, queryExecutor := testStoreAndQueryExecutor()
	defer os.RemoveAll(store.Path())
	queryExecutor.MetaStore = &testQEMetastore{
		sgFunc: func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
			return []meta.ShardGroupInfo{
				{
					ID: sgID,
					Shards: []meta.ShardInfo{
						{
							ID:       uint64(sID0),
							OwnerIDs: []uint64{nID},
						},
					},
				},
				{
					ID: sgID,
					Shards: []meta.ShardInfo{
						{
							ID:       uint64(sID1),
							OwnerIDs: []uint64{nID},
						},
					},
				},
			}, nil
		},
	}

	// Write tagsets "y" and "z" to first shard.
	if err := store.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "y"},
		map[string]interface{}{"value": 100},
		time.Unix(1, 0).UTC(),
	)}); err != nil {
		t.Fatalf(err.Error())
	}
	if err := store.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "z"},
		map[string]interface{}{"value": 200},
		time.Unix(1, 0).UTC(),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	// Write tagsets "x", y" and "z" to second shard.
	if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "x"},
		map[string]interface{}{"value": 300},
		time.Unix(2, 0).UTC(),
	)}); err != nil {
		t.Fatalf(err.Error())
	}
	if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "y"},
		map[string]interface{}{"value": 400},
		time.Unix(3, 0).UTC(),
	)}); err != nil {
		t.Fatalf(err.Error())
	}
	if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "z"},
		map[string]interface{}{"value": 500},
		time.Unix(3, 0).UTC(),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		skip      bool   // Skip test
		stmt      string // Query statement
		chunkSize int    // Chunk size for driving the executor
		expected  string // Expected results, rendered as a string
	}{
		{
			stmt:     `SELECT sum(value) FROM cpu GROUP BY host`,
			expected: `[{"name":"cpu","tags":{"host":"x"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",300]]},{"name":"cpu","tags":{"host":"y"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",500]]},{"name":"cpu","tags":{"host":"z"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",700]]}]`,
		},
		{
			stmt:     `SELECT value FROM cpu GROUP BY host`,
			expected: `[{"name":"cpu","tags":{"host":"x"},"columns":["time","value"],"values":[["1970-01-01T00:00:02Z",300]]},{"name":"cpu","tags":{"host":"y"},"columns":["time","value"],"values":[["1970-01-01T00:00:01Z",100],["1970-01-01T00:00:03Z",400]]},{"name":"cpu","tags":{"host":"z"},"columns":["time","value"],"values":[["1970-01-01T00:00:01Z",200],["1970-01-01T00:00:03Z",500]]}]`,
		},
	}

	for _, tt := range tests {
		if tt.skip {
			t.Logf("Skipping test %s", tt.stmt)
			continue
		}
		executor, err := queryExecutor.Plan(mustParseSelectStatement(tt.stmt), tt.chunkSize)
		if err != nil {
			t.Fatalf("failed to plan query: %s", err.Error())
		}
		got := executeAndGetResults(executor)
		if got != tt.expected {
			t.Fatalf("Test %s\nexp: %s\ngot: %s\n", tt.stmt, tt.expected, got)
		}
	}
}