func TestParsePointWithStringWithCommas(t *testing.T) {
	// escaped comma
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			models.Fields{
				"value": 1.0,
				"str":   `foo\,bar`, // commas in string value
			},
			time.Unix(1, 0)),
	)

	// non-escaped comma
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			models.Fields{
				"value": 1.0,
				"str":   "foo,bar", // commas in string value
			},
			time.Unix(1, 0)),
	)
}
func TestParsePointWithStringField(t *testing.T) {
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`,
		models.MustNewPoint("cpu",
			models.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			models.Fields{
				"value": 1.0,
				"str":   "foo",
				"str2":  "bar",
			},
			time.Unix(1, 0)),
	)

	test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`,
		models.MustNewPoint("cpu",
			models.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			models.Fields{
				"str": `foo " bar`,
			},
			time.Unix(1, 0)),
	)
}
func TestParsePointEscapedStringsAndCommas(t *testing.T) {
	// non-escaped comma and quotes
	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			models.Fields{
				"value": `{Hello"{,}" World}`,
			},
			time.Unix(1, 0)),
	)

	// escaped comma and quotes
	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			models.Fields{
				"value": `{Hello"{\,}" World}`,
			},
			time.Unix(1, 0)),
	)
}
func TestParsePointsUnbalancedQuotedTags(t *testing.T) {
	pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126")
	if err != nil {
		t.Fatalf("ParsePoints failed: %v", err)
	}

	if exp := 2; len(pts) != exp {
		t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp)
	}

	// Expected " in the tag value
	exp := models.MustNewPoint("baz", models.Tags{"mytag": `"a`},
		models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125))
	if pts[0].String() != exp.String() {
		t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String())
	}

	// Expected two points to ensure we did not overscan the line
	exp = models.MustNewPoint("baz", models.Tags{"mytag": `a`},
		models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126))
	if pts[1].String() != exp.String() {
		t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String())
	}
}
// Ensure the engine can write points to storage.
func TestEngine_WritePoints_PointsWriter(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Points to be inserted.
	points := []models.Point{
		models.MustNewPoint("cpu", models.Tags{}, models.Fields{"foo": "bar"}, time.Unix(0, 1)),
		models.MustNewPoint("cpu", models.Tags{}, models.Fields{"foo": "bar"}, time.Unix(0, 0)),
		models.MustNewPoint("cpu", models.Tags{}, models.Fields{"foo": "bar"}, time.Unix(1, 0)),
		models.MustNewPoint("cpu", models.Tags{"host": "serverA"}, models.Fields{"foo": "bar"}, time.Unix(0, 0)),
	}

	// Mock points writer to ensure points are passed through.
	var invoked bool
	e.PointsWriter.WritePointsFn = func(a []models.Point) error {
		invoked = true
		if !reflect.DeepEqual(points, a) {
			t.Fatalf("unexpected points: %#v", a)
		}
		return nil
	}

	// Write points against two separate series.
	if err := e.WritePoints(points, nil, nil); err != nil {
		t.Fatal(err)
	} else if !invoked {
		t.Fatal("PointsWriter.WritePoints() not called")
	}
}
func TestShardWriteAddNewField(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex()
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt = models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0, "value2": 2.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if index.SeriesN() != 1 {
		t.Fatalf("series wasn't in index")
	}

	seriesTags := index.Series(string(pt.Key())).Tags
	if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
		t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
	}
	if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
		t.Fatalf("tag key wasn't saved to measurement index")
	}
	if len(index.Measurement("cpu").FieldNames()) != 2 {
		t.Fatalf("field names weren't saved to measurement index")
	}
}
func TestDropMeasurementStatement(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)
	pt2 := models.MustNewPoint(
		"memory",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []models.Point{pt, pt2}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("show series", executor)
	expected := `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]},{"name":"memory","columns":["_key","host"],"values":[["memory,host=server","server"]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop measurement memory", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	validateDrop := func() {
		got = executeAndGetJSON("show series", executor)
		expected = `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("show measurements", executor)
		expected = `[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("select * from memory", executor)
		expected = `[{}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
	}

	validateDrop()
	store.Close()
	store, executor = testStoreAndExecutor(store.Path())
	validateDrop()
}
func TestShardMapper_WriteAndSingleMapperRawQueryMultiValue(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	shard := mustCreateShard(tmpDir)

	pt1time := time.Unix(1, 0).UTC()
	pt1 := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "serverA", "region": "us-east"},
		map[string]interface{}{"foo": 42, "bar": 43},
		pt1time,
	)
	pt2time := time.Unix(2, 0).UTC()
	pt2 := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "serverB", "region": "us-east"},
		map[string]interface{}{"foo": 60, "bar": 61},
		pt2time,
	)
	err := shard.WritePoints([]models.Point{pt1, pt2})
	if err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		stmt      string
		chunkSize int
		expected  []string
	}{
		{
			stmt:     `SELECT foo FROM cpu`,
			expected: []string{`{"name":"cpu","fields":["foo"],"values":[{"time":1000000000,"value":42,"tags":{"host":"serverA","region":"us-east"}},{"time":2000000000,"value":60,"tags":{"host":"serverB","region":"us-east"}}]}`},
		},
		{
			stmt:     `SELECT foo,bar FROM cpu`,
			expected: []string{`{"name":"cpu","fields":["bar","foo"],"values":[{"time":1000000000,"value":{"bar":43,"foo":42},"tags":{"host":"serverA","region":"us-east"}},{"time":2000000000,"value":{"bar":61,"foo":60},"tags":{"host":"serverB","region":"us-east"}}]}`},
		},
	}

	for _, tt := range tests {
		stmt := mustParseSelectStatement(tt.stmt)
		mapper := openRawMapperOrFail(t, shard, stmt, tt.chunkSize)

		for i, s := range tt.expected {
			got := nextRawChunkAsJson(t, mapper)
			if got != s {
				t.Errorf("test '%s'\n\tgot      %s\n\texpected %s", tt.stmt, got, tt.expected[i])
				break
			}
		}
	}
}
func TestWritePointsAndExecuteQuery(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	// Write first point.
	if err := store.WriteToShard(shardID, []models.Point{models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	// Write second point.
	if err := store.WriteToShard(shardID, []models.Point{models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(2, 3),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	got := executeAndGetJSON("SELECT * FROM cpu", executor)
	expected := `[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["1970-01-01T00:00:01.000000002Z","server",1],["1970-01-01T00:00:02.000000003Z","server",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1],["1970-01-01T00:00:02.000000003Z",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	if err := store.Open(); err != nil {
		t.Fatalf(err.Error())
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}
}
func TestFilterMatchMultipleWildcards(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .host.measurement*", // should match this
		"servers.localhost .wrong.measurement*",
		"*.localhost .wrong.measurement*",
	}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := models.MustNewPoint("cpu_load",
		models.Tags{"host": "server01"},
		models.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.server01.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
// Ensure the shard writer returns an error when we can't get a connection.
func TestShardWriter_Write_PoolMax(t *testing.T) {
	ts := newTestWriteService(writeShardSlow)
	s := cluster.NewService(cluster.Config{
		ShardWriterTimeout: toml.Duration(100 * time.Millisecond),
	})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(100*time.Millisecond, 1)
	w.MetaClient = &metaClient{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	go w.WriteShard(shardID, ownerID, points)
	time.Sleep(time.Millisecond)
	if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "timed out waiting for free connection" {
		t.Fatalf("unexpected error: %v", err)
	}
}
func TestParsePointToString(t *testing.T) {
	line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000`
	pts, err := models.ParsePoints([]byte(line))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	got := pt.String()
	if line != got {
		t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
	}

	pt = models.MustNewPoint("cpu", models.Tags{"host": "serverA", "region": "us-east"},
		models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
		time.Unix(1, 0))
	got = pt.String()
	if line != got {
		t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
	}
}
// benchmarkWritePoints benchmarks writing new series to a shard.
// mCnt - measurement count
// tkCnt - tag key count
// tvCnt - tag value count (values per tag)
// pntCnt - points per series.
// # of series = mCnt * (tvCnt ^ tkCnt)
func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)
	// Create index for the shard to use.
	index := tsdb.NewDatabaseIndex()
	// Generate point data to write to the shard.
	points := []models.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := models.MustNewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}

	// Stop & reset timers and mem-stats before the main benchmark loop.
	b.StopTimer()
	b.ResetTimer()

	// Run the benchmark loop.
	for n := 0; n < b.N; n++ {
		tmpDir, _ := ioutil.TempDir("", "shard_test")
		tmpShard := path.Join(tmpDir, "shard")
		tmpWal := path.Join(tmpDir, "wal")
		shard := tsdb.NewShard(1, index, tmpShard, tmpWal, tsdb.NewEngineOptions())
		shard.Open()

		b.StartTimer()
		// Call the function being benchmarked.
		chunkedWrite(shard, points)
		b.StopTimer()

		shard.Close()
		os.RemoveAll(tmpDir)
	}
}
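// The wrappers below are a minimal sketch of how benchmarkWritePoints could be
// driven from `go test -bench`; the benchmark names and parameter choices are
// illustrative assumptions, not part of the original suite. With mCnt=10,
// tkCnt=2, tvCnt=5 the series count is 10 * 5^2 = 250, and each series
// receives pntCnt points per iteration.
func BenchmarkWritePoints_ExampleSmall(b *testing.B)  { benchmarkWritePoints(b, 10, 2, 5, 1) }
func BenchmarkWritePoints_ExampleLarger(b *testing.B) { benchmarkWritePoints(b, 10, 3, 5, 1) }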
func TestDropDatabase(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []models.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	var name string
	me := &metaExec{fn: func(stmt influxql.Statement) *influxql.Result {
		name = stmt.(*influxql.DropDatabaseStatement).Name
		return &influxql.Result{}
	}}
	executor.MetaStatementExecutor = me

	// verify the database is there on disk
	dbPath := filepath.Join(store.Path(), "foo")
	if _, err := os.Stat(dbPath); err != nil {
		t.Fatalf("expected database dir %s to exist", dbPath)
	}

	got = executeAndGetJSON("drop database foo", executor)
	expected = `[{}]`
	if got != expected {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	if name != "foo" {
		t.Fatalf("expected the MetaStatementExecutor to be called with database name foo, but got %s", name)
	}

	if _, err := os.Stat(dbPath); !os.IsNotExist(err) {
		t.Fatalf("expected database dir %s to be gone", dbPath)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	if err := store.WriteToShard(shardID, []models.Point{pt}); err == nil || err.Error() != "shard not found" {
		t.Fatalf("expected shard to not be found")
	}
}
// Ensure the shard writer returns an error when the server fails to accept the write.
func TestShardWriter_WriteShard_Error(t *testing.T) {
	ts := newTestWriteService(writeShardFail)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Minute)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" {
		t.Fatalf("unexpected error: %v", err)
	}
}
func TestParsePointWithTags(t *testing.T) {
	test(t,
		"cpu,host=serverA,region=us-east value=1.0 1000000000",
		models.MustNewPoint("cpu",
			models.Tags{"host": "serverA", "region": "us-east"},
			models.Fields{"value": 1.0},
			time.Unix(1, 0)))
}
// Ensure the shard writer returns an error when dialing times out.
func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
	ts := newTestWriteService(writeShardSuccess)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Nanosecond)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []models.Point
	points = append(points, models.MustNewPoint(
		"cpu", models.Tags{"host": "server01"},
		map[string]interface{}{"value": int64(100)},
		now,
	))

	if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) {
		t.Fatalf("expected error %v, to contain %s", err, exp)
	}
}
// NewTestPoint returns a new TestPoint.
//
// NewTestPoint panics if it is not a valid models.Point.
func NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint {
	return TestPoint{
		RawTags:   tags,
		RawFields: fields,
		RawTime:   time,
		Point:     models.MustNewPoint(name, tags, fields, time),
	}
}
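// A minimal usage sketch for NewTestPoint, assuming a hypothetical test; the
// test name and the expected line-protocol string below are illustrative only,
// not taken from the original suite.
func TestNewTestPointUsageSketch(t *testing.T) {
	tp := NewTestPoint("cpu",
		models.Tags{"host": "serverA"},
		models.Fields{"value": 1.0},
		time.Unix(1, 0))

	// The wrapped models.Point should serialize back to line protocol.
	if exp := `cpu,host=serverA value=1 1000000000`; tp.Point.String() != exp {
		t.Errorf("unexpected point string:\ngot: %v\nexp: %v", tp.Point.String(), exp)
	}
}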
// Tests that concurrent writes when the WAL closes do not cause race conditions.
func TestLog_WritePoints_CloseConcurrent(t *testing.T) {
	w := NewLog()
	defer w.Close()
	w.FlushMemorySizeThreshold = 1000
	total := 1000

	w.IndexWriter.WriteFn = func(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		return nil
	}

	if err := w.Open(); err != nil {
		t.Fatalf("error opening: %s", err.Error())
	}

	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		i := 0
		for {
			if i > total {
				return
			}
			select {
			case <-done:
				return
			default:
			}

			pt := models.MustNewPoint("cpu",
				map[string]string{"host": "A"},
				map[string]interface{}{"value": i},
				time.Unix(int64(i), 0),
			)
			if err := w.WritePoints([]models.Point{pt}, nil, nil); err != nil && err != tsm1.ErrWALClosed {
				t.Fatalf("failed to write points: %s", err.Error())
			}
			i++
		}
	}()

	time.Sleep(10 * time.Millisecond)
	if err := w.Close(); err != nil {
		t.Fatalf("failed to close WAL: %v", err)
	}

	// Let the goroutines run for a second
	select {
	case <-time.After(1 * time.Second):
		close(done)
	}

	// Wait for them to exit
	wg.Wait()
}
func TestNewPointUnhandledType(t *testing.T) {
	// nil value
	pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0))
	if exp := `cpu value= 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// unsupported type gets stored as string
	now := time.Unix(0, 0).UTC()
	pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0))
	if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	if exp := "1970-01-01 00:00:00 +0000 UTC"; pt.Fields()["value"] != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}
}
func TestNewPointEscaped(t *testing.T) {
	// commas
	pt := models.MustNewPoint("cpu,main", models.Tags{"tag,bar": "value"}, models.Fields{"name,bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// spaces
	pt = models.MustNewPoint("cpu main", models.Tags{"tag bar": "value"}, models.Fields{"name bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// equals
	pt = models.MustNewPoint("cpu=main", models.Tags{"tag=bar": "value=foo"}, models.Fields{"name=bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}
}
func TestDropSeriesStatement(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := models.MustNewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := store.WriteToShard(shardID, []models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop series from cpu", executor)

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store

	got = executeAndGetJSON("select * from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}
}
func TestRoundedString(t *testing.T) {
	fields := map[string]interface{}{"value": float64(1)}
	tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
	tests := []struct {
		name      string
		precision time.Duration
		exp       string
	}{
		{
			name:      "no precision",
			precision: time.Duration(0),
			exp:       "cpu value=1 946730096789012345",
		},
		{
			name:      "nanosecond precision",
			precision: time.Nanosecond,
			exp:       "cpu value=1 946730096789012345",
		},
		{
			name:      "microsecond precision",
			precision: time.Microsecond,
			exp:       "cpu value=1 946730096789012000",
		},
		{
			name:      "millisecond precision",
			precision: time.Millisecond,
			exp:       "cpu value=1 946730096789000000",
		},
		{
			name:      "second precision",
			precision: time.Second,
			exp:       "cpu value=1 946730097000000000",
		},
		{
			name:      "minute precision",
			precision: time.Minute,
			exp:       "cpu value=1 946730100000000000",
		},
		{
			name:      "hour precision",
			precision: time.Hour,
			exp:       "cpu value=1 946731600000000000",
		},
	}

	for _, test := range tests {
		pt := models.MustNewPoint("cpu", nil, fields, tm)
		act := pt.RoundedString(test.precision)
		if act != test.exp {
			t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v",
				test.name, act, test.exp)
		}
	}
}
func TestPrecisionString(t *testing.T) {
	fields := map[string]interface{}{"value": float64(1)}
	tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
	tests := []struct {
		name      string
		precision string
		exp       string
	}{
		{
			name:      "no precision",
			precision: "",
			exp:       "cpu value=1 946730096789012345",
		},
		{
			name:      "nanosecond precision",
			precision: "ns",
			exp:       "cpu value=1 946730096789012345",
		},
		{
			name:      "microsecond precision",
			precision: "u",
			exp:       "cpu value=1 946730096789012",
		},
		{
			name:      "millisecond precision",
			precision: "ms",
			exp:       "cpu value=1 946730096789",
		},
		{
			name:      "second precision",
			precision: "s",
			exp:       "cpu value=1 946730096",
		},
		{
			name:      "minute precision",
			precision: "m",
			exp:       "cpu value=1 15778834",
		},
		{
			name:      "hour precision",
			precision: "h",
			exp:       "cpu value=1 262980",
		},
	}

	for _, test := range tests {
		pt := models.MustNewPoint("cpu", nil, fields, tm)
		act := pt.PrecisionString(test.precision)
		if act != test.exp {
			t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v",
				test.name, act, test.exp)
		}
	}
}
func TestNewPointFloatScientific(t *testing.T) {
	test(t, `cpu value=6.632243e+06 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{},
			models.Fields{
				"value": float64(6632243),
			},
			time.Unix(1, 0)),
	)
}
func TestParsePointNegativeTimestamp(t *testing.T) {
	test(t, `cpu value=1 -1`,
		models.MustNewPoint(
			"cpu",
			models.Tags{},
			models.Fields{
				"value": 1.0,
			},
			time.Unix(0, -1)),
	)
}
func TestParsePointMinTimestamp(t *testing.T) {
	test(t, `cpu value=1 -9223372036854775807`,
		models.MustNewPoint(
			"cpu",
			models.Tags{},
			models.Fields{
				"value": 1.0,
			},
			time.Unix(0, -int64(1<<63-1))),
	)
}
func TestNewPointNegativeFloat(t *testing.T) {
	test(t, `cpu value=-0.64 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{},
			models.Fields{
				"value": -0.64,
			},
			time.Unix(1, 0)),
	)
}
func TestNewPointFloatNoDecimal(t *testing.T) {
	test(t, `cpu value=1. 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{},
			models.Fields{
				"value": 1.0,
			},
			time.Unix(1, 0)),
	)
}
func TestNewPointLargeInteger(t *testing.T) {
	test(t, `cpu value=6632243i 1000000000`,
		models.MustNewPoint(
			"cpu",
			models.Tags{},
			models.Fields{
				"value": 6632243, // if incorrectly encoded as a float, it would show up as 6.632243e+06
			},
			time.Unix(1, 0)),
	)
}