func TestStoreEnsureSeriesPersistedInNewShards(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if err := s.CreateShard("foo", "default", 1); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	p, _ := tsdb.ParsePoints([]byte("cpu val=1"))
	if err := s.WriteToShard(1, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	if err := s.CreateShard("foo", "default", 2); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	if err := s.WriteToShard(2, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	d := s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}

	// Delete the first shard, close the store, reopen it, and confirm the series is still in the index.
	s.DeleteShard(1)
	s.Close()

	s = tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	d = s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}
}

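// TestDropDatabase ensures DROP DATABASE calls the meta statement executor with
// the database name, removes the database directory from disk, and that writes
// to the old shard fail once the store has been reopened.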
func TestDropDatabase(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	var name string
	me := &metaExec{fn: func(stmt influxql.Statement) *influxql.Result {
		name = stmt.(*influxql.DropDatabaseStatement).Name
		return &influxql.Result{}
	}}
	executor.MetaStatementExecutor = me

	// Verify the database directory exists on disk.
	dbPath := filepath.Join(store.Path(), "foo")
	if _, err := os.Stat(dbPath); err != nil {
		t.Fatalf("expected database dir %s to exist", dbPath)
	}

	got = executeAndGetJSON("drop database foo", executor)
	expected = `[{}]`
	if got != expected {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	if name != "foo" {
		t.Fatalf("expected the MetaStatementExecutor to be called with database name foo, but got %s", name)
	}

	if _, err := os.Stat(dbPath); !os.IsNotExist(err) {
		t.Fatalf("expected database dir %s to be gone", dbPath)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err == nil || err.Error() != "shard not found" {
		t.Fatalf("expected shard to not be found")
	}
}

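// TestStoreOpenNotRPDir ensures Store.Open still loads a database directory that
// contains a file where a retention policy directory would be, without creating
// any shards for it.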
func TestStoreOpenNotRPDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	// Create a file where a retention policy directory would normally be.
	rpPath := filepath.Join(path, "myrp")
	if _, err := os.Create(rpPath); err != nil {
		t.Fatalf("Store.Open() failed to create test retention policy file: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if got, exp := s.ShardN(), 0; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}
}

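// TestStoreOpenNotDatabaseDir ensures Store.Open ignores files at the top level
// of the data directory that are not database directories.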
func TestStoreOpenNotDatabaseDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	// Create a file where a database directory would normally be.
	path := filepath.Join(dir, "bad_db_path")
	if _, err := os.Create(path); err != nil {
		t.Fatalf("Store.Open() failed to create test db file: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 0; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if got, exp := s.ShardN(), 0; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}
}

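// benchmarkStoreOpen seeds a store with the given number of measurements, tag
// keys, tag values, points, and shards, then measures how long Store.Open takes
// to reload it.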
func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)

	// Generate point data to write to the shards.
	points := []tsdb.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}

	// Create a temporary directory for the test data.
	dir, _ := ioutil.TempDir("", "store_test")
	defer os.RemoveAll(dir)

	// Create the store.
	store := tsdb.NewStore(dir)

	// Open the store.
	if err := store.Open(); err != nil {
		b.Fatalf("benchmarkStoreOpen: %s", err)
	}

	// Create the requested number of shards in the store & write the points.
	for shardID := 0; shardID < shardCnt; shardID++ {
		if err := store.CreateShard("mydb", "myrp", uint64(shardID)); err != nil {
			b.Fatalf("benchmarkStoreOpen: %s", err)
		}

		// Write points to the shard.
		chunkedWriteStoreShard(store, shardID, points)
	}

	// Close the store.
	if err := store.Close(); err != nil {
		b.Fatalf("benchmarkStoreOpen: %s", err)
	}

	// Run the benchmark loop.
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		store := tsdb.NewStore(dir)
		if err := store.Open(); err != nil {
			b.Fatalf("benchmarkStoreOpen: %s", err)
		}

		b.StopTimer()
		store.Close()
		b.StartTimer()
	}
}

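// TestDropSeriesStatement ensures DROP SERIES removes the series data while the
// measurement remains in the index, and that the removal persists after the
// store is closed and reopened.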
func TestDropSeriesStatement(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop series from cpu", executor)

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store

	got = executeAndGetJSON("select * from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}
}

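// testStore returns an opened Store backed by a new temporary directory.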
func testStore() *tsdb.Store {
	path, _ := ioutil.TempDir("", "")

	store := tsdb.NewStore(path)
	store.EngineOptions.Config.WALDir = filepath.Join(path, "wal")

	err := store.Open()
	if err != nil {
		panic(err)
	}
	return store
}

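// TestWritePointsAndExecuteQuery ensures written points can be queried back,
// including after the store is closed and reopened.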
func TestWritePointsAndExecuteQuery(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	// Write first point.
	if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)}); err != nil {
		t.Fatal(err)
	}

	// Write second point.
	if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(2, 3),
	)}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu", executor)
	expected := `[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["1970-01-01T00:00:01.000000002Z","server",1],["1970-01-01T00:00:02.000000003Z","server",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1],["1970-01-01T00:00:02.000000003Z",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	if err := store.Open(); err != nil {
		t.Fatal(err)
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}
}

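// TestStoreOpenShardCreateDelete ensures a shard can be created inside an
// existing database/retention policy directory and then deleted again.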
func TestStoreOpenShardCreateDelete(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb", "myrp")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if err := s.CreateShard("mydb", "myrp", 1); err != nil {
		t.Fatalf("Store.Open() failed to create shard")
	}

	if got, exp := s.ShardN(), 1; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}

	shardIDs := s.ShardIDs()
	if len(shardIDs) != 1 || shardIDs[0] != 1 {
		t.Fatalf("Store.Open() ShardIDs not correct: got %v, exp %v", s.ShardIDs(), []uint64{1})
	}

	if err := s.DeleteShard(1); err != nil {
		t.Fatalf("Store.Open() failed to delete shard: %v", err)
	}

	if sh := s.Shard(1); sh != nil {
		t.Fatal("Store.Open() shard ID 1 still exists")
	}
}

// Ensure writing a point and updating it results in only a single point.
func TestWritePointsAndExecuteQuery_Update(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	// Write original point.
	if err := store.WriteToShard(1, []tsdb.Point{tsdb.NewPoint(
		"temperature",
		map[string]string{},
		map[string]interface{}{"value": 100.0},
		time.Unix(0, 0),
	)}); err != nil {
		t.Fatal(err)
	}

	// Restart store.
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	if err := store.Open(); err != nil {
		t.Fatal(err)
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	// Rewrite point with new value.
	if err := store.WriteToShard(1, []tsdb.Point{tsdb.NewPoint(
		"temperature",
		map[string]string{},
		map[string]interface{}{"value": 200.0},
		time.Unix(0, 0),
	)}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("select * from temperature", executor)
	exp := `[{"series":[{"name":"temperature","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",200]]}]}]`
	if exp != got {
		t.Fatalf("\n\nexp: %s\ngot: %s", exp, got)
	}
}

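// TestStoreOpen ensures an existing database directory is loaded into the index
// when the store is opened.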
func TestStoreOpen(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	if err := os.MkdirAll(filepath.Join(dir, "mydb"), 0700); err != nil {
		t.Fatalf("failed to create test db dir: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("database index count mismatch: got %v, exp %v", got, exp)
	}
}

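// TestStoreOpenShard ensures an existing shard is discovered and registered
// when the store is opened.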
func TestStoreOpenShard(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb", "myrp")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	shardPath := filepath.Join(path, "1")
	if _, err := os.Create(shardPath); err != nil {
		t.Fatalf("Store.Open() failed to create test shard 1: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if got, exp := s.ShardN(), 1; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}

	if sh := s.Shard(1); sh.Path() != shardPath {
		t.Errorf("Store.Open() shard path mismatch: got %v, exp %v", sh.Path(), shardPath)
	}
}

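// testStoreAndExecutor returns an opened Store containing a single shard and a
// QueryExecutor wired to the test meta store and shard mapper.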
func testStoreAndExecutor(storePath string) (*tsdb.Store, *tsdb.QueryExecutor) {
	if storePath == "" {
		storePath, _ = ioutil.TempDir("", "")
	}

	store := tsdb.NewStore(storePath)
	store.EngineOptions.Config.WALDir = filepath.Join(storePath, "wal")

	err := store.Open()
	if err != nil {
		panic(err)
	}

	database := "foo"
	retentionPolicy := "bar"
	shardID := uint64(1)

	if err := store.CreateShard(database, retentionPolicy, shardID); err != nil {
		panic(err)
	}

	executor := tsdb.NewQueryExecutor(store)
	executor.MetaStore = &testMetastore{}
	executor.ShardMapper = &testShardMapper{store: store}

	return store, executor
}