func TestStoreEnsureSeriesPersistedInNewShards(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if err := s.CreateShard("foo", "default", 1); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	p, _ := tsdb.ParsePoints([]byte("cpu val=1"))
	if err := s.WriteToShard(1, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	if err := s.CreateShard("foo", "default", 2); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}
	if err := s.WriteToShard(2, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	d := s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}

	// Delete the shard, close the store, reopen it, and confirm the series is still in the index.
	s.DeleteShard(1)
	s.Close()

	s = tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	d = s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}
}
func TestDropMeasurementStatement(t *testing.T) {
	store, executor := testStoreAndExecutor()
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)
	pt2 := tsdb.NewPoint(
		"memory",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt, pt2}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("show series", executor)
	expected := `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]},{"name":"memory","columns":["_key","host"],"values":[["memory,host=server","server"]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop measurement memory", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	validateDrop := func() {
		got = executeAndGetJSON("show series", executor)
		expected = `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("show measurements", executor)
		expected = `[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("select * from memory", executor)
		expected = `[{}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
	}

	validateDrop()

	// Restart the store and make sure the drop persisted.
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store

	validateDrop()
}
func TestStoreOpenNotRPDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	// Create a file where a retention policy directory should be.
	rpPath := filepath.Join(path, "myrp")
	if _, err := os.Create(rpPath); err != nil {
		t.Fatalf("Store.Open() failed to create test retention policy file: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if got, exp := s.ShardN(), 0; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}
}
func TestDropDatabase(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := models.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []models.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	var name string
	me := &metaExec{fn: func(stmt influxql.Statement) *influxql.Result {
		name = stmt.(*influxql.DropDatabaseStatement).Name
		return &influxql.Result{}
	}}
	executor.MetaStatementExecutor = me

	// Verify the database is there on disk.
	dbPath := filepath.Join(store.Path(), "foo")
	if _, err := os.Stat(dbPath); err != nil {
		t.Fatalf("expected database dir %s to exist", dbPath)
	}

	got = executeAndGetJSON("drop database foo", executor)
	expected = `[{}]`
	if got != expected {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	if name != "foo" {
		t.Fatalf("expected the MetaStatementExecutor to be called with database name foo, but got %s", name)
	}

	if _, err := os.Stat(dbPath); !os.IsNotExist(err) {
		t.Fatalf("expected database dir %s to be gone", dbPath)
	}

	// Restart the store and make sure writes to the dropped database's shard fail.
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	if err := store.WriteToShard(shardID, []models.Point{pt}); err == nil || err.Error() != "shard not found" {
		t.Fatalf("expected shard to not be found")
	}
}
func TestStoreOpenNotDatabaseDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	// Create a file where a database directory should be.
	path := filepath.Join(dir, "bad_db_path")
	if _, err := os.Create(path); err != nil {
		t.Fatalf("Store.Open() failed to create test db file: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 0; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if got, exp := s.ShardN(), 0; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}
}
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, version string) (*Server, error) {
	// Construct base meta store and data store.
	s := &Server{
		version: version,
		err:     make(chan error),
		closing: make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		TSDBStore: tsdb.NewStore(c.Data.Dir),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.TSDBStore.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)

	// Initialize query executor.
	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}

	// Set the shard writer.
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service.
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.MetaStore = s.MetaStore
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendSnapshotterService()
	s.appendAdminService(c.Admin)
	s.appendContinuousQueryService(c.ContinuousQuery)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	s.appendUDPService(c.UDP)
	s.appendRetentionPolicyService(c.Retention)
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	return s, nil
}
func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)

	// Generate point data to write to the shards.
	points := []tsdb.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}

	// Create a temporary directory for the test data.
	dir, _ := ioutil.TempDir("", "store_test")

	// Create the store.
	store := tsdb.NewStore(dir)

	// Open the store.
	if err := store.Open(); err != nil {
		b.Fatalf("benchmarkStoreOpen: %s", err)
	}

	// Create the requested number of shards in the store & write the points.
	for shardID := 0; shardID < shardCnt; shardID++ {
		if err := store.CreateShard("mydb", "myrp", uint64(shardID)); err != nil {
			b.Fatalf("benchmarkStoreOpen: %s", err)
		}

		// Write points to the shard.
		chunkedWriteStoreShard(store, shardID, points)
	}

	// Close the store.
	if err := store.Close(); err != nil {
		b.Fatalf("benchmarkStoreOpen: %s", err)
	}

	// Run the benchmark loop: each iteration re-opens the populated store.
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		store := tsdb.NewStore(dir)
		if err := store.Open(); err != nil {
			b.Fatalf("benchmarkStoreOpen: %s", err)
		}

		b.StopTimer()
		store.Close()
		b.StartTimer()
	}
}
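// A hypothetical driver for benchmarkStoreOpen above, sketching how the
// measurement/tag-key/tag-value/point/shard counts might be wired up as a
// named benchmark: 64 measurements x 5^5 tag combinations = 200K series
// across 100 shards. The function name and parameter values here are
// illustrative assumptions, not taken from the original source.
func BenchmarkStoreOpen_200KSeries_100Shards(b *testing.B) {
	benchmarkStoreOpen(b, 64, 5, 5, 100, 100)
}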
func testStore() *tsdb.Store {
	path, _ := ioutil.TempDir("", "")

	store := tsdb.NewStore(path)
	if err := store.Open(); err != nil {
		panic(err)
	}
	return store
}
func TestDropSeriesStatement(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := models.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []models.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop series from cpu", executor)

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	// Restart the store and make sure the drop persisted.
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store

	got = executeAndGetJSON("select * from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}
}
func testStore() *tsdb.Store {
	path, _ := ioutil.TempDir("", "")

	store := tsdb.NewStore(path)
	store.EngineOptions.Config.WALDir = filepath.Join(path, "wal")
	if err := store.Open(); err != nil {
		panic(err)
	}
	return store
}
func TestWritePointsAndExecuteQuery(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	// Write first point.
	if err := store.WriteToShard(shardID, []models.Point{models.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)}); err != nil {
		t.Fatal(err)
	}

	// Write second point.
	if err := store.WriteToShard(shardID, []models.Point{models.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(2, 3),
	)}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu", executor)
	expected := `[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["1970-01-01T00:00:01.000000002Z","server",1],["1970-01-01T00:00:02.000000003Z","server",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1],["1970-01-01T00:00:02.000000003Z",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	// Restart the store and confirm the data is still queryable.
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	if err := store.Open(); err != nil {
		t.Fatal(err)
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}
}
func TestStoreOpenShardCreateDelete(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb", "myrp")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if err := s.CreateShard("mydb", "myrp", 1); err != nil {
		t.Fatalf("Store.Open() failed to create shard: %v", err)
	}

	if got, exp := s.ShardN(), 1; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}

	shardIDs := s.ShardIDs()
	if len(shardIDs) != 1 || shardIDs[0] != 1 {
		t.Fatalf("Store.Open() ShardIDs not correct: got %v, exp %v", s.ShardIDs(), []uint64{1})
	}

	if err := s.DeleteShard(1); err != nil {
		t.Fatalf("Store.Open() failed to delete shard: %v", err)
	}

	if sh := s.Shard(1); sh != nil {
		t.Fatal("Store.Open() shard ID 1 still exists")
	}
}
func testStoreAndExecutor() (*tsdb.Store, *tsdb.QueryExecutor) {
	path, _ := ioutil.TempDir("", "")

	store := tsdb.NewStore(path)
	if err := store.Open(); err != nil {
		panic(err)
	}

	database := "foo"
	retentionPolicy := "bar"
	shardID := uint64(1)
	store.CreateShard(database, retentionPolicy, shardID)

	executor := tsdb.NewQueryExecutor(store)
	executor.MetaStore = &testMetastore{}
	executor.ShardMapper = &testShardMapper{store: store}

	return store, executor
}
func TestStoreOpen(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	// 0700 (not 0600) so the store can descend into the database directory.
	if err := os.MkdirAll(filepath.Join(dir, "mydb"), 0700); err != nil {
		t.Fatalf("failed to create test db dir: %v", err)
	}

	s := tsdb.NewStore(dir)
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("database index count mismatch: got %v, exp %v", got, exp)
	}
}
// Ensure writing a point and updating it results in only a single point.
func TestWritePointsAndExecuteQuery_Update(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	// Write original point.
	if err := store.WriteToShard(1, []models.Point{models.NewPoint(
		"temperature",
		map[string]string{},
		map[string]interface{}{"value": 100.0},
		time.Unix(0, 0),
	)}); err != nil {
		t.Fatal(err)
	}

	// Restart store.
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	if err := store.Open(); err != nil {
		t.Fatal(err)
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	// Rewrite the point with a new value.
	if err := store.WriteToShard(1, []models.Point{models.NewPoint(
		"temperature",
		map[string]string{},
		map[string]interface{}{"value": 200.0},
		time.Unix(0, 0),
	)}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("select * from temperature", executor)
	exp := `[{"series":[{"name":"temperature","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",200]]}]}]`
	if exp != got {
		t.Fatalf("\n\nexp: %s\ngot: %s", exp, got)
	}
}
func TestStoreOpenShard(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb", "myrp")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	shardPath := filepath.Join(path, "1")
	if _, err := os.Create(shardPath); err != nil {
		t.Fatalf("Store.Open() failed to create test shard 1: %v", err)
	}

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if got, exp := s.ShardN(), 1; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}

	if sh := s.Shard(1); sh.Path() != shardPath {
		t.Errorf("Store.Open() shard path mismatch: got %v, exp %v", sh.Path(), shardPath)
	}
}
func testStoreAndExecutor(storePath string) (*tsdb.Store, *tsdb.QueryExecutor) {
	if storePath == "" {
		storePath, _ = ioutil.TempDir("", "")
	}

	store := tsdb.NewStore(storePath)
	store.EngineOptions.Config.WALDir = filepath.Join(storePath, "wal")
	if err := store.Open(); err != nil {
		panic(err)
	}

	database := "foo"
	retentionPolicy := "bar"
	shardID := uint64(1)
	store.CreateShard(database, retentionPolicy, shardID)

	executor := tsdb.NewQueryExecutor(store)
	executor.MetaStore = &testMetastore{}
	executor.ShardMapper = &testShardMapper{store: store}

	return store, executor
}
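// The tests above call executeAndGetJSON, which is not shown in this section.
// Below is a minimal sketch of what it plausibly does, assuming the
// QueryExecutor of this era exposes ExecuteQuery(query, database, chunkSize)
// returning a channel of *influxql.Result; both helpers are assumptions for
// illustration, not the original implementation.
func executeAndGetJSON(query string, executor *tsdb.QueryExecutor) string {
	ch, err := executor.ExecuteQuery(mustParseQuery(query), "foo", 20)
	if err != nil {
		panic(err)
	}

	// Drain the result channel and marshal everything to JSON so tests can
	// compare against a single expected string.
	var results []*influxql.Result
	for r := range ch {
		results = append(results, r)
	}

	b, err := json.Marshal(results)
	if err != nil {
		panic(err)
	}
	return string(b)
}

// mustParseQuery parses an InfluxQL string, panicking on error (assumed helper).
func mustParseQuery(s string) *influxql.Query {
	q, err := influxql.NewParser(strings.NewReader(s)).ParseQuery()
	if err != nil {
		panic(err)
	}
	return q
}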
func TestStoreOpenShardBadShardPath(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "mydb", "myrp")
	if err := os.MkdirAll(path, 0700); err != nil {
		t.Fatalf("Store.Open() failed to create test db dir: %v", err)
	}

	// Non-numeric shard ID.
	shardPath := filepath.Join(path, "bad_shard_path")
	if _, err := os.Create(shardPath); err != nil {
		t.Fatalf("Store.Open() failed to create test shard file: %v", err)
	}

	s := tsdb.NewStore(dir)
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if got, exp := s.DatabaseIndexN(), 1; got != exp {
		t.Fatalf("Store.Open() database index count mismatch: got %v, exp %v", got, exp)
	}

	if di := s.DatabaseIndex("mydb"); di == nil {
		t.Errorf("Store.Open() database mydb does not exist")
	}

	if got, exp := s.ShardN(), 0; got != exp {
		t.Fatalf("Store.Open() shard count mismatch: got %v, exp %v", got, exp)
	}
}
func cmdInfo(path string) {
	tstore := tsdb.NewStore(filepath.Join(path, "data"))
	tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
	tstore.EngineOptions.Config.Dir = filepath.Join(path, "data")
	tstore.EngineOptions.Config.WALLoggingEnabled = false
	tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal")
	if err := tstore.Open(); err != nil {
		fmt.Printf("Failed to open dir: %v\n", err)
		os.Exit(1)
	}

	size, err := tstore.DiskSize()
	if err != nil {
		fmt.Printf("Failed to determine disk usage: %v\n", err)
	}

	// Summary stats
	fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n\n",
		tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore))

	tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0)
	fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t"))

	shardIDs := tstore.ShardIDs()

	databases := tstore.Databases()
	sort.Strings(databases)
	for _, db := range databases {
		index := tstore.DatabaseIndex(db)
		measurements := index.Measurements()
		sort.Sort(measurements)
		for _, m := range measurements {
			tags := m.TagKeys()
			tagValues := 0
			for _, tag := range tags {
				tagValues += len(m.TagValues(tag))
			}
			fields := m.FieldNames()
			sort.Strings(fields)
			series := m.SeriesKeys()
			sort.Strings(series)
			sort.Sort(ShardIDs(shardIDs))

			// Use each shard's field codec to report field names and types
			// for the measurement (one row per field).
			for _, shardID := range shardIDs {
				shard := tstore.Shard(shardID)
				codec := shard.FieldCodec(m.Name)
				for _, field := range codec.Fields() {
					ft := fmt.Sprintf("%s:%s", field.Name, field.Type)
					fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues, len(fields), ft, len(series))
				}
			}
		}
	}
	tw.Flush()
}
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store. node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes. This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")
	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	// Load the node information.
	metaAddresses := []string{c.Meta.HTTPBindAddress}
	if !c.Meta.Enabled {
		metaAddresses = c.Meta.JoinPeers
	}

	node, err := influxdb.LoadNode(c.Meta.Dir, metaAddresses)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(c.Meta.Dir, metaAddresses)
	}

	// In 0.10.0 bind-address got moved to the top level. Check the old
	// location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node:    node,
		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper.
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer.
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service.
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service.
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor.
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Construct base meta store and data store.
	tsdbStore := tsdb.NewStore(c.Data.Dir)
	tsdbStore.EngineOptions.Config = c.Data

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		TSDBStore: tsdbStore,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
	s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
	s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

	// Set the shard mapper.
	s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
	s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
	s.ShardMapper.MetaStore = s.MetaStore
	s.ShardMapper.TSDBStore = s.TSDBStore

	// Initialize query executor.
	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
	s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
	s.QueryExecutor.ShardMapper = s.ShardMapper
	s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

	// Set the shard writer.
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service.
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.MetaStore = s.MetaStore
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff

	// Initialize the monitor.
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.MetaStore = s.MetaStore
	s.Monitor.PointsWriter = s.PointsWriter

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendSnapshotterService()
	s.appendCopierService()
	s.appendAdminService(c.Admin)
	s.appendContinuousQueryService(c.ContinuousQuery)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	for _, g := range c.UDPs {
		s.appendUDPService(g)
	}
	s.appendRetentionPolicyService(c.Retention)
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	return s, nil
}
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// The node information lives under the top level directory. Before 0.10
	// it was in the meta directory, so fall back to that if the top level
	// directory isn't specified.
	dir := c.Dir
	if dir == "" {
		dir = c.Meta.Dir
	}

	// Load the node information.
	node, err := influxdb.LoadNode(dir, c.Meta.HTTPBindAddress)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(dir, c.Meta.HTTPBindAddress)
	}

	// In 0.10.0 bind-address got moved to the top level. Check the old
	// location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node:    node,
		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper.
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer.
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service.
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service.
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor.
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
func main() {
	var path string
	flag.StringVar(&path, "p", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]")
	flag.Parse()

	tstore := tsdb.NewStore(filepath.Join(path, "data"))
	tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
	tstore.EngineOptions.Config.Dir = filepath.Join(path, "data")
	tstore.EngineOptions.Config.WALLoggingEnabled = false
	tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal")
	if err := tstore.Open(); err != nil {
		fmt.Printf("Failed to open dir: %v\n", err)
		os.Exit(1)
	}

	size, err := tstore.DiskSize()
	if err != nil {
		fmt.Printf("Failed to determine disk usage: %v\n", err)
	}

	// Summary stats
	fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n",
		tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore))
	fmt.Println()

	tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0)
	fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t"))

	shardIDs := tstore.ShardIDs()

	databases := tstore.Databases()
	sort.Strings(databases)
	for _, db := range databases {
		index := tstore.DatabaseIndex(db)
		measurements := index.Measurements()
		sort.Sort(measurements)
		for _, m := range measurements {
			tags := m.TagKeys()
			tagValues := 0
			for _, tag := range tags {
				tagValues += len(m.TagValues(tag))
			}
			fields := m.FieldNames()
			sort.Strings(fields)
			series := m.SeriesKeys()
			sort.Strings(series)
			sort.Sort(ShardIDs(shardIDs))

			// Sample a point from each measurement in each shard to determine the field types.
			for _, shardID := range shardIDs {
				shard := tstore.Shard(shardID)
				tx, err := shard.ReadOnlyTx()
				if err != nil {
					fmt.Printf("Failed to get transaction: %v\n", err)
				}

				for _, key := range series {
					fieldSummary := []string{}

					cursor := tx.Cursor(key, tsdb.Forward)

					// Series doesn't exist in this shard.
					if cursor == nil {
						continue
					}

					// Seek to the beginning.
					_, value := cursor.Seek([]byte{})

					codec := shard.FieldCodec(m.Name)
					if codec != nil {
						fields, err := codec.DecodeFieldsWithNames(value)
						if err != nil {
							fmt.Printf("Failed to decode values: %v\n", err)
						}

						for field, value := range fields {
							fieldSummary = append(fieldSummary, fmt.Sprintf("%s:%T", field, value))
						}
						sort.Strings(fieldSummary)
					}

					fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues, len(fields), strings.Join(fieldSummary, ","), len(series))

					break
				}
				tx.Rollback()
			}
		}
	}
	tw.Flush()
}