func TestService_WaitForDataChanged(t *testing.T) {
	dataChanged := make(chan bool)
	ms := MetaStore{}
	ms.WaitForDataChangedFn = func() error {
		<-dataChanged
		return nil
	}

	calls := make(chan bool, 2)
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		calls <- true
		return nil, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaStore = ms
	// Explicitly closed below for testing
	s.Open()

	// Should be called once during open
	select {
	case <-calls:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected call")
	}

	select {
	case <-calls:
		t.Fatal("unexpected call")
	case <-time.After(time.Millisecond):
	}

	// Signal that data has changed
	dataChanged <- true

	// Should be called once more after data changed
	select {
	case <-calls:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected call")
	}

	select {
	case <-calls:
		t.Fatal("unexpected call")
	case <-time.After(time.Millisecond):
	}

	// Close the service and ensure Databases is not called again.
	s.Close()
	dataChanged <- true

	select {
	case <-calls:
		t.Fatal("unexpected call")
	case <-time.After(time.Millisecond):
	}

	close(dataChanged)
}
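// The test above constructs MetaStore{} and assigns Fn fields, the usual
// function-field test-double pattern. A minimal sketch of what that mock
// presumably looks like, inferred from the calls above; this is an
// assumption for illustration, not the actual definition from the test
// package.
type MetaStore struct {
	WaitForDataChangedFn func() error
	DatabasesFn          func() ([]meta.DatabaseInfo, error)
}

// WaitForDataChanged delegates to the configurable function field.
func (m MetaStore) WaitForDataChanged() error {
	return m.WaitForDataChangedFn()
}

// Databases delegates to the configurable function field.
func (m MetaStore) Databases() ([]meta.DatabaseInfo, error) {
	return m.DatabasesFn()
}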
func TestService_IgnoreNonMatch(t *testing.T) {
	dataChanged := make(chan struct{})
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}, nil
	}

	prs := make(chan *cluster.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *cluster.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- struct{}{}

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that don't match any subscription.
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db1",
		RetentionPolicy: "rp0",
	}
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp2",
	}

	// Shouldn't get any prs back
	select {
	case pr := <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}

	close(dataChanged)
}
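// This test and the next use MetaClient{} and Subscription{} as the same
// kind of function-field doubles. A hedged sketch of both, inferred from
// how the tests call them; the real definitions in the test package may
// differ.
type MetaClient struct {
	WaitForDataChangedFn func() chan struct{}
	DatabasesFn          func() ([]meta.DatabaseInfo, error)
}

// WaitForDataChanged delegates to the configurable function field. Note the
// signature change from the MetaStore version above: it now returns a
// channel instead of blocking.
func (m MetaClient) WaitForDataChanged() chan struct{} {
	return m.WaitForDataChangedFn()
}

// Databases delegates to the configurable function field.
func (m MetaClient) Databases() ([]meta.DatabaseInfo, error) {
	return m.DatabasesFn()
}

// Subscription is assumed to satisfy the subscriber.PointsWriter interface
// returned by the NewPointsWriter hook in the tests.
type Subscription struct {
	WritePointsFn func(*cluster.WritePointsRequest) error
}

// WritePoints delegates to the configurable function field.
func (s Subscription) WritePoints(p *cluster.WritePointsRequest) error {
	return s.WritePointsFn(p)
}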
func TestService_Multiple(t *testing.T) {
	dataChanged := make(chan struct{})
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
					{
						Name: "rp1",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s1", Mode: "ALL", Destinations: []string{"udp://h2:9093", "udp://h3:9093"}},
						},
					},
				},
			},
		}, nil
	}

	prs := make(chan *cluster.WritePointsRequest, 4)
	urls := make(chan url.URL, 4)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *cluster.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- struct{}{}

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093", "udp://h2:9093", "udp://h3:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that don't match any subscription.
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db1",
		RetentionPolicy: "rp0",
	}
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp2",
	}

	// Write points that match subscription with mode ANY
	expPR := &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Validate we get the pr back just once
	var pr *cluster.WritePointsRequest
	select {
	case pr = <-prs:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected points request")
	}
	if pr != expPR {
		t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
	}

	// shouldn't get it a second time
	select {
	case pr = <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}

	// Write points that match subscription with mode ALL
	expPR = &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp1",
	}
	s.Points() <- expPR

	// Should get pr back twice
	for i := 0; i < 2; i++ {
		select {
		case pr = <-prs:
		case <-time.After(10 * time.Millisecond):
			t.Fatalf("expected points request: got %d exp 2", i)
		}
		if pr != expPR {
			t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
		}
	}

	close(dataChanged)
}
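// The Multiple test exercises the two subscription modes: with Mode "ANY" a
// matching write reaches exactly one destination, with Mode "ALL" it reaches
// every destination. A minimal sketch of that fan-out logic, inferred from
// the assertions above; this is an illustration of the behavior under test,
// not the service's actual implementation.
func fanOut(mode string, writers []subscriber.PointsWriter, p *cluster.WritePointsRequest) error {
	switch mode {
	case "ALL":
		// Deliver the request to every destination.
		for _, w := range writers {
			if err := w.WritePoints(p); err != nil {
				return err
			}
		}
		return nil
	case "ANY":
		// Deliver the request to a single destination; a real
		// implementation would balance across the writers rather than
		// always picking the first.
		return writers[0].WritePoints(p)
	default:
		return fmt.Errorf("unknown subscription mode: %q", mode)
	}
}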
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store. node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes. This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	// Load the node information.
	metaAddresses := []string{c.Meta.HTTPBindAddress}
	if !c.Meta.Enabled {
		metaAddresses = c.Meta.JoinPeers
	}

	node, err := influxdb.LoadNode(c.Meta.Dir, metaAddresses)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(c.Meta.Dir, metaAddresses)
	}

	// In 0.10.0 bind-address got moved to the top level. Check the old
	// location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
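// A hedged usage sketch for the constructor above: build the version
// metadata, construct the server, and open it. Only NewServer's signature
// and the BuildInfo fields are taken from the code above; NewConfig is
// assumed to be this package's default-config constructor, and the literal
// values are placeholders.
func runServer() error {
	c := NewConfig() // assumed default-config constructor
	buildInfo := &BuildInfo{
		Version: "0.11.0",
		Commit:  "unknown",
		Branch:  "master",
	}

	s, err := NewServer(c, buildInfo)
	if err != nil {
		return err
	}
	if err := s.Open(); err != nil {
		return err
	}
	// ... serve until shutdown, then:
	return s.Close()
}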
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Load the node information. Before 0.10 this was in the meta
	// directory, so use that if the top level directory isn't specified.
	dir := c.Dir
	if dir == "" {
		dir = c.Meta.Dir
	}

	node, err := influxdb.LoadNode(dir, c.Meta.HTTPBindAddress)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(dir, c.Meta.HTTPBindAddress)
	}

	// In 0.10.0 bind-address got moved to the top level. Check the old
	// location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
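// Both NewServer variants above call defaultHost to normalize bind
// addresses. A minimal sketch of what such a helper might do, assuming it
// substitutes a default hostname when the configured address has an empty
// or wildcard host; this is inferred from the call sites, not the actual
// helper.
func defaultHost(hostname, addr string) (string, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", err
	}
	if host == "" || host == "0.0.0.0" || host == "::" {
		return net.JoinHostPort(hostname, port), nil
	}
	return addr, nil
}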
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Construct base meta store and data store.
	tsdbStore := tsdb.NewStore(c.Data.Dir)
	tsdbStore.EngineOptions.Config = c.Data

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		TSDBStore: tsdbStore,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
	s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
	s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

	// Set the shard mapper
	s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
	s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
	s.ShardMapper.MetaStore = s.MetaStore
	s.ShardMapper.TSDBStore = s.TSDBStore

	// Initialize query executor.
	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
	s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
	s.QueryExecutor.ShardMapper = s.ShardMapper
	s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

	// Set the shard writer
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore)
	s.HintedHandoff.Monitor = s.Monitor

	// Create the Subscriber service
	s.Subscriber = subscriber.NewService(c.Subscriber)
	s.Subscriber.MetaStore = s.MetaStore

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.MetaStore = s.MetaStore
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff
	s.PointsWriter.Subscriber = s.Subscriber

	// needed for executing INTO queries.
	s.QueryExecutor.IntoWriter = s.PointsWriter

	// Initialize the monitor
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.MetaStore = s.MetaStore
	s.Monitor.PointsWriter = s.PointsWriter

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendRegistrationService(c.Registration)
	s.appendSnapshotterService()
	s.appendCopierService()
	s.appendAdminService(c.Admin)
	s.appendContinuousQueryService(c.ContinuousQuery)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	for _, g := range c.UDPs {
		s.appendUDPService(g)
	}
	s.appendRetentionPolicyService(c.Retention)
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	return s, nil
}
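// The append* calls at the end of the oldest variant all follow one
// pattern: build a service from its config section, wire shared
// dependencies, and add it to s.Services so Server.Open can start every
// service in order. A hedged sketch of that pattern using the retention
// service; the field names are assumptions based on the dependencies wired
// elsewhere in this function, and the real helper may differ.
func (s *Server) appendRetentionPolicyService(c retention.Config) {
	if !c.Enabled {
		return
	}
	srv := retention.NewService(c)
	srv.MetaStore = s.MetaStore
	srv.TSDBStore = s.TSDBStore
	s.Services = append(s.Services, srv)
}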