func TestService_WaitForDataChanged(t *testing.T) { dataChanged := make(chan struct{}, 1) ms := MetaClient{} ms.WaitForDataChangedFn = func() chan struct{} { return dataChanged } calls := make(chan bool, 2) ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) { calls <- true return nil, nil } s := subscriber.NewService(subscriber.NewConfig()) s.MetaClient = ms // Explicitly closed below for testing s.Open() // Should be called once during open select { case <-calls: case <-time.After(10 * time.Millisecond): t.Fatal("expected call") } select { case <-calls: t.Fatal("unexpected call") case <-time.After(time.Millisecond): } // Signal that data has changed dataChanged <- struct{}{} // Should be called once more after data changed select { case <-calls: case <-time.After(10 * time.Millisecond): t.Fatal("expected call") } select { case <-calls: t.Fatal("unexpected call") case <-time.After(time.Millisecond): } //Close service ensure not called s.Close() dataChanged <- struct{}{} select { case <-calls: t.Fatal("unexpected call") case <-time.After(time.Millisecond): } close(dataChanged) }
// NewServer returns a new instance of Server built from a config.
// It prepares the meta directory, performs legacy-layout migrations,
// refuses to start against a pre-0.12 raft store, and wires together the
// meta client, TSDB store, subscriber, points writer, query executor,
// and monitor. The returned Server is constructed but not yet opened
// (only the MetaClient has been opened here).
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store. node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes. This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	// Load (or implicitly migrate) the node metadata; a missing file is
	// fine — the node simply hasn't been initialized yet.
	_, err := influxdb.LoadNode(c.Meta.Dir)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	}

	// Check to see if there is a raft db, if so, error out with a message
	// to downgrade, export, and then import the meta data
	raftFile := filepath.Join(c.Meta.Dir, "raft.db")
	if _, err := os.Stat(raftFile); err == nil {
		return nil, fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile)
	}

	// In 0.10.0 bind-address got moved to the top level. Check
	// the old location to keep things backwards compatible.
	bind := c.BindAddress

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		MetaClient: meta.NewClient(c.Meta),

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,

		httpAPIAddr: c.HTTPD.BindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     bind,

		config: c,
	}

	// Open the meta client now so everything wired below can rely on it.
	if err := s.MetaClient.Open(); err != nil {
		return nil, err
	}

	s.TSDBStore = tsdb.NewStore(c.Data.Dir)
	s.TSDBStore.EngineOptions.Config = c.Data

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine

	// Create the Subscriber service
	s.Subscriber = subscriber.NewService(c.Subscriber)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.Subscriber = s.Subscriber

	// Initialize query executor.
	s.QueryExecutor = cluster.NewQueryExecutor()
	s.QueryExecutor.MetaClient = s.MetaClient
	s.QueryExecutor.TSDBStore = s.TSDBStore
	s.QueryExecutor.Monitor = s.Monitor
	s.QueryExecutor.PointsWriter = s.PointsWriter
	s.QueryExecutor.QueryTimeout = time.Duration(c.Cluster.QueryTimeout)
	s.QueryExecutor.QueryManager = influxql.DefaultQueryManager(c.Cluster.MaxConcurrentQueries)
	s.QueryExecutor.MaxSelectPointN = c.Cluster.MaxSelectPointN
	s.QueryExecutor.MaxSelectSeriesN = c.Cluster.MaxSelectSeriesN
	s.QueryExecutor.MaxSelectBucketsN = c.Cluster.MaxSelectBucketsN
	if c.Data.QueryLogEnabled {
		s.QueryExecutor.LogOutput = os.Stderr
	}

	// Initialize the monitor with this build's identification so reported
	// stats carry version/commit information.
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)
	return s, nil
}
func TestService_IgnoreNonMatch(t *testing.T) { dataChanged := make(chan struct{}) ms := MetaClient{} ms.WaitForDataChangedFn = func() chan struct{} { return dataChanged } ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) { return []meta.DatabaseInfo{ { Name: "db0", RetentionPolicies: []meta.RetentionPolicyInfo{ { Name: "rp0", Subscriptions: []meta.SubscriptionInfo{ {Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}}, }, }, }, }, }, nil } prs := make(chan *cluster.WritePointsRequest, 2) urls := make(chan url.URL, 2) newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) { sub := Subscription{} sub.WritePointsFn = func(p *cluster.WritePointsRequest) error { prs <- p return nil } urls <- u return sub, nil } s := subscriber.NewService(subscriber.NewConfig()) s.MetaClient = ms s.NewPointsWriter = newPointsWriter s.Open() defer s.Close() // Signal that data has changed dataChanged <- struct{}{} for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} { var u url.URL expURL, _ := url.Parse(expURLStr) select { case u = <-urls: case <-time.After(10 * time.Millisecond): t.Fatal("expected urls") } if expURL.String() != u.String() { t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String()) } } // Write points that don't match any subscription. s.Points() <- &cluster.WritePointsRequest{ Database: "db1", RetentionPolicy: "rp0", } s.Points() <- &cluster.WritePointsRequest{ Database: "db0", RetentionPolicy: "rp2", } // Shouldn't get any prs back select { case pr := <-prs: t.Fatalf("unexpected points request %v", pr) default: } close(dataChanged) }
func TestService_Multiple(t *testing.T) { dataChanged := make(chan struct{}) ms := MetaClient{} ms.WaitForDataChangedFn = func() chan struct{} { return dataChanged } ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) { return []meta.DatabaseInfo{ { Name: "db0", RetentionPolicies: []meta.RetentionPolicyInfo{ { Name: "rp0", Subscriptions: []meta.SubscriptionInfo{ {Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}}, }, }, { Name: "rp1", Subscriptions: []meta.SubscriptionInfo{ {Name: "s1", Mode: "ALL", Destinations: []string{"udp://h2:9093", "udp://h3:9093"}}, }, }, }, }, }, nil } prs := make(chan *cluster.WritePointsRequest, 4) urls := make(chan url.URL, 4) newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) { sub := Subscription{} sub.WritePointsFn = func(p *cluster.WritePointsRequest) error { prs <- p return nil } urls <- u return sub, nil } s := subscriber.NewService(subscriber.NewConfig()) s.MetaClient = ms s.NewPointsWriter = newPointsWriter s.Open() defer s.Close() // Signal that data has changed dataChanged <- struct{}{} for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093", "udp://h2:9093", "udp://h3:9093"} { var u url.URL expURL, _ := url.Parse(expURLStr) select { case u = <-urls: case <-time.After(10 * time.Millisecond): t.Fatal("expected urls") } if expURL.String() != u.String() { t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String()) } } // Write points that don't match any subscription. 
s.Points() <- &cluster.WritePointsRequest{ Database: "db1", RetentionPolicy: "rp0", } s.Points() <- &cluster.WritePointsRequest{ Database: "db0", RetentionPolicy: "rp2", } // Write points that match subscription with mode ANY expPR := &cluster.WritePointsRequest{ Database: "db0", RetentionPolicy: "rp0", } s.Points() <- expPR // Validate we get the pr back just once var pr *cluster.WritePointsRequest select { case pr = <-prs: case <-time.After(10 * time.Millisecond): t.Fatal("expected points request") } if pr != expPR { t.Errorf("unexpected points request: got %v, exp %v", pr, expPR) } // shouldn't get it a second time select { case pr = <-prs: t.Fatalf("unexpected points request %v", pr) default: } // Write points that match subscription with mode ALL expPR = &cluster.WritePointsRequest{ Database: "db0", RetentionPolicy: "rp1", } s.Points() <- expPR // Should get pr back twice for i := 0; i < 2; i++ { select { case pr = <-prs: case <-time.After(10 * time.Millisecond): t.Fatalf("expected points request: got %d exp 2", i) } if pr != expPR { t.Errorf("unexpected points request: got %v, exp %v", pr, expPR) } } close(dataChanged) }