Example #1
func TestMetaClient_PersistClusterIDAfterRestart(t *testing.T) {
	t.Parallel()

	cfg := newConfig()
	defer os.RemoveAll(cfg.Dir)

	c := meta.NewClient(cfg)
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	id := c.ClusterID()
	if id == 0 {
		t.Fatal("cluster ID can't be zero")
	}

	c = meta.NewClient(cfg)
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	idAfter := c.ClusterID()
	if idAfter == 0 {
		t.Fatal("cluster ID can't be zero")
	} else if idAfter != id {
		t.Fatalf("cluster id not the same: %d, %d", idAfter, id)
	}
}
Example #2
// Ensures that everything works after a host name change. This is
// skipped by default. To enable it, add hosts foobar and asdf to your
// /etc/hosts file and point them to 127.0.0.1.
func TestMetaService_NameChangeSingleNode(t *testing.T) {
	t.Skip("not enabled")
	t.Parallel()

	cfg := newConfig()
	defer os.RemoveAll(cfg.Dir)
	cfg.BindAddress = "foobar:0"
	cfg.HTTPBindAddress = "foobar:0"
	s := newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c := meta.NewClient()
	c.SetMetaServers([]string{s.HTTPAddr()})
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}

	s.Close()
	time.Sleep(time.Second)

	cfg.BindAddress = "asdf" + ":" + strings.Split(s.RaftAddr(), ":")[1]
	cfg.HTTPBindAddress = "asdf" + ":" + strings.Split(s.HTTPAddr(), ":")[1]
	s = newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c2 := meta.NewClient()
	c2.SetMetaServers([]string{s.HTTPAddr()})
	if err := c2.Open(); err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	db, err := c2.Database("foo")
	if db == nil || err != nil {
		t.Fatal(err)
	}

	nodes, err := c2.MetaNodes()
	if err != nil {
		t.Fatal(err)
	}
	exp := []meta.NodeInfo{{ID: 1, Host: cfg.HTTPBindAddress, TCPHost: cfg.BindAddress}}

	time.Sleep(10 * time.Second)
	if !reflect.DeepEqual(nodes, exp) {
		t.Fatalf("nodes don't match: %v", nodes)
	}
}
Example #3
// initializeMetaClient will set the MetaClient and join the node to the cluster if needed
func (s *Server) initializeMetaClient() error {
	// if the node ID is > 0 then we just need to initialize the metaclient
	if s.Node.ID > 0 {
		s.MetaClient = meta.NewClient(s.Node.MetaServers, s.metaUseTLS)
		if err := s.MetaClient.Open(); err != nil {
			return err
		}

		go s.updateMetaNodeInformation()

		s.MetaClient.WaitForDataChanged()

		return nil
	}

	// It's the first time starting up and we need to either join
	// the cluster or initialize this node as the first member
	if len(s.joinPeers) == 0 {
		// start up a new single node cluster
		if s.MetaService == nil {
			return fmt.Errorf("server not set to join existing cluster must run also as a meta node")
		}
		s.MetaClient = meta.NewClient([]string{s.MetaService.HTTPAddr()}, s.metaUseTLS)
	} else {
		// join this node to the cluster
		s.MetaClient = meta.NewClient(s.joinPeers, s.metaUseTLS)
	}
	if err := s.MetaClient.Open(); err != nil {
		return err
	}
	n, err := s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr)
	for err != nil {
		log.Printf("Unable to create data node. retry in 1s: %s", err.Error())
		time.Sleep(time.Second)
		n, err = s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr)
	}
	s.Node.ID = n.ID

	metaNodes, err := s.MetaClient.MetaNodes()
	if err != nil {
		return err
	}
	for _, n := range metaNodes {
		s.Node.AddMetaServers([]string{n.Host})
	}

	if err := s.Node.Save(); err != nil {
		return err
	}

	go s.updateMetaNodeInformation()

	return nil
}
Example #4
func newClient(s *testService) *meta.Client {
	c := meta.NewClient([]string{s.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		panic(err)
	}
	return c
}
Example #5
func newClient() (string, *meta.Client) {
	cfg := newConfig()
	c := meta.NewClient(cfg)
	if err := c.Open(); err != nil {
		panic(err)
	}
	return cfg.Dir, c
}
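A caller of this helper owns cleanup of both the returned directory and the client; a minimal sketch of such a test (the test name is illustrative), following the pattern of Example #1, might look like this:

func TestExample_ClusterIDAssigned(t *testing.T) {
	dir, c := newClient()
	defer os.RemoveAll(dir)
	defer c.Close()

	if c.ClusterID() == 0 {
		t.Fatal("cluster ID can't be zero")
	}
}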
Example #6
func newClient(s *testService) *meta.Client {
	c := meta.NewClient()
	c.SetMetaServers([]string{s.HTTPAddr()})
	if err := c.Open(); err != nil {
		panic(err)
	}
	return c
}
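Combined with the newConfig and newService helpers used elsewhere in these examples, a test would typically wire this helper up roughly as follows (the test name and database name are illustrative):

func TestExample_CreateDatabase(t *testing.T) {
	cfg := newConfig()
	defer os.RemoveAll(cfg.Dir)

	s := newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c := newClient(s)
	defer c.Close()

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}
}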
Example #7
func TestMetaService_PersistClusterIDAfterRestart(t *testing.T) {
	t.Parallel()

	cfg := newConfig()
	defer os.RemoveAll(cfg.Dir)
	s := newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c := meta.NewClient()
	c.SetMetaServers([]string{s.HTTPAddr()})
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	id := c.ClusterID()
	if id == 0 {
		t.Fatal("cluster ID can't be zero")
	}

	s.Close()
	s = newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}

	c = meta.NewClient()
	c.SetMetaServers([]string{s.HTTPAddr()})
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	idAfter := c.ClusterID()
	if idAfter == 0 {
		t.Fatal("cluster ID can't be zero")
	} else if idAfter != id {
		t.Fatalf("cluster id not the same: %d, %d", idAfter, id)
	}
}
Example #8
func TestMetaService_Ping(t *testing.T) {
	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	joinPeers := freePorts(len(cfgs))

	var swg sync.WaitGroup
	swg.Add(len(cfgs))

	for i := range cfgs {
		c := newConfig()
		c.HTTPBindAddress = joinPeers[i]
		c.JoinPeers = joinPeers
		cfgs[i] = c

		srvs[i] = newService(c)
		go func(i int, srv *testService) {
			defer swg.Done()
			if err := srv.Open(); err != nil {
				t.Fatalf("error opening server %d: %s", i, err)
			}
		}(i, srvs[i])
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}
	swg.Wait()

	c := meta.NewClient()
	c.SetMetaServers(joinPeers)
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	if err := c.Ping(false); err != nil {
		t.Fatalf("ping false all failed: %s", err)
	}
	if err := c.Ping(true); err != nil {
		t.Fatalf("ping false true failed: %s", err)
	}

	srvs[1].Close()
	// give the server time to close
	time.Sleep(time.Second)

	if err := c.Ping(false); err != nil {
		t.Fatalf("ping false some failed: %s", err)
	}

	if err := c.Ping(true); err == nil {
		t.Fatal("expected error on ping")
	}
}
Example #9
// Ensure that if we attempt to create a database and the client
// is pointed at a server that isn't the leader, it automatically
// hits the leader and finishes the command
func TestMetaService_CommandAgainstNonLeader(t *testing.T) {
	t.Parallel()

	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	joinPeers := freePorts(len(cfgs))

	var wg sync.WaitGroup
	wg.Add(len(cfgs))

	for i := range cfgs {
		c := newConfig()
		c.HTTPBindAddress = joinPeers[i]
		c.JoinPeers = joinPeers
		cfgs[i] = c

		srvs[i] = newService(c)
		go func(srv *testService) {
			defer wg.Done()
			if err := srv.Open(); err != nil {
				t.Error(err)
			}
		}(srvs[i])
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}
	wg.Wait()

	for i := range cfgs {
		c := meta.NewClient()
		c.SetMetaServers([]string{joinPeers[i]})
		if err := c.Open(); err != nil {
			t.Fatal(err)
		}
		defer c.Close()

		metaNodes, _ := c.MetaNodes()
		if len(metaNodes) != 3 {
			t.Fatalf("node %d - meta nodes wrong: %v", i, metaNodes)
		}

		if _, err := c.CreateDatabase(fmt.Sprintf("foo%d", i)); err != nil {
			t.Fatalf("node %d: %s", i, err)
		}

		if db, err := c.Database(fmt.Sprintf("foo%d", i)); db == nil || err != nil {
			t.Fatalf("node %d: database foo wasn't created: %s", i, err)
		}
	}
}
Example #10
func TestMetaService_Ping(t *testing.T) {
	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	for i := range cfgs {
		c := newConfig()

		cfgs[i] = c

		if i > 0 {
			c.JoinPeers = []string{srvs[0].HTTPAddr()}
		}
		srvs[i] = newService(c)
		if err := srvs[i].Open(); err != nil {
			t.Fatal(err.Error())
		}
		c.HTTPBindAddress = srvs[i].HTTPAddr()
		c.BindAddress = srvs[i].RaftAddr()
		c.JoinPeers = nil
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}

	c := meta.NewClient([]string{srvs[0].HTTPAddr(), srvs[1].HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	if err := c.Ping(false); err != nil {
		t.Fatal(err.Error())
	}
	if err := c.Ping(true); err != nil {
		t.Fatal(err.Error())
	}

	srvs[1].Close()

	if err := c.Ping(false); err != nil {
		t.Fatal(err.Error())
	}

	if err := c.Ping(true); err == nil {
		t.Fatal("expected error on ping")
	}
}
Example #11
// Ensure that if we attempt to create a database and the client
// is pointed at a server that isn't the leader, it automatically
// hits the leader and finishes the command
func TestMetaService_CommandAgainstNonLeader(t *testing.T) {
	t.Parallel()

	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	for i := range cfgs {
		c := newConfig()

		cfgs[i] = c

		if i > 0 {
			c.JoinPeers = []string{srvs[0].HTTPAddr()}
		}
		srvs[i] = newService(c)
		if err := srvs[i].Open(); err != nil {
			t.Fatal(err.Error())
		}
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}

	c := meta.NewClient([]string{srvs[2].HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	metaNodes, _ := c.MetaNodes()
	if len(metaNodes) != 3 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("foo"); db == nil || err != nil {
		t.Fatalf("database foo wasn't created: %s", err.Error())
	}
}
Example #12
// Ensure that the client will fail over to another server if the leader goes
// down. Also ensure that the cluster will come back up successfully after restart
func TestMetaService_FailureAndRestartCluster(t *testing.T) {
	t.Parallel()

	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	joinPeers := freePorts(len(cfgs))
	raftPeers := freePorts(len(cfgs))

	var swg sync.WaitGroup
	swg.Add(len(cfgs))
	for i := range cfgs {
		c := newConfig()
		c.HTTPBindAddress = joinPeers[i]
		c.BindAddress = raftPeers[i]
		c.JoinPeers = joinPeers
		cfgs[i] = c

		srvs[i] = newService(c)
		go func(i int, srv *testService) {
			defer swg.Done()
			if err := srv.Open(); err != nil {
				t.Logf("opening server %d", i)
				t.Fatal(err)
			}
		}(i, srvs[i])

		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}
	swg.Wait()

	c := meta.NewClient()
	c.SetMetaServers(joinPeers)
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// check to see we were assigned a valid clusterID
	c1ID := c.ClusterID()
	if c1ID == 0 {
		t.Fatalf("invalid cluster id: %d", c1ID)
	}

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("foo"); db == nil || err != nil {
		t.Fatalf("database foo wasn't created: %s", err)
	}

	if err := srvs[0].Close(); err != nil {
		t.Fatal(err)
	}

	if _, err := c.CreateDatabase("bar"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("bar"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: %s", err)
	}

	if err := srvs[1].Close(); err != nil {
		t.Fatal(err)
	}
	if err := srvs[2].Close(); err != nil {
		t.Fatal(err)
	}

	// give them a second to shut down
	time.Sleep(time.Second)

	// need to start them all at once so they can discover the bind addresses for raft
	var wg sync.WaitGroup
	wg.Add(len(cfgs))
	for i, cfg := range cfgs {
		srvs[i] = newService(cfg)
		go func(srv *testService) {
			if err := srv.Open(); err != nil {
				panic(err)
			}
			wg.Done()
		}(srvs[i])
		defer srvs[i].Close()
	}
	wg.Wait()
	time.Sleep(time.Second)

	c2 := meta.NewClient()
	c2.SetMetaServers(joinPeers)
	if err := c2.Open(); err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	c2ID := c2.ClusterID()
	if c1ID != c2ID {
		t.Fatalf("invalid cluster id. got: %d, exp: %d", c2ID, c1ID)
	}

	if db, err := c2.Database("bar"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: %s", err)
	}

	if _, err := c2.CreateDatabase("asdf"); err != nil {
		t.Fatal(err)
	}

	if db, err := c2.Database("asdf"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: %s", err)
	}
}
Example #13
func TestMetaService_CreateRemoveMetaNode(t *testing.T) {
	t.Parallel()

	joinPeers := freePorts(4)
	raftPeers := freePorts(4)

	cfg1 := newConfig()
	cfg1.HTTPBindAddress = joinPeers[0]
	cfg1.BindAddress = raftPeers[0]
	defer os.RemoveAll(cfg1.Dir)
	cfg2 := newConfig()
	cfg2.HTTPBindAddress = joinPeers[1]
	cfg2.BindAddress = raftPeers[1]
	defer os.RemoveAll(cfg2.Dir)

	var wg sync.WaitGroup
	wg.Add(2)
	cfg1.JoinPeers = joinPeers[0:2]
	s1 := newService(cfg1)
	go func() {
		defer wg.Done()
		if err := s1.Open(); err != nil {
			t.Error(err)
		}
	}()
	defer s1.Close()

	cfg2.JoinPeers = joinPeers[0:2]
	s2 := newService(cfg2)
	go func() {
		defer wg.Done()
		if err := s2.Open(); err != nil {
			t.Error(err)
		}
	}()
	defer s2.Close()
	wg.Wait()

	cfg3 := newConfig()
	joinPeers[2] = freePort()
	cfg3.HTTPBindAddress = joinPeers[2]
	raftPeers[2] = freePort()
	cfg3.BindAddress = raftPeers[2]
	defer os.RemoveAll(cfg3.Dir)

	cfg3.JoinPeers = joinPeers[0:3]
	s3 := newService(cfg3)
	if err := s3.Open(); err != nil {
		t.Fatal(err)
	}
	defer s3.Close()

	c1 := meta.NewClient()
	c1.SetMetaServers(joinPeers[0:3])
	if err := c1.Open(); err != nil {
		t.Fatal(err)
	}
	defer c1.Close()

	metaNodes, _ := c1.MetaNodes()
	if len(metaNodes) != 3 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}

	c := meta.NewClient()
	c.SetMetaServers([]string{s1.HTTPAddr()})
	if err := c.Open(); err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	if err := c.DeleteMetaNode(3); err != nil {
		t.Fatal(err)
	}

	metaNodes, _ = c.MetaNodes()
	if len(metaNodes) != 2 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}

	cfg4 := newConfig()
	cfg4.HTTPBindAddress = freePort()
	cfg4.BindAddress = freePort()
	cfg4.JoinPeers = []string{joinPeers[0], joinPeers[1], cfg4.HTTPBindAddress}
	defer os.RemoveAll(cfg4.Dir)
	s4 := newService(cfg4)
	if err := s4.Open(); err != nil {
		t.Fatal(err)
	}
	defer s4.Close()

	c2 := meta.NewClient()
	c2.SetMetaServers(cfg4.JoinPeers)
	if err := c2.Open(); err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	metaNodes, _ = c2.MetaNodes()
	if len(metaNodes) != 3 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}
}
Example #14
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store.  node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes.  This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	_, err := influxdb.LoadNode(c.Meta.Dir)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	}

	// Check to see if there is a raft db, if so, error out with a message
	// to downgrade, export, and then import the meta data
	raftFile := filepath.Join(c.Meta.Dir, "raft.db")
	if _, err := os.Stat(raftFile); err == nil {
		return nil, fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile)
	}

	// In 0.10.0 bind-address got moved to the top level. Check
	// the old location to keep things backwards compatible.
	bind := c.BindAddress

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		MetaClient: meta.NewClient(c.Meta),

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,

		httpAPIAddr: c.HTTPD.BindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     bind,

		config: c,
	}

	if err := s.MetaClient.Open(); err != nil {
		return nil, err
	}

	s.TSDBStore = tsdb.NewStore(c.Data.Dir)
	s.TSDBStore.EngineOptions.Config = c.Data

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine

	// Create the Subscriber service
	s.Subscriber = subscriber.NewService(c.Subscriber)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.Subscriber = s.Subscriber

	// Initialize query executor.
	s.QueryExecutor = influxql.NewQueryExecutor()
	s.QueryExecutor.StatementExecutor = &cluster.StatementExecutor{
		MetaClient:        s.MetaClient,
		TSDBStore:         cluster.LocalTSDBStore{Store: s.TSDBStore},
		Monitor:           s.Monitor,
		PointsWriter:      s.PointsWriter,
		MaxSelectPointN:   c.Cluster.MaxSelectPointN,
		MaxSelectSeriesN:  c.Cluster.MaxSelectSeriesN,
		MaxSelectBucketsN: c.Cluster.MaxSelectBucketsN,
	}
	s.QueryExecutor.QueryTimeout = time.Duration(c.Cluster.QueryTimeout)
	s.QueryExecutor.MaxConcurrentQueries = c.Cluster.MaxConcurrentQueries
	if c.Data.QueryLogEnabled {
		s.QueryExecutor.LogOutput = os.Stderr
	}

	// Initialize the monitor
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)

	return s, nil
}
Example #15
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store.  node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes.  This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	_, err := influxdb.LoadNode(c.Meta.Dir)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	}

	// In 0.10.0 bind-address got moved to the top level. Check
	// the old location to keep things backwards compatible.
	bind := c.BindAddress

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		MetaClient: meta.NewClient(c.Meta),

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,

		httpAPIAddr: c.HTTPD.BindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     bind,

		config: c,
	}

	if err := s.MetaClient.Open(); err != nil {
		return nil, err
	}

	s.TSDBStore = tsdb.NewStore(c.Data.Dir)
	s.TSDBStore.EngineOptions.Config = c.Data

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
	s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
	s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

	// Create the Subscriber service
	s.Subscriber = subscriber.NewService(c.Subscriber)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.Subscriber = s.Subscriber

	// Initialize query executor.
	s.QueryExecutor = cluster.NewQueryExecutor()
	s.QueryExecutor.MetaClient = s.MetaClient
	s.QueryExecutor.TSDBStore = s.TSDBStore
	s.QueryExecutor.Monitor = s.Monitor
	s.QueryExecutor.PointsWriter = s.PointsWriter
	if c.Data.QueryLogEnabled {
		s.QueryExecutor.LogOutput = os.Stderr
	}

	// Initialize the monitor
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)

	return s, nil
}
Example #16
func TestMetaService_CreateRemoveMetaNode(t *testing.T) {
	t.Parallel()

	cfg1 := newConfig()
	defer os.RemoveAll(cfg1.Dir)
	cfg2 := newConfig()
	defer os.RemoveAll(cfg2.Dir)
	cfg3 := newConfig()
	defer os.RemoveAll(cfg3.Dir)
	cfg4 := newConfig()
	defer os.RemoveAll(cfg4.Dir)

	s1 := newService(cfg1)
	if err := s1.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s1.Close()

	cfg2.JoinPeers = []string{s1.HTTPAddr()}
	s2 := newService(cfg2)
	if err := s2.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s2.Close()

	func() {
		cfg3.JoinPeers = []string{s2.HTTPAddr()}
		s3 := newService(cfg3)
		if err := s3.Open(); err != nil {
			t.Fatal(err.Error())
		}
		defer s3.Close()

		c1 := meta.NewClient([]string{s1.HTTPAddr()}, false)
		if err := c1.Open(); err != nil {
			t.Fatal(err.Error())
		}
		defer c1.Close()

		metaNodes, _ := c1.MetaNodes()
		if len(metaNodes) != 3 {
			t.Fatalf("meta nodes wrong: %v", metaNodes)
		}
	}()

	c := meta.NewClient([]string{s1.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	if res := c.ExecuteStatement(mustParseStatement("DROP META SERVER 3")); res.Err != nil {
		t.Fatal(res.Err)
	}

	metaNodes, _ := c.MetaNodes()
	if len(metaNodes) != 2 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}

	cfg4.JoinPeers = []string{s1.HTTPAddr()}
	s4 := newService(cfg4)
	if err := s4.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s4.Close()

	metaNodes, _ = c.MetaNodes()
	if len(metaNodes) != 3 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}
}
Example #17
// unpackMeta reads the metadata from the backup directory and initializes a raft
// cluster and replaces the root metadata.
func (cmd *Command) unpackMeta() error {
	// find the meta file
	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
	if err != nil {
		return err
	}

	if len(metaFiles) == 0 {
		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
	}

	latest := metaFiles[len(metaFiles)-1]

	fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest)
	// Read the metastore backup
	f, err := os.Open(latest)
	if err != nil {
		return err
	}
	defer f.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, f); err != nil {
		return fmt.Errorf("copy: %s", err)
	}

	b := buf.Bytes()
	var i int

	// Make sure the file is actually a meta store backup file
	magic := btou64(b[:8])
	if magic != snapshotter.BackupMagicHeader {
		return fmt.Errorf("invalid metadata file")
	}
	i += 8

	// Size of the meta store bytes
	length := int(btou64(b[i : i+8]))
	i += 8
	metaBytes := b[i : i+length]
	i += int(length)

	// Size of the node.json bytes
	length = int(btou64(b[i : i+8]))
	i += 8
	nodeBytes := b[i:]

	// Unpack into metadata.
	var data meta.Data
	if err := data.UnmarshalBinary(metaBytes); err != nil {
		return fmt.Errorf("unmarshal: %s", err)
	}

	// Copy meta config and remove peers so it starts in single mode.
	c := cmd.MetaConfig
	c.JoinPeers = nil
	c.LoggingEnabled = false

	// Create the meta dir
	if err := os.MkdirAll(c.Dir, 0700); err != nil {
		return err
	}

	// Write node.json back to meta dir
	if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil {
		return err
	}

	// Initialize meta store.
	store := meta.NewService(c)
	store.RaftListener = newNopListener()

	// Open the meta store.
	if err := store.Open(); err != nil {
		return fmt.Errorf("open store: %s", err)
	}
	defer store.Close()

	// Wait for the store to be ready or error.
	select {
	case err := <-store.Err():
		return err
	default:
	}

	client := meta.NewClient([]string{store.HTTPAddr()}, false)
	client.SetLogger(log.New(ioutil.Discard, "", 0))
	if err := client.Open(); err != nil {
		return err
	}
	defer client.Close()

	// Force set the full metadata.
	if err := client.SetData(&data); err != nil {
		return fmt.Errorf("set data: %s", err)
	}
	return nil
}
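The btou64 helper used above is not part of this snippet; judging by the later unpackMeta variant in Example #20, which calls binary.BigEndian.Uint64 directly, it is presumably a thin wrapper along these lines:

// btou64 decodes an 8-byte big-endian slice into a uint64 (assumed helper).
func btou64(b []byte) uint64 {
	return binary.BigEndian.Uint64(b)
}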
Example #18
// Ensure that the client will fail over to another server if the leader goes
// down. Also ensure that the cluster will come back up successfully after restart
func TestMetaService_FailureAndRestartCluster(t *testing.T) {
	t.Parallel()

	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	for i := range cfgs {
		c := newConfig()

		cfgs[i] = c

		if i > 0 {
			c.JoinPeers = []string{srvs[0].HTTPAddr()}
		}
		srvs[i] = newService(c)
		if err := srvs[i].Open(); err != nil {
			t.Fatal(err.Error())
		}
		c.HTTPBindAddress = srvs[i].HTTPAddr()
		c.BindAddress = srvs[i].RaftAddr()
		c.JoinPeers = nil
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}

	c := meta.NewClient([]string{srvs[0].HTTPAddr(), srvs[1].HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	// check to see we were assigned a valid clusterID
	c1ID := c.ClusterID()
	if c1ID == 0 {
		t.Fatalf("invalid cluster id: %d", c1ID)
	}

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("foo"); db == nil || err != nil {
		t.Fatalf("database foo wasn't created: %s", err.Error())
	}

	if err := srvs[0].Close(); err != nil {
		t.Fatal(err.Error())
	}

	if _, err := c.CreateDatabase("bar"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("bar"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: %s", err.Error())
	}

	if err := srvs[1].Close(); err != nil {
		t.Fatal(err.Error())
	}
	if err := srvs[2].Close(); err != nil {
		t.Fatal(err.Error())
	}

	// give them a second to shut down
	time.Sleep(time.Second)

	// when we start back up they need to happen simultaneously, otherwise
	// a leader won't get elected
	var wg sync.WaitGroup
	for i, cfg := range cfgs {
		srvs[i] = newService(cfg)
		wg.Add(1)
		go func(srv *testService) {
			if err := srv.Open(); err != nil {
				panic(err)
			}
			wg.Done()
		}(srvs[i])
		defer srvs[i].Close()
	}
	wg.Wait()
	time.Sleep(time.Second)

	c2 := meta.NewClient([]string{srvs[0].HTTPAddr()}, false)
	if err := c2.Open(); err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	c2ID := c2.ClusterID()
	if c1ID != c2ID {
		t.Fatalf("invalid cluster id. got: %d, exp: %d", c2ID, c1ID)
	}

	if db, err := c2.Database("bar"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: %s", err.Error())
	}

	if _, err := c2.CreateDatabase("asdf"); err != nil {
		t.Fatal(err)
	}

	if db, err := c2.Database("asdf"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: %s", err.Error())
	}
}
Example #19
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store.  node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes.  This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	_, err := influxdb.LoadNode(c.Meta.Dir)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if err := raftDBExists(c.Meta.Dir); err != nil {
		return nil, err
	}

	// In 0.10.0 bind-address got moved to the top level. Check
	// the old location to keep things backwards compatible.
	bind := c.BindAddress

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Logger: zap.New(
			zap.NewTextEncoder(),
			zap.Output(os.Stderr),
		),

		MetaClient: meta.NewClient(c.Meta),

		reportingDisabled: c.ReportingDisabled,

		httpAPIAddr: c.HTTPD.BindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     bind,

		config: c,
	}
	s.Monitor = monitor.New(s, c.Monitor)

	if err := s.MetaClient.Open(); err != nil {
		return nil, err
	}

	s.TSDBStore = tsdb.NewStore(c.Data.Dir)
	s.TSDBStore.EngineOptions.Config = c.Data

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine

	// Create the Subscriber service
	s.Subscriber = subscriber.NewService(c.Subscriber)

	// Initialize points writer.
	s.PointsWriter = coordinator.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Coordinator.WriteTimeout)
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.Subscriber = s.Subscriber

	// Initialize query executor.
	s.QueryExecutor = influxql.NewQueryExecutor()
	s.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{
		MetaClient:        s.MetaClient,
		TaskManager:       s.QueryExecutor.TaskManager,
		TSDBStore:         coordinator.LocalTSDBStore{Store: s.TSDBStore},
		Monitor:           s.Monitor,
		PointsWriter:      s.PointsWriter,
		MaxSelectPointN:   c.Coordinator.MaxSelectPointN,
		MaxSelectSeriesN:  c.Coordinator.MaxSelectSeriesN,
		MaxSelectBucketsN: c.Coordinator.MaxSelectBucketsN,
	}
	s.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout)
	s.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter)
	s.QueryExecutor.TaskManager.MaxConcurrentQueries = c.Coordinator.MaxConcurrentQueries

	// Initialize the monitor
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)
	return s, nil
}
Example #20
// unpackMeta reads the metadata from the backup directory and initializes a raft
// cluster and replaces the root metadata.
func (cmd *Command) unpackMeta() error {
	// find the meta file
	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
	if err != nil {
		return err
	}

	if len(metaFiles) == 0 {
		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
	}

	latest := metaFiles[len(metaFiles)-1]

	fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest)
	// Read the metastore backup
	f, err := os.Open(latest)
	if err != nil {
		return err
	}
	defer f.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, f); err != nil {
		return fmt.Errorf("copy: %s", err)
	}

	b := buf.Bytes()
	var i int

	// Make sure the file is actually a meta store backup file
	magic := binary.BigEndian.Uint64(b[:8])
	if magic != snapshotter.BackupMagicHeader {
		return fmt.Errorf("invalid metadata file")
	}
	i += 8

	// Size of the meta store bytes
	length := int(binary.BigEndian.Uint64(b[i : i+8]))
	i += 8
	metaBytes := b[i : i+length]
	i += int(length)

	// Size of the node.json bytes
	length = int(binary.BigEndian.Uint64(b[i : i+8]))
	i += 8
	nodeBytes := b[i:]

	// Unpack into metadata.
	var data meta.Data
	if err := data.UnmarshalBinary(metaBytes); err != nil {
		return fmt.Errorf("unmarshal: %s", err)
	}

	// Copy the meta config and point it at the restore directory.
	c := cmd.MetaConfig
	c.Dir = cmd.metadir

	// Create the meta dir
	if err := os.MkdirAll(c.Dir, 0700); err != nil {
		return err
	}

	// Write node.json back to meta dir
	if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil {
		return err
	}

	client := meta.NewClient(c)
	client.SetLogger(log.New(ioutil.Discard, "", 0))
	if err := client.Open(); err != nil {
		return err
	}
	defer client.Close()

	// Force set the full metadata.
	if err := client.SetData(&data); err != nil {
		return fmt.Errorf("set data: %s", err)
	}

	// remove the raft.db file if it exists
	err = os.Remove(filepath.Join(cmd.metadir, "raft.db"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	// remove the node.json file if it exists
	err = os.Remove(filepath.Join(cmd.metadir, "node.json"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	return nil
}