Example #1
// Ensures that everything works after a host name change. This is
// skipped by default. To enable it, add the hosts foobar and asdf to
// your /etc/hosts file and point both at 127.0.0.1.
func TestMetaService_NameChangeSingleNode(t *testing.T) {
	t.Skip("not enabled")
	t.Parallel()

	cfg := newConfig()
	defer os.RemoveAll(cfg.Dir)
	cfg.BindAddress = "foobar:0"
	cfg.HTTPBindAddress = "foobar:0"
	s := newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c := meta.NewClient([]string{s.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err.Error())
	}

	s.Close()
	time.Sleep(time.Second)

	cfg.BindAddress = "asdf" + ":" + strings.Split(s.RaftAddr(), ":")[1]
	cfg.HTTPBindAddress = "asdf" + ":" + strings.Split(s.HTTPAddr(), ":")[1]
	s = newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s.Close()

	c2 := meta.NewClient([]string{s.HTTPAddr()}, false)
	if err := c2.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c2.Close()

	db, err := c2.Database("foo")
	if db == nil || err != nil {
		t.Fatalf("database foo wasn't found after the name change: db=%v, err=%v", db, err)
	}

	// give the node a moment to re-register under its new name, then verify
	time.Sleep(10 * time.Second)

	nodes, err := c2.MetaNodes()
	if err != nil {
		t.Fatal(err.Error())
	}
	exp := []meta.NodeInfo{{ID: 1, Host: cfg.HTTPBindAddress, TCPHost: cfg.BindAddress}}

	if !reflect.DeepEqual(nodes, exp) {
		t.Fatalf("nodes don't match: %v", nodes)
	}
}
Example #2
// initializeMetaClient will set the MetaClient and join the node to the cluster if needed
func (s *Server) initializeMetaClient() error {
	// if the node ID is > 0 then we just need to initialize the metaclient
	if s.Node.ID > 0 {
		s.MetaClient = meta.NewClient(s.Node.MetaServers, s.metaUseTLS)
		if err := s.MetaClient.Open(); err != nil {
			return err
		}

		go s.updateMetaNodeInformation()

		return nil
	}

	// It's the first time starting up and we need to either join
	// the cluster or initialize this node as the first member
	if len(s.joinPeers) == 0 {
		// start up a new single node cluster
		if s.MetaService == nil {
			return fmt.Errorf("a server that does not join an existing cluster must also run as a meta node")
		}
		s.MetaClient = meta.NewClient([]string{s.MetaService.HTTPAddr()}, s.metaUseTLS)
	} else {
		// join this node to the cluster
		s.MetaClient = meta.NewClient(s.joinPeers, s.metaUseTLS)
	}
	if err := s.MetaClient.Open(); err != nil {
		return err
	}

	if s.TSDBStore != nil {
		n, err := s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr)
		if err != nil {
			return err
		}
		s.Node.ID = n.ID
	}
	metaNodes, err := s.MetaClient.MetaNodes()
	if err != nil {
		return err
	}
	for _, n := range metaNodes {
		s.Node.AddMetaServers([]string{n.Host})
	}

	if err := s.Node.Save(); err != nil {
		return err
	}

	go s.updateMetaNodeInformation()

	return nil
}
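For context, the client lifecycle that initializeMetaClient manages reduces to construct, open, use, close. A minimal sketch; the addresses are placeholders, using what is presumably the default meta HTTP port:

// Minimal sketch: connect to a known set of meta nodes without TLS.
// "meta1:8091" and "meta2:8091" are placeholder addresses.
client := meta.NewClient([]string{"meta1:8091", "meta2:8091"}, false)
if err := client.Open(); err != nil {
	log.Fatal(err)
}
defer client.Close()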
Example #3
func newClient(s *testService) *meta.Client {
	c := meta.NewClient([]string{s.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		panic(err)
	}
	return c
}
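The tests in this listing lean on helpers (newConfig, newService, testService) that aren't shown. A rough sketch of what they could look like, assuming the services/meta API (meta.NewConfig, meta.NewService); the real harness in the influxdb test suite wires up raft listeners and differs in detail:

// Hypothetical helpers, sketched for context only.
func newConfig() *meta.Config {
	cfg := meta.NewConfig()
	cfg.BindAddress = "127.0.0.1:0"     // let the OS pick a free raft port
	cfg.HTTPBindAddress = "127.0.0.1:0" // let the OS pick a free HTTP port
	dir, err := ioutil.TempDir("", "meta-test")
	if err != nil {
		panic(err)
	}
	cfg.Dir = dir
	return cfg
}

// testService presumably embeds meta.Service, which exposes the bound
// HTTPAddr() and RaftAddr() once the service is open.
type testService struct {
	*meta.Service
}

func newService(cfg *meta.Config) *testService {
	return &testService{meta.NewService(cfg)}
}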
Example #4
func TestMetaService_PersistClusterIDAfterRestart(t *testing.T) {
	t.Parallel()

	cfg := newConfig()
	defer os.RemoveAll(cfg.Dir)
	s := newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c := meta.NewClient([]string{s.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	id := c.ClusterID()
	if id == 0 {
		t.Fatal("cluster ID can't be zero")
	}

	c.Close()
	s.Close()
	s = newService(cfg)
	if err := s.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s.Close()

	c = meta.NewClient([]string{s.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	idAfter := c.ClusterID()
	if idAfter == 0 {
		t.Fatal("cluster ID can't be zero")
	} else if idAfter != id {
		t.Fatalf("cluster ID not the same: %d, %d", idAfter, id)
	}
}
Example #5
func TestMetaService_Ping(t *testing.T) {
	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	for i := range cfgs {
		c := newConfig()

		cfgs[i] = c

		if i > 0 {
			c.JoinPeers = []string{srvs[0].HTTPAddr()}
		}
		srvs[i] = newService(c)
		if err := srvs[i].Open(); err != nil {
			t.Fatal(err.Error())
		}
		c.HTTPBindAddress = srvs[i].HTTPAddr()
		c.BindAddress = srvs[i].RaftAddr()
		c.JoinPeers = nil
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}

	c := meta.NewClient([]string{srvs[0].HTTPAddr(), srvs[1].HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	if err := c.Ping(false); err != nil {
		t.Fatal(err.Error())
	}
	if err := c.Ping(true); err != nil {
		t.Fatal(err.Error())
	}

	srvs[1].Close()

	if err := c.Ping(false); err != nil {
		t.Fatal(err.Error())
	}

	if err := c.Ping(true); err == nil {
		t.Fatal("expected error on ping")
	}
}
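Judging by the assertions, the boolean passed to Ping selects how strict the check is: Ping(false) succeeds as long as at least one configured meta node answers, while Ping(true) appears to require every node to respond, which is why it fails once srvs[1] is closed.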
Example #6
// Ensure that if we attempt to create a database and the client
// is pointed at a server that isn't the leader, it automatically
// hits the leader and finishes the command
func TestMetaService_CommandAgainstNonLeader(t *testing.T) {
	t.Parallel()

	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	for i := range cfgs {
		c := newConfig()

		cfgs[i] = c

		if i > 0 {
			c.JoinPeers = []string{srvs[0].HTTPAddr()}
		}
		srvs[i] = newService(c)
		if err := srvs[i].Open(); err != nil {
			t.Fatal(err.Error())
		}
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}

	c := meta.NewClient([]string{srvs[2].HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	metaNodes, err := c.MetaNodes()
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(metaNodes) != 3 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("foo"); db == nil || err != nil {
		t.Fatalf("database foo wasn't created: db=%v, err=%v", db, err)
	}
}
Example #7
// Ensure that the client will fail over to another server if the leader goes
// down. Also ensure that the cluster will come back up successfully after a restart.
func TestMetaService_FailureAndRestartCluster(t *testing.T) {
	t.Parallel()

	cfgs := make([]*meta.Config, 3)
	srvs := make([]*testService, 3)
	for i := range cfgs {
		c := newConfig()

		cfgs[i] = c

		if i > 0 {
			c.JoinPeers = []string{srvs[0].HTTPAddr()}
		}
		srvs[i] = newService(c)
		if err := srvs[i].Open(); err != nil {
			t.Fatal(err.Error())
		}
		c.HTTPBindAddress = srvs[i].HTTPAddr()
		c.BindAddress = srvs[i].RaftAddr()
		c.JoinPeers = nil
		defer srvs[i].Close()
		defer os.RemoveAll(c.Dir)
	}

	c := meta.NewClient([]string{srvs[0].HTTPAddr(), srvs[1].HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	// check that we were assigned a valid cluster ID
	c1ID := c.ClusterID()
	if c1ID == 0 {
		t.Fatalf("invalid cluster id: %d", c1ID)
	}

	if _, err := c.CreateDatabase("foo"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("foo"); db == nil || err != nil {
		t.Fatalf("database foo wasn't created: db=%v, err=%v", db, err)
	}

	if err := srvs[0].Close(); err != nil {
		t.Fatal(err.Error())
	}

	if _, err := c.CreateDatabase("bar"); err != nil {
		t.Fatal(err)
	}

	if db, err := c.Database("bar"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: db=%v, err=%v", db, err)
	}

	if err := srvs[1].Close(); err != nil {
		t.Fatal(err.Error())
	}
	if err := srvs[2].Close(); err != nil {
		t.Fatal(err.Error())
	}

	// give them a second to shut down
	time.Sleep(time.Second)

	// when we start back up, the server opens need to happen simultaneously,
	// otherwise a leader won't get elected
	var wg sync.WaitGroup
	for i, cfg := range cfgs {
		srvs[i] = newService(cfg)
		wg.Add(1)
		go func(srv *testService) {
			if err := srv.Open(); err != nil {
				panic(err)
			}
			wg.Done()
		}(srvs[i])
		defer srvs[i].Close()
	}
	wg.Wait()
	time.Sleep(time.Second)

	c2 := meta.NewClient([]string{srvs[0].HTTPAddr()}, false)
	if err := c2.Open(); err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	c2ID := c2.ClusterID()
	if c1ID != c2ID {
		t.Fatalf("invalid cluster id. got: %d, exp: %d", c2ID, c1ID)
	}

	if db, err := c2.Database("bar"); db == nil || err != nil {
		t.Fatalf("database bar wasn't created: db=%v, err=%v", db, err)
	}

	if _, err := c2.CreateDatabase("asdf"); err != nil {
		t.Fatal(err)
	}

	if db, err := c2.Database("asdf"); db == nil || err != nil {
		t.Fatalf("database asdf wasn't created: db=%v, err=%v", db, err)
	}
}
Example #8
func TestMetaService_CreateRemoveMetaNode(t *testing.T) {
	t.Parallel()

	cfg1 := newConfig()
	defer os.RemoveAll(cfg1.Dir)
	cfg2 := newConfig()
	defer os.RemoveAll(cfg2.Dir)
	cfg3 := newConfig()
	defer os.RemoveAll(cfg3.Dir)
	cfg4 := newConfig()
	defer os.RemoveAll(cfg4.Dir)

	s1 := newService(cfg1)
	if err := s1.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s1.Close()

	cfg2.JoinPeers = []string{s1.HTTPAddr()}
	s2 := newService(cfg2)
	if err := s2.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s2.Close()

	func() {
		cfg3.JoinPeers = []string{s2.HTTPAddr()}
		s3 := newService(cfg3)
		if err := s3.Open(); err != nil {
			t.Fatal(err.Error())
		}
		defer s3.Close()

		c1 := meta.NewClient([]string{s1.HTTPAddr()}, false)
		if err := c1.Open(); err != nil {
			t.Fatal(err.Error())
		}
		defer c1.Close()

		metaNodes, err := c1.MetaNodes()
		if err != nil {
			t.Fatal(err.Error())
		}
		if len(metaNodes) != 3 {
			t.Fatalf("meta nodes wrong: %v", metaNodes)
		}
	}()

	c := meta.NewClient([]string{s1.HTTPAddr()}, false)
	if err := c.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer c.Close()

	if res := c.ExecuteStatement(mustParseStatement("DROP META SERVER 3")); res.Err != nil {
		t.Fatal(res.Err)
	}

	metaNodes, err := c.MetaNodes()
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(metaNodes) != 2 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}

	cfg4.JoinPeers = []string{s1.HTTPAddr()}
	s4 := newService(cfg4)
	if err := s4.Open(); err != nil {
		t.Fatal(err.Error())
	}
	defer s4.Close()

	metaNodes, err = c.MetaNodes()
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(metaNodes) != 3 {
		t.Fatalf("meta nodes wrong: %v", metaNodes)
	}
}
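mustParseStatement is another helper the listing omits; it presumably wraps the influxql parser along these lines:

// Presumed helper: parse a single InfluxQL statement or panic.
func mustParseStatement(s string) influxql.Statement {
	stmt, err := influxql.ParseStatement(s)
	if err != nil {
		panic(err)
	}
	return stmt
}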
Example #9
// unpackMeta reads the metadata from the backup directory, initializes a
// single-node raft cluster, and replaces the root metadata.
func (cmd *Command) unpackMeta() error {
	// find the meta file
	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
	if err != nil {
		return err
	}

	if len(metaFiles) == 0 {
		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
	}

	latest := metaFiles[len(metaFiles)-1]

	fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest)
	// Read the metastore backup
	f, err := os.Open(latest)
	if err != nil {
		return err
	}
	defer f.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, f); err != nil {
		return fmt.Errorf("copy: %s", err)
	}

	b := buf.Bytes()
	var i int

	// Make sure the file is actually a meta store backup file
	magic := btou64(b[:8])
	if magic != snapshotter.BackupMagicHeader {
		return fmt.Errorf("invalid metadata file")
	}
	i += 8

	// Size of the meta store bytes
	length := int(btou64(b[i : i+8]))
	i += 8
	metaBytes := b[i : i+length]
	i += length

	// Size of the node.json bytes
	length = int(btou64(b[i : i+8]))
	i += 8
	nodeBytes := b[i:]

	// Unpack into metadata.
	var data meta.Data
	if err := data.UnmarshalBinary(metaBytes); err != nil {
		return fmt.Errorf("unmarshal: %s", err)
	}

	// Copy meta config and remove peers so it starts in single mode.
	c := cmd.MetaConfig
	c.JoinPeers = nil
	c.LoggingEnabled = false

	// Create the meta dir
	if err := os.MkdirAll(c.Dir, 0700); err != nil {
		return err
	}

	// Write node.json back to meta dir
	if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil {
		return err
	}

	// Initialize meta store.
	store := meta.NewService(c)
	store.RaftListener = newNopListener()

	// Open the meta store.
	if err := store.Open(); err != nil {
		return fmt.Errorf("open store: %s", err)
	}
	defer store.Close()

	// Wait for the store to be ready or error.
	select {
	case err := <-store.Err():
		return err
	default:
	}

	client := meta.NewClient([]string{store.HTTPAddr()}, false)
	client.SetLogger(log.New(ioutil.Discard, "", 0))
	if err := client.Open(); err != nil {
		return err
	}
	defer client.Close()

	// Force set the full metadata.
	if err := client.SetData(&data); err != nil {
		return fmt.Errorf("set data: %s", err)
	}
	return nil
}
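This example also depends on a btou64 helper that isn't shown; given the 8-byte magic and length fields it decodes, it is presumably a big-endian uint64 reader:

// Presumed helper: decode 8 bytes as a big-endian uint64.
func btou64(b []byte) uint64 {
	return binary.BigEndian.Uint64(b)
}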
Example #10
// unpackMeta reads the metadata from the backup directory, initializes a
// single-node raft cluster, and replaces the root metadata.
func (cmd *Command) unpackMeta() error {
	// find the meta file
	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
	if err != nil {
		return err
	}

	if len(metaFiles) == 0 {
		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
	}

	latest := metaFiles[len(metaFiles)-1]

	fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest)
	// Read the metastore backup
	f, err := os.Open(latest)
	if err != nil {
		return err
	}
	defer f.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, f); err != nil {
		return fmt.Errorf("copy: %s", err)
	}

	// Unpack into metadata.
	var data meta.Data
	if err := data.UnmarshalBinary(buf.Bytes()); err != nil {
		return fmt.Errorf("unmarshal: %s", err)
	}

	// Copy meta config and remove peers so it starts in single mode.
	c := cmd.MetaConfig
	c.JoinPeers = nil
	c.LoggingEnabled = false

	// Initialize meta store.
	store := meta.NewService(c)
	store.RaftListener = newNopListener()

	// Open the meta store.
	if err := store.Open(); err != nil {
		return fmt.Errorf("open store: %s", err)
	}
	defer store.Close()

	// Wait for the store to be ready or error.
	select {
	case err := <-store.Err():
		return err
	default:
	}

	client := meta.NewClient([]string{store.HTTPAddr()}, false)
	client.SetLogger(log.New(ioutil.Discard, "", 0))
	if err := client.Open(); err != nil {
		return err
	}
	defer client.Close()

	// Force set the full metadata.
	if err := client.SetData(&data); err != nil {
		return fmt.Errorf("set data: %s", err)
	}
	return nil
}