Example 1
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, version string) (*Server, error) {
	// Construct base meta store and data store.
	s := &Server{
		version: version,
		err:     make(chan error),
		closing: make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		TSDBStore: tsdb.NewStore(c.Data.Dir),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.TSDBStore.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)

	// Initialize query executor.
	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}

	// Set the shard writer
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.MetaStore = s.MetaStore
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendSnapshotterService()
	s.appendAdminService(c.Admin)
	s.appendContinuousQueryService(c.ContinuousQuery)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	s.appendUDPService(c.UDP)
	s.appendRetentionPolicyService(c.Retention)
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	return s, nil
}
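A caller only needs a parsed *Config and a version string to build the server. The following is a minimal, hypothetical caller sketch (not part of the original file), assuming it lives in the same package with fmt imported; the version string is a placeholder:

func buildServer(c *Config) (*Server, error) {
	// NewServer wires up the stores, query executor, writers, and services
	// from the config; the caller typically opens the server afterwards.
	s, err := NewServer(c, "0.9.0") // placeholder version string
	if err != nil {
		return nil, fmt.Errorf("create server: %s", err)
	}
	return s, nil
}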
Example 2
func TestPointsWriter_WritePoints(t *testing.T) {
	tests := []struct {
		name            string
		database        string
		retentionPolicy string
		consistency     cluster.ConsistencyLevel

		// The error each shard write call should return; node ID 1 maps to index 0.
		err    []error
		expErr error
	}{
		// Consistency one
		{
			name:            "write one success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write one error",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Consistency any
		{
			name:            "write any success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		// Consistency all
		{
			name:            "write all success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write all, 2/3, partial write",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write all, 1/3 (failure)",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          cluster.ErrPartialWrite,
		},

		// Consistency quorum
		{
			name:            "write quorum, 1/3 failure",
			consistency:     cluster.ConsistencyLevelQuorum,
			database:        "mydb",
			retentionPolicy: "myrp",
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write quorum, 2/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		{
			name:            "write quorum, 3/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},

		// All shard writes fail
		{
			name:            "no writes succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Hinted handoff w/ ANY
		{
			name:            "hinted handoff write succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          nil,
		},

		// Write to non-existent database
		{
			name:            "write to non-existent database",
			database:        "doesnt_exist",
			retentionPolicy: "",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{nil, nil, nil},
			expErr:          fmt.Errorf("database not found: doesnt_exist"),
		},
	}

	for _, test := range tests {

		pr := &cluster.WritePointsRequest{
			Database:         test.database,
			RetentionPolicy:  test.retentionPolicy,
			ConsistencyLevel: test.consistency,
		}

		// Three points that range over the shardGroup duration (1h) and should map to two
		// distinct shards
		pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil)
		pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
		pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

		// Copy the test case to avoid a data race with the concurrent write callbacks below.
		theTest := test
		sm := cluster.NewShardMapping()
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{
				{NodeID: 1},
				{NodeID: 2},
				{NodeID: 3},
			}},
			pr.Points[0])
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{
				{NodeID: 1},
				{NodeID: 2},
				{NodeID: 3},
			}},
			pr.Points[1])
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{
				{NodeID: 1},
				{NodeID: 2},
				{NodeID: 3},
			}},
			pr.Points[2])

		// Fake ShardWriter for the local cluster.Node.
		// Guard the injected callbacks with a mutex since they are invoked concurrently.
		var mu sync.Mutex
		sw := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error {
				mu.Lock()
				defer mu.Unlock()
				return theTest.err[int(nodeID)-1]
			},
		}

		store := &fakeStore{
			WriteFn: func(shardID uint64, points []models.Point) error {
				mu.Lock()
				defer mu.Unlock()
				return theTest.err[0]
			},
		}

		hh := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error {
				return nil
			},
		}

		ms := NewMetaClient()
		ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) {
			return nil, nil
		}
		ms.NodeIDFn = func() uint64 { return 1 }

		subPoints := make(chan *cluster.WritePointsRequest, 1)
		sub := Subscriber{}
		sub.PointsFn = func() chan<- *cluster.WritePointsRequest {
			return subPoints
		}

		c := cluster.NewPointsWriter()
		c.MetaClient = ms
		c.ShardWriter = sw
		c.TSDBStore = store
		c.HintedHandoff = hh
		c.Subscriber = sub
		c.Node = &influxdb.Node{ID: 1}

		c.Open()
		defer c.Close()

		err := c.WritePoints(pr)
		if err == nil && test.expErr != nil {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}

		if err != nil && test.expErr == nil {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
		if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
		if test.expErr == nil {
			select {
			case p := <-subPoints:
				if p != pr {
					t.Errorf("PointsWriter.WritePoints(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr)
				}
			default:
				t.Errorf("PointsWriter.WritePoints(): '%s' error: Subscriber.Points not called", test.name)
			}
		}
	}
}
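The fakes used above (fakeShardWriter, fakeStore, NewMetaClient, Subscriber) are defined elsewhere in the test package. Below is a minimal sketch of the two write fakes to show how the injected function fields plug in; the method names (WriteShard, WriteToShard) are assumptions inferred from how the fields are used, not a verbatim copy of the real test helpers:

// fakeShardWriter lets each test case inject a per-node error for shard writes.
type fakeShardWriter struct {
	ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error
}

// WriteShard delegates to the injected function. (Method name assumed; see note above.)
func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error {
	return f.ShardWriteFn(shardID, nodeID, points)
}

// fakeStore stands in for the local TSDB store on the writing node.
type fakeStore struct {
	WriteFn func(shardID uint64, points []models.Point) error
}

// WriteToShard delegates to the injected function. (Method name assumed; see note above.)
func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error {
	return f.WriteFn(shardID, points)
}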
Example 3
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Ensure the meta directory always exists, even if we don't start the
	// meta store, since node.json is always stored under the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes write node.json at the root dir,
	// which breaks backup/restore and restarting nodes. Move the file from
	// the root so it always lives under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	// Load the node information. If this node doesn't run the meta service,
	// use the configured join peers as the meta addresses.
	metaAddresses := []string{c.Meta.HTTPBindAddress}
	if !c.Meta.Enabled {
		metaAddresses = c.Meta.JoinPeers
	}

	node, err := influxdb.LoadNode(c.Meta.Dir, metaAddresses)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(c.Meta.Dir, metaAddresses)
	}

	// In 0.10.0 bind-address moved to the top level. Check the old
	// location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing SELECT ... INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
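defaultHost is called above but not shown. The sketch below is a plausible implementation under the assumption that its job is to substitute a default hostname when the configured address leaves the host part empty (for example ":8086"); this is an assumption, not the original code, and it relies only on the standard library net package:

func defaultHost(hostname, addr string) (string, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", err
	}
	// An empty host gets the default hostname substituted;
	// otherwise the address is returned unchanged.
	if host == "" {
		return net.JoinHostPort(hostname, port), nil
	}
	return addr, nil
}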
Example 4
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Construct base meta store and data store.
	tsdbStore := tsdb.NewStore(c.Data.Dir)
	tsdbStore.EngineOptions.Config = c.Data

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		TSDBStore: tsdbStore,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
	s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
	s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

	// Set the shard mapper
	s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
	s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
	s.ShardMapper.MetaStore = s.MetaStore
	s.ShardMapper.TSDBStore = s.TSDBStore

	// Initialize query executor.
	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
	s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
	s.QueryExecutor.ShardMapper = s.ShardMapper
	s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

	// Set the shard writer
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.MetaStore = s.MetaStore
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff

	// Initialize the monitor
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.MetaStore = s.MetaStore
	s.Monitor.PointsWriter = s.PointsWriter

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendSnapshotterService()
	s.appendCopierService()
	s.appendAdminService(c.Admin)
	s.appendContinuousQueryService(c.ContinuousQuery)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	for _, g := range c.UDPs {
		s.appendUDPService(g)
	}
	s.appendRetentionPolicyService(c.Retention)
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	return s, nil
}
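This variant takes a *BuildInfo rather than a bare version string. A hypothetical caller sketch follows; the field names (Version, Commit, Branch, Time) come from how buildInfo is copied into the Monitor above, their string types are assumed, and the literal values are placeholders:

func newServerFromBuild(c *Config) (*Server, error) {
	bi := &BuildInfo{
		Version: "0.9.5",   // placeholder version
		Commit:  "0000000", // placeholder commit hash
		Branch:  "master",  // placeholder branch name
		Time:    "",        // placeholder build timestamp
	}
	return NewServer(c, bi)
}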
Example 5
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Load the node information. Before 0.10 it lived in the meta directory,
	// so fall back to that when the top-level directory isn't specified.
	dir := c.Dir
	if dir == "" {
		dir = c.Meta.Dir
	}

	// Load the node metadata, creating it if it doesn't exist yet.
	node, err := influxdb.LoadNode(dir, c.Meta.HTTPBindAddress)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		node = influxdb.NewNode(dir, c.Meta.HTTPBindAddress)
	}

	// In 0.10.0 bind-address moved to the top level. Check the old
	// location to keep things backwards compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing SELECT ... INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
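The bind-address fallback near the top of this constructor can be read as a small helper. This is a hypothetical refactor shown only for illustration; the rule itself is taken directly from the code above:

func resolveBindAddress(c *Config) string {
	// The pre-0.10 location ([meta] bind-address) wins whenever it is set;
	// otherwise the new top-level bind-address is used.
	if c.Meta.BindAddress != "" {
		return c.Meta.BindAddress
	}
	return c.BindAddress
}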