Example #1
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Load the node information. Before 0.10 this lived in the meta
	// directory, so fall back to that if the top-level directory isn't
	// specified.
	dir := c.Dir
	if dir == "" {
		dir = c.Meta.Dir
	}

	node, err := influxdb.LoadNode(dir, c.Meta.HTTPBindAddress)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// No node file exists yet, so create a fresh node.
		node = influxdb.NewNode(dir, c.Meta.HTTPBindAddress)
	}

	// In 0.10.0 bind-address moved to the top level. Check the old
	// location to keep things backward compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
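
Both versions of NewServer call a defaultHost helper that isn't shown in these examples. Below is a minimal sketch of what it might do, inferred from the call sites; the name matches the calls above, but the signature details and behavior are assumptions, not confirmed by the source:

package run

import "net"

// defaultHost is a sketch of the assumed helper: it returns addr
// unchanged when addr already names a host, and substitutes hostname
// when the host part of addr is empty (e.g. ":8086").
func defaultHost(hostname, addr string) (string, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", err
	}
	if host == "" {
		return net.JoinHostPort(hostname, port), nil
	}
	return addr, nil
}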
Example #2
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Ensure the meta directory always exists, even when the meta store
	// isn't started: node.json is always stored under the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put node.json at the root
	// dir, which breaks backup/restore and restarting nodes. Move the
	// file so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	// Rename only when the old file exists (Stat succeeds); any other
	// Stat error means there is nothing to migrate.
	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	// Load the node information. A node that doesn't run the meta
	// service discovers the meta nodes through its join peers instead
	// of its own HTTP bind address.
	metaAddresses := []string{c.Meta.HTTPBindAddress}
	if !c.Meta.Enabled {
		metaAddresses = c.Meta.JoinPeers
	}

	node, err := influxdb.LoadNode(c.Meta.Dir, metaAddresses)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// No node file exists yet, so create a fresh node.
		node = influxdb.NewNode(c.Meta.Dir, metaAddresses)
	}

	// In 0.10.0 bind-address moved to the top level. Check the old
	// location to keep things backward compatible.
	bind := c.BindAddress
	if c.Meta.BindAddress != "" {
		bind = c.Meta.BindAddress
	}

	if !c.Data.Enabled && !c.Meta.Enabled {
		return nil, fmt.Errorf("must run as either meta node or data node or both")
	}

	httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
	if err != nil {
		return nil, err
	}
	tcpBindAddress, err := defaultHost(DefaultHostname, bind)
	if err != nil {
		return nil, err
	}

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Node: node,

		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
		joinPeers:         c.Meta.JoinPeers,
		metaUseTLS:        c.Meta.HTTPSEnabled,

		httpAPIAddr: httpBindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     tcpBindAddress,

		config: c,
	}

	if c.Meta.Enabled {
		s.MetaService = meta.NewService(c.Meta)
	}

	if c.Data.Enabled {
		s.TSDBStore = tsdb.NewStore(c.Data.Dir)
		s.TSDBStore.EngineOptions.Config = c.Data

		// Copy TSDB configuration.
		s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
		s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
		s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
		s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

		// Set the shard mapper
		s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
		s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
		s.ShardMapper.TSDBStore = s.TSDBStore
		s.ShardMapper.Node = node

		// Initialize query executor.
		s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
		s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
		s.QueryExecutor.ShardMapper = s.ShardMapper
		s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

		// Set the shard writer
		s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout),
			c.Cluster.MaxRemoteWriteConnections)

		// Create the hinted handoff service
		s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
		s.HintedHandoff.Monitor = s.Monitor

		// Create the Subscriber service
		s.Subscriber = subscriber.NewService(c.Subscriber)

		// Initialize points writer.
		s.PointsWriter = cluster.NewPointsWriter()
		s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
		s.PointsWriter.TSDBStore = s.TSDBStore
		s.PointsWriter.ShardWriter = s.ShardWriter
		s.PointsWriter.HintedHandoff = s.HintedHandoff
		s.PointsWriter.Subscriber = s.Subscriber
		s.PointsWriter.Node = s.Node

		// Needed for executing INTO queries.
		s.QueryExecutor.IntoWriter = s.PointsWriter

		// Initialize the monitor
		s.Monitor.Version = s.buildInfo.Version
		s.Monitor.Commit = s.buildInfo.Commit
		s.Monitor.Branch = s.buildInfo.Branch
		s.Monitor.BuildTime = s.buildInfo.Time
		s.Monitor.PointsWriter = s.PointsWriter
	}

	return s, nil
}
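
For orientation, a hypothetical caller might wire NewServer into a running server like this. NewConfig is assumed to be a constructor returning a *Config populated with usable defaults; the field settings and error handling are illustrative, not part of the source:

// startServer is a sketch, not source code: it builds a default config,
// enables both roles, constructs the server, and opens it.
func startServer(buildInfo *BuildInfo) (*Server, error) {
	c := NewConfig() // assumed constructor returning defaults
	c.Meta.Enabled = true
	c.Data.Enabled = true // run as a combined meta/data node

	s, err := NewServer(c, buildInfo)
	if err != nil {
		return nil, err
	}
	if err := s.Open(); err != nil {
		return nil, err
	}
	return s, nil
}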
Example #3
// Run parses the config from args and runs the server.
func (cmd *Command) Run(args ...string) error {
	// Parse the command line flags.
	options, err := cmd.ParseFlags(args...)
	if err != nil {
		return err
	}

	// Print sweet InfluxDB logo.
	fmt.Print(logo)

	// Set parallelism.
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Mark start-up in log.
	log.Printf("InfluxDB starting, version %s, branch %s, commit %s, built %s",
		cmd.Version, cmd.Branch, cmd.Commit, cmd.BuildTime)
	log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0))

	// Write the PID file.
	if err := cmd.writePIDFile(options.PIDFile); err != nil {
		return fmt.Errorf("write pid file: %s", err)
	}

	// Turn on block profiling to debug stuck databases. The rate is in
	// nanoseconds, so this samples roughly one blocking event per second
	// of blocked time.
	runtime.SetBlockProfileRate(int(1 * time.Second))

	// Parse config
	config, err := cmd.ParseConfig(options.ConfigPath)
	if err != nil {
		return fmt.Errorf("parse config: %s", err)
	}

	// Apply any environment variables on top of the parsed config
	if err := config.ApplyEnvOverrides(); err != nil {
		return fmt.Errorf("apply env config: %v", err)
	}

	// If a node ID already exists on disk, ignore the join argument.
	// The loaded node isn't used for anything else here; this only
	// checks whether a node ID is present.
	if node, _ := influxdb.LoadNode(config.Meta.Dir, config.Meta.HTTPBindAddress); node == nil || node.ID == 0 {
		if options.Join != "" {
			config.Meta.JoinPeers = strings.Split(options.Join, ",")
		}
	}

	// Validate the configuration.
	if err := config.Validate(); err != nil {
		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
	}

	// Create server from config and start it.
	buildInfo := &BuildInfo{
		Version: cmd.Version,
		Commit:  cmd.Commit,
		Branch:  cmd.Branch,
		Time:    cmd.BuildTime,
	}
	s, err := NewServer(config, buildInfo)
	if err != nil {
		return fmt.Errorf("create server: %s", err)
	}
	s.CPUProfile = options.CPUProfile
	s.MemProfile = options.MemProfile
	if err := s.Open(); err != nil {
		return fmt.Errorf("open server: %s", err)
	}
	cmd.Server = s

	// Begin monitoring the server's error channel.
	go cmd.monitorServerErrors()

	return nil
}
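
Example #3 shows only the Run method. A hypothetical entry point that drives it might look like the sketch below; NewCommand and the shutdown handling are assumptions, since neither appears in the source (os and log are the standard library packages):

// main is a sketch: NewCommand is assumed to return a *Command whose
// Version, Branch, Commit, and BuildTime were injected at build time.
func main() {
	cmd := NewCommand()
	if err := cmd.Run(os.Args[1:]...); err != nil {
		log.Fatalf("run: %s", err)
	}
	// Run returns once the server is open; a real main would now block
	// on signals and shut cmd.Server down cleanly (omitted here).
	select {}
}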