Example #1
// LogOutput determines where we should send logs (if anywhere) and the log level.
func LogOutput() (logOutput io.Writer, err error) {
	logOutput = ioutil.Discard
	envLevel := os.Getenv(EnvLog)
	if envLevel == "" {
		return
	}

	logOutput = os.Stderr
	if logPath := os.Getenv(EnvLogFile); logPath != "" {
		var err error
		logOutput, err = os.Create(logPath)
		if err != nil {
			return nil, err
		}
	}

	// This was the default since the beginning
	logLevel := logutils.LogLevel("TRACE")

	if isValidLogLevel(envLevel) {
		// allow any of the following for better UX: info, Info or INFO
		logLevel = logutils.LogLevel(strings.ToUpper(envLevel))
	} else {
		log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
			envLevel, validLevels)
	}

	logOutput = &logutils.LevelFilter{
		Levels:   validLevels,
		MinLevel: logLevel,
		Writer:   logOutput,
	}

	return
}
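A minimal usage sketch for the helper above (it assumes the LogOutput function and its EnvLog/EnvLogFile variables from this example; the fallback on error is an illustrative choice, not part of the original): the returned value is either ioutil.Discard, a file/stderr writer, or a *logutils.LevelFilter wrapping one of those, and all of them satisfy io.Writer, so it can be handed straight to the standard library logger.

// Illustrative wiring (not from the original project): install the writer returned by LogOutput.
func init() {
	logOutput, err := LogOutput()
	if err != nil {
		// Could not open the requested log file; keep the default output so the
		// process still produces logs (illustrative fallback).
		log.Printf("[WARN] failed to set up log output: %s", err)
		return
	}
	log.SetOutput(logOutput)
}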
Example #2
func setupLogging() {
	minLevel := logutils.LogLevel("ERROR")

	if *debug {
		minLevel = logutils.LogLevel("DEBUG")
	}

	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "ERROR"},
		MinLevel: minLevel,
		Writer:   os.Stderr,
	}

	log.SetOutput(filter)
}
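For context, a short sketch of how the filter installed above behaves (it assumes the setupLogging function and the *debug flag from this example; the messages are illustrative): logutils decides per line based on the "[LEVEL]" prefix, dropping levels below MinLevel and letting lines without a recognized level prefix through unchanged.

// Hypothetical main, shown only to illustrate the filter's behaviour.
func main() {
	flag.Parse()   // populates *debug
	setupLogging() // installs the LevelFilter shown above

	log.Print("[DEBUG] polling upstream")       // dropped unless -debug was passed
	log.Print("[ERROR] upstream unreachable")   // always written to os.Stderr
	log.Print("message without a level prefix") // passes through; only recognized low levels are filtered
}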
Example #3
func Setup(config *Config) error {
	var logOutput io.Writer

	// Setup the default logging
	logFilter := NewLogFilter()
	logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.Level))
	logFilter.Writer = config.Writer
	if !ValidateLevelFilter(logFilter.MinLevel, logFilter) {
		levels := make([]string, 0, len(logFilter.Levels))
		for _, level := range logFilter.Levels {
			levels = append(levels, string(level))
		}
		return fmt.Errorf("invalid log level %q, valid log levels are %s",
			config.Level, strings.Join(levels, ", "))
	}

	// Check if syslog is enabled
	if config.Syslog {
		log.Printf("[DEBUG] (logging) enabling syslog on %s", config.SyslogFacility)

		l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, config.Name)
		if err != nil {
			return fmt.Errorf("error setting up syslog logger: %s", err)
		}
		syslog := &SyslogWrapper{l, logFilter}
		logOutput = io.MultiWriter(logFilter, syslog)
	} else {
		logOutput = io.MultiWriter(logFilter)
	}

	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC)
	log.SetOutput(logOutput)

	return nil
}
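A hedged calling sketch for Setup above (the Config literal only sets the fields the function reads: Level, Writer, Syslog, SyslogFacility and Name; a real Config in the source project may require more):

// Hypothetical caller; only the Config fields read by Setup are shown.
func main() {
	cfg := &Config{
		Level:          "INFO",
		Writer:         os.Stderr,
		Syslog:         false,
		SyslogFacility: "LOCAL0",
		Name:           "myapp",
	}
	if err := Setup(cfg); err != nil {
		fmt.Fprintf(os.Stderr, "logging setup failed: %s\n", err)
		os.Exit(1)
	}
	log.Printf("[INFO] logging initialised")
}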
Example #4
func main() {
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "WARN", "ERROR"},
		MinLevel: logutils.LogLevel("INFO"),
		Writer:   os.Stderr,
	}
	log.SetOutput(filter)

	viper.SetConfigName("Rigfile")
	viper.AddConfigPath("$HOME/.rigger/")
	viper.AddConfigPath(".")

	err := viper.ReadInConfig()
	if err != nil {
		if _, ok := err.(viper.UnsupportedConfigError); ok {
			log.Printf("[ERROR] No Rigfile exists.")
			os.Exit(1)
		} else {
			log.Printf("[ERROR] %s", err)
		}
	}

	var cmdUp = &cobra.Command{
		Use:   "up",
		Short: "Create my infrastructure",
		Long:  `Do lots of work`,
		Run: func(cmd *cobra.Command, args []string) {
			log.Printf("[INFO] Rigger lifting!")
		},
	}

	var rootCmd = &cobra.Command{Use: "rigger"}
	rootCmd.AddCommand(cmdUp)
	rootCmd.Execute()
}
Example #5
func TestIPCLogStream(t *testing.T) {
	sc := &MockStreamClient{}
	filter := LevelFilter()
	filter.MinLevel = logutils.LogLevel("INFO")

	ls := newLogStream(sc, filter, 42, log.New(os.Stderr, "", log.LstdFlags))
	defer ls.Stop()

	log := "[DEBUG] this is a test log"
	log2 := "[INFO] This should pass"
	ls.HandleLog(log)
	ls.HandleLog(log2)

	time.Sleep(5 * time.Millisecond)

	if len(sc.headers) != 1 {
		t.Fatalf("expected 1 messages!")
	}
	for _, h := range sc.headers {
		if h.Seq != 42 {
			t.Fatalf("bad seq")
		}
		if h.Error != "" {
			t.Fatalf("bad err")
		}
	}

	obj1 := sc.objs[0].(*logRecord)
	if obj1.Log != log2 {
		t.Fatalf("bad event %#v", obj1)
	}
}
Example #6
func TestSyslogFilter(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.SkipNow()
	}
	l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, "LOCAL0", "consul")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	filt := LevelFilter()
	filt.MinLevel = logutils.LogLevel("INFO")

	s := &SyslogWrapper{l, filt}
	n, err := s.Write([]byte("[INFO] test"))
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n == 0 {
		t.Fatalf("should have logged")
	}

	n, err = s.Write([]byte("[DEBUG] test"))
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n != 0 {
		t.Fatalf("should not have logged")
	}
}
Example #7
func TestSyslogFilter(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.SkipNow()
	}

	// Travis does not support syslog for some reason
	if travis := os.Getenv("TRAVIS"); travis != "" {
		t.SkipNow()
	}

	l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, "LOCAL0", "consul-template")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	filt := NewLogFilter()
	filt.MinLevel = logutils.LogLevel("INFO")

	s := &SyslogWrapper{l, filt}
	n, err := s.Write([]byte("[INFO] test"))
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n == 0 {
		t.Fatalf("should have logged")
	}

	n, err = s.Write([]byte("[DEBUG] test"))
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n != 0 {
		t.Fatalf("should not have logged")
	}
}
Example #8
// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload(config *Config, agent *Agent) *Config {
	c.Ui.Output("Reloading configuration...")
	newConf := c.readConfig()
	if newConf == nil {
		c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
		return config
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = config.LogLevel
	}

	// Change the event handlers
	c.scriptHandler.UpdateScripts(newConf.EventScripts())

	// Update the tags in serf
	if err := agent.SetTags(newConf.Tags); err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to update tags: %v", err))
		return newConf
	}

	return newConf
}
Example #9
// LogOutput determines where we should send logs (if anywhere) and the log level.
func LogOutput() (logOutput io.Writer, err error) {
	logOutput = ioutil.Discard

	logLevel := LogLevel()
	if logLevel == "" {
		return
	}

	logOutput = os.Stderr
	if logPath := os.Getenv(EnvLogFile); logPath != "" {
		var err error
		logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
		if err != nil {
			return nil, err
		}
	}

	// This was the default since the beginning
	logOutput = &logutils.LevelFilter{
		Levels:   validLevels,
		MinLevel: logutils.LogLevel(logLevel),
		Writer:   logOutput,
	}

	return
}
Example #10
func (c *MonitorCommand) Run(args []string) int {
	var logLevel string
	cmdFlags := flag.NewFlagSet("monitor", flag.ContinueOnError)
	cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
	cmdFlags.StringVar(&logLevel, "log-level", "INFO", "log level")
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
		return 1
	}
	defer client.Close()

	logCh := make(chan string, 1024)
	monHandle, err := client.Monitor(logutils.LogLevel(logLevel), logCh)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err))
		return 1
	}
	defer client.Stop(monHandle)

	eventDoneCh := make(chan struct{})
	go func() {
		defer close(eventDoneCh)
	OUTER:
		for {
			select {
			case log := <-logCh:
				if log == "" {
					break OUTER
				}
				c.Ui.Info(log)
			}
		}

		c.lock.Lock()
		defer c.lock.Unlock()
		if !c.quitting {
			c.Ui.Info("")
			c.Ui.Output("Remote side ended the monitor! This usually means that the\n" +
				"remote side has exited or crashed.")
		}
	}()

	select {
	case <-eventDoneCh:
		return 1
	case <-c.ShutdownCh:
		c.lock.Lock()
		c.quitting = true
		c.lock.Unlock()
	}

	return 0
}
Example #11
func setLogger(level string) {
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "WARN", "ERROR", "FATAL"},
		MinLevel: logutils.LogLevel(level),
		Writer:   os.Stderr,
	}
	log.SetOutput(filter)
}
Example #12
// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload(config *Config) *Config {
	c.Ui.Output("Reloading configuration...")
	newConf := c.readConfig()
	if newConf == nil {
		c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
		return config
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = config.LogLevel
	}

	// Bulk update the services and checks
	c.agent.PauseSync()
	defer c.agent.ResumeSync()

	// Deregister the old services
	for _, service := range config.Services {
		ns := service.NodeService()
		c.agent.RemoveService(ns.ID)
	}

	// Deregister the old checks
	for _, check := range config.Checks {
		health := check.HealthCheck(config.NodeName)
		c.agent.RemoveCheck(health.CheckID)
	}

	// Register the services
	for _, service := range newConf.Services {
		ns := service.NodeService()
		chkType := service.CheckType()
		if err := c.agent.AddService(ns, chkType); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to register service '%s': %v", service.Name, err))
		}
	}

	// Register the checks
	for _, check := range newConf.Checks {
		health := check.HealthCheck(config.NodeName)
		chkType := &check.CheckType
		if err := c.agent.AddCheck(health, chkType); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to register check '%s': %v %v", check.Name, err, check))
		}
	}

	return newConf
}
Example #13
func validateLogLevel(level string) error {
	err := errors.New("invalid log level")
	for _, l := range LOG_LEVELS {
		if l == logutils.LogLevel(strings.ToUpper(level)) {
			return nil
		}
	}
	return err
}
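A sketch of how a validator like this is typically used to guard filter construction (the configureLogging wrapper is hypothetical; LOG_LEVELS is assumed to be the []logutils.LogLevel the loop above ranges over):

// configureLogging is a hypothetical wrapper around validateLogLevel.
func configureLogging(level string) error {
	if err := validateLogLevel(level); err != nil {
		return fmt.Errorf("%v: %q, valid levels are %v", err, level, LOG_LEVELS)
	}
	log.SetOutput(&logutils.LevelFilter{
		Levels:   LOG_LEVELS,
		MinLevel: logutils.LogLevel(strings.ToUpper(level)),
		Writer:   os.Stderr,
	})
	return nil
}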
Example #14
func TestLevelFilter(t *testing.T) {

	filt := LevelFilter()
	filt.Levels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"}
	level := logutils.LogLevel("INFO")

	// LevelFilter regards INFO as valid level
	if !ValidateLevelFilter(level, filt) {
		t.Fatalf("expected valid LogLevel, %s was invalid", level)
	}

	level = logutils.LogLevel("FOO")

	// LevelFilter regards FOO as invalid level
	if ValidateLevelFilter(level, filt) {
		t.Fatalf("expected invalid LogLevel, %s was valid", level)
	}

}
Example #15
// initLogger gets the log level from the environment, falling back to WARN if
// nothing was given.
func (cli *CLI) initLogger(level string) {
	minLevel := strings.ToUpper(strings.TrimSpace(level))
	if minLevel == "" {
		minLevel = "WARN"
	}

	levelFilter.Writer = cli.errStream
	levelFilter.SetMinLevel(logutils.LogLevel(minLevel))
	log.SetOutput(levelFilter)
}
Example #16
// SetupLogger configures the standard logger to filter messages below the given level.
func SetupLogger(level LogLevel) {
	var loglevel = logutils.LogLevel(level.String())
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "WARN", "ERROR"},
		MinLevel: loglevel,
		Writer:   os.Stderr,
	}
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	log.SetOutput(filter)
}
Example #17
func (c *MonitorCommand) Run(args []string, ui cli.Ui) int {
	var logLevel string
	cmdFlags := flag.NewFlagSet("monitor", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	cmdFlags.StringVar(&logLevel, "log-level", "INFO", "log level")
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		ui.Error(fmt.Sprintf("Error connecting to Serf agent: %s", err))
		return 1
	}
	defer client.Close()

	eventCh := make(chan string)
	doneCh := make(chan struct{})
	if err := client.Monitor(logutils.LogLevel(logLevel), eventCh, doneCh); err != nil {
		ui.Error(fmt.Sprintf("Error starting monitor: %s", err))
		return 1
	}

	eventDoneCh := make(chan struct{})
	go func() {
		defer close(eventDoneCh)
		for e := range eventCh {
			ui.Info(e)
		}

		c.lock.Lock()
		defer c.lock.Unlock()
		if !c.quitting {
			ui.Info("")
			ui.Output("Remote side ended the monitor! This usually means that the\n" +
				"remote side has exited or crashed.")
		}
	}()

	select {
	case <-eventDoneCh:
		return 1
	case <-c.ShutdownCh:
		c.lock.Lock()
		c.quitting = true
		c.lock.Unlock()
	}

	close(doneCh)
	return 0
}
Example #18
func initLogger() {
	if needsInit {
		filter := &logutils.LevelFilter{
			Levels:   []logutils.LogLevel(logLevels),
			MinLevel: logutils.LogLevel("WARN"),
			Writer:   os.Stderr,
		}
		log.SetOutput(filter)
		log.SetFlags(0)

		needsInit = false
	}
}
Example #19
// SetLevel will set the desired level
func (c customLogger) SetLevel(level logging.LogLevel) {
	initLogger()

	logLevel := strings.TrimSpace(strings.ToUpper(level.String()))

	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel(logLevels),
		MinLevel: logutils.LogLevel(logLevel),
		Writer:   os.Stderr,
	}

	log.SetOutput(filter)
}
Example #20
// Setup is used to perform setup of several logging objects:
//
// * A LevelFilter is used to perform filtering by log level.
// * A GatedWriter is used to buffer logs until startup UI operations are
//   complete. After this is flushed then logs flow directly to output
//   destinations.
// * A LogWriter provides a means to temporarily hook logs, such as for running
//   a command like "consul monitor".
// * An io.Writer is provided as the sink for all logs to flow to.
//
// The provided ui object will get any log messages related to setting up
// logging itself, and will also be hooked up to the gated logger. The final bool
// return value indicates whether logging was set up successfully.
func Setup(config *Config, ui cli.Ui) (*logutils.LevelFilter, *GatedWriter, *LogWriter, io.Writer, bool) {
	// The gated writer buffers logs at startup and holds until it's flushed.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{ui},
	}

	// Set up the level filter.
	logFilter := LevelFilter()
	logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
	logFilter.Writer = logGate
	if !ValidateLevelFilter(logFilter.MinLevel, logFilter) {
		ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			logFilter.MinLevel, logFilter.Levels))
		return nil, nil, nil, nil, false
	}

	// Set up syslog if it's enabled.
	var syslog io.Writer
	if config.EnableSyslog {
		retries := 12
		delay := 5 * time.Second
		for i := 0; i <= retries; i++ {
			l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "consul")
			if err != nil {
				ui.Error(fmt.Sprintf("Syslog setup error: %v", err))
				if i == retries {
					timeout := time.Duration(retries) * delay
					ui.Error(fmt.Sprintf("Syslog setup did not succeed within timeout (%s).", timeout.String()))
					return nil, nil, nil, nil, false
				} else {
					ui.Error(fmt.Sprintf("Retrying syslog setup in %s...", delay.String()))
					time.Sleep(delay)
				}
			} else {
				syslog = &SyslogWrapper{l, logFilter}
				break
			}
		}
	}

	// Create a log writer, and wrap a logOutput around it
	logWriter := NewLogWriter(512)
	var logOutput io.Writer
	if syslog != nil {
		logOutput = io.MultiWriter(logFilter, logWriter, syslog)
	} else {
		logOutput = io.MultiWriter(logFilter, logWriter)
	}
	return logFilter, logGate, logWriter, logOutput, true
}
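A hedged sketch of how a caller might wire the four return values together, following the doc comment above (the run function and its startup sequence are illustrative, not from the source project):

// run is a hypothetical caller showing how the return values fit together.
func run(config *Config, ui cli.Ui) int {
	logFilter, logGate, logWriter, logOutput, ok := Setup(config, ui)
	if !ok {
		return 1
	}

	logger := log.New(logOutput, "", log.LstdFlags) // flows through the filter (and syslog, if enabled)
	_ = logWriter                                   // would back a "consul monitor"-style log stream
	_ = logFilter                                   // kept so the level can be changed on reload

	// ... start the agent and print the startup UI ...

	logGate.Flush() // release the logs buffered during startup
	logger.Printf("[INFO] agent: startup complete")
	return 0
}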
Example #21
File: log.go Project: DLag/logear
func logOpen() {
	if logFile != "" {
		logFilter.Writer = openFileLog(logFile)
	} else {
		if v, ok := cfg["main"]; ok {
			if v, ok := v.(map[string]interface{})["logfile"]; ok {
				logFilter.Writer = openFileLog(v.(string))
			}
			if v, ok := v.(map[string]interface{})["loglevel"]; ok {
				logFilter.MinLevel = logutils.LogLevel(v.(string))
			}
		}
	}
}
Example #22
func initLog() {
	logLevel = os.Getenv("STORMPATH_LOG_LEVEL")

	if logLevel == "" {
		logLevel = "ERROR"
	}

	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "WARN", "ERROR", "NONE"},
		MinLevel: logutils.LogLevel(logLevel),
		Writer:   os.Stderr,
	}

	Logger = log.New(filter, "", log.Ldate|log.Ltime|log.Lshortfile)
}
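A brief usage sketch for the package-level Logger configured above (the function and its messages are hypothetical): because the filter is the logger's writer, level filtering applies to this dedicated logger without touching the global log package.

// getResource is a hypothetical caller of the Logger configured in initLog.
func getResource(href string) error {
	Logger.Printf("[DEBUG] GET %s", href) // suppressed unless STORMPATH_LOG_LEVEL=DEBUG
	resp, err := http.Get(href)
	if err != nil {
		Logger.Printf("[ERROR] request failed: %v", err) // dropped only if the level is NONE
		return err
	}
	defer resp.Body.Close()
	Logger.Printf("[DEBUG] status: %s", resp.Status)
	return nil
}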
Example #23
// Monitor opens a connection to the given callbackAddr and sends an event
// stream to it. This event stream is not the same as the _serf event_ stream.
// This is a general stream of events that are occurring to the agent.
func (e *rpcEndpoint) Monitor(args RPCMonitorArgs, result *interface{}) error {
	if args.LogLevel == "" {
		args.LogLevel = "DEBUG"
	}
	args.LogLevel = strings.ToUpper(args.LogLevel)

	filter := LevelFilter()
	filter.MinLevel = logutils.LogLevel(args.LogLevel)
	if !ValidateLevelFilter(filter) {
		return fmt.Errorf("Unknown log level: %s", filter.MinLevel)
	}

	go e.monitorStream(args.CallbackAddr, filter)
	return nil
}
Example #24
// setupLoggers is used to setup the logGate, logWriter, and our logOutput
func (c *Command) setupLoggers(config *Config) (*GatedWriter, *logWriter, io.Writer) {
	// Setup logging. First create the gated log writer, which will
	// store logs until we're ready to show them. Then create the level
	// filter, filtering logs of the specified level.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{Ui: c.Ui},
	}

	c.logFilter = LevelFilter()
	c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
	c.logFilter.Writer = logGate
	if !ValidateLevelFilter(c.logFilter.MinLevel, c.logFilter) {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			c.logFilter.MinLevel, c.logFilter.Levels))
		return nil, nil, nil
	}

	// Check if syslog is enabled
	var syslog io.Writer
	if config.EnableSyslog {
		l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "serf")
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Syslog setup failed: %v", err))
			return nil, nil, nil
		}
		syslog = &SyslogWrapper{l}
	}

	// Create a log writer, and wrap a logOutput around it
	logWriter := NewLogWriter(512)
	var logOutput io.Writer
	if syslog != nil {
		logOutput = io.MultiWriter(c.logFilter, logWriter, syslog)
	} else {
		logOutput = io.MultiWriter(c.logFilter, logWriter)
	}

	// Create a logger
	c.logger = log.New(logOutput, "", log.LstdFlags)
	return logGate, logWriter, logOutput
}
Example #25
// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload(config *Config) *Config {
	c.Ui.Output("Reloading configuration...")
	newConf := c.readConfig()
	if newConf == nil {
		c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
		return config
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = config.LogLevel
	}
	return newConf
}
Example #26
func (i *AgentRPC) handleMonitor(client *rpcClient, seq uint64) error {
	var req monitorRequest
	if err := client.dec.Decode(&req); err != nil {
		return fmt.Errorf("decode failed: %v", err)
	}

	resp := responseHeader{
		Seq:   seq,
		Error: "",
	}

	// Upper case the log level
	req.LogLevel = strings.ToUpper(req.LogLevel)

	// Create a level filter
	filter := LevelFilter()
	filter.MinLevel = logutils.LogLevel(req.LogLevel)
	if !ValidateLevelFilter(filter.MinLevel, filter) {
		resp.Error = fmt.Sprintf("Unknown log level: %s", filter.MinLevel)
		goto SEND
	}

	// Check if there is an existing monitor
	if client.logStreamer != nil {
		resp.Error = monitorExists
		goto SEND
	}

	// Create a log streamer
	client.logStreamer = newLogStream(client, filter, seq, i.logger)

	// Register with the log writer. Defer so that we can respond before
	// registration, avoids any possible race condition
	defer i.logWriter.RegisterHandler(client.logStreamer)

SEND:
	return client.Send(&resp, nil)
}
Example #27
// setupLoggers is used to setup the logGate, logWriter, and our logOutput
func (c *Command) setupLoggers(config *Config) (*GatedWriter, *logWriter, io.Writer) {
	// Setup logging. First create the gated log writer, which will
	// store logs until we're ready to show them. Then create the level
	// filter, filtering logs of the specified level.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{Ui: c.Ui},
	}

	c.logFilter = LevelFilter()
	c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
	c.logFilter.Writer = logGate
	if !ValidateLevelFilter(c.logFilter.MinLevel, c.logFilter) {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			c.logFilter.MinLevel, c.logFilter.Levels))
		return nil, nil, nil
	}

	// Create a log writer, and wrap a logOutput around it
	logWriter := NewLogWriter(512)
	logOutput := io.MultiWriter(c.logFilter, logWriter)
	return logGate, logWriter, logOutput
}
Example #28
func (c *Command) Run(args []string) int {
	ui := &cli.PrefixedUi{
		OutputPrefix: "==> ",
		InfoPrefix:   "    ",
		ErrorPrefix:  "==> ",
		Ui:           c.Ui,
	}

	var cmdConfig Config
	var configFiles []string

	cmdFlags := flag.NewFlagSet("agent", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	cmdFlags.StringVar(&cmdConfig.BindAddr, "bind", "", "address to bind listeners to")
	cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-file",
		"json file to read config from")
	cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-dir",
		"directory of json files to read")
	cmdFlags.StringVar(&cmdConfig.EncryptKey, "encrypt", "", "encryption key")
	cmdFlags.Var((*AppendSliceValue)(&cmdConfig.EventHandlers), "event-handler",
		"command to execute when events occur")
	cmdFlags.Var((*AppendSliceValue)(&cmdConfig.StartJoin), "join",
		"address of agent to join on startup")
	cmdFlags.StringVar(&cmdConfig.LogLevel, "log-level", "", "log level")
	cmdFlags.StringVar(&cmdConfig.NodeName, "node", "", "node name")
	cmdFlags.IntVar(&cmdConfig.Protocol, "protocol", -1, "protocol version")
	cmdFlags.StringVar(&cmdConfig.Role, "role", "", "role name")
	cmdFlags.StringVar(&cmdConfig.RPCAddr, "rpc-addr", "",
		"address to bind RPC listener to")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	config := DefaultConfig
	if len(configFiles) > 0 {
		fileConfig, err := ReadConfigPaths(configFiles)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}

		config = MergeConfig(config, fileConfig)
	}

	config = MergeConfig(config, &cmdConfig)

	if config.NodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error determining hostname: %s", err))
			return 1
		}

		config.NodeName = hostname
	}

	eventScripts, err := config.EventScripts()
	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	for _, script := range eventScripts {
		if !script.Valid() {
			c.Ui.Error(fmt.Sprintf("Invalid event script: %s", script.String()))
			return 1
		}
	}

	bindIP, bindPort, err := config.BindAddrParts()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Invalid bind address: %s", err))
		return 1
	}

	encryptKey, err := config.EncryptBytes()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Invalid encryption key: %s", err))
		return 1
	}

	// Setup logging. First create the gated log writer, which will
	// store logs until we're ready to show them. Then create the level
	// filter, filtering logs of the specified level.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{Ui: c.Ui},
	}

	logLevelFilter := LevelFilter()
	logLevelFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
	logLevelFilter.Writer = logGate
	if !ValidateLevelFilter(logLevelFilter) {
		ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			logLevelFilter.MinLevel, logLevelFilter.Levels))
		return 1
	}

	serfConfig := serf.DefaultConfig()
	serfConfig.MemberlistConfig.BindAddr = bindIP
	serfConfig.MemberlistConfig.Port = bindPort
	serfConfig.MemberlistConfig.SecretKey = encryptKey
	serfConfig.NodeName = config.NodeName
	serfConfig.Role = config.Role
	serfConfig.ProtocolVersion = uint8(config.Protocol)
	serfConfig.CoalescePeriod = 3 * time.Second
	serfConfig.QuiescentPeriod = time.Second
	serfConfig.UserCoalescePeriod = 3 * time.Second
	serfConfig.UserQuiescentPeriod = time.Second

	agent := &Agent{
		EventHandler: &ScriptEventHandler{
			Self: serf.Member{
				Name: serfConfig.NodeName,
				Role: serfConfig.Role,
			},
			Scripts: eventScripts,
		},
		LogOutput:  logLevelFilter,
		RPCAddr:    config.RPCAddr,
		SerfConfig: serfConfig,
	}

	ui.Output("Starting Serf agent...")
	if err := agent.Start(); err != nil {
		ui.Error(err.Error())
		return 1
	}
	defer agent.Shutdown()

	bindAddr := (&net.TCPAddr{IP: net.ParseIP(bindIP), Port: bindPort}).String()
	ui.Output("Serf agent running!")
	ui.Info(fmt.Sprintf("Node name: '%s'", config.NodeName))
	ui.Info(fmt.Sprintf("Bind addr: '%s'", bindAddr))
	ui.Info(fmt.Sprintf(" RPC addr: '%s'", config.RPCAddr))
	ui.Info(fmt.Sprintf("Encrypted: %#v", config.EncryptKey != ""))

	if len(config.StartJoin) > 0 {
		ui.Output("Joining cluster...")
		n, err := agent.Join(config.StartJoin, true)
		if err != nil {
			ui.Error(err.Error())
			return 1
		}

		ui.Info(fmt.Sprintf("Join completed. Synced with %d initial agents", n))
	}

	ui.Info("")
	ui.Output("Log data will now stream in as it occurs:\n")
	logGate.Flush()

	graceful, forceful := c.startShutdownWatcher(agent, ui)
	select {
	case <-graceful:
	case <-forceful:
		// Forcefully shut down, return a bad exit status.
		return 1
	}

	return 0
}
Example #29
// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload(config *Config) *Config {
	c.Ui.Output("Reloading configuration...")
	newConf := c.readConfig()
	if newConf == nil {
		c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
		return config
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = config.LogLevel
	}

	// Bulk update the services and checks
	c.agent.PauseSync()
	defer c.agent.ResumeSync()

	// Snapshot the current state, and restore it afterwards
	snap := c.agent.snapshotCheckState()
	defer c.agent.restoreCheckState(snap)

	// First unload all checks and services. This lets us begin the reload
	// with a clean slate.
	if err := c.agent.unloadServices(); err != nil {
		c.Ui.Error(fmt.Sprintf("Failed unloading services: %s", err))
		return nil
	}
	if err := c.agent.unloadChecks(); err != nil {
		c.Ui.Error(fmt.Sprintf("Failed unloading checks: %s", err))
		return nil
	}

	// Reload services and check definitions.
	if err := c.agent.loadServices(newConf); err != nil {
		c.Ui.Error(fmt.Sprintf("Failed reloading services: %s", err))
		return nil
	}
	if err := c.agent.loadChecks(newConf); err != nil {
		c.Ui.Error(fmt.Sprintf("Failed reloading checks: %s", err))
		return nil
	}

	// Get the new client listener addr
	httpAddr, err := newConf.ClientListener(config.Addresses.HTTP, config.Ports.HTTP)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to determine HTTP address: %v", err))
	}

	// Deregister the old watches
	for _, wp := range config.WatchPlans {
		wp.Stop()
	}

	// Register the new watches
	for _, wp := range newConf.WatchPlans {
		go func(wp *watch.WatchPlan) {
			wp.Handler = makeWatchHandler(c.logOutput, wp.Exempt["handler"])
			wp.LogOutput = c.logOutput
			if err := wp.Run(httpAddr.String()); err != nil {
				c.Ui.Error(fmt.Sprintf("Error running watch: %v", err))
			}
		}(wp)
	}

	// Reload SCADA client if we have a change
	if newConf.AtlasInfrastructure != config.AtlasInfrastructure ||
		newConf.AtlasToken != config.AtlasToken ||
		newConf.AtlasEndpoint != config.AtlasEndpoint {
		if err := c.setupScadaConn(newConf); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed reloading SCADA client: %s", err))
			return nil
		}
	}

	return newConf
}
Example #30
func (c *ServerCommand) Run(args []string) int {
	var dev, verifyOnly bool
	var configPath []string
	var logLevel string
	flags := c.Meta.FlagSet("server", FlagSetDefault)
	flags.BoolVar(&dev, "dev", false, "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.BoolVar(&verifyOnly, "verify-only", false, "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }
	flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Validation
	if !dev && len(configPath) == 0 {
		c.Ui.Error("At least one config path must be specified with -config")
		flags.Usage()
		return 1
	}

	// Load the configuration
	var config *server.Config
	if dev {
		config = server.DevConfig()
	}
	for _, path := range configPath {
		current, err := server.LoadConfig(path)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error loading configuration from %s: %s", path, err))
			return 1
		}

		if config == nil {
			config = current
		} else {
			config = config.Merge(current)
		}
	}

	// Ensure that a backend is provided
	if config.Backend == nil {
		c.Ui.Error("A physical backend must be specified")
		return 1
	}

	// If mlock isn't supported, show a warning. We disable this in
	// dev because it is quite scary to see when first using Vault.
	if !dev && !mlock.Supported() {
		c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
		c.Ui.Output("  The `mlock` syscall to prevent memory from being swapped to")
		c.Ui.Output("  disk is not supported on this system. Enabling mlock or")
		c.Ui.Output("  running Vault on a system with mlock is much more secure.\n")
	}

	// Create a logger. We wrap it in a gated writer so that it doesn't
	// start logging too early.
	logGate := &gatedwriter.Writer{Writer: os.Stderr}
	logger := log.New(&logutils.LevelFilter{
		Levels: []logutils.LogLevel{
			"TRACE", "DEBUG", "INFO", "WARN", "ERR"},
		MinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),
		Writer:   logGate,
	}, "", log.LstdFlags)

	if err := c.setupTelementry(config); err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
		return 1
	}

	// Initialize the backend
	backend, err := physical.NewBackend(
		config.Backend.Type, config.Backend.Config)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error initializing backend of type %s: %s",
			config.Backend.Type, err))
		return 1
	}

	coreConfig := &vault.CoreConfig{
		Physical:           backend,
		AdvertiseAddr:      config.Backend.AdvertiseAddr,
		HAPhysical:         nil,
		AuditBackends:      c.AuditBackends,
		CredentialBackends: c.CredentialBackends,
		LogicalBackends:    c.LogicalBackends,
		Logger:             logger,
		DisableCache:       config.DisableCache,
		DisableMlock:       config.DisableMlock,
		MaxLeaseTTL:        config.MaxLeaseTTL,
		DefaultLeaseTTL:    config.DefaultLeaseTTL,
	}

	// Initialize the separate HA physical backend, if it exists
	if config.HABackend != nil {
		habackend, err := physical.NewBackend(
			config.HABackend.Type, config.HABackend.Config)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error initializing backend of type %s: %s",
				config.HABackend.Type, err))
			return 1
		}

		var ok bool
		if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
			c.Ui.Error("Specified HA backend does not support HA")
			return 1
		}
		coreConfig.AdvertiseAddr = config.HABackend.AdvertiseAddr
	}

	if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
		coreConfig.AdvertiseAddr = envAA
	}

	// Attempt to detect the advertise address, if possible
	var detect physical.AdvertiseDetect
	var ok bool
	if coreConfig.HAPhysical != nil {
		detect, ok = coreConfig.HAPhysical.(physical.AdvertiseDetect)
	} else {
		detect, ok = coreConfig.Physical.(physical.AdvertiseDetect)
	}
	if ok && coreConfig.AdvertiseAddr == "" {
		advertise, err := c.detectAdvertise(detect, config)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error detecting advertise address: %s", err))
		} else if advertise == "" {
			c.Ui.Error("Failed to detect advertise address.")
		} else {
			coreConfig.AdvertiseAddr = advertise
		}
	}

	// Initialize the core
	core, err := vault.NewCore(coreConfig)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing core: %s", err))
		return 1
	}

	// If we're in dev mode, then initialize the core
	if dev {
		init, err := c.enableDev(core)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error initializing dev mode: %s", err))
			return 1
		}

		export := "export"
		quote := "'"
		if runtime.GOOS == "windows" {
			export = "set"
			quote = ""
		}

		c.Ui.Output(fmt.Sprintf(
			"==> WARNING: Dev mode is enabled!\n\n"+
				"In this mode, Vault is completely in-memory and unsealed.\n"+
				"Vault is configured to only have a single unseal key. The root\n"+
				"token has already been authenticated with the CLI, so you can\n"+
				"immediately begin using the Vault CLI.\n\n"+
				"The only step you need to take is to set the following\n"+
				"environment variables:\n\n"+
				"    "+export+" VAULT_ADDR="+quote+"http://127.0.0.1:8200"+quote+"\n\n"+
				"The unseal key and root token are reproduced below in case you\n"+
				"want to seal/unseal the Vault or play with authentication.\n\n"+
				"Unseal Key: %s\nRoot Token: %s\n",
			hex.EncodeToString(init.SecretShares[0]),
			init.RootToken,
		))
	}

	// Compile server information for output later
	infoKeys := make([]string, 0, 10)
	info := make(map[string]string)
	info["backend"] = config.Backend.Type
	info["log level"] = logLevel
	info["mlock"] = fmt.Sprintf(
		"supported: %v, enabled: %v",
		mlock.Supported(), !config.DisableMlock)
	infoKeys = append(infoKeys, "log level", "mlock", "backend")

	if config.HABackend != nil {
		info["HA backend"] = config.HABackend.Type
		info["advertise address"] = coreConfig.AdvertiseAddr
		infoKeys = append(infoKeys, "HA backend", "advertise address")
	} else {
		// If the backend supports HA, then note it
		if coreConfig.HAPhysical != nil {
			info["backend"] += " (HA available)"
			info["advertise address"] = coreConfig.AdvertiseAddr
			infoKeys = append(infoKeys, "advertise address")
		}
	}

	// Initialize the listeners
	lns := make([]net.Listener, 0, len(config.Listeners))
	for i, lnConfig := range config.Listeners {
		ln, props, err := server.NewListener(lnConfig.Type, lnConfig.Config)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error initializing listener of type %s: %s",
				lnConfig.Type, err))
			return 1
		}

		// Store the listener props for output later
		key := fmt.Sprintf("listener %d", i+1)
		propsList := make([]string, 0, len(props))
		for k, v := range props {
			propsList = append(propsList, fmt.Sprintf(
				"%s: %q", k, v))
		}
		sort.Strings(propsList)
		infoKeys = append(infoKeys, key)
		info[key] = fmt.Sprintf(
			"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))

		lns = append(lns, ln)
	}

	if verifyOnly {
		return 0
	}

	// Initialize the HTTP server
	server := &http.Server{}
	server.Handler = vaulthttp.Handler(core)
	for _, ln := range lns {
		go server.Serve(ln)
	}

	infoKeys = append(infoKeys, "version")
	info["version"] = version.GetVersion().String()

	// Server configuration output
	padding := 18
	c.Ui.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.Ui.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.Ui.Output("")

	// Output the header that the server has started
	c.Ui.Output("==> Vault server started! Log data will stream in below:\n")

	// Release the log gate.
	logGate.Flush()

	// Wait for shutdown
	select {
	case <-c.ShutdownCh:
		c.Ui.Output("==> Vault shutdown triggered")
		if err := core.Shutdown(); err != nil {
			c.Ui.Error(fmt.Sprintf("Error with core shutdown: %s", err))
		}
	}
	return 0
}