Code example #1
File: rpc.go  Project: pulcy/vault-monkey
func (i *AgentRPC) handleMonitor(client *rpcClient, seq uint64) error {
	var req monitorRequest
	if err := client.dec.Decode(&req); err != nil {
		return fmt.Errorf("decode failed: %v", err)
	}

	resp := responseHeader{
		Seq:   seq,
		Error: "",
	}

	// Upper case the log level
	req.LogLevel = strings.ToUpper(req.LogLevel)

	// Create a level filter
	filter := logger.LevelFilter()
	filter.MinLevel = logutils.LogLevel(req.LogLevel)
	if !logger.ValidateLevelFilter(filter.MinLevel, filter) {
		resp.Error = fmt.Sprintf("Unknown log level: %s", filter.MinLevel)
		goto SEND
	}

	// Check if there is an existing monitor
	if client.logStreamer != nil {
		resp.Error = monitorExists
		goto SEND
	}

	// Create a log streamer
	client.logStreamer = newLogStream(client, filter, seq, i.logger)

	// Register with the log writer. Defer so that we can respond before
	// registration, avoids any possible race condition
	defer i.logWriter.RegisterHandler(client.logStreamer)

SEND:
	return client.Send(&resp, nil)
}
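
All four examples gate the requested level through the same hashicorp/logutils level filter before applying it. Below is a minimal, self-contained sketch of that validate-then-apply step; the level list is illustrative only (the projects above obtain theirs from logger.LevelFilter()), and validateLevel simply mirrors what logger.ValidateLevelFilter does in the snippets.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/hashicorp/logutils"
)

// validateLevel reports whether level is one of the filter's known levels,
// mirroring what logger.ValidateLevelFilter does in the examples above.
func validateLevel(level logutils.LogLevel, filter *logutils.LevelFilter) bool {
	for _, l := range filter.Levels {
		if l == level {
			return true
		}
	}
	return false
}

func main() {
	// Illustrative level set; the projects above define their own.
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"},
		MinLevel: "INFO",
		Writer:   os.Stderr,
	}

	requested := logutils.LogLevel(strings.ToUpper("debug"))
	if validateLevel(requested, filter) {
		filter.SetMinLevel(requested)
	}
	fmt.Println("effective min level:", filter.MinLevel)
}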
Code example #2
File: command.go  Project: luizbafilho/fusis
// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload(config *Config) (*Config, error) {
	c.Ui.Output("Reloading configuration...")
	var errs error
	newConf := c.readConfig()
	if newConf == nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed to reload configs"))
		return config, errs
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if logger.ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		errs = multierror.Append(errs, fmt.Errorf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = config.LogLevel
	}

	// Bulk update the services and checks
	c.agent.PauseSync()
	defer c.agent.ResumeSync()

	// Snapshot the current state, and restore it afterwards
	snap := c.agent.snapshotCheckState()
	defer c.agent.restoreCheckState(snap)

	// First unload all checks and services. This lets us begin the reload
	// with a clean slate.
	if err := c.agent.unloadServices(); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed unloading services: %s", err))
		return nil, errs
	}
	if err := c.agent.unloadChecks(); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed unloading checks: %s", err))
		return nil, errs
	}

	// Reload services and check definitions.
	if err := c.agent.loadServices(newConf); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed reloading services: %s", err))
		return nil, errs
	}
	if err := c.agent.loadChecks(newConf); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed reloading checks: %s", err))
		return nil, errs
	}

	// Get the new client listener addr
	httpAddr, err := newConf.ClientListener(config.Addresses.HTTP, config.Ports.HTTP)
	if err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed to determine HTTP address: %v", err))
	}

	// Deregister the old watches
	for _, wp := range config.WatchPlans {
		wp.Stop()
	}

	// Register the new watches
	for _, wp := range newConf.WatchPlans {
		go func(wp *watch.WatchPlan) {
			wp.Handler = makeWatchHandler(c.logOutput, wp.Exempt["handler"])
			wp.LogOutput = c.logOutput
			if err := wp.Run(httpAddr.String()); err != nil {
				errs = multierror.Append(errs, fmt.Errorf("Error running watch: %v", err))
			}
		}(wp)
	}

	// Reload SCADA client if we have a change
	if newConf.AtlasInfrastructure != config.AtlasInfrastructure ||
		newConf.AtlasToken != config.AtlasToken ||
		newConf.AtlasEndpoint != config.AtlasEndpoint {
		if err := c.setupScadaConn(newConf); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed reloading SCADA client: %s", err))
			return nil, errs
		}
	}

	return newConf, errs
}
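
handleReload deliberately accumulates non-fatal problems with hashicorp/go-multierror instead of aborting on the first one, so the caller gets both the new config and everything that went wrong. A short sketch of that accumulate-and-continue pattern follows; the step names and their failures are placeholders, not part of the project.

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// reloadSteps stands in for the individual reload phases above; each step may
// fail independently, and failures are collected rather than returned early.
func reloadSteps() error {
	var errs error
	steps := []struct {
		name string
		run  func() error
	}{
		{"log level", func() error { return nil }},
		{"services", func() error { return fmt.Errorf("service definition invalid") }},
		{"checks", func() error { return nil }},
	}
	for _, s := range steps {
		if err := s.run(); err != nil {
			// Collect the error and keep going, as handleReload does for
			// non-fatal problems such as an invalid log level.
			errs = multierror.Append(errs, fmt.Errorf("Failed reloading %s: %s", s.name, err))
		}
	}
	return errs
}

func main() {
	if err := reloadSteps(); err != nil {
		fmt.Println(err)
	}
}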
Code example #3
File: agent_endpoint.go  Project: hashicorp/consul
func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Only GET supported.
	if req.Method != "GET" {
		resp.WriteHeader(405)
		return nil, nil
	}

	// Fetch the ACL token, if any, and enforce agent policy.
	var token string
	s.parseToken(req, &token)
	acl, err := s.agent.resolveToken(token)
	if err != nil {
		return nil, err
	}
	if acl != nil && !acl.AgentRead(s.agent.config.NodeName) {
		return nil, permissionDeniedErr
	}

	// Get the provided loglevel.
	logLevel := req.URL.Query().Get("loglevel")
	if logLevel == "" {
		logLevel = "INFO"
	}

	// Upper case the level since that's required by the filter.
	logLevel = strings.ToUpper(logLevel)

	// Create a level filter and flusher.
	filter := logger.LevelFilter()
	filter.MinLevel = logutils.LogLevel(logLevel)
	if !logger.ValidateLevelFilter(filter.MinLevel, filter) {
		resp.WriteHeader(400)
		resp.Write([]byte(fmt.Sprintf("Unknown log level: %s", filter.MinLevel)))
		return nil, nil
	}
	flusher, ok := resp.(http.Flusher)
	if !ok {
		return nil, fmt.Errorf("Streaming not supported")
	}

	// Set up a log handler.
	handler := &httpLogHandler{
		filter: filter,
		logCh:  make(chan string, 512),
		logger: s.logger,
	}
	s.agent.logWriter.RegisterHandler(handler)
	defer s.agent.logWriter.DeregisterHandler(handler)
	notify := resp.(http.CloseNotifier).CloseNotify()

	// Stream logs until the connection is closed.
	for {
		select {
		case <-notify:
			s.agent.logWriter.DeregisterHandler(handler)
			if handler.droppedCount > 0 {
				s.agent.logger.Printf("[WARN] agent: Dropped %d logs during monitor request", handler.droppedCount)
			}
			return nil, nil
		case log := <-handler.logCh:
			resp.Write([]byte(log + "\n"))
			flusher.Flush()
		}
	}
}
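
AgentMonitor only registers the handler and drains its channel; the actual fan-out happens in httpLogHandler's HandleLog callback, which is not shown above. A plausible sketch follows, assuming the log writer invokes HandleLog(string) for each line and must never block while delivering (it would sit in the same package as the endpoint).

// HandleLog is called by the log writer for every log line. It filters by the
// level chosen in AgentMonitor and does a non-blocking send so that a slow
// HTTP client cannot stall the writer; dropped lines are only counted here.
func (h *httpLogHandler) HandleLog(log string) {
	// Check the line against the level filter built in AgentMonitor.
	if !h.filter.Check([]byte(log)) {
		return
	}

	// Non-blocking send toward the streaming loop; drop if the channel is full.
	select {
	case h.logCh <- log:
	default:
		h.droppedCount++
	}
}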
Code example #4
File: agent_endpoint.go  Project: luizbafilho/fusis
func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Only GET supported
	if req.Method != "GET" {
		resp.WriteHeader(405)
		return nil, nil
	}

	var args structs.DCSpecificRequest
	args.Datacenter = s.agent.config.Datacenter
	s.parseToken(req, &args.Token)
	// Validate that the given token has operator permissions
	var reply structs.RaftConfigurationResponse
	if err := s.agent.RPC("Operator.RaftGetConfiguration", &args, &reply); err != nil {
		return nil, err
	}

	// Get the provided loglevel
	logLevel := req.URL.Query().Get("loglevel")
	if logLevel == "" {
		logLevel = "INFO"
	}

	// Upper case the log level
	logLevel = strings.ToUpper(logLevel)

	// Create a level filter
	filter := logger.LevelFilter()
	filter.MinLevel = logutils.LogLevel(logLevel)
	if !logger.ValidateLevelFilter(filter.MinLevel, filter) {
		resp.WriteHeader(400)
		resp.Write([]byte(fmt.Sprintf("Unknown log level: %s", filter.MinLevel)))
		return nil, nil
	}

	flusher, ok := resp.(http.Flusher)
	if !ok {
		return nil, fmt.Errorf("Streaming not supported")
	}

	// Set up a log handler
	handler := &httpLogHandler{
		filter: filter,
		logCh:  make(chan string, 512),
		logger: s.logger,
	}
	s.agent.logWriter.RegisterHandler(handler)
	defer s.agent.logWriter.DeregisterHandler(handler)

	notify := resp.(http.CloseNotifier).CloseNotify()

	// Stream logs until the connection is closed
	for {
		select {
		case <-notify:
			s.agent.logWriter.DeregisterHandler(handler)
			if handler.droppedCount > 0 {
				s.agent.logger.Printf("[WARN] agent: Dropped %d logs during monitor request", handler.droppedCount)
			}
			return nil, nil
		case log := <-handler.logCh:
			resp.Write([]byte(log + "\n"))
			flusher.Flush()
		}
	}
}
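
From the client side this endpoint is consumed as a long-lived streaming HTTP response. Below is a minimal Go sketch of reading that stream, assuming the handler is mounted at /v1/agent/monitor on the default agent address 127.0.0.1:8500; neither the path nor the port appears in the snippets above, so adjust them to your setup.

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Assumed endpoint and address; adjust to wherever AgentMonitor is mounted.
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/monitor?loglevel=DEBUG")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %s", resp.Status)
	}

	// The handler writes one log line at a time and flushes, so a line
	// scanner is enough to read the stream until the server closes it.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}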