Example #1
// findCommand searches the list of template configs for one whose command
// matches the given config's command, returning it if it exists.
func findCommand(c *config.TemplateConfig, templates []*config.TemplateConfig) *config.TemplateConfig {
	needle := config.StringVal(c.Exec.Command)
	for _, t := range templates {
		if needle == config.StringVal(t.Exec.Command) {
			return t
		}
	}
	return nil
}
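
The helper above is a linear scan used purely to keep the command list free of duplicates while preserving order. A minimal, self-contained sketch of the same pattern using plain strings (the dedupe helper below is hypothetical and only illustrates the idea):

package main

import "fmt"

// dedupe keeps each command only the first time it appears, preserving the
// original ordering, which is the same property findCommand provides for
// template configs.
func dedupe(commands []string) []string {
	var out []string
	for _, c := range commands {
		seen := false
		for _, existing := range out {
			if existing == c {
				seen = true
				break
			}
		}
		if !seen {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"reload nginx", "reload nginx", "restart app"}))
	// Output: [reload nginx restart app]
}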
Example #2
func (cli *CLI) setup(conf *config.Config) (*config.Config, error) {
	if err := logging.Setup(&logging.Config{
		Name:           Name,
		Level:          config.StringVal(conf.LogLevel),
		Syslog:         config.BoolVal(conf.Syslog.Enabled),
		SyslogFacility: config.StringVal(conf.Syslog.Facility),
		Writer:         cli.errStream,
	}); err != nil {
		return nil, err
	}

	return conf, nil
}
Example #3
// childEnv creates the set of environment variables that expose values from
// Consul Template's configuration to child processes.
func (r *Runner) childEnv() []string {
	var m = make(map[string]string)

	if config.StringPresent(r.config.Consul.Address) {
		m["CONSUL_HTTP_ADDR"] = config.StringVal(r.config.Consul.Address)
	}

	if config.BoolVal(r.config.Consul.Auth.Enabled) {
		m["CONSUL_HTTP_AUTH"] = r.config.Consul.Auth.String()
	}

	m["CONSUL_HTTP_SSL"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Enabled))
	m["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Verify))

	if config.StringPresent(r.config.Vault.Address) {
		m["VAULT_ADDR"] = config.StringVal(r.config.Vault.Address)
	}

	if !config.BoolVal(r.config.Vault.SSL.Verify) {
		m["VAULT_SKIP_VERIFY"] = "true"
	}

	if config.StringPresent(r.config.Vault.SSL.Cert) {
		m["VAULT_CLIENT_CERT"] = config.StringVal(r.config.Vault.SSL.Cert)
	}

	if config.StringPresent(r.config.Vault.SSL.Key) {
		m["VAULT_CLIENT_KEY"] = config.StringVal(r.config.Vault.SSL.Key)
	}

	if config.StringPresent(r.config.Vault.SSL.CaPath) {
		m["VAULT_CAPATH"] = config.StringVal(r.config.Vault.SSL.CaPath)
	}

	if config.StringPresent(r.config.Vault.SSL.CaCert) {
		m["VAULT_CACERT"] = config.StringVal(r.config.Vault.SSL.CaCert)
	}

	if config.StringPresent(r.config.Vault.SSL.ServerName) {
		m["VAULT_TLS_SERVER_NAME"] = config.StringVal(r.config.Vault.SSL.ServerName)
	}

	// Append runner-supplied env (this is supplied programmatically).
	for k, v := range r.Env {
		m[k] = v
	}

	e := make([]string, 0, len(m))
	for k, v := range m {
		e = append(e, k+"="+v)
	}
	return e
}
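
A hedged sketch of how a KEY=value slice like the one childEnv returns is typically attached to a child process with the standard library's os/exec package; the command and the extra variable below are placeholders, not part of Consul Template:

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Start from the parent environment and append runner-style variables.
	env := append(os.Environ(), "CONSUL_HTTP_ADDR=127.0.0.1:8500")

	cmd := exec.Command("env") // placeholder command that prints its environment
	cmd.Env = env
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}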
Example #4
// storePid is used to write out a PID file to disk.
func (r *Runner) storePid() error {
	path := config.StringVal(r.config.PidFile)
	if path == "" {
		return nil
	}

	log.Printf("[INFO] creating pid file at %q", path)

	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
	if err != nil {
		return fmt.Errorf("runner: could not open pid file: %s", err)
	}
	defer f.Close()

	pid := os.Getpid()
	_, err = f.WriteString(fmt.Sprintf("%d", pid))
	if err != nil {
		return fmt.Errorf("runner: could not write to pid file: %s", err)
	}
	return nil
}
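
For comparison, a minimal standalone sketch that writes the current PID with os.WriteFile instead of an explicit OpenFile/WriteString pair; the path below is a hypothetical example, not the runner's configured PidFile:

package main

import (
	"log"
	"os"
	"strconv"
)

func main() {
	path := "/tmp/example.pid" // hypothetical path, for illustration only

	// 0644 is subject to the process umask, just like the 0666 mode used above.
	if err := os.WriteFile(path, []byte(strconv.Itoa(os.Getpid())), 0o644); err != nil {
		log.Fatalf("could not write pid file: %s", err)
	}
}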
Example #5
// deletePid is used to remove the PID file on exit.
func (r *Runner) deletePid() error {
	path := config.StringVal(r.config.PidFile)
	if path == "" {
		return nil
	}

	log.Printf("[DEBUG] removing pid file at %q", path)

	stat, err := os.Stat(path)
	if err != nil {
		return fmt.Errorf("runner: could not remove pid file: %s", err)
	}
	if stat.IsDir() {
		return fmt.Errorf("runner: specified pid file path is a directory")
	}

	err = os.Remove(path)
	if err != nil {
		return fmt.Errorf("runner: could not remove pid file: %s", err)
	}
	return nil
}
Example #6
// init() creates the Runner's underlying data structures and returns an error
// if any problems occur.
func (r *Runner) init() error {
	// Ensure default configuration values
	r.config = config.DefaultConfig().Merge(r.config)

	// Print the final config for debugging
	result, err := json.Marshal(r.config)
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] (runner) final config: %s", result)

	// Create the clientset
	clients, err := newClientSet(r.config)
	if err != nil {
		return fmt.Errorf("runner: %s", err)
	}

	// Create the watcher
	watcher, err := newWatcher(r.config, clients, r.once)
	if err != nil {
		return fmt.Errorf("runner: %s", err)
	}
	r.watcher = watcher

	numTemplates := len(*r.config.Templates)
	templates := make([]*template.Template, 0, numTemplates)
	ctemplatesMap := make(map[string]config.TemplateConfigs)

	// Iterate over each TemplateConfig, creating a new Template resource for each
	// entry. Templates are parsed and saved, and a map of templates to their
	// config templates is kept so templates can look up their commands and output
	// destinations.
	for _, ctmpl := range *r.config.Templates {
		tmpl, err := template.NewTemplate(&template.NewTemplateInput{
			Source:     config.StringVal(ctmpl.Source),
			Contents:   config.StringVal(ctmpl.Contents),
			LeftDelim:  config.StringVal(ctmpl.LeftDelim),
			RightDelim: config.StringVal(ctmpl.RightDelim),
		})
		if err != nil {
			return err
		}

		if _, ok := ctemplatesMap[tmpl.ID()]; !ok {
			templates = append(templates, tmpl)
		}

		if _, ok := ctemplatesMap[tmpl.ID()]; !ok {
			ctemplatesMap[tmpl.ID()] = make([]*config.TemplateConfig, 0, 1)
		}
		ctemplatesMap[tmpl.ID()] = append(ctemplatesMap[tmpl.ID()], ctmpl)
	}

	// Save the de-duplicated slice of templates (the map above was only used
	// to ensure uniqueness).
	r.templates = templates

	r.renderEvents = make(map[string]*RenderEvent, numTemplates)
	r.dependencies = make(map[string]dep.Dependency)

	r.renderedCh = make(chan struct{}, 1)

	r.ctemplatesMap = ctemplatesMap
	r.inStream = os.Stdin
	r.outStream = os.Stdout
	r.errStream = os.Stderr
	r.brain = template.NewBrain()

	r.ErrCh = make(chan error)
	r.DoneCh = make(chan struct{})

	r.quiescenceMap = make(map[string]*quiescence)
	r.quiescenceCh = make(chan *template.Template)

	// Set up the de-duplication manager if needed. It is disabled in once mode.
	if config.BoolVal(r.config.Dedup.Enabled) {
		if r.once {
			log.Printf("[INFO] (runner) disabling de-duplication in once mode")
		} else {
			r.dedup, err = NewDedupManager(r.config.Dedup, clients, r.brain, r.templates)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
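
The template bookkeeping in init boils down to grouping configs by a key while preserving the first-seen order of unique keys. A self-contained sketch of that pattern with plain strings (all names below are illustrative):

package main

import "fmt"

func main() {
	configs := []struct{ ID, Dest string }{
		{"tmpl-a", "/etc/a.conf"},
		{"tmpl-b", "/etc/b.conf"},
		{"tmpl-a", "/etc/a-copy.conf"},
	}

	var order []string            // unique IDs, in first-seen order
	byID := map[string][]string{} // ID -> destinations

	for _, c := range configs {
		if _, ok := byID[c.ID]; !ok {
			order = append(order, c.ID)
		}
		byID[c.ID] = append(byID[c.ID], c.Dest)
	}

	for _, id := range order {
		fmt.Println(id, byID[id])
	}
	// Output:
	// tmpl-a [/etc/a.conf /etc/a-copy.conf]
	// tmpl-b [/etc/b.conf]
}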
Example #7
// Run iterates over each template in this Runner and conditionally executes
// the template rendering and command execution.
//
// The template is rendered atomically. If and only if the template render
// completes successfully, the optional commands will be executed, if given.
// Please note that all templates are rendered **and then** any commands are
// executed.
func (r *Runner) Run() error {
	log.Printf("[INFO] (runner) initiating run")

	var wouldRenderAny, renderedAny bool
	var commands []*config.TemplateConfig
	depsMap := make(map[string]dep.Dependency)

	for _, tmpl := range r.templates {
		log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID())

		// Create the event
		event := &RenderEvent{
			Template:        tmpl,
			TemplateConfigs: r.templateConfigsFor(tmpl),
		}

		// Check if we are currently the leader instance
		isLeader := true
		if r.dedup != nil {
			isLeader = r.dedup.IsLeader(tmpl)
		}

		// If we are in once mode and this template was already rendered, move
		// onto the next one. We do not want to re-render the template if we are
		// in once mode, and we certainly do not want to re-run any commands.
		if r.once {
			r.renderEventsLock.RLock()
			_, rendered := r.renderEvents[tmpl.ID()]
			r.renderEventsLock.RUnlock()
			if rendered {
				log.Printf("[DEBUG] (runner) once mode and already rendered")
				continue
			}
		}

		// Attempt to render the template, returning any missing dependencies and
		// the rendered contents. If there are any missing dependencies, the
		// contents cannot be rendered or trusted!
		result, err := tmpl.Execute(&template.ExecuteInput{
			Brain: r.brain,
			Env:   r.childEnv(),
		})
		if err != nil {
			return errors.Wrap(err, tmpl.Source())
		}

		// Grab the list of used and missing dependencies.
		missing, used := result.Missing, result.Used

		// Add the dependency to the list of dependencies for this runner.
		for _, d := range used.List() {
			// If we've taken over leadership for a template, we may have data
			// that is cached, but not have the watcher. We must treat this as
			// missing so that we create the watcher and re-run the template.
			if isLeader && !r.watcher.Watching(d) {
				missing.Add(d)
			}
			if _, ok := depsMap[d.String()]; !ok {
				depsMap[d.String()] = d
			}
		}

		// Diff any missing dependencies the template reported with dependencies
		// the watcher is watching.
		unwatched := new(dep.Set)
		for _, d := range missing.List() {
			if !r.watcher.Watching(d) {
				unwatched.Add(d)
			}
		}

		// If there are unwatched dependencies, start the watcher and move onto the
		// next one.
		if l := unwatched.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) was not watching %d dependencies", l)
			for _, d := range unwatched.List() {
				// If we are deduplicating, we must still handle non-sharable
				// dependencies, since those will be ignored.
				if isLeader || !d.CanShare() {
					r.watcher.Add(d)
				}
			}
			continue
		}

		// If the template is missing data for some dependencies then we are not
		// ready to render and need to move on to the next one.
		if l := missing.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) missing data for %d dependencies", l)
			continue
		}

		// Trigger an update of the de-duplication manager
		if r.dedup != nil && isLeader {
			if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil {
				log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err)
			}
		}

		// Update event information with dependencies.
		event.MissingDeps = missing
		event.UnwatchedDeps = unwatched
		event.UsedDeps = used

		// If quiescence is activated, start/update the timers and loop back around.
		// We do not want to render the templates yet.
		if q, ok := r.quiescenceMap[tmpl.ID()]; ok {
			q.tick()
			continue
		}

		// For each template configuration that is tied to this template, attempt to
		// render it to disk and accumulate commands for later use.
		for _, templateConfig := range r.templateConfigsFor(tmpl) {
			log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display())

			// Render the template, taking dry mode into account
			result, err := Render(&RenderInput{
				Backup:    config.BoolVal(templateConfig.Backup),
				Contents:  result.Output,
				Dry:       r.dry,
				DryStream: r.outStream,
				Path:      config.StringVal(templateConfig.Destination),
				Perms:     config.FileModeVal(templateConfig.Perms),
			})
			if err != nil {
				return errors.Wrap(err, "error rendering "+templateConfig.Display())
			}

			// If we would have rendered this template (but we did not because the
			// contents were the same or something), we should consider this template
			// rendered even though the contents on disk have not been updated. We
			// will not fire commands unless the template was _actually_ rendered to
			// disk though.
			if result.WouldRender {
				// This event would have rendered
				event.WouldRender = true

				// Record that at least one template would have been rendered.
				wouldRenderAny = true
			}

			// If we _actually_ rendered the template to disk, we want to run the
			// appropriate commands.
			if result.DidRender {
				log.Printf("[INFO] (runner) rendered %s", templateConfig.Display())

				// This event did render
				event.DidRender = true

				// Record that at least one template was rendered.
				renderedAny = true

				if !r.dry {
					// If the template was rendered (changed) and we are not in dry-run mode,
					// aggregate commands, ignoring previously known commands
					//
					// Future-self Q&A: Why not use a map for the commands instead of an
					// array with an expensive lookup option? Well I'm glad you asked that
					// future-self! One of the API promises is that commands are executed
					// in the order in which they are provided in the TemplateConfig
					// definitions. If we inserted commands into a map, we would lose that
					// relative ordering and people would be unhappy.
					// if config.StringPresent(ctemplate.Command)
					if c := config.StringVal(templateConfig.Exec.Command); c != "" {
						existing := findCommand(templateConfig, commands)
						if existing != nil {
							log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)",
								c, templateConfig.Display(), existing.Display())
						} else {
							log.Printf("[DEBUG] (runner) appending command %q from %s",
								c, templateConfig.Display())
							commands = append(commands, templateConfig)
						}
					}
				}
			}
		}

		// Send updated render event
		r.renderEventsLock.Lock()
		event.UpdatedAt = time.Now().UTC()
		r.renderEvents[tmpl.ID()] = event
		r.renderEventsLock.Unlock()
	}

	// Check if we need to deliver any rendered signals
	if wouldRenderAny || renderedAny {
		// Send the signal that a template got rendered
		select {
		case r.renderedCh <- struct{}{}:
		default:
		}
	}

	// Perform the diff and update the known dependencies.
	r.diffAndUpdateDeps(depsMap)

	// Execute each command in sequence, collecting any errors that occur - this
	// ensures all commands execute at least once.
	var errs []error
	for _, t := range commands {
		command := config.StringVal(t.Exec.Command)
		log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display())
		env := t.Exec.Env.Copy()
		env.Custom = append(r.childEnv(), env.Custom...)
		if _, err := spawnChild(&spawnChildInput{
			Stdin:        r.inStream,
			Stdout:       r.outStream,
			Stderr:       r.errStream,
			Command:      command,
			Env:          env.Env(),
			Timeout:      config.TimeDurationVal(t.Exec.Timeout),
			ReloadSignal: config.SignalVal(t.Exec.ReloadSignal),
			KillSignal:   config.SignalVal(t.Exec.KillSignal),
			KillTimeout:  config.TimeDurationVal(t.Exec.KillTimeout),
			Splay:        config.TimeDurationVal(t.Exec.Splay),
		}); err != nil {
			s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display())
			errs = append(errs, errors.Wrap(err, s))
		}
	}

	// If we got this far and have a child process, we need to send the reload
	// signal to the child process.
	if renderedAny && r.child != nil {
		r.childLock.RLock()
		if err := r.child.Reload(); err != nil {
			errs = append(errs, err)
		}
		r.childLock.RUnlock()
	}

	// If any errors were returned, convert them to an ErrorList for human
	// readability.
	if len(errs) != 0 {
		var result *multierror.Error
		for _, err := range errs {
			result = multierror.Append(result, err)
		}
		return result.ErrorOrNil()
	}

	return nil
}
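
The renderedCh signal near the end of Run uses a common Go idiom: a buffered channel of capacity one combined with a non-blocking send, so any number of renders collapses into at most one pending notification. A standalone sketch of the idiom:

package main

import "fmt"

func notify(ch chan struct{}) {
	// Non-blocking send: if a signal is already pending, drop this one.
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	renderedCh := make(chan struct{}, 1)

	notify(renderedCh) // buffered
	notify(renderedCh) // coalesced into the pending signal

	<-renderedCh
	fmt.Println("received one coalesced signal")
}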
Example #8
// Start begins the polling for this runner. Any errors that occur will cause
// this function to push an item onto the runner's error channel and halt
// execution. This function is blocking and should be called as a goroutine.
func (r *Runner) Start() {
	log.Printf("[INFO] (runner) starting")

	// Create the pid before doing anything.
	if err := r.storePid(); err != nil {
		r.ErrCh <- err
		return
	}

	// Start the de-duplication manager
	var dedupCh <-chan struct{}
	if r.dedup != nil {
		if err := r.dedup.Start(); err != nil {
			r.ErrCh <- err
			return
		}
		dedupCh = r.dedup.UpdateCh()
	}

	// Set up the child process exit channel
	var childExitCh <-chan int

	// Fire an initial run to parse all the templates and setup the first-pass
	// dependencies. This also forces any templates that have no dependencies to
	// be rendered immediately (since they are already renderable).
	log.Printf("[DEBUG] (runner) running initial templates")
	if err := r.Run(); err != nil {
		r.ErrCh <- err
		return
	}

	for {
		// Enable quiescence for all templates if we have specified wait
		// intervals.
	NEXT_Q:
		for _, t := range r.templates {
			if _, ok := r.quiescenceMap[t.ID()]; ok {
				continue NEXT_Q
			}

			for _, c := range r.templateConfigsFor(t) {
				if *c.Wait.Enabled {
					log.Printf("[DEBUG] (runner) enabling template-specific quiescence for %q", t.ID())
					r.quiescenceMap[t.ID()] = newQuiescence(
						r.quiescenceCh, *c.Wait.Min, *c.Wait.Max, t)
					continue NEXT_Q
				}
			}

			if *r.config.Wait.Enabled {
				log.Printf("[DEBUG] (runner) enabling global quiescence for %q", t.ID())
				r.quiescenceMap[t.ID()] = newQuiescence(
					r.quiescenceCh, *r.config.Wait.Min, *r.config.Wait.Max, t)
				continue NEXT_Q
			}
		}

		// Warn the user if they are watching too many dependencies.
		if r.watcher.Size() > saneViewLimit {
			log.Printf("[WARN] (runner) watching %d dependencies - watching this "+
				"many dependencies could DDoS your consul cluster", r.watcher.Size())
		} else {
			log.Printf("[DEBUG] (runner) watching %d dependencies", r.watcher.Size())
		}

		if r.allTemplatesRendered() {
			// If an exec command was given and a command is not currently running,
			// spawn the child process for supervision.
			if config.StringPresent(r.config.Exec.Command) {
				// Lock the child because we are about to check if it exists.
				r.childLock.Lock()

				if r.child == nil {
					env := r.config.Exec.Env.Copy()
					env.Custom = append(r.childEnv(), env.Custom...)
					child, err := spawnChild(&spawnChildInput{
						Stdin:        r.inStream,
						Stdout:       r.outStream,
						Stderr:       r.errStream,
						Command:      config.StringVal(r.config.Exec.Command),
						Env:          env.Env(),
						ReloadSignal: config.SignalVal(r.config.Exec.ReloadSignal),
						KillSignal:   config.SignalVal(r.config.Exec.KillSignal),
						KillTimeout:  config.TimeDurationVal(r.config.Exec.KillTimeout),
						Splay:        config.TimeDurationVal(r.config.Exec.Splay),
					})
					if err != nil {
						r.ErrCh <- err
						r.childLock.Unlock()
						return
					}
					r.child = child
				}

				// Unlock the child, we are done now.
				r.childLock.Unlock()

				// It's possible that we didn't start a process, in which case no
				// channel is returned. If we did get a new exitCh, that means a child
				// was spawned, so we need to watch a new exitCh. It is also possible
				// that during a run, the child process was restarted, which means a
				// new exit channel should be used.
				nexitCh := r.child.ExitCh()
				if nexitCh != nil {
					childExitCh = nexitCh
				}
			}

			// If we are running in once mode and all our templates are rendered,
			// then we should exit here.
			if r.once {
				log.Printf("[INFO] (runner) once mode and all templates rendered")

				if r.child != nil {
					r.stopDedup()
					r.stopWatcher()

					log.Printf("[INFO] (runner) waiting for child process to exit")
					select {
					case c := <-childExitCh:
						log.Printf("[INFO] (runner) child process died")
						r.ErrCh <- NewErrChildDied(c)
						return
					case <-r.DoneCh:
					}
				}

				r.Stop()
				return
			}
		}

	OUTER:
		select {
		case view := <-r.watcher.DataCh():
			// Receive this update
			r.Receive(view.Dependency(), view.Data())

			// Drain all dependency data. Given a large number of dependencies, it is
			// feasible that we have data for more than one of them. Instead of
			// wasting CPU cycles rendering templates when we have more dependencies
			// waiting to be added to the brain, we drain the entire buffered channel
			// on the watcher before breaking back to the outer select to re-render.
			//
			// Please see https://github.com/hashicorp/consul-template/issues/168 for
			// more information about this optimization and the entire backstory.
			for {
				select {
				case view := <-r.watcher.DataCh():
					r.Receive(view.Dependency(), view.Data())
				default:
					break OUTER
				}
			}

		case <-dedupCh:
			// We may get triggered by the de-duplication manager for either a change
			// in leadership (acquired or lost lock), or an update of data for a template
			// that we are watching.
			log.Printf("[INFO] (runner) watcher triggered by de-duplication manager")
			break OUTER

		case err := <-r.watcher.ErrCh():
			// Push the error back up the stack
			log.Printf("[ERR] (runner) watcher reported error: %s", err)
			r.ErrCh <- err
			return

		case tmpl := <-r.quiescenceCh:
			// Remove the quiescence for this template from the map. This will force
			// the upcoming Run call to actually evaluate and render the template.
			log.Printf("[DEBUG] (runner) received template %q from quiescence", tmpl.ID())
			delete(r.quiescenceMap, tmpl.ID())

		case c := <-childExitCh:
			log.Printf("[INFO] (runner) child process died")
			r.ErrCh <- NewErrChildDied(c)
			return

		case <-r.DoneCh:
			log.Printf("[INFO] (runner) received finish")
			return
		}

		// If we got this far, that means we got new data or one of the timers
		// fired, so attempt to re-render.
		if err := r.Run(); err != nil {
			r.ErrCh <- err
			return
		}
	}
}
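
The OUTER label in Start implements a drain loop: after the first receive from the watcher's data channel, keep pulling whatever is already buffered, then break out of the enclosing select and re-render once. A self-contained sketch of the same labeled-break pattern (the channel contents are illustrative):

package main

import "fmt"

func main() {
	dataCh := make(chan int, 8)
	for i := 0; i < 5; i++ {
		dataCh <- i
	}

	received := 0

OUTER:
	select {
	case <-dataCh:
		received++
		// Drain everything already buffered before leaving the select, so
		// the work after the loop runs once instead of once per item.
		for {
			select {
			case <-dataCh:
				received++
			default:
				break OUTER
			}
		}
	}

	fmt.Println("drained", received, "items in one pass")
}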
Example #9
// newClientSet creates a new client set from the given config.
func newClientSet(c *config.Config) (*dep.ClientSet, error) {
	clients := dep.NewClientSet()

	if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{
		Address:      config.StringVal(c.Consul.Address),
		Token:        config.StringVal(c.Consul.Token),
		AuthEnabled:  config.BoolVal(c.Consul.Auth.Enabled),
		AuthUsername: config.StringVal(c.Consul.Auth.Username),
		AuthPassword: config.StringVal(c.Consul.Auth.Password),
		SSLEnabled:   config.BoolVal(c.Consul.SSL.Enabled),
		SSLVerify:    config.BoolVal(c.Consul.SSL.Verify),
		SSLCert:      config.StringVal(c.Consul.SSL.Cert),
		SSLKey:       config.StringVal(c.Consul.SSL.Key),
		SSLCACert:    config.StringVal(c.Consul.SSL.CaCert),
		SSLCAPath:    config.StringVal(c.Consul.SSL.CaPath),
		ServerName:   config.StringVal(c.Consul.SSL.ServerName),
	}); err != nil {
		return nil, fmt.Errorf("runner: %s", err)
	}

	if err := clients.CreateVaultClient(&dep.CreateVaultClientInput{
		Address:     config.StringVal(c.Vault.Address),
		Token:       config.StringVal(c.Vault.Token),
		UnwrapToken: config.BoolVal(c.Vault.UnwrapToken),
		SSLEnabled:  config.BoolVal(c.Vault.SSL.Enabled),
		SSLVerify:   config.BoolVal(c.Vault.SSL.Verify),
		SSLCert:     config.StringVal(c.Vault.SSL.Cert),
		SSLKey:      config.StringVal(c.Vault.SSL.Key),
		SSLCACert:   config.StringVal(c.Vault.SSL.CaCert),
		SSLCAPath:   config.StringVal(c.Vault.SSL.CaPath),
		ServerName:  config.StringVal(c.Vault.SSL.ServerName),
	}); err != nil {
		return nil, fmt.Errorf("runner: %s", err)
	}

	return clients, nil
}