Example 1
// Run iterates over each template in this Runner and conditionally executes
// the template rendering and command execution.
//
// The template is rendered atomically. If and only if the template render
// completes successfully, any configured commands will be executed.
// Please note that all templates are rendered **and then** any commands are
// executed.
func (r *Runner) Run() error {
	log.Printf("[INFO] (runner) initiating run")

	var wouldRenderAny, renderedAny bool
	var commands []*config.TemplateConfig
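	// depsMap accumulates every dependency used by any template during this
	// pass; it is diffed against the watcher's known dependencies once all
	// templates have been checked (see diffAndUpdateDeps below).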
	depsMap := make(map[string]dep.Dependency)

	for _, tmpl := range r.templates {
		log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID())

		// Create the event
		event := &RenderEvent{
			Template:        tmpl,
			TemplateConfigs: r.templateConfigsFor(tmpl),
		}

		// Check if we are currently the leader instance
		isLeader := true
		if r.dedup != nil {
			isLeader = r.dedup.IsLeader(tmpl)
		}

		// If we are in once mode and this template was already rendered, move
		// on to the next one. We do not want to re-render the template if we are
		// in once mode, and we certainly do not want to re-run any commands.
		if r.once {
			r.renderEventsLock.RLock()
			_, rendered := r.renderEvents[tmpl.ID()]
			r.renderEventsLock.RUnlock()
			if rendered {
				log.Printf("[DEBUG] (runner) once mode and already rendered")
				continue
			}
		}

		// Attempt to render the template, returning any missing dependencies and
		// the rendered contents. If there are any missing dependencies, the
		// contents cannot be rendered or trusted!
		result, err := tmpl.Execute(&template.ExecuteInput{
			Brain: r.brain,
			Env:   r.childEnv(),
		})
		if err != nil {
			return errors.Wrap(err, tmpl.Source())
		}

		// Grab the lists of missing and used dependencies.
		missing, used := result.Missing, result.Used

		// Add each used dependency to the set of dependencies for this runner.
		for _, d := range used.List() {
			// If we've taken over leadership for a template, we may have
			// cached data but no watcher for it. We must treat this as
			// missing so that we create the watcher and re-run the template.
			if isLeader && !r.watcher.Watching(d) {
				missing.Add(d)
			}
			if _, ok := depsMap[d.String()]; !ok {
				depsMap[d.String()] = d
			}
		}

		// Diff any missing dependencies the template reported with dependencies
		// the watcher is watching.
		unwatched := new(dep.Set)
		for _, d := range missing.List() {
			if !r.watcher.Watching(d) {
				unwatched.Add(d)
			}
		}

		// If there are unwatched dependencies, start watching them and move on
		// to the next template.
		if l := unwatched.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) was not watching %d dependencies", l)
			for _, d := range unwatched.List() {
				// If we are deduplicating, we must still watch non-sharable
				// dependencies locally, since the de-duplication manager
				// ignores them.
				if isLeader || !d.CanShare() {
					r.watcher.Add(d)
				}
			}
			continue
		}

		// If the template is missing data for some dependencies then we are not
		// ready to render and need to move on to the next one.
		if l := missing.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) missing data for %d dependencies", l)
			continue
		}

		// Trigger an update of the de-duplication manager.
		if r.dedup != nil && isLeader {
			if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil {
				log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err)
			}
		}

		// Update event information with dependencies.
		event.MissingDeps = missing
		event.UnwatchedDeps = unwatched
		event.UsedDeps = used

		// If quiescence is activated, start/update the timers and loop back
		// around. We do not want to render the templates yet; the quiescence
		// timer will push the template back through quiescenceCh once changes
		// have settled (see Start below).
		if q, ok := r.quiescenceMap[tmpl.ID()]; ok {
			q.tick()
			continue
		}

		// For each template configuration that is tied to this template, attempt to
		// render it to disk and accumulate commands for later use.
		for _, templateConfig := range r.templateConfigsFor(tmpl) {
			log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display())

			// Render the template, taking dry mode into account
			result, err := Render(&RenderInput{
				Backup:    config.BoolVal(templateConfig.Backup),
				Contents:  result.Output,
				Dry:       r.dry,
				DryStream: r.outStream,
				Path:      config.StringVal(templateConfig.Destination),
				Perms:     config.FileModeVal(templateConfig.Perms),
			})
			if err != nil {
				return errors.Wrap(err, "error rendering "+templateConfig.Display())
			}

			// If we would have rendered this template (but did not, for
			// example because the contents on disk already matched), we still
			// consider this template rendered even though the contents on disk
			// have not been updated. We will not fire commands unless the
			// template was _actually_ rendered to disk, though.
			if result.WouldRender {
				// This event would have rendered
				event.WouldRender = true

				// Record that at least one template would have been rendered.
				wouldRenderAny = true
			}

			// If we _actually_ rendered the template to disk, we want to run the
			// appropriate commands.
			if result.DidRender {
				log.Printf("[INFO] (runner) rendered %s", templateConfig.Display())

				// This event did render
				event.DidRender = true

				// Record that at least one template was rendered.
				renderedAny = true

				if !r.dry {
					// If the template was rendered (changed) and we are not in
					// dry-run mode, aggregate commands, ignoring previously
					// known commands.
					//
					// Future-self Q&A: Why not use a map for the commands
					// instead of an array with an expensive lookup? Well I'm
					// glad you asked that, future-self! One of the API
					// promises is that commands are executed in the order in
					// which they are provided in the TemplateConfig
					// definitions. If we inserted commands into a map, we
					// would lose that relative ordering and people would be
					// unhappy.
					if c := config.StringVal(templateConfig.Exec.Command); c != "" {
						existing := findCommand(templateConfig, commands)
						if existing != nil {
							log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)",
								c, templateConfig.Display(), existing.Display())
						} else {
							log.Printf("[DEBUG] (runner) appending command %q from %s",
								c, templateConfig.Display())
							commands = append(commands, templateConfig)
						}
					}
				}
			}
		}

		// Send updated render event
		r.renderEventsLock.Lock()
		event.UpdatedAt = time.Now().UTC()
		r.renderEvents[tmpl.ID()] = event
		r.renderEventsLock.Unlock()
	}

	// Check if we need to deliver any rendered signals
	if wouldRenderAny || renderedAny {
		// Non-blocking send: signal that a template got rendered, but do not
		// block if no one is currently listening.
		select {
		case r.renderedCh <- struct{}{}:
		default:
		}
	}

	// Perform the diff and update the known dependencies.
	r.diffAndUpdateDeps(depsMap)

	// Execute each command in sequence, collecting any errors that occur. This
	// ensures every command is attempted, even if an earlier one fails.
	var errs []error
	for _, t := range commands {
		command := config.StringVal(t.Exec.Command)
		log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display())
		env := t.Exec.Env.Copy()
		env.Custom = append(r.childEnv(), env.Custom...)
		if _, err := spawnChild(&spawnChildInput{
			Stdin:        r.inStream,
			Stdout:       r.outStream,
			Stderr:       r.errStream,
			Command:      command,
			Env:          env.Env(),
			Timeout:      config.TimeDurationVal(t.Exec.Timeout),
			ReloadSignal: config.SignalVal(t.Exec.ReloadSignal),
			KillSignal:   config.SignalVal(t.Exec.KillSignal),
			KillTimeout:  config.TimeDurationVal(t.Exec.KillTimeout),
			Splay:        config.TimeDurationVal(t.Exec.Splay),
		}); err != nil {
			s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display())
			errs = append(errs, errors.Wrap(err, s))
		}
	}

	// If at least one template was rendered and we have a child process, we
	// need to send the child the reload signal.
	if renderedAny && r.child != nil {
		r.childLock.RLock()
		if err := r.child.Reload(); err != nil {
			errs = append(errs, err)
		}
		r.childLock.RUnlock()
	}

	// If any errors were returned, combine them into a single multierror for
	// human readability.
	if len(errs) != 0 {
		var result *multierror.Error
		for _, err := range errs {
			result = multierror.Append(result, err)
		}
		return result.ErrorOrNil()
	}

	return nil
}
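
A note on the ordering logic above: the Future-self Q&A comment explains why commands are collected in a slice with a linear-scan lookup rather than a map. The sketch below shows one way such a lookup could work; it is an illustration only, assuming the de-duplication key is the command string itself, and is not necessarily how the real findCommand helper is implemented.

// findCommandSketch returns the first TemplateConfig in list whose configured
// command matches c's, or nil if none does. The linear scan preserves the
// slice's insertion order, which is what keeps the "commands run in config
// order" API promise intact.
func findCommandSketch(c *config.TemplateConfig, list []*config.TemplateConfig) *config.TemplateConfig {
	needle := config.StringVal(c.Exec.Command)
	for _, t := range list {
		if config.StringVal(t.Exec.Command) == needle {
			return t
		}
	}
	return nil
}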
Example 2
// Start begins the polling for this runner. Any errors that occur will cause
// this function to push an item onto the runner's error channel and halt
// execution. This function is blocking and should be called as a goroutine.
func (r *Runner) Start() {
	log.Printf("[INFO] (runner) starting")

	// Create the PID file before doing anything else.
	if err := r.storePid(); err != nil {
		r.ErrCh <- err
		return
	}

	// Start the de-duplication manager
	var dedupCh <-chan struct{}
	if r.dedup != nil {
		if err := r.dedup.Start(); err != nil {
			r.ErrCh <- err
			return
		}
		dedupCh = r.dedup.UpdateCh()
	}

	// Set up the child process exit channel. It stays nil until a child is
	// spawned; receiving from a nil channel blocks forever, so the
	// corresponding select case below never fires until then.
	var childExitCh <-chan int

	// Fire an initial run to parse all the templates and set up the first-pass
	// dependencies. This also forces any templates that have no dependencies to
	// be rendered immediately (since they are already renderable).
	log.Printf("[DEBUG] (runner) running initial templates")
	if err := r.Run(); err != nil {
		r.ErrCh <- err
		return
	}

	for {
		// Enable quiescence for all templates if we have specified wait
		// intervals.
	NEXT_Q:
		for _, t := range r.templates {
			if _, ok := r.quiescenceMap[t.ID()]; ok {
				continue NEXT_Q
			}

			for _, c := range r.templateConfigsFor(t) {
				if *c.Wait.Enabled {
					log.Printf("[DEBUG] (runner) enabling template-specific quiescence for %q", t.ID())
					r.quiescenceMap[t.ID()] = newQuiescence(
						r.quiescenceCh, *c.Wait.Min, *c.Wait.Max, t)
					continue NEXT_Q
				}
			}

			if *r.config.Wait.Enabled {
				log.Printf("[DEBUG] (runner) enabling global quiescence for %q", t.ID())
				r.quiescenceMap[t.ID()] = newQuiescence(
					r.quiescenceCh, *r.config.Wait.Min, *r.config.Wait.Max, t)
				continue NEXT_Q
			}
		}

		// Warn the user if they are watching too many dependencies.
		if r.watcher.Size() > saneViewLimit {
			log.Printf("[WARN] (runner) watching %d dependencies - watching this "+
				"many dependencies could DDoS your consul cluster", r.watcher.Size())
		} else {
			log.Printf("[DEBUG] (runner) watching %d dependencies", r.watcher.Size())
		}

		if r.allTemplatesRendered() {
			// If an exec command was given and a command is not currently running,
			// spawn the child process for supervision.
			if config.StringPresent(r.config.Exec.Command) {
				// Lock the child because we are about to check if it exists.
				r.childLock.Lock()

				if r.child == nil {
					env := r.config.Exec.Env.Copy()
					env.Custom = append(r.childEnv(), env.Custom...)
					child, err := spawnChild(&spawnChildInput{
						Stdin:        r.inStream,
						Stdout:       r.outStream,
						Stderr:       r.errStream,
						Command:      config.StringVal(r.config.Exec.Command),
						Env:          env.Env(),
						ReloadSignal: config.SignalVal(r.config.Exec.ReloadSignal),
						KillSignal:   config.SignalVal(r.config.Exec.KillSignal),
						KillTimeout:  config.TimeDurationVal(r.config.Exec.KillTimeout),
						Splay:        config.TimeDurationVal(r.config.Exec.Splay),
					})
					if err != nil {
						r.ErrCh <- err
						r.childLock.Unlock()
						return
					}
					r.child = child
				}

				// Unlock the child; we are done now.
				r.childLock.Unlock()

				// It's possible that we didn't start a process, in which case no
				// channel is returned. If we did get a new exitCh, that means a child
				// was spawned, so we need to watch a new exitCh. It is also possible
				// that during a run, the child process was restarted, which means a
				// new exit channel should be used.
				nexitCh := r.child.ExitCh()
				if nexitCh != nil {
					childExitCh = nexitCh
				}
			}

			// If we are running in once mode and all our templates are rendered,
			// then we should exit here.
			if r.once {
				log.Printf("[INFO] (runner) once mode and all templates rendered")

				if r.child != nil {
					r.stopDedup()
					r.stopWatcher()

					log.Printf("[INFO] (runner) waiting for child process to exit")
					select {
					case c := <-childExitCh:
						log.Printf("[INFO] (runner) child process died")
						r.ErrCh <- NewErrChildDied(c)
						return
					case <-r.DoneCh:
					}
				}

				r.Stop()
				return
			}
		}

	OUTER:
		select {
		case view := <-r.watcher.DataCh():
			// Receive this update
			r.Receive(view.Dependency(), view.Data())

			// Drain all dependency data. Given a large number of dependencies, it is
			// likely that we have data for more than one of them. Instead of
			// wasting CPU cycles rendering templates while more dependencies are
			// still waiting to be added to the brain, we drain the watcher's entire
			// buffered channel and only then break out of the parent select to
			// re-render.
			//
			// Please see https://github.com/hashicorp/consul-template/issues/168 for
			// more information about this optimization and the entire backstory.
			for {
				select {
				case view := <-r.watcher.DataCh():
					r.Receive(view.Dependency(), view.Data())
				default:
					break OUTER
				}
			}

		case <-dedupCh:
			// We may get triggered by the de-duplication manager for either a change
			// in leadership (acquired or lost lock), or an update of data for a template
			// that we are watching.
			log.Printf("[INFO] (runner) watcher triggered by de-duplication manager")
			break OUTER

		case err := <-r.watcher.ErrCh():
			// Push the error back up the stack
			log.Printf("[ERR] (runner) watcher reported error: %s", err)
			r.ErrCh <- err
			return

		case tmpl := <-r.quiescenceCh:
			// Remove the quiescence for this template from the map. This will force
			// the upcoming Run call to actually evaluate and render the template.
			log.Printf("[DEBUG] (runner) received template %q from quiescence", tmpl.ID())
			delete(r.quiescenceMap, tmpl.ID())

		case c := <-childExitCh:
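			// This case can only fire after a child has been spawned, because
			// childExitCh remains nil (and thus blocks forever) until then.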
			log.Printf("[INFO] (runner) child process died")
			r.ErrCh <- NewErrChildDied(c)
			return

		case <-r.DoneCh:
			log.Printf("[INFO] (runner) received finish")
			return
		}

		// If we got this far, that means we got new data or one of the timers
		// fired, so attempt to re-render.
		if err := r.Run(); err != nil {
			r.ErrCh <- err
			return
		}
	}
}
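
The drain loop in the DataCh case above is a reusable pattern: block for the first item, then opportunistically empty whatever is already buffered before doing the expensive work once for the whole batch. Below is a self-contained sketch of the same technique; the updates channel and the process/expensiveWork names are invented for illustration and stand in for the watcher's data channel, Runner.Receive, and Runner.Run respectively.

package main

import "fmt"

func main() {
	updates := make(chan int, 16)
	for i := 0; i < 5; i++ {
		updates <- i
	}

	// Block for the first update, then drain everything already buffered so
	// the expensive work below runs once per batch instead of once per item.
	process(<-updates)
DRAIN:
	for {
		select {
		case v := <-updates:
			process(v)
		default:
			break DRAIN
		}
	}
	expensiveWork()
}

// process stands in for Runner.Receive: record one piece of dependency data.
func process(v int) { fmt.Println("received", v) }

// expensiveWork stands in for Runner.Run: render templates for the batch.
func expensiveWork() { fmt.Println("rendering once for the whole batch") }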