// childEnv creates a map of environment variables for child processes to have // access to configurations in Consul Template's configuration. func (r *Runner) childEnv() []string { var m = make(map[string]string) if config.StringPresent(r.config.Consul.Address) { m["CONSUL_HTTP_ADDR"] = config.StringVal(r.config.Consul.Address) } if config.BoolVal(r.config.Consul.Auth.Enabled) { m["CONSUL_HTTP_AUTH"] = r.config.Consul.Auth.String() } m["CONSUL_HTTP_SSL"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Enabled)) m["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Verify)) if config.StringPresent(r.config.Vault.Address) { m["VAULT_ADDR"] = config.StringVal(r.config.Vault.Address) } if !config.BoolVal(r.config.Vault.SSL.Verify) { m["VAULT_SKIP_VERIFY"] = "true" } if config.StringPresent(r.config.Vault.SSL.Cert) { m["VAULT_CLIENT_CERT"] = config.StringVal(r.config.Vault.SSL.Cert) } if config.StringPresent(r.config.Vault.SSL.Key) { m["VAULT_CLIENT_KEY"] = config.StringVal(r.config.Vault.SSL.Key) } if config.StringPresent(r.config.Vault.SSL.CaPath) { m["VAULT_CAPATH"] = config.StringVal(r.config.Vault.SSL.CaPath) } if config.StringPresent(r.config.Vault.SSL.CaCert) { m["VAULT_CACERT"] = config.StringVal(r.config.Vault.SSL.CaCert) } if config.StringPresent(r.config.Vault.SSL.ServerName) { m["VAULT_TLS_SERVER_NAME"] = config.StringVal(r.config.Vault.SSL.ServerName) } // Append runner-supplied env (this is supplied programatically). for k, v := range r.Env { m[k] = v } e := make([]string, 0, len(m)) for k, v := range m { e = append(e, k+"="+v) } return e }
func (cli *CLI) setup(conf *config.Config) (*config.Config, error) { if err := logging.Setup(&logging.Config{ Name: Name, Level: config.StringVal(conf.LogLevel), Syslog: config.BoolVal(conf.Syslog.Enabled), SyslogFacility: config.StringVal(conf.Syslog.Facility), Writer: cli.errStream, }); err != nil { return nil, err } return conf, nil }
// newWatcher creates a new watcher. func newWatcher(c *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) { log.Printf("[INFO] (runner) creating watcher") w, err := watch.NewWatcher(&watch.NewWatcherInput{ Clients: clients, MaxStale: config.TimeDurationVal(c.MaxStale), Once: once, RenewVault: config.StringPresent(c.Vault.Token) && config.BoolVal(c.Vault.RenewToken), RetryFuncConsul: watch.RetryFunc(c.Consul.Retry.RetryFunc()), // TODO: Add a sane default retry - right now this only affects "local" // dependencies like reading a file from disk. RetryFuncDefault: nil, RetryFuncVault: watch.RetryFunc(c.Vault.Retry.RetryFunc()), }) if err != nil { return nil, errors.Wrap(err, "runner") } return w, nil }
// newClientSet creates a new client set from the given config. func newClientSet(c *config.Config) (*dep.ClientSet, error) { clients := dep.NewClientSet() if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{ Address: config.StringVal(c.Consul.Address), Token: config.StringVal(c.Consul.Token), AuthEnabled: config.BoolVal(c.Consul.Auth.Enabled), AuthUsername: config.StringVal(c.Consul.Auth.Username), AuthPassword: config.StringVal(c.Consul.Auth.Password), SSLEnabled: config.BoolVal(c.Consul.SSL.Enabled), SSLVerify: config.BoolVal(c.Consul.SSL.Verify), SSLCert: config.StringVal(c.Consul.SSL.Cert), SSLKey: config.StringVal(c.Consul.SSL.Key), SSLCACert: config.StringVal(c.Consul.SSL.CaCert), SSLCAPath: config.StringVal(c.Consul.SSL.CaPath), ServerName: config.StringVal(c.Consul.SSL.ServerName), }); err != nil { return nil, fmt.Errorf("runner: %s", err) } if err := clients.CreateVaultClient(&dep.CreateVaultClientInput{ Address: config.StringVal(c.Vault.Address), Token: config.StringVal(c.Vault.Token), UnwrapToken: config.BoolVal(c.Vault.UnwrapToken), SSLEnabled: config.BoolVal(c.Vault.SSL.Enabled), SSLVerify: config.BoolVal(c.Vault.SSL.Verify), SSLCert: config.StringVal(c.Vault.SSL.Cert), SSLKey: config.StringVal(c.Vault.SSL.Key), SSLCACert: config.StringVal(c.Vault.SSL.CaCert), SSLCAPath: config.StringVal(c.Vault.SSL.CaPath), ServerName: config.StringVal(c.Vault.SSL.ServerName), }); err != nil { return nil, fmt.Errorf("runner: %s", err) } return clients, nil }
// init() creates the Runner's underlying data structures and returns an error // if any problems occur. func (r *Runner) init() error { // Ensure default configuration values r.config = config.DefaultConfig().Merge(r.config) // Print the final config for debugging result, err := json.Marshal(r.config) if err != nil { return err } log.Printf("[DEBUG] (runner) final config: %s", result) // Create the clientset clients, err := newClientSet(r.config) if err != nil { return fmt.Errorf("runner: %s", err) } // Create the watcher watcher, err := newWatcher(r.config, clients, r.once) if err != nil { return fmt.Errorf("runner: %s", err) } r.watcher = watcher numTemplates := len(*r.config.Templates) templates := make([]*template.Template, 0, numTemplates) ctemplatesMap := make(map[string]config.TemplateConfigs) // Iterate over each TemplateConfig, creating a new Template resource for each // entry. Templates are parsed and saved, and a map of templates to their // config templates is kept so templates can lookup their commands and output // destinations. for _, ctmpl := range *r.config.Templates { tmpl, err := template.NewTemplate(&template.NewTemplateInput{ Source: config.StringVal(ctmpl.Source), Contents: config.StringVal(ctmpl.Contents), LeftDelim: config.StringVal(ctmpl.LeftDelim), RightDelim: config.StringVal(ctmpl.RightDelim), }) if err != nil { return err } if _, ok := ctemplatesMap[tmpl.ID()]; !ok { templates = append(templates, tmpl) } if _, ok := ctemplatesMap[tmpl.ID()]; !ok { ctemplatesMap[tmpl.ID()] = make([]*config.TemplateConfig, 0, 1) } ctemplatesMap[tmpl.ID()] = append(ctemplatesMap[tmpl.ID()], ctmpl) } // Convert the map of templates (which was only used to ensure uniqueness) // back into an array of templates. 
r.templates = templates r.renderEvents = make(map[string]*RenderEvent, numTemplates) r.dependencies = make(map[string]dep.Dependency) r.renderedCh = make(chan struct{}, 1) r.ctemplatesMap = ctemplatesMap r.inStream = os.Stdin r.outStream = os.Stdout r.errStream = os.Stderr r.brain = template.NewBrain() r.ErrCh = make(chan error) r.DoneCh = make(chan struct{}) r.quiescenceMap = make(map[string]*quiescence) r.quiescenceCh = make(chan *template.Template) // Setup the dedup manager if needed. This is if config.BoolVal(r.config.Dedup.Enabled) { if r.once { log.Printf("[INFO] (runner) disabling de-duplication in once mode") } else { r.dedup, err = NewDedupManager(r.config.Dedup, clients, r.brain, r.templates) if err != nil { return err } } } return nil }
// Run iterates over each template in this Runner and conditionally executes
// the template rendering and command execution.
//
// The template is rendered atomically. If and only if the template render
// completes successfully, the optional commands will be executed, if given.
// Please note that all templates are rendered **and then** any commands are
// executed.
func (r *Runner) Run() error {
	log.Printf("[INFO] (runner) initiating run")

	var wouldRenderAny, renderedAny bool
	var commands []*config.TemplateConfig
	depsMap := make(map[string]dep.Dependency)

	for _, tmpl := range r.templates {
		log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID())

		// Create the event that records the outcome of this template's run;
		// it is stored into r.renderEvents at the bottom of the loop.
		event := &RenderEvent{
			Template:        tmpl,
			TemplateConfigs: r.templateConfigsFor(tmpl),
		}

		// Check if we are currently the leader instance (always true when
		// de-duplication is disabled).
		isLeader := true
		if r.dedup != nil {
			isLeader = r.dedup.IsLeader(tmpl)
		}

		// If we are in once mode and this template was already rendered, move
		// onto the next one. We do not want to re-render the template if we are
		// in once mode, and we certainly do not want to re-run any commands.
		if r.once {
			r.renderEventsLock.RLock()
			_, rendered := r.renderEvents[tmpl.ID()]
			r.renderEventsLock.RUnlock()
			if rendered {
				log.Printf("[DEBUG] (runner) once mode and already rendered")
				continue
			}
		}

		// Attempt to render the template, returning any missing dependencies and
		// the rendered contents. If there are any missing dependencies, the
		// contents cannot be rendered or trusted!
		result, err := tmpl.Execute(&template.ExecuteInput{
			Brain: r.brain,
			Env:   r.childEnv(),
		})
		if err != nil {
			return errors.Wrap(err, tmpl.Source())
		}

		// Grab the list of used and missing dependencies.
		missing, used := result.Missing, result.Used

		// Add the dependency to the list of dependencies for this runner.
		for _, d := range used.List() {
			// If we've taken over leadership for a template, we may have data
			// that is cached, but not have the watcher. We must treat this as
			// missing so that we create the watcher and re-run the template.
			if isLeader && !r.watcher.Watching(d) {
				missing.Add(d)
			}
			if _, ok := depsMap[d.String()]; !ok {
				depsMap[d.String()] = d
			}
		}

		// Diff any missing dependencies the template reported with dependencies
		// the watcher is watching.
		unwatched := new(dep.Set)
		for _, d := range missing.List() {
			if !r.watcher.Watching(d) {
				unwatched.Add(d)
			}
		}

		// If there are unwatched dependencies, start the watcher and move onto the
		// next one.
		if l := unwatched.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) was not watching %d dependencies", l)
			for _, d := range unwatched.List() {
				// If we are deduplicating, we must still handle non-sharable
				// dependencies, since those will be ignored.
				if isLeader || !d.CanShare() {
					r.watcher.Add(d)
				}
			}
			continue
		}

		// If the template is missing data for some dependencies then we are not
		// ready to render and need to move on to the next one.
		if l := missing.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) missing data for %d dependencies", l)
			continue
		}

		// Trigger an update of the de-duplication manager
		if r.dedup != nil && isLeader {
			if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil {
				log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err)
			}
		}

		// Update event information with dependencies.
		event.MissingDeps = missing
		event.UnwatchedDeps = unwatched
		event.UsedDeps = used

		// If quiescence is activated, start/update the timers and loop back around.
		// We do not want to render the templates yet.
		if q, ok := r.quiescenceMap[tmpl.ID()]; ok {
			q.tick()
			continue
		}

		// For each template configuration that is tied to this template, attempt to
		// render it to disk and accumulate commands for later use.
		for _, templateConfig := range r.templateConfigsFor(tmpl) {
			log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display())

			// Render the template, taking dry mode into account
			result, err := Render(&RenderInput{
				Backup:    config.BoolVal(templateConfig.Backup),
				Contents:  result.Output,
				Dry:       r.dry,
				DryStream: r.outStream,
				Path:      config.StringVal(templateConfig.Destination),
				Perms:     config.FileModeVal(templateConfig.Perms),
			})
			if err != nil {
				return errors.Wrap(err, "error rendering "+templateConfig.Display())
			}

			// If we would have rendered this template (but we did not because the
			// contents were the same or something), we should consider this template
			// rendered even though the contents on disk have not been updated. We
			// will not fire commands unless the template was _actually_ rendered to
			// disk though.
			if result.WouldRender {
				// This event would have rendered
				event.WouldRender = true

				// Record that at least one template would have been rendered.
				wouldRenderAny = true
			}

			// If we _actually_ rendered the template to disk, we want to run the
			// appropriate commands.
			if result.DidRender {
				log.Printf("[INFO] (runner) rendered %s", templateConfig.Display())

				// This event did render
				event.DidRender = true

				// Record that at least one template was rendered.
				renderedAny = true

				if !r.dry {
					// If the template was rendered (changed) and we are not in dry-run mode,
					// aggregate commands, ignoring previously known commands
					//
					// Future-self Q&A: Why not use a map for the commands instead of an
					// array with an expensive lookup option? Well I'm glad you asked that
					// future-self! One of the API promises is that commands are executed
					// in the order in which they are provided in the TemplateConfig
					// definitions. If we inserted commands into a map, we would lose that
					// relative ordering and people would be unhappy.

					// if config.StringPresent(ctemplate.Command)
					if c := config.StringVal(templateConfig.Exec.Command); c != "" {
						existing := findCommand(templateConfig, commands)
						if existing != nil {
							log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)",
								c, templateConfig.Display(), existing.Display())
						} else {
							log.Printf("[DEBUG] (runner) appending command %q from %s",
								c, templateConfig.Display())
							commands = append(commands, templateConfig)
						}
					}
				}
			}
		}

		// Send updated render event
		r.renderEventsLock.Lock()
		event.UpdatedAt = time.Now().UTC()
		r.renderEvents[tmpl.ID()] = event
		r.renderEventsLock.Unlock()
	}

	// Check if we need to deliver any rendered signals
	if wouldRenderAny || renderedAny {
		// Send the signal that a template got rendered; the non-blocking send
		// coalesces signals when a previous one is still pending.
		select {
		case r.renderedCh <- struct{}{}:
		default:
		}
	}

	// Perform the diff and update the known dependencies.
	r.diffAndUpdateDeps(depsMap)

	// Execute each command in sequence, collecting any errors that occur - this
	// ensures all commands execute at least once.
	var errs []error
	for _, t := range commands {
		command := config.StringVal(t.Exec.Command)
		log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display())

		// Runner-derived environment entries precede the command's own custom
		// entries so the command config can override them.
		env := t.Exec.Env.Copy()
		env.Custom = append(r.childEnv(), env.Custom...)
		if _, err := spawnChild(&spawnChildInput{
			Stdin:        r.inStream,
			Stdout:       r.outStream,
			Stderr:       r.errStream,
			Command:      command,
			Env:          env.Env(),
			Timeout:      config.TimeDurationVal(t.Exec.Timeout),
			ReloadSignal: config.SignalVal(t.Exec.ReloadSignal),
			KillSignal:   config.SignalVal(t.Exec.KillSignal),
			KillTimeout:  config.TimeDurationVal(t.Exec.KillTimeout),
			Splay:        config.TimeDurationVal(t.Exec.Splay),
		}); err != nil {
			s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display())
			errs = append(errs, errors.Wrap(err, s))
		}
	}

	// If we got this far and have a child process, we need to send the reload
	// signal to the child process.
	if renderedAny && r.child != nil {
		r.childLock.RLock()
		if err := r.child.Reload(); err != nil {
			errs = append(errs, err)
		}
		r.childLock.RUnlock()
	}

	// If any errors were returned, convert them to an ErrorList for human
	// readability.
	if len(errs) != 0 {
		var result *multierror.Error
		for _, err := range errs {
			result = multierror.Append(result, err)
		}
		return result.ErrorOrNil()
	}

	return nil
}