// childEnv creates a map of environment variables for child processes to have
// access to configurations in Consul Template's configuration.
func (r *Runner) childEnv() []string {
	var m = make(map[string]string)

	if config.StringPresent(r.config.Consul.Address) {
		m["CONSUL_HTTP_ADDR"] = config.StringVal(r.config.Consul.Address)
	}

	if config.BoolVal(r.config.Consul.Auth.Enabled) {
		m["CONSUL_HTTP_AUTH"] = r.config.Consul.Auth.String()
	}

	m["CONSUL_HTTP_SSL"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Enabled))
	m["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Verify))

	if config.StringPresent(r.config.Vault.Address) {
		m["VAULT_ADDR"] = config.StringVal(r.config.Vault.Address)
	}

	if !config.BoolVal(r.config.Vault.SSL.Verify) {
		m["VAULT_SKIP_VERIFY"] = "true"
	}

	if config.StringPresent(r.config.Vault.SSL.Cert) {
		m["VAULT_CLIENT_CERT"] = config.StringVal(r.config.Vault.SSL.Cert)
	}

	if config.StringPresent(r.config.Vault.SSL.Key) {
		m["VAULT_CLIENT_KEY"] = config.StringVal(r.config.Vault.SSL.Key)
	}

	if config.StringPresent(r.config.Vault.SSL.CaPath) {
		m["VAULT_CAPATH"] = config.StringVal(r.config.Vault.SSL.CaPath)
	}

	if config.StringPresent(r.config.Vault.SSL.CaCert) {
		m["VAULT_CACERT"] = config.StringVal(r.config.Vault.SSL.CaCert)
	}

	if config.StringPresent(r.config.Vault.SSL.ServerName) {
		m["VAULT_TLS_SERVER_NAME"] = config.StringVal(r.config.Vault.SSL.ServerName)
	}

	// Append runner-supplied env (this is supplied programmatically).
	for k, v := range r.Env {
		m[k] = v
	}

	e := make([]string, 0, len(m))
	for k, v := range m {
		e = append(e, k+"="+v)
	}
	return e
}
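// runChildExample is an illustrative sketch, not part of the upstream runner:
// the slice returned by childEnv uses the same "KEY=value" form as os.Environ,
// so it can be appended to the parent environment and handed to an exec.Cmd
// directly. The helper name and the "env" command are hypothetical, and this
// sketch assumes the os and os/exec packages are imported.
func (r *Runner) runChildExample() error {
	cmd := exec.Command("env") // hypothetical child command that just prints its environment
	cmd.Env = append(os.Environ(), r.childEnv()...)
	cmd.Stdout = r.outStream
	cmd.Stderr = r.errStream
	return cmd.Run()
}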
// newWatcher creates a new watcher.
func newWatcher(c *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) {
	log.Printf("[INFO] (runner) creating watcher")

	w, err := watch.NewWatcher(&watch.NewWatcherInput{
		Clients:         clients,
		MaxStale:        config.TimeDurationVal(c.MaxStale),
		Once:            once,
		RenewVault:      config.StringPresent(c.Vault.Token) && config.BoolVal(c.Vault.RenewToken),
		RetryFuncConsul: watch.RetryFunc(c.Consul.Retry.RetryFunc()),
		// TODO: Add a sane default retry - right now this only affects "local"
		// dependencies like reading a file from disk.
		RetryFuncDefault: nil,
		RetryFuncVault:   watch.RetryFunc(c.Vault.Retry.RetryFunc()),
	})
	if err != nil {
		return nil, errors.Wrap(err, "runner")
	}
	return w, nil
}
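// defaultRetryFuncSketch is an illustrative sketch for the TODO above, not
// part of the upstream code. It assumes a retry function has the shape
// func(int) (bool, time.Duration), as the conversions to watch.RetryFunc above
// suggest: it receives the attempt count and returns whether to retry and how
// long to wait. The attempt limit and backoff base are arbitrary assumptions,
// and the time package is assumed to be imported.
func defaultRetryFuncSketch(attempt int) (bool, time.Duration) {
	const maxAttempts = 5
	if attempt >= maxAttempts {
		return false, 0
	}
	// Capped exponential backoff: 250ms, 500ms, 1s, 2s, 4s.
	return true, 250 * time.Millisecond << uint(attempt)
}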
// Start begins the polling for this runner. Any errors that occur will cause
// this function to push an item onto the runner's error channel and then halt
// execution. This function is blocking and should be called as a goroutine.
func (r *Runner) Start() {
	log.Printf("[INFO] (runner) starting")

	// Create the pid before doing anything.
	if err := r.storePid(); err != nil {
		r.ErrCh <- err
		return
	}

	// Start the de-duplication manager
	var dedupCh <-chan struct{}
	if r.dedup != nil {
		if err := r.dedup.Start(); err != nil {
			r.ErrCh <- err
			return
		}
		dedupCh = r.dedup.UpdateCh()
	}

	// Setup the child process exit channel
	var childExitCh <-chan int

	// Fire an initial run to parse all the templates and setup the first-pass
	// dependencies. This also forces any templates that have no dependencies to
	// be rendered immediately (since they are already renderable).
	log.Printf("[DEBUG] (runner) running initial templates")
	if err := r.Run(); err != nil {
		r.ErrCh <- err
		return
	}

	for {
		// Enable quiescence for all templates if we have specified wait
		// intervals.
	NEXT_Q:
		for _, t := range r.templates {
			if _, ok := r.quiescenceMap[t.ID()]; ok {
				continue NEXT_Q
			}

			for _, c := range r.templateConfigsFor(t) {
				if *c.Wait.Enabled {
					log.Printf("[DEBUG] (runner) enabling template-specific quiescence for %q",
						t.ID())
					r.quiescenceMap[t.ID()] = newQuiescence(
						r.quiescenceCh, *c.Wait.Min, *c.Wait.Max, t)
					continue NEXT_Q
				}
			}

			if *r.config.Wait.Enabled {
				log.Printf("[DEBUG] (runner) enabling global quiescence for %q",
					t.ID())
				r.quiescenceMap[t.ID()] = newQuiescence(
					r.quiescenceCh, *r.config.Wait.Min, *r.config.Wait.Max, t)
				continue NEXT_Q
			}
		}

		// Warn the user if they are watching too many dependencies.
		if r.watcher.Size() > saneViewLimit {
			log.Printf("[WARN] (runner) watching %d dependencies - watching this "+
				"many dependencies could DDoS your consul cluster", r.watcher.Size())
		} else {
			log.Printf("[DEBUG] (runner) watching %d dependencies", r.watcher.Size())
		}

		if r.allTemplatesRendered() {
			// If an exec command was given and a command is not currently running,
			// spawn the child process for supervision.
			if config.StringPresent(r.config.Exec.Command) {
				// Lock the child because we are about to check if it exists.
				r.childLock.Lock()

				if r.child == nil {
					env := r.config.Exec.Env.Copy()
					env.Custom = append(r.childEnv(), env.Custom...)
					child, err := spawnChild(&spawnChildInput{
						Stdin:        r.inStream,
						Stdout:       r.outStream,
						Stderr:       r.errStream,
						Command:      config.StringVal(r.config.Exec.Command),
						Env:          env.Env(),
						ReloadSignal: config.SignalVal(r.config.Exec.ReloadSignal),
						KillSignal:   config.SignalVal(r.config.Exec.KillSignal),
						KillTimeout:  config.TimeDurationVal(r.config.Exec.KillTimeout),
						Splay:        config.TimeDurationVal(r.config.Exec.Splay),
					})
					if err != nil {
						r.ErrCh <- err
						r.childLock.Unlock()
						return
					}
					r.child = child
				}

				// Unlock the child, we are done now.
				r.childLock.Unlock()

				// It's possible that we didn't start a process, in which case no
				// channel is returned. If we did get a new exitCh, that means a child
				// was spawned, so we need to watch a new exitCh. It is also possible
				// that during a run, the child process was restarted, which means a
				// new exit channel should be used.
				nexitCh := r.child.ExitCh()
				if nexitCh != nil {
					childExitCh = nexitCh
				}
			}

			// If we are running in once mode and all our templates are rendered,
			// then we should exit here.
			if r.once {
				log.Printf("[INFO] (runner) once mode and all templates rendered")

				if r.child != nil {
					r.stopDedup()
					r.stopWatcher()

					log.Printf("[INFO] (runner) waiting for child process to exit")
					select {
					case c := <-childExitCh:
						log.Printf("[INFO] (runner) child process died")
						r.ErrCh <- NewErrChildDied(c)
						return
					case <-r.DoneCh:
					}
				}

				r.Stop()
				return
			}
		}

	OUTER:
		select {
		case view := <-r.watcher.DataCh():
			// Receive this update
			r.Receive(view.Dependency(), view.Data())

			// Drain all dependency data. Given a large number of dependencies, it is
			// feasible that we have data for more than one of them. Instead of
			// wasting CPU cycles rendering templates when we have more dependencies
			// waiting to be added to the brain, we drain the entire buffered channel
			// on the watcher and then report when it is done receiving new data,
			// which the parent select listens for.
			//
			// Please see https://github.com/hashicorp/consul-template/issues/168 for
			// more information about this optimization and the entire backstory.
			for {
				select {
				case view := <-r.watcher.DataCh():
					r.Receive(view.Dependency(), view.Data())
				default:
					break OUTER
				}
			}

		case <-dedupCh:
			// We may get triggered by the de-duplication manager for either a change
			// in leadership (acquired or lost lock), or an update of data for a
			// template that we are watching.
			log.Printf("[INFO] (runner) watcher triggered by de-duplication manager")
			break OUTER

		case err := <-r.watcher.ErrCh():
			// Push the error back up the stack
			log.Printf("[ERR] (runner) watcher reported error: %s", err)
			r.ErrCh <- err
			return

		case tmpl := <-r.quiescenceCh:
			// Remove the quiescence for this template from the map. This will force
			// the upcoming Run call to actually evaluate and render the template.
			log.Printf("[DEBUG] (runner) received template %q from quiescence", tmpl.ID())
			delete(r.quiescenceMap, tmpl.ID())

		case c := <-childExitCh:
			log.Printf("[INFO] (runner) child process died")
			r.ErrCh <- NewErrChildDied(c)
			return

		case <-r.DoneCh:
			log.Printf("[INFO] (runner) received finish")
			return
		}

		// If we got this far, that means we got new data or one of the timers
		// fired, so attempt to re-render.
		if err := r.Run(); err != nil {
			r.ErrCh <- err
			return
		}
	}
}
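// runnerUsageSketch is an illustrative sketch, not part of the upstream
// runner: Start blocks, so callers typically run it in a goroutine and select
// on the exported error and done channels. The NewRunner(config, dry, once)
// signature is assumed here and may differ between versions.
func runnerUsageSketch(cfg *config.Config) error {
	runner, err := NewRunner(cfg, false, false)
	if err != nil {
		return err
	}
	go runner.Start()

	select {
	case err := <-runner.ErrCh:
		runner.Stop()
		return err
	case <-runner.DoneCh:
		return nil
	}
}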