// treeFunc returns or accumulates keyPrefix dependencies.
func treeFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) {
	return func(s string) ([]*dep.KeyPair, error) {
		result := []*dep.KeyPair{}

		if len(s) == 0 {
			return result, nil
		}

		d, err := dep.NewKVListQuery(s)
		if err != nil {
			return result, err
		}

		used.Add(d)

		// Only return non-empty top-level keys
		if value, ok := b.Recall(d); ok {
			for _, pair := range value.([]*dep.KeyPair) {
				parts := strings.Split(pair.Key, "/")
				if parts[len(parts)-1] != "" {
					result = append(result, pair)
				}
			}
			return result, nil
		}

		missing.Add(d)

		return result, nil
	}
}
// envFunc returns a function which checks the value of an environment
// variable. Invokers can specify their own environment, which takes
// precedence over any real environment variables.
func envFunc(b *Brain, used, missing *dep.Set, overrides []string) func(string) (string, error) {
	return func(s string) (string, error) {
		var result string

		d, err := dep.NewEnvQuery(s)
		if err != nil {
			return result, err
		}

		used.Add(d)

		// Overrides lookup - we have to do this after adding the dependency,
		// otherwise dedupe sharing won't work.
		for _, e := range overrides {
			split := strings.SplitN(e, "=", 2)
			if len(split) != 2 {
				// Skip entries that are not in KEY=VALUE form so we do not
				// panic on a missing value.
				continue
			}
			k, v := split[0], split[1]
			if k == s {
				return v, nil
			}
		}

		if value, ok := b.Recall(d); ok {
			return value.(string), nil
		}

		missing.Add(d)

		return result, nil
	}
}
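// envFunc's overrides use the same KEY=VALUE form as os.Environ(). A small
// sketch of the intended precedence (the brain/used/missing values here are
// illustrative, not the actual wiring):
//
//	env := envFunc(brain, used, missing, []string{"PORT=8080"})
//	v, _ := env("PORT") // "8080", even if a real PORT variable is set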
// keyWithDefaultFunc returns or accumulates key dependencies that have a
// default value.
func keyWithDefaultFunc(b *Brain, used, missing *dep.Set) func(string, string) (string, error) {
	return func(s, def string) (string, error) {
		if len(s) == 0 {
			return def, nil
		}

		d, err := dep.NewKVGetQuery(s)
		if err != nil {
			return "", err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			if value == nil || value.(string) == "" {
				return def, nil
			}
			return value.(string), nil
		}

		missing.Add(d)

		return def, nil
	}
}
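// In a template, keyWithDefaultFunc is typically exposed as "keyOrDefault"
// (an assumption about the registration name), so a template can write:
//
//	{{ keyOrDefault "service/config/port" "5432" }}
//
// which renders the stored value when present and falls back to "5432" while
// the key is missing or empty.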
// secretsFunc returns or accumulates a list of secret dependencies from Vault.
func secretsFunc(b *Brain, used, missing *dep.Set) func(string) ([]string, error) {
	return func(s string) ([]string, error) {
		result := []string{}

		if len(s) == 0 {
			return result, nil
		}

		d, err := dep.NewVaultListQuery(s)
		if err != nil {
			return result, err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			result = value.([]string)
			return result, nil
		}

		missing.Add(d)

		return result, nil
	}
}
// keyFunc returns or accumulates key dependencies.
func keyFunc(b *Brain, used, missing *dep.Set) func(string) (string, error) {
	return func(s string) (string, error) {
		if len(s) == 0 {
			return "", nil
		}

		d, err := dep.NewKVGetQuery(s)
		if err != nil {
			return "", err
		}

		// Unlike keyWithDefaultFunc, block until the key exists rather than
		// rendering with empty data.
		d.EnableBlocking()

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			if value == nil {
				return "", nil
			}
			return value.(string), nil
		}

		missing.Add(d)

		return "", nil
	}
}
// fileFunc returns or accumulates file dependencies.
func fileFunc(b *Brain, used, missing *dep.Set) func(string) (string, error) {
	return func(s string) (string, error) {
		if len(s) == 0 {
			return "", nil
		}

		d, err := dep.NewFileQuery(s)
		if err != nil {
			return "", err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			if value == nil {
				return "", nil
			}
			return value.(string), nil
		}

		missing.Add(d)

		return "", nil
	}
}
// nodeFunc returns or accumulates a catalog node dependency.
func nodeFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.CatalogNode, error) {
	return func(s ...string) (*dep.CatalogNode, error) {
		d, err := dep.NewCatalogNodeQuery(strings.Join(s, ""))
		if err != nil {
			return nil, err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			return value.(*dep.CatalogNode), nil
		}

		missing.Add(d)

		return nil, nil
	}
}
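// Because nodeFunc is variadic and joins its arguments, a template can call
// it with or without a node name (assuming it is registered as "node"):
//
//	{{ node }}         // the local agent's node
//	{{ node "web01" }} // a specific node by name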
// servicesFunc returns or accumulates catalog services dependencies.
func servicesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.CatalogSnippet, error) {
	return func(s ...string) ([]*dep.CatalogSnippet, error) {
		result := []*dep.CatalogSnippet{}

		d, err := dep.NewCatalogServicesQuery(strings.Join(s, ""))
		if err != nil {
			return nil, err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			return value.([]*dep.CatalogSnippet), nil
		}

		missing.Add(d)

		return result, nil
	}
}
// datacentersFunc returns or accumulates datacenter dependencies.
func datacentersFunc(b *Brain, used, missing *dep.Set) func() ([]string, error) {
	return func() ([]string, error) {
		result := []string{}

		d, err := dep.NewCatalogDatacentersQuery()
		if err != nil {
			return result, err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			return value.([]string), nil
		}

		missing.Add(d)

		return result, nil
	}
}
// keyExistsFunc returns true if a key exists, false otherwise.
func keyExistsFunc(b *Brain, used, missing *dep.Set) func(string) (bool, error) {
	return func(s string) (bool, error) {
		if len(s) == 0 {
			return false, nil
		}

		d, err := dep.NewKVGetQuery(s)
		if err != nil {
			return false, err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			return value != nil, nil
		}

		missing.Add(d)

		return false, nil
	}
}
// serviceFunc returns or accumulates health service dependencies.
func serviceFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.HealthService, error) {
	return func(s ...string) ([]*dep.HealthService, error) {
		result := []*dep.HealthService{}

		if len(s) == 0 || s[0] == "" {
			return result, nil
		}

		d, err := dep.NewHealthServiceQuery(strings.Join(s, ""))
		if err != nil {
			return nil, err
		}

		used.Add(d)

		if value, ok := b.Recall(d); ok {
			return value.([]*dep.HealthService), nil
		}

		missing.Add(d)

		return result, nil
	}
}
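// The closures above are built to be installed as functions on the standard
// library's text/template engine. A minimal sketch of the wiring, assuming
// local brain/used/missing/overrides values and the conventional function
// names (the real FuncMap construction lives elsewhere in this package):
//
//	import texttemplate "text/template"
//
//	funcMap := texttemplate.FuncMap{
//		"datacenters":  datacentersFunc(brain, used, missing),
//		"env":          envFunc(brain, used, missing, overrides),
//		"file":         fileFunc(brain, used, missing),
//		"key":          keyFunc(brain, used, missing),
//		"keyExists":    keyExistsFunc(brain, used, missing),
//		"keyOrDefault": keyWithDefaultFunc(brain, used, missing),
//		"node":         nodeFunc(brain, used, missing),
//		"secrets":      secretsFunc(brain, used, missing),
//		"service":      serviceFunc(brain, used, missing),
//		"services":     servicesFunc(brain, used, missing),
//		"tree":         treeFunc(brain, used, missing),
//	}
//	t := texttemplate.Must(texttemplate.New("t").Funcs(funcMap).Parse(contents))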
// Run iterates over each template in this Runner and conditionally executes
// the template rendering and command execution.
//
// The template is rendered atomically. If and only if the template render
// completes successfully, the optional commands will be executed. Please
// note that all templates are rendered **and then** any commands are
// executed.
func (r *Runner) Run() error {
	log.Printf("[INFO] (runner) initiating run")

	var wouldRenderAny, renderedAny bool
	var commands []*config.TemplateConfig
	depsMap := make(map[string]dep.Dependency)

	for _, tmpl := range r.templates {
		log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID())

		// Create the event
		event := &RenderEvent{
			Template:        tmpl,
			TemplateConfigs: r.templateConfigsFor(tmpl),
		}

		// Check if we are currently the leader instance
		isLeader := true
		if r.dedup != nil {
			isLeader = r.dedup.IsLeader(tmpl)
		}

		// If we are in once mode and this template was already rendered, move
		// on to the next one. We do not want to re-render the template if we
		// are in once mode, and we certainly do not want to re-run any
		// commands.
		if r.once {
			r.renderEventsLock.RLock()
			_, rendered := r.renderEvents[tmpl.ID()]
			r.renderEventsLock.RUnlock()
			if rendered {
				log.Printf("[DEBUG] (runner) once mode and already rendered")
				continue
			}
		}

		// Attempt to render the template, returning any missing dependencies
		// and the rendered contents. If there are any missing dependencies,
		// the contents cannot be rendered or trusted!
		result, err := tmpl.Execute(&template.ExecuteInput{
			Brain: r.brain,
			Env:   r.childEnv(),
		})
		if err != nil {
			return errors.Wrap(err, tmpl.Source())
		}

		// Grab the list of used and missing dependencies.
		missing, used := result.Missing, result.Used

		// Add the dependencies to the list of dependencies for this runner.
		for _, d := range used.List() {
			// If we've taken over leadership for a template, we may have data
			// that is cached, but not have the watcher. We must treat this as
			// missing so that we create the watcher and re-run the template.
			if isLeader && !r.watcher.Watching(d) {
				missing.Add(d)
			}
			if _, ok := depsMap[d.String()]; !ok {
				depsMap[d.String()] = d
			}
		}

		// Diff any missing dependencies the template reported against the
		// dependencies the watcher is watching.
		unwatched := new(dep.Set)
		for _, d := range missing.List() {
			if !r.watcher.Watching(d) {
				unwatched.Add(d)
			}
		}

		// If there are unwatched dependencies, start the watcher and move on
		// to the next one.
		if l := unwatched.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) was not watching %d dependencies", l)
			for _, d := range unwatched.List() {
				// If we are deduplicating, we must still handle non-sharable
				// dependencies, since those will be ignored.
				if isLeader || !d.CanShare() {
					r.watcher.Add(d)
				}
			}
			continue
		}

		// If the template is missing data for some dependencies, then we are
		// not ready to render and need to move on to the next one.
		if l := missing.Len(); l > 0 {
			log.Printf("[DEBUG] (runner) missing data for %d dependencies", l)
			continue
		}

		// Trigger an update of the de-duplication manager
		if r.dedup != nil && isLeader {
			if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil {
				log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err)
			}
		}

		// Update event information with dependencies.
		event.MissingDeps = missing
		event.UnwatchedDeps = unwatched
		event.UsedDeps = used

		// If quiescence is activated, start/update the timers and loop back
		// around. We do not want to render the templates yet.
		if q, ok := r.quiescenceMap[tmpl.ID()]; ok {
			q.tick()
			continue
		}

		// For each template configuration that is tied to this template,
		// attempt to render it to disk and accumulate commands for later use.
		for _, templateConfig := range r.templateConfigsFor(tmpl) {
			log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display())

			// Render the template, taking dry mode into account
			result, err := Render(&RenderInput{
				Backup:    config.BoolVal(templateConfig.Backup),
				Contents:  result.Output,
				Dry:       r.dry,
				DryStream: r.outStream,
				Path:      config.StringVal(templateConfig.Destination),
				Perms:     config.FileModeVal(templateConfig.Perms),
			})
			if err != nil {
				return errors.Wrap(err, "error rendering "+templateConfig.Display())
			}

			// If we would have rendered this template (but we did not because
			// the contents were the same or something), we should consider
			// this template rendered even though the contents on disk have
			// not been updated. We will not fire commands unless the template
			// was _actually_ rendered to disk though.
			if result.WouldRender {
				// This event would have rendered
				event.WouldRender = true

				// Record that at least one template would have been rendered.
				wouldRenderAny = true
			}

			// If we _actually_ rendered the template to disk, we want to run
			// the appropriate commands.
			if result.DidRender {
				log.Printf("[INFO] (runner) rendered %s", templateConfig.Display())

				// This event did render
				event.DidRender = true

				// Record that at least one template was rendered.
				renderedAny = true

				if !r.dry {
					// If the template was rendered (changed) and we are not
					// in dry-run mode, aggregate commands, ignoring
					// previously known commands.
					//
					// Future-self Q&A: Why not use a map for the commands
					// instead of an array with an expensive lookup option?
					// Well, I'm glad you asked, future-self! One of the API
					// promises is that commands are executed in the order in
					// which they are provided in the TemplateConfig
					// definitions. If we inserted commands into a map, we
					// would lose that relative ordering and people would be
					// unhappy.
					if c := config.StringVal(templateConfig.Exec.Command); c != "" {
						existing := findCommand(templateConfig, commands)
						if existing != nil {
							log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)",
								c, templateConfig.Display(), existing.Display())
						} else {
							log.Printf("[DEBUG] (runner) appending command %q from %s",
								c, templateConfig.Display())
							commands = append(commands, templateConfig)
						}
					}
				}
			}
		}

		// Send updated render event
		r.renderEventsLock.Lock()
		event.UpdatedAt = time.Now().UTC()
		r.renderEvents[tmpl.ID()] = event
		r.renderEventsLock.Unlock()
	}

	// Check if we need to deliver any rendered signals
	if wouldRenderAny || renderedAny {
		// Send the signal that a template got rendered
		select {
		case r.renderedCh <- struct{}{}:
		default:
		}
	}

	// Perform the diff and update the known dependencies.
	r.diffAndUpdateDeps(depsMap)

	// Execute each command in sequence, collecting any errors that occur -
	// this ensures all commands execute at least once.
	var errs []error
	for _, t := range commands {
		command := config.StringVal(t.Exec.Command)
		log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display())
		env := t.Exec.Env.Copy()
		env.Custom = append(r.childEnv(), env.Custom...)
		if _, err := spawnChild(&spawnChildInput{
			Stdin:        r.inStream,
			Stdout:       r.outStream,
			Stderr:       r.errStream,
			Command:      command,
			Env:          env.Env(),
			Timeout:      config.TimeDurationVal(t.Exec.Timeout),
			ReloadSignal: config.SignalVal(t.Exec.ReloadSignal),
			KillSignal:   config.SignalVal(t.Exec.KillSignal),
			KillTimeout:  config.TimeDurationVal(t.Exec.KillTimeout),
			Splay:        config.TimeDurationVal(t.Exec.Splay),
		}); err != nil {
			s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display())
			errs = append(errs, errors.Wrap(err, s))
		}
	}

	// If we got this far and have a child process, we need to send the reload
	// signal to the child process.
	if renderedAny && r.child != nil {
		r.childLock.RLock()
		if err := r.child.Reload(); err != nil {
			errs = append(errs, err)
		}
		r.childLock.RUnlock()
	}

	// If any errors were returned, convert them to an ErrorList for human
	// readability.
	if len(errs) != 0 {
		var result *multierror.Error
		for _, err := range errs {
			result = multierror.Append(result, err)
		}
		return result.ErrorOrNil()
	}

	return nil
}
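// findCommand, used by Run above, is defined elsewhere in this package. A
// minimal sketch of the behavior Run depends on (an assumption, not the
// actual implementation): return the first previously accumulated template
// config whose exec command matches, so a duplicate command runs only once
// while the TemplateConfig ordering promise is preserved.
//
//	func findCommand(c *config.TemplateConfig, list []*config.TemplateConfig) *config.TemplateConfig {
//		needle := config.StringVal(c.Exec.Command)
//		for _, t := range list {
//			if needle == config.StringVal(t.Exec.Command) {
//				return t
//			}
//		}
//		return nil
//	}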