func deleteSecurityGroupRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, ors *schema.Set) error {
	var errs *multierror.Error

	var wg sync.WaitGroup
	wg.Add(ors.Len())

	sem := make(chan struct{}, d.Get("parallelism").(int))

	// Guards errs and rules, which are shared by the goroutines below;
	// multierror.Append is not safe for concurrent use.
	var mu sync.Mutex

	for _, rule := range ors.List() {
		// Put a sleep here to avoid DoS'ing the API
		time.Sleep(500 * time.Millisecond)

		go func(rule map[string]interface{}) {
			defer wg.Done()
			sem <- struct{}{}
			// Release the semaphore slot in a defer so it cannot leak
			defer func() { <-sem }()

			// Delete a single rule
			err := deleteSecurityGroupRule(d, meta, rule)
			if err != nil {
				mu.Lock()
				errs = multierror.Append(errs, err)
				mu.Unlock()
			}

			// If we have at least one UUID, we need to save the rule
			if len(rule["uuids"].(map[string]interface{})) > 0 {
				mu.Lock()
				rules.Add(rule)
				mu.Unlock()
			}
		}(rule.(map[string]interface{}))
	}

	wg.Wait()

	return errs.ErrorOrNil()
}
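// The function above collects errors from several goroutines into one
// *multierror.Error. A minimal, self-contained sketch of that fan-out
// pattern, stripped of the provider specifics; runParallel and tasks are
// hypothetical names, not from the snippets in this file
// (requires "sync" and "github.com/hashicorp/go-multierror"):
func runParallel(tasks []func() error, parallelism int) error {
	var (
		errs *multierror.Error
		mu   sync.Mutex
		wg   sync.WaitGroup
	)
	sem := make(chan struct{}, parallelism) // bounds concurrent tasks

	wg.Add(len(tasks))
	for _, task := range tasks {
		go func(task func() error) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // always release, even on early return

			if err := task(); err != nil {
				mu.Lock() // multierror.Append is not goroutine-safe
				errs = multierror.Append(errs, err)
				mu.Unlock()
			}
		}(task)
	}
	wg.Wait()

	// ErrorOrNil returns nil if no errors were appended
	return errs.ErrorOrNil()
}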
func (auth *GitHubAuthFlag) Validate() error {
	var errs *multierror.Error

	if auth.ClientID == "" || auth.ClientSecret == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --github-auth-client-id and --github-auth-client-secret to use GitHub OAuth."),
		)
	}

	if len(auth.Organizations) == 0 && len(auth.Teams) == 0 && len(auth.Users) == 0 {
		errs = multierror.Append(
			errs,
			errors.New("at least one of the following is required for github-auth: organizations, teams, users."),
		)
	}

	return errs.ErrorOrNil()
}
func (auth *BasicAuthFlag) Validate() error {
	var errs *multierror.Error

	if auth.Username == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --basic-auth-username to use basic auth."),
		)
	}

	if auth.Password == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --basic-auth-password to use basic auth."),
		)
	}

	return errs.ErrorOrNil()
}
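// A hedged usage sketch of the validators above: Validate returns nil when
// every check passes and a single combined error listing all failures
// otherwise, because (*multierror.Error).ErrorOrNil() returns nil for a nil
// or empty *multierror.Error. The flag values and function name here are
// hypothetical (assumes "fmt" and the BasicAuthFlag type above):
func validateBasicAuthExample() {
	auth := &BasicAuthFlag{Username: "", Password: ""}
	if err := auth.Validate(); err != nil {
		// err.Error() renders both missing-flag messages as one formatted list
		fmt.Println(err)
	}
}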
// validateRole contacts Vault and checks that the given Vault role is valid for
// the purposes of being used by Nomad
func (v *vaultClient) validateRole(role string) error {
	if role == "" {
		return fmt.Errorf("Invalid empty role name")
	}

	// Validate the role
	rsecret, err := v.client.Logical().Read(fmt.Sprintf("auth/token/roles/%s", role))
	if err != nil {
		return fmt.Errorf("failed to lookup role %q: %v", role, err)
	}

	// Read and parse the fields
	var data struct {
		ExplicitMaxTtl int `mapstructure:"explicit_max_ttl"`
		Orphan         bool
		Period         int
		Renewable      bool
	}
	if err := mapstructure.WeakDecode(rsecret.Data, &data); err != nil {
		return fmt.Errorf("failed to parse Vault role's data block: %v", err)
	}

	// Validate the role is acceptable
	var mErr multierror.Error
	if data.Orphan {
		multierror.Append(&mErr, fmt.Errorf("Role must not allow orphans"))
	}
	if !data.Renewable {
		multierror.Append(&mErr, fmt.Errorf("Role must allow tokens to be renewed"))
	}
	if data.ExplicitMaxTtl != 0 {
		multierror.Append(&mErr, fmt.Errorf("Role can not use an explicit max ttl. Token must be periodic."))
	}
	if data.Period == 0 {
		multierror.Append(&mErr, fmt.Errorf("Role must have a non-zero period to make tokens periodic."))
	}

	return mErr.ErrorOrNil()
}
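// The role check above relies on mapstructure.WeakDecode, which decodes a
// map[string]interface{} into a struct with weakly typed conversions (e.g.
// the string "300" becomes the int 300). A minimal sketch, assuming only
// "github.com/mitchellh/mapstructure"; the function name and input map are
// hypothetical:
func decodeRoleDataExample() error {
	raw := map[string]interface{}{
		"explicit_max_ttl": "300", // a string, weakly converted to int
		"orphan":           false,
		"period":           600,
		"renewable":        true,
	}

	var data struct {
		ExplicitMaxTtl int `mapstructure:"explicit_max_ttl"`
		Orphan         bool
		Period         int
		Renewable      bool
	}
	if err := mapstructure.WeakDecode(raw, &data); err != nil {
		return err
	}
	// data.ExplicitMaxTtl == 300, data.Period == 600
	return nil
}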
func (auth *GenericOAuthFlag) Validate() error {
	var errs *multierror.Error

	if auth.ClientID == "" || auth.ClientSecret == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --generic-oauth-client-id and --generic-oauth-client-secret to use Generic OAuth."),
		)
	}

	if auth.AuthURL == "" || auth.TokenURL == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --generic-oauth-auth-url and --generic-oauth-token-url to use Generic OAuth."),
		)
	}

	if auth.DisplayName == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --generic-oauth-display-name to use Generic OAuth."),
		)
	}

	return errs.ErrorOrNil()
}
func (auth *UAAAuthFlag) Validate() error {
	var errs *multierror.Error

	if auth.ClientID == "" || auth.ClientSecret == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --uaa-auth-client-id and --uaa-auth-client-secret to use UAA OAuth."),
		)
	}

	if len(auth.CFSpaces) == 0 {
		errs = multierror.Append(
			errs,
			errors.New("must specify --uaa-auth-cf-space to use UAA OAuth."),
		)
	}

	if auth.AuthURL == "" || auth.TokenURL == "" || auth.CFURL == "" {
		errs = multierror.Append(
			errs,
			errors.New("must specify --uaa-auth-auth-url, --uaa-auth-token-url and --uaa-auth-cf-url to use UAA OAuth."),
		)
	}

	return errs.ErrorOrNil()
}
// run is the long-lived loop that handles errors and templates being rendered
func (tm *TaskTemplateManager) run() {
	// Runner is nil if there are no templates
	if tm.runner == nil {
		// Unblock the start if there is nothing to do
		tm.hook.UnblockStart("consul-template")
		return
	}

	// Start the runner
	go tm.runner.Start()

	// Track when they have all been rendered so we don't signal the task for
	// any render event beforehand
	var allRenderedTime time.Time

	// Handle the first rendering:
	// wait until all the templates have been rendered
WAIT:
	for {
		select {
		case <-tm.shutdownCh:
			return
		case err, ok := <-tm.runner.ErrCh:
			if !ok {
				continue
			}

			tm.hook.Kill("consul-template", err.Error(), true)
		case <-tm.runner.TemplateRenderedCh():
			// A template has been rendered, figure out what to do
			events := tm.runner.RenderEvents()

			// Not all templates have been rendered yet
			if len(events) < len(tm.lookup) {
				continue
			}

			for _, event := range events {
				// This template hasn't been rendered
				if event.LastWouldRender.IsZero() {
					continue WAIT
				}
			}

			break WAIT
		}
	}

	allRenderedTime = time.Now()
	tm.hook.UnblockStart("consul-template")

	// If all our templates are change mode no-op, then we can exit here
	if tm.allTemplatesNoop() {
		return
	}

	// A lookup for the last time each template was handled
	numTemplates := len(tm.templates)
	handledRenders := make(map[string]time.Time, numTemplates)

	for {
		select {
		case <-tm.shutdownCh:
			return
		case err, ok := <-tm.runner.ErrCh:
			if !ok {
				continue
			}

			tm.hook.Kill("consul-template", err.Error(), true)
		case <-tm.runner.TemplateRenderedCh():
			// A template has been rendered, figure out what to do
			var handling []string
			signals := make(map[string]struct{})
			restart := false
			var splay time.Duration

			events := tm.runner.RenderEvents()
			for id, event := range events {
				// First time through
				if allRenderedTime.After(event.LastDidRender) || allRenderedTime.Equal(event.LastDidRender) {
					handledRenders[id] = allRenderedTime
					continue
				}

				// We have already handled this one
				if htime := handledRenders[id]; htime.After(event.LastDidRender) || htime.Equal(event.LastDidRender) {
					continue
				}

				// Lookup the template and determine what to do
				tmpls, ok := tm.lookup[id]
				if !ok {
					tm.hook.Kill("consul-template", fmt.Sprintf("consul-template runner returned unknown template id %q", id), true)
					return
				}

				for _, tmpl := range tmpls {
					switch tmpl.ChangeMode {
					case structs.TemplateChangeModeSignal:
						signals[tmpl.ChangeSignal] = struct{}{}
					case structs.TemplateChangeModeRestart:
						restart = true
					case structs.TemplateChangeModeNoop:
						continue
					}

					if tmpl.Splay > splay {
						splay = tmpl.Splay
					}
				}

				handling = append(handling, id)
			}

			if restart || len(signals) != 0 {
				if splay != 0 {
					select {
					case <-time.After(splay):
					case <-tm.shutdownCh:
						return
					}
				}

				// Update handle time
				for _, id := range handling {
					handledRenders[id] = events[id].LastDidRender
				}

				if restart {
					tm.hook.Restart("consul-template", "template with change_mode restart re-rendered")
				} else if len(signals) != 0 {
					var mErr multierror.Error
					for signal := range signals {
						err := tm.hook.Signal("consul-template", "template re-rendered", tm.signals[signal])
						if err != nil {
							multierror.Append(&mErr, err)
						}
					}

					if err := mErr.ErrorOrNil(); err != nil {
						flat := make([]os.Signal, 0, len(signals))
						for signal := range signals {
							flat = append(flat, tm.signals[signal])
						}
						tm.hook.Kill("consul-template", fmt.Sprintf("Sending signals %v failed: %v", flat, err), true)
					}
				}
			}
		}
	}
}
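// The splay handling above is a small reusable pattern: sleep a jittered
// delay while staying responsive to shutdown. A minimal sketch with
// hypothetical names (requires only "time"):
func sleepWithShutdown(splay time.Duration, shutdownCh <-chan struct{}) bool {
	if splay == 0 {
		return true
	}
	select {
	case <-time.After(splay):
		return true // waited the full splay
	case <-shutdownCh:
		return false // shutting down; the caller should return
	}
}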
// parseSelfToken looks up the Vault token in Vault and parses its data, storing
// it in the client. If the token is not valid for Nomad's purposes an error is
// returned.
func (v *vaultClient) parseSelfToken() error {
	// Get the initial lease duration
	auth := v.client.Auth().Token()
	self, err := auth.LookupSelf()
	if err != nil {
		return fmt.Errorf("failed to lookup Vault periodic token: %v", err)
	}

	// Read and parse the fields
	var data tokenData
	if err := mapstructure.WeakDecode(self.Data, &data); err != nil {
		return fmt.Errorf("failed to parse Vault token's data block: %v", err)
	}

	root := false
	for _, p := range data.Policies {
		if p == "root" {
			root = true
			break
		}
	}

	var mErr multierror.Error
	if !root {
		// All non-root tokens must be renewable
		if !data.Renewable {
			multierror.Append(&mErr, fmt.Errorf("Vault token is not renewable or root"))
		}

		// All non-root tokens must have a lease duration
		if data.CreationTTL == 0 {
			multierror.Append(&mErr, fmt.Errorf("invalid lease duration of zero"))
		}

		// The lease duration can not be expired
		if data.TTL == 0 {
			multierror.Append(&mErr, fmt.Errorf("token TTL is zero"))
		}

		// There must be a valid role since we aren't root
		if data.Role == "" {
			multierror.Append(&mErr, fmt.Errorf("token role name must be set when not using a root token"))
		}
	} else if data.CreationTTL != 0 {
		// If the root token has a TTL it must be renewable
		if !data.Renewable {
			multierror.Append(&mErr, fmt.Errorf("Vault token has a TTL but is not renewable"))
		} else if data.TTL == 0 {
			// If the token has a TTL make sure it has not expired
			multierror.Append(&mErr, fmt.Errorf("token TTL is zero"))
		}
	}

	// If given a role, validate it
	if data.Role != "" {
		if err := v.validateRole(data.Role); err != nil {
			multierror.Append(&mErr, err)
		}
	}

	data.Root = root
	v.tokenData = &data
	return mErr.ErrorOrNil()
}
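// Both Vault helpers above use go-multierror's second accumulation style: a
// multierror.Error value on the stack, with the return value of
// multierror.Append(&mErr, ...) discarded. That works because Append, given a
// non-nil *multierror.Error as its first argument, appends to it in place. A
// minimal sketch contrasting the two styles seen in this file (the function
// name is hypothetical; requires "errors" and "github.com/hashicorp/go-multierror"):
func appendStyles() error {
	// Style 1: reassign the returned pointer (used by the Validate methods)
	var errs *multierror.Error
	errs = multierror.Append(errs, errors.New("first"))

	// Style 2: mutate a stack value through its address (used by the Vault code)
	var mErr multierror.Error
	multierror.Append(&mErr, errors.New("second"))

	// Either way, ErrorOrNil() returns nil when nothing was collected
	return mErr.ErrorOrNil()
}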
func createSecurityGroupRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, nrs *schema.Set) error {
	cs := meta.(*cloudstack.CloudStackClient)

	var errs *multierror.Error

	var wg sync.WaitGroup
	wg.Add(nrs.Len())

	sem := make(chan struct{}, d.Get("parallelism").(int))

	// Guards errs and rules, which are shared by the goroutines below;
	// multierror.Append is not safe for concurrent use.
	var mu sync.Mutex

	for _, rule := range nrs.List() {
		// Put in a tiny sleep here to avoid DoS'ing the API
		time.Sleep(500 * time.Millisecond)

		go func(rule map[string]interface{}) {
			defer wg.Done()
			sem <- struct{}{}
			// Release the semaphore slot in a defer so the early return below
			// cannot leak it and deadlock wg.Wait()
			defer func() { <-sem }()

			// Make sure all required parameters are there
			if err := verifySecurityGroupRuleParams(d, rule); err != nil {
				mu.Lock()
				errs = multierror.Append(errs, err)
				mu.Unlock()
				return
			}

			var p authorizeSecurityGroupParams

			if cidrList, ok := rule["cidr_list"].(*schema.Set); ok && cidrList.Len() > 0 {
				for _, cidr := range cidrList.List() {
					// Create a new parameter struct
					switch rule["traffic_type"].(string) {
					case "ingress":
						p = cs.SecurityGroup.NewAuthorizeSecurityGroupIngressParams()
					case "egress":
						p = cs.SecurityGroup.NewAuthorizeSecurityGroupEgressParams()
					}

					p.SetSecuritygroupid(d.Id())
					p.SetCidrlist([]string{cidr.(string)})

					// Create a single rule
					err := createSecurityGroupRule(d, meta, rule, p, cidr.(string))
					if err != nil {
						mu.Lock()
						errs = multierror.Append(errs, err)
						mu.Unlock()
					}
				}
			}

			if usgList, ok := rule["user_security_group_list"].(*schema.Set); ok && usgList.Len() > 0 {
				for _, usg := range usgList.List() {
					sg, _, err := cs.SecurityGroup.GetSecurityGroupByName(
						usg.(string),
						cloudstack.WithProject(d.Get("project").(string)),
					)
					if err != nil {
						mu.Lock()
						errs = multierror.Append(errs, err)
						mu.Unlock()
						continue
					}

					// Create a new parameter struct
					switch rule["traffic_type"].(string) {
					case "ingress":
						p = cs.SecurityGroup.NewAuthorizeSecurityGroupIngressParams()
					case "egress":
						p = cs.SecurityGroup.NewAuthorizeSecurityGroupEgressParams()
					}

					p.SetSecuritygroupid(d.Id())
					p.SetUsersecuritygrouplist(map[string]string{sg.Account: usg.(string)})

					// Create a single rule
					err = createSecurityGroupRule(d, meta, rule, p, usg.(string))
					if err != nil {
						mu.Lock()
						errs = multierror.Append(errs, err)
						mu.Unlock()
					}
				}
			}

			// If we have at least one UUID, we need to save the rule
			if len(rule["uuids"].(map[string]interface{})) > 0 {
				mu.Lock()
				rules.Add(rule)
				mu.Unlock()
			}
		}(rule.(map[string]interface{}))
	}

	wg.Wait()

	return errs.ErrorOrNil()
}