Example 1
func (c *Controller) goWithRetry(f func() error, errChan chan error, wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		bo := backoff.NewExponentialBackOff()
		bo.InitialInterval = time.Millisecond * 250
		bo.MaxInterval = time.Second * 1
		bo.MaxElapsedTime = time.Minute * 2 // channel message can take some time

		ticker := backoff.NewTicker(bo)
		defer ticker.Stop()

		var err error
		for range ticker.C {
			if err = f(); err != nil {
				c.log.Error("err while operating: %s  will retry...", err.Error())
				continue
			}

			break
		}

		errChan <- err
	}()
}
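A minimal usage sketch for goWithRetry; the runAll helper and its ops parameter are illustrative names, not part of the original code. Because goWithRetry calls wg.Add(1) before spawning its goroutine, it is safe to Wait right after starting all operations and then drain the error channel.
// runAll is a hypothetical caller of goWithRetry: it starts every operation
// with retries, waits for all of them to finish, and returns the first
// non-nil error that was reported.
func (c *Controller) runAll(ops ...func() error) error {
	var wg sync.WaitGroup
	errChan := make(chan error, len(ops)) // buffered so senders never block

	for _, op := range ops {
		c.goWithRetry(op, errChan, &wg)
	}
	wg.Wait()
	close(errChan)

	for err := range errChan {
		if err != nil {
			return err
		}
	}
	return nil
}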
Example 2
func (l *LifeCycle) newProcessFunc(callback func([]*ec2.Instance) error) func(body *string) error {
	return func(body *string) error {
		l.log.Debug("got event %s", *body)

		ticker := backoff.NewTicker(backoff.NewExponentialBackOff())

		var res []*ec2.Instance
		var err error

		for range ticker.C {
			if res, err = l.GetAutoScalingOperatingMachines(); err != nil {
				l.log.Error("Getting autoscaling operating IPs failed, will retry... err: %s", err.Error())
				continue
			}

			if err = callback(res); err != nil {
				l.log.Error("Upserting records failed, will retry... err: %s", err.Error())
				continue
			}

			ticker.Stop()
			break
		}
		return err
	}
}
Example 3
func backOffTick(initial, max time.Duration) *backoff.Ticker {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = initial
	b.MaxInterval = max
	b.MaxElapsedTime = 0 // infinite
	return backoff.NewTicker(b)
}
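A hedged sketch of how such a helper might be consumed; retryForever and op are illustrative names and the intervals are arbitrary. Because MaxElapsedTime is zero, the ticker never closes its channel on its own, so the loop only exits once op succeeds.
// retryForever keeps calling op until it returns nil, backing off between
// attempts according to the ticker returned by backOffTick.
func retryForever(op func() error) error {
	ticker := backOffTick(250*time.Millisecond, 5*time.Second)
	defer ticker.Stop()

	var err error
	for range ticker.C {
		if err = op(); err != nil {
			continue
		}
		break
	}
	return err
}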
Example 4
func (b *Bongo) Connect() error {

	bo := backoff.NewExponentialBackOff()
	ticker := backoff.NewTicker(bo)
	defer ticker.Stop()

	var err error
	for range ticker.C {
		if err = b.Broker.Connect(); err != nil {
			b.log.Error("err while connecting: %s  will retry...", err.Error())
			continue
		}

		break
	}

	if err != nil {
		return err
	}

	B = b

	b.log.Info("Bongo connected %t", true)
	// todo add gorm Connect()
	return nil
}
Example 5
func newTicker() *backoff.Ticker {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 100 * time.Millisecond
	b.MaxInterval = 5 * time.Second
	b.MaxElapsedTime = 15 * time.Second

	return backoff.NewTicker(b)
}
Example 6
// Exec implements the Stage interface.
func (r RetryStage) Exec(ctx context.Context, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
	var (
		i    = 0
		b    = backoff.NewExponentialBackOff()
		tick = backoff.NewTicker(b)
		iErr error
	)
	defer tick.Stop()

	for {
		i++
		// Always check the context first to not notify again.
		select {
		case <-ctx.Done():
			if iErr != nil {
				return ctx, nil, iErr
			}

			return ctx, nil, ctx.Err()
		default:
		}

		select {
		case <-tick.C:
			if retry, err := r.integration.Notify(ctx, alerts...); err != nil {
				numFailedNotifications.WithLabelValues(r.integration.name).Inc()
				log.Debugf("Notify attempt %d failed: %s", i, err)
				if !retry {
					return ctx, alerts, fmt.Errorf("Cancelling notify retry due to unrecoverable error: %s", err)
				}

				// Save this error to be able to return the last seen error by an
				// integration upon context timeout.
				iErr = err
			} else {
				numNotifications.WithLabelValues(r.integration.name).Inc()
				return ctx, alerts, nil
			}
		case <-ctx.Done():
			if iErr != nil {
				return ctx, nil, iErr
			}

			return ctx, nil, ctx.Err()
		}
	}
}
Example 7
// ExecWithExponentialBackoff executes an operation with an exponential backoff retry
// and returns the operation's error
func (c *Context) ExecWithExponentialBackoff(op backoff.Operation) error {
	var err error

	c.backOff.Reset()
	ticker := backoff.NewTicker(c.backOff)

	for range ticker.C {
		if err = op(); err != nil {
			continue
		}

		ticker.Stop()
		break
	}

	return err
}
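The same loop can also be expressed with the library's backoff.Retry helper, which runs the operation and sleeps between attempts itself instead of exposing a ticker channel. A minimal sketch, assuming the same Context type as above; ExecWithRetry is an illustrative name.
// ExecWithRetry is a sketch of the same idea built on backoff.Retry: the
// helper keeps calling op until it returns nil or the backoff policy gives
// up, and returns the last error. The explicit Reset mirrors the original.
func (c *Context) ExecWithRetry(op backoff.Operation) error {
	c.backOff.Reset()
	return backoff.Retry(op, c.backOff)
}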
Example 8
// MustPing pings a database with an exponential backoff. The
// function panics if the database cannot be pinged after 15 minutes
func MustPing(db *sql.DB) {
	var err error
	b := backoff.NewExponentialBackOff()
	ticker := backoff.NewTicker(b)

	// Ticks will continue to arrive when the previous operation is still running,
	// so operations that take a while to fail could run in quick succession.
	for range ticker.C {
		if err = db.Ping(); err != nil {
			logger.Info("pinging database...", err.Error())
			continue
		}

		ticker.Stop()
		return
	}

	panic("Could not ping database!")
}
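The 15-minute limit in the comment is simply the library's default MaxElapsedTime for NewExponentialBackOff. Where the concern raised in the comment matters (ticks arriving while a slow Ping is still in flight), backoff.Retry is an alternative that only schedules the next attempt after the previous one returns; a hedged sketch, with mustPingRetry as an illustrative name.
// mustPingRetry is a hypothetical variant of MustPing built on backoff.Retry:
// attempts run strictly one after another, and Retry gives up once the
// policy's MaxElapsedTime (15 minutes by default) has passed.
func mustPingRetry(db *sql.DB) {
	if err := backoff.Retry(db.Ping, backoff.NewExponentialBackOff()); err != nil {
		panic("Could not ping database: " + err.Error())
	}
}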
Example 9
func Save(d *schemas.Developer) error {
	if d.Salt == "" {
		d.Salt = uuid.New()
		d.Password = util.HashPassword(d.Password, d.Salt)
	}

	var err error
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	// Keep a handle on the ticker so it can be stopped once the insert succeeds.
	defer ticker.Stop()

	for range ticker.C {
		if err = devs.Insert(d); err != nil {
			continue
		}

		break
	}

	return err
}
Example 10
func (p *PubNub) grantAccess(s *pubnub.AuthSettings) error {
	bo := backoff.NewExponentialBackOff()
	bo.MaxElapsedTime = MaxRetryDuration
	ticker := backoff.NewTicker(bo)

	var err error
	tryCount := 0
	for range ticker.C {
		if err = p.grant.Grant(s); err != nil {
			tryCount++
			p.log.Error("Could not grant access: %s  will retry... (%d time(s))", err, tryCount)
			continue
		}

		ticker.Stop()
		break
	}

	return err
}
Example 11
func (p *PubNub) publish(c ChannelManager, message interface{}) error {

	bo := backoff.NewExponentialBackOff()
	bo.MaxElapsedTime = MaxRetryDuration
	ticker := backoff.NewTicker(bo)
	defer ticker.Stop()

	var err error
	tryCount := 0
	for range ticker.C {
		if err = p.pub.Push(c.PrepareName(), message); err != nil {
			tryCount++
			p.log.Error("Could not publish message: %s  will retry... (%d time(s))", err, tryCount)

			continue
		}

		ticker.Stop()
		break
	}

	return err
}
Example 12
// Notify calls the underlying notifier with exponential backoff until it succeeds.
// It aborts if the context is canceled or timed out.
func (n *RetryNotifier) Notify(ctx context.Context, alerts ...*types.Alert) error {
	var (
		i    = 0
		b    = backoff.NewExponentialBackOff()
		tick = backoff.NewTicker(b)
	)
	defer tick.Stop()

	for {
		i++

		select {
		case <-tick.C:
			if err := n.notifier.Notify(ctx, alerts...); err != nil {
				log.Warnf("Notify attempt %d failed: %s", i, err)
			} else {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
Example 13
// validateClientWithBackoff repeatedly calls client.Ping until it receives
// a successful response or the context is canceled.
func (c *influxdbCluster) validateClientWithBackoff(ctx context.Context) error {
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = c.startupTimeout
	ticker := backoff.NewTicker(b)
	defer ticker.Stop()
	done := ctx.Done()
	for {
		select {
		case <-done:
			return errors.New("canceled")
		case _, ok := <-ticker.C:
			if !ok {
				return errors.New("failed to connect to InfluxDB, retry limit reached")
			}
			_, _, err := c.client.Ping(ctx)
			if err != nil {
				c.logger.Println("D! failed to connect to InfluxDB, retrying... ", err)
				continue
			}
			return nil
		}
	}
}
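Newer versions of the library can express the same cancel-or-retry logic without the manual select, by wrapping the policy with backoff.WithContext and handing it to backoff.Retry. A hedged sketch assuming that helper is available; validateWithRetry is an illustrative name and the error messages differ from the original.
// validateWithRetry is a hypothetical equivalent of the function above:
// Retry stops either when the context is canceled or when MaxElapsedTime
// (here the startup timeout) is exceeded.
func (c *influxdbCluster) validateWithRetry(ctx context.Context) error {
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = c.startupTimeout

	op := func() error {
		_, _, err := c.client.Ping(ctx)
		if err != nil {
			c.logger.Println("D! failed to connect to InfluxDB, retrying... ", err)
		}
		return err
	}
	return backoff.Retry(op, backoff.WithContext(b, ctx))
}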
Example 14
// waitForAction waits for a single action to finish.
func waitForAction(ctx context.Context, client *godo.Client, action *godo.Action) error {
	if action == nil {
		return nil
	}

	// NOTE: if we need debugging, enable the following
	debug := false
	if debug {
		start := time.Now()
		log.Println("waiting for action to finish: ", action.ID)
		defer func() {
			log.Println("done: ", time.Since(start).Seconds())
		}()
	}

	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			var err error
			action, _, err = client.Actions.Get(action.ID)
			if err != nil {
				return err
			}
			if action.Status == actionErrored {
				return errors.New(action.String())
			}
			if action.CompletedAt != nil || action.Status == actionCompleted {
				return nil
			}
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for action %d to complete", action.ID)
		}
	}
}
Example 15
func (c *Client) vagetbackendauth(s url.URL, cid string) error {

	if cid == "" {
		cid = os.Getenv("VCLOUDAIR_COMPUTEID")
	}

	req := c.NewRequest(map[string]string{}, "POST", s, nil)

	// Add the Accept header for vCA
	req.Header.Add("Accept", "application/xml;version=5.6")

	// Set Authorization Header
	req.Header.Add("x-vchs-authorization", c.VAToken)

	// Add exponential backoff for retrying the request
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = 30 * time.Second

	ticker := backoff.NewTicker(b)

	var err error
	var resp *http.Response

	for t := range ticker.C {
		resp, err = checkResp(c.Http.Do(req))
		if err != nil {
			fmt.Println(err, "retrying...", t)
			continue
		}
		ticker.Stop()
		break
	}

	if err != nil {
		return fmt.Errorf("error processing backend url action: %s", err)
	}

	defer resp.Body.Close()

	vcloudsession := new(vCloudSession)

	if err = decodeBody(resp, vcloudsession); err != nil {
		return fmt.Errorf("error decoding vcloudsession response: %s", err)
	}

	// Get the backend session information
	for _, s := range vcloudsession.VdcLink {
		if s.Name == cid {
			// Fetch the authorization token
			c.VCDToken = s.AuthorizationToken

			// Fetch the authorization header
			c.VCDAuthHeader = s.AuthorizationHeader

			u, err := url.ParseRequestURI(s.HREF)
			if err != nil {
				return fmt.Errorf("error decoding href: %s", err)
			}
			c.VCDVDCHREF = *u
			return nil
		}
	}
	return fmt.Errorf("error finding the right backend resource")
}
Example 16
func (s *Service) linkSubscriptions() error {
	s.logger.Println("I! linking subscriptions")
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = s.startupTimeout
	ticker := backoff.NewTicker(b)
	var err error
	var cli client.Client
	for range ticker.C {
		cli, err = s.NewClient()
		if err != nil {
			s.logger.Println("D! failed to connect to InfluxDB, retrying... ", err)
			continue
		}
		ticker.Stop()
		break
	}
	if err != nil {
		return err
	}

	numSubscriptions := int64(0)

	// Get all databases and retention policies
	var allSubs []subEntry
	resp, err := s.execQuery(cli, "SHOW DATABASES")
	if err != nil {
		return err
	}

	if len(resp.Results) == 1 && len(resp.Results[0].Series) == 1 && len(resp.Results[0].Series[0].Values) > 0 {
		dbs := resp.Results[0].Series[0].Values
		for _, v := range dbs {
			dbname := v[0].(string)

			rpResp, err := s.execQuery(cli, fmt.Sprintf(`SHOW RETENTION POLICIES ON "%s"`, dbname))
			if err != nil {
				return err
			}
			if len(rpResp.Results) == 1 && len(rpResp.Results[0].Series) == 1 && len(rpResp.Results[0].Series[0].Values) > 0 {
				rps := rpResp.Results[0].Series[0].Values
				for _, v := range rps {
					rpname := v[0].(string)

					se := subEntry{
						db:   dbname,
						rp:   rpname,
						name: s.clusterID,
					}
					allSubs = append(allSubs, se)
				}
			}

		}
	}

	// Get all existing subscriptions
	resp, err = s.execQuery(cli, "SHOW SUBSCRIPTIONS")
	if err != nil {
		return err
	}
	existingSubs := make(map[subEntry]subInfo)
	for _, res := range resp.Results {
		for _, series := range res.Series {
			for _, v := range series.Values {
				se := subEntry{
					db: series.Name,
				}
				si := subInfo{}
				for i, c := range series.Columns {
					switch c {
					case "retention_policy":
						se.rp = v[i].(string)
					case "name":
						se.name = v[i].(string)
					case "mode":
						si.Mode = v[i].(string)
					case "destinations":
						destinations := v[i].([]interface{})
						si.Destinations = make([]string, len(destinations))
						for i := range destinations {
							si.Destinations[i] = destinations[i].(string)
						}
					}
				}
				if se.name == subName {
					// This is an old-style subscription,
					// drop it and recreate with new name.
					err := s.dropSub(cli, se.name, se.db, se.rp)
					if err != nil {
						return err
					}
					se.name = s.clusterID
					err = s.createSub(cli, se.name, se.db, se.rp, si.Mode, si.Destinations)
					if err != nil {
						return err
					}
					existingSubs[se] = si
				}
				if se.name == s.clusterID {
					existingSubs[se] = si
				}
			}
		}
	}

	// Compare to configured list
	startedSubs := make(map[subEntry]bool)
	all := len(s.configSubs) == 0
	for se, si := range existingSubs {
		if (s.configSubs[se] || all) && !s.exConfigSubs[se] {
			// Check if this kapacitor instance is in the list of hosts
			for _, dest := range si.Destinations {
				u, err := url.Parse(dest)
				if err != nil {
					s.logger.Println("E! invalid URL in subscription destinations:", err)
					continue
				}
				pair := strings.Split(u.Host, ":")
				if pair[0] == s.hostname {
					numSubscriptions++
					_, err := s.startListener(se.db, se.rp, *u)
					if err != nil {
						s.logger.Println("E! failed to start listener:", err)
					}
					startedSubs[se] = true
					break
				}
			}
		}
	}
	// create and start any new subscriptions
	for _, se := range allSubs {
		// If we have been configured to subscribe and the subscription is not started yet.
		if (s.configSubs[se] || all) && !startedSubs[se] && !s.exConfigSubs[se] {
			u, err := url.Parse("udp://:0")
			if err != nil {
				return fmt.Errorf("could not create valid destination url, is hostname correct? err: %s", err)
			}

			numSubscriptions++
			addr, err := s.startListener(se.db, se.rp, *u)
			if err != nil {
				s.logger.Println("E! failed to start listener:", err)
			}

			// Get port from addr
			destination := fmt.Sprintf("udp://%s:%d", s.hostname, addr.Port)

			err = s.createSub(cli, se.name, se.db, se.rp, "ANY", []string{destination})
			if err != nil {
				return err
			}
		}
	}

	kapacitor.NumSubscriptionsVar.Set(numSubscriptions)
	return nil
}
Example 17
func newTicker() *backoff.Ticker {
	b := backoff.NewExponentialBackOff()
	b.MaxInterval = 15 * time.Second

	return backoff.NewTicker(b)
}
Example 18
func (s *influxdbCluster) linkSubscriptions() error {
	s.logger.Println("D! linking subscriptions")
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = s.startupTimeout
	ticker := backoff.NewTicker(b)
	var err error
	var cli influxdb.Client
	for range ticker.C {
		cli, err = s.NewClient()
		if err != nil {
			s.logger.Println("D! failed to connect to InfluxDB, retrying... ", err)
			continue
		}
		ticker.Stop()
		break
	}
	if err != nil {
		return err
	}

	numSubscriptions := int64(0)

	tokens, err := s.AuthService.ListSubscriptionTokens()
	if err != nil {
		return errors.Wrap(err, "getting existing subscription tokens")
	}
	revokeTokens := make(map[string]bool, len(tokens))
	for _, token := range tokens {
		revokeTokens[token] = true
	}

	// Get all databases and retention policies
	var allSubs []subEntry
	resp, err := s.execQuery(cli, &influxql.ShowDatabasesStatement{})
	if err != nil {
		return err
	}

	if len(resp.Results) == 1 && len(resp.Results[0].Series) == 1 && len(resp.Results[0].Series[0].Values) > 0 {
		clusters := resp.Results[0].Series[0].Values
		for _, v := range clusters {
			clustername := v[0].(string)

			rpResp, err := s.execQuery(cli, &influxql.ShowRetentionPoliciesStatement{
				Database: clustername,
			})
			if err != nil {
				return err
			}
			if len(rpResp.Results) == 1 && len(rpResp.Results[0].Series) == 1 && len(rpResp.Results[0].Series[0].Values) > 0 {
				rps := rpResp.Results[0].Series[0].Values
				for _, v := range rps {
					rpname := v[0].(string)

					se := subEntry{
						cluster: clustername,
						rp:      rpname,
						name:    s.subName,
					}
					allSubs = append(allSubs, se)
				}
			}

		}
	}

	// Get all existing subscriptions
	resp, err = s.execQuery(cli, &influxql.ShowSubscriptionsStatement{})
	if err != nil {
		return err
	}
	existingSubs := make(map[subEntry]subInfo)
	for _, res := range resp.Results {
		for _, series := range res.Series {
			for _, v := range series.Values {
				se := subEntry{
					cluster: series.Name,
				}
				si := subInfo{}
				for i, c := range series.Columns {
					switch c {
					case "retention_policy":
						se.rp = v[i].(string)
					case "name":
						se.name = v[i].(string)
					case "mode":
						si.Mode = v[i].(string)
					case "destinations":
						destinations := v[i].([]interface{})
						si.Destinations = make([]string, len(destinations))
						for i := range destinations {
							si.Destinations[i] = destinations[i].(string)
						}
					}
				}
				if se.name == legacySubName {
					// This is an old-style subscription,
					// drop it and recreate with new name.
					err := s.dropSub(cli, se.name, se.cluster, se.rp)
					if err != nil {
						return err
					}
					se.name = s.subName
					err = s.createSub(cli, se.name, se.cluster, se.rp, si.Mode, si.Destinations)
					if err != nil {
						return err
					}
					existingSubs[se] = si
				} else if se.name == s.clusterID {
					// This is just the cluster ID;
					// drop it and recreate with the new name.
					err := s.dropSub(cli, se.name, se.cluster, se.rp)
					if err != nil {
						return err
					}
					se.name = s.subName
					err = s.createSub(cli, se.name, se.cluster, se.rp, si.Mode, si.Destinations)
					if err != nil {
						return err
					}
					existingSubs[se] = si
				} else if se.name == s.subName {
					if len(si.Destinations) == 0 {
						s.logger.Println("E! found subscription without any destinations:", se)
						continue
					}
					u, err := url.Parse(si.Destinations[0])
					if err != nil {
						s.logger.Println("E! found subscription with invalid destinations:", si)
						continue
					}
					host, port, err := net.SplitHostPort(u.Host)
					if err != nil {
						s.logger.Println("E! found subscription with invalid destinations:", si)
						continue
					}
					pn, err := strconv.ParseInt(port, 10, 64)
					if err != nil {
						s.logger.Println("E! found subscription with invalid destinations:", si)
						continue
					}
					// Check if the hostname, port or protocol have changed
					if host != s.hostname ||
						u.Scheme != s.protocol ||
						((u.Scheme == "http" || u.Scheme == "https") && int(pn) != s.httpPort) ||
						(s.useTokens && (u.User == nil || u.User.Username() != httpd.SubscriptionUser)) {
						// Remove access for changing subscriptions.
						if u.User != nil {
							if p, ok := u.User.Password(); ok {
								s.AuthService.RevokeSubscriptionAccess(p)
							}
						}
						// Something changed, drop the sub and let it get recreated
						s.dropSub(cli, se.name, se.cluster, se.rp)
					} else {
						existingSubs[se] = si
						// Do not revoke tokens that are still in use
						if u.User != nil {
							if t, ok := u.User.Password(); ok {
								revokeTokens[t] = false
							}
						}
					}
				}
			}
		}
	}

	// Compare to configured list
	startedSubs := make(map[subEntry]bool)
	all := len(s.configSubs) == 0
	for se, si := range existingSubs {
		if (s.configSubs[se] || all) && !s.exConfigSubs[se] && !s.runningSubs[se] {
			// Check if this kapacitor instance is in the list of hosts
			for _, dest := range si.Destinations {
				u, err := url.Parse(dest)
				if err != nil {
					s.logger.Println("E! invalid URL in subscription destinations:", err)
					continue
				}
				host, port, err := net.SplitHostPort(u.Host)
				if err != nil {
					s.logger.Println("E! invalid host in subscription destinations:", err)
					continue
				}
				if host == s.hostname {
					numSubscriptions++
					if u.Scheme == "udp" {
						_, err := s.startUDPListener(se.cluster, se.rp, port)
						if err != nil {
							s.logger.Println("E! failed to start UDP listener:", err)
						}
					}
					startedSubs[se] = true
					s.runningSubs[se] = true
					break
				}
			}
		}
	}
	// create and start any new subscriptions
	for _, se := range allSubs {
		// If we have been configured to subscribe and the subscription is not started yet.
		if (s.configSubs[se] || all) && !startedSubs[se] && !s.exConfigSubs[se] && !s.runningSubs[se] {
			var destination string
			switch s.protocol {
			case "http", "https":
				if s.useTokens {
					// Generate token
					token, err := s.generateRandomToken()
					if err != nil {
						return errors.Wrap(err, "generating token")
					}
					err = s.AuthService.GrantSubscriptionAccess(token, se.cluster, se.rp)
					if err != nil {
						return err
					}
					u := url.URL{
						Scheme: s.protocol,
						User:   url.UserPassword(httpd.SubscriptionUser, token),
						Host:   fmt.Sprintf("%s:%d", s.hostname, s.httpPort),
					}
					destination = u.String()
				} else {
					u := url.URL{
						Scheme: s.protocol,
						Host:   fmt.Sprintf("%s:%d", s.hostname, s.httpPort),
					}
					destination = u.String()
				}
			case "udp":
				addr, err := s.startUDPListener(se.cluster, se.rp, "0")
				if err != nil {
					s.logger.Println("E! failed to start UDP listener:", err)
				}
				destination = fmt.Sprintf("udp://%s:%d", s.hostname, addr.Port)
			}

			numSubscriptions++

			err = s.createSub(cli, se.name, se.cluster, se.rp, "ANY", []string{destination})
			if err != nil {
				return err
			}
			s.runningSubs[se] = true
		}
	}

	// revoke any extra tokens
	for t, revoke := range revokeTokens {
		if revoke {
			s.AuthService.RevokeSubscriptionAccess(t)
		}
	}

	kapacitor.NumSubscriptionsVar.Set(numSubscriptions)
	return nil
}