Example #1
// limitedLogAndRetry stops retrying after maxTimeout, failing the build.
func limitedLogAndRetry(buildupdater buildclient.BuildUpdater, maxTimeout time.Duration) controller.RetryFunc {
	return func(obj interface{}, err error, retries controller.Retry) bool {
		isFatal := strategy.IsFatal(err)
		build := obj.(*buildapi.Build)
		if !isFatal && time.Since(retries.StartTimestamp.Time) < maxTimeout {
			glog.V(4).Infof("Retrying Build %s/%s with error: %v", build.Namespace, build.Name, err)
			return true
		}
		build.Status.Phase = buildapi.BuildPhaseFailed
		if !isFatal {
			build.Status.Reason = buildapi.StatusReasonExceededRetryTimeout
			build.Status.Message = buildapi.StatusMessageExceededRetryTimeout
		} else {
			// Fatal errors fail the build right away; surface the underlying error.
			build.Status.Message = errors.ErrorToSentence(err)
		}
		now := unversioned.Now()
		build.Status.CompletionTimestamp = &now
		glog.V(3).Infof("Giving up retrying Build %s/%s: %v", build.Namespace, build.Name, err)
		utilruntime.HandleError(err)
		if err := buildupdater.Update(build.Namespace, build); err != nil {
			// retry update, but only on error other than NotFound
			return !kerrors.IsNotFound(err)
		}
		return false
	}
}
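For context, here is a minimal, self-contained sketch of how a RetryFunc like the one above is typically consumed: a retry loop calls the handler, and on error asks the RetryFunc whether to requeue the object. Everything below (retryState, processWithRetry, the handler signature) is an illustrative stand-in, not the actual controller package API; the real controller drives this decision from a work queue rather than a blocking loop, but the contract is the same: return true to retry, false to drop.

package main

import (
	"fmt"
	"time"
)

// retryState mirrors the idea behind controller.Retry: how many attempts have
// been made and when retrying started. (Illustrative only.)
type retryState struct {
	Count          int
	StartTimestamp time.Time
}

// retryFunc decides, given the object, the error, and the retry state,
// whether the object should be requeued.
type retryFunc func(obj interface{}, err error, retries retryState) bool

// processWithRetry is a hypothetical consumer of a retryFunc: it keeps calling
// handle until it succeeds or the retryFunc gives up.
func processWithRetry(obj interface{}, handle func(interface{}) error, shouldRetry retryFunc) {
	state := retryState{StartTimestamp: time.Now()}
	for {
		err := handle(obj)
		if err == nil {
			return
		}
		if !shouldRetry(obj, err, state) {
			return // the retryFunc has already recorded the failure
		}
		state.Count++
		time.Sleep(100 * time.Millisecond) // simplistic backoff for the sketch
	}
}

func main() {
	attempts := 0
	handle := func(obj interface{}) error {
		attempts++
		return fmt.Errorf("transient failure #%d", attempts)
	}
	// Give up after 300ms, echoing the maxTimeout cutoff above.
	shouldRetry := func(obj interface{}, err error, r retryState) bool {
		return time.Since(r.StartTimestamp) < 300*time.Millisecond
	}
	processWithRetry("build-1", handle, shouldRetry)
	fmt.Printf("gave up after %d attempts\n", attempts)
}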
Example #2
// handleComplete is the default OnComplete handler. It determines which build
// from the same BuildConfig should run next and sets that build's
// StartTimestamp, which causes HandleBuild() to pick the build up and start
// executing it immediately.
func handleComplete(lister buildclient.BuildLister, updater buildclient.BuildUpdater, build *buildapi.Build) error {
	bcName := buildutil.ConfigNameForBuild(build)
	if len(bcName) == 0 {
		return nil
	}
	nextBuilds, hasRunningBuilds, err := GetNextConfigBuild(lister, build.Namespace, bcName)
	if err != nil {
		return fmt.Errorf("unable to get the next build for %s/%s: %v", build.Namespace, build.Name, err)
	}
	if hasRunningBuilds || len(nextBuilds) == 0 {
		return nil
	}
	now := unversioned.Now()
	for _, build := range nextBuilds {
		build.Status.StartTimestamp = &now
		err := wait.Poll(500*time.Millisecond, 5*time.Second, func() (bool, error) {
			err := updater.Update(build.Namespace, build)
			if err != nil && errors.IsConflict(err) {
				glog.V(5).Infof("Error updating build %s/%s: %v (will retry)", build.Namespace, build.Name, err)
				return false, nil
			}
			return true, err
		})
		if err != nil {
			return err
		}
	}
	return nil
}
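The wait.Poll loop above is a common optimistic-concurrency pattern: keep retrying the update while the API server reports a conflict, and stop on success or on any other error. Below is a hedged, self-contained sketch of that pattern pulled out into a reusable helper; the updateWithConflictRetry name and the fakeUpdate stand-in are illustrative, not part of the build client API.

package main

import (
	"fmt"
	"time"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
)

// updateWithConflictRetry polls the given update function until it succeeds,
// retrying only on resource-version conflicts. Any other error aborts the
// poll immediately; exhausting the timeout returns wait's timeout error.
func updateWithConflictRetry(update func() error) error {
	return wait.Poll(500*time.Millisecond, 5*time.Second, func() (bool, error) {
		err := update()
		if err != nil && kerrors.IsConflict(err) {
			// Conflict: someone else updated the object; try again.
			return false, nil
		}
		// Done on success, or abort with a non-conflict error.
		return err == nil, err
	})
}

func main() {
	calls := 0
	// fakeUpdate stands in for updater.Update(build.Namespace, build).
	fakeUpdate := func() error {
		calls++
		if calls < 3 {
			// A real client would return a Conflict status error like this one.
			return kerrors.NewConflict(
				schema.GroupResource{Group: "build.openshift.io", Resource: "builds"},
				"my-build", fmt.Errorf("object was modified"))
		}
		return nil
	}
	if err := updateWithConflictRetry(fakeUpdate); err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Printf("update succeeded after %d attempts\n", calls)
}

Newer client-go versions also ship retry.RetryOnConflict, which packages the same retry-on-conflict decision behind a backoff policy; the inline wait.Poll shown in the example predates that helper.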
Example #3
// limitedLogAndRetry stops retrying after maxTimeout, failing the build.
func limitedLogAndRetry(buildupdater buildclient.BuildUpdater, maxTimeout time.Duration) controller.RetryFunc {
	return func(obj interface{}, err error, retries controller.Retry) bool {
		build := obj.(*buildapi.Build)
		if time.Since(retries.StartTimestamp.Time) < maxTimeout {
			glog.V(4).Infof("Retrying Build %s/%s with error: %v", build.Namespace, build.Name, err)
			return true
		}
		build.Status.Phase = buildapi.BuildPhaseFailed
		build.Status.Message = err.Error()
		now := kutil.Now()
		build.Status.CompletionTimestamp = &now
		glog.V(3).Infof("Giving up retrying Build %s/%s: %v", build.Namespace, build.Name, err)
		kutil.HandleError(err)
		if err := buildupdater.Update(build.Namespace, build); err != nil {
			// retry update, but only on error other than NotFound
			return !kerrors.IsNotFound(err)
		}
		return false
	}
}