// waitUntilFreshAndBlock waits until the cache is at least as fresh as the given <resourceVersion>.
// NOTE: This function acquires a lock and doesn't release it.
// You HAVE TO explicitly call w.RUnlock() after this function.
func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *util.Trace) error {
	startTime := w.clock.Now()
	go func() {
		// Wake us up when the time limit has expired. The docs
		// promise that time.After (well, NewTimer, which it calls)
		// will wait *at least* the duration given. Since this go
		// routine starts sometime after we record the start time, and
		// it will wake up the loop below sometime after the broadcast,
		// we don't need to worry about waking it up before the time
		// has expired accidentally.
		<-w.clock.After(blockTimeout)
		w.cond.Broadcast()
	}()

	w.RLock()
	if trace != nil {
		trace.Step("watchCache lock acquired")
	}
	for w.resourceVersion < resourceVersion {
		if w.clock.Since(startTime) >= blockTimeout {
			// Timeout with retry after 1 second.
			return errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %v, current: %v", resourceVersion, w.resourceVersion), 1)
		}
		w.cond.Wait()
	}
	if trace != nil {
		trace.Step("watchCache fresh enough")
	}
	return nil
}
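Because the read lock is still held on return, even on the timeout path, every caller must pair this call with an explicit RUnlock. Below is a minimal caller sketch illustrating that contract; the WaitUntilFreshAndList wrapper and the w.store.List() call are assumptions for illustration, not shown in the snippet above.

// Hypothetical caller illustrating the lock contract: waitUntilFreshAndBlock
// leaves the read lock held whether it succeeds or times out, so the caller
// releases it exactly once via defer.
func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, trace *util.Trace) ([]interface{}, uint64, error) {
	err := w.waitUntilFreshAndBlock(resourceVersion, trace)
	defer w.RUnlock()
	if err != nil {
		return nil, 0, err
	}
	// Safe to read cache state here: the read lock is still held.
	return w.store.List(), w.resourceVersion, nil
}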
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any metav1.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// These channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement reads something other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// Panics don't cross goroutine boundaries, so we have to handle them ourselves.
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine.
			panicCh <- panicReason
		})
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()

	select {
	case result = <-ch:
		if status, ok := result.(*metav1.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
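A minimal sketch of how a handler might wrap its storage call with finishRequest. The resultFunc definition and the createWithTimeout helper are assumptions for illustration; only finishRequest itself comes from the snippet above.

// Assumed definition: resultFunc is the callback shape finishRequest expects,
// doing the work and returning either a result object or an error.
type resultFunc func() (runtime.Object, error)

// createWithTimeout is a hypothetical handler helper: running doCreate through
// finishRequest means a slow backend cannot wedge the request goroutine past
// the deadline, and a panic in doCreate is re-raised on this goroutine.
func createWithTimeout(timeout time.Duration, doCreate resultFunc) (runtime.Object, error) {
	return finishRequest(timeout, func() (runtime.Object, error) {
		return doCreate()
	})
}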
// Create attempts to create a new eviction. That is, it tries to evict a pod.
func (r *EvictionREST) Create(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	eviction := obj.(*policy.Eviction)

	obj, err := r.store.Get(ctx, eviction.Name, &metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	pod := obj.(*api.Pod)
	var rtStatus *metav1.Status
	var pdbName string
	err = retry.RetryOnConflict(EvictionsRetry, func() error {
		pdbs, err := r.getPodDisruptionBudgets(ctx, pod)
		if err != nil {
			return err
		}

		if len(pdbs) > 1 {
			rtStatus = &metav1.Status{
				Status:  metav1.StatusFailure,
				Message: "This pod has more than one PodDisruptionBudget, which the eviction subresource does not support.",
				Code:    500,
			}
			return nil
		} else if len(pdbs) == 1 {
			pdb := pdbs[0]
			pdbName = pdb.Name
			// Try to verify-and-decrement.
			// If it was false already, or if it becomes false during the course of our retries,
			// raise an error marked as a 429.
			ok, err := r.checkAndDecrement(pod.Namespace, pod.Name, pdb)
			if err != nil {
				return err
			}
			if !ok {
				rtStatus = &metav1.Status{
					Status: metav1.StatusFailure,
					// TODO(mml): Include some more details about why the eviction is disallowed.
					// Ideally any such text is generated by the DisruptionController (offline).
					Message: "Cannot evict pod as it would violate the pod's disruption budget.",
					Code:   429,
					// TODO(mml): Add a Retry-After header. Once there are time-based
					// budgets, we can sometimes compute a sensible suggested value. But
					// even without that, we can give a suggestion (10 minutes?) that
					// prevents well-behaved clients from hammering us.
				}
			}
		}
		return nil
	})
	if err == wait.ErrWaitTimeout {
		err = errors.NewTimeoutError(fmt.Sprintf("couldn't update PodDisruptionBudget %q due to conflicts", pdbName), 10)
	}
	if err != nil {
		return nil, err
	}

	if rtStatus != nil {
		return rtStatus, nil
	}

	// At this point there was either no PDB or we succeeded in decrementing it.
	// Try the delete.
	_, err = r.store.Delete(ctx, eviction.Name, eviction.DeleteOptions)
	if err != nil {
		return nil, err
	}

	// Success!
	return &metav1.Status{Status: metav1.StatusSuccess}, nil
}
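This handler backs the pods/eviction subresource, which clients exercise by POSTing an Eviction object. A minimal client-side sketch follows, assuming a configured client-go clientset with the policy/v1beta1 Eviction type and the apierrors helper package; the evictPod helper is hypothetical.

// evictPod is a hypothetical helper showing the client side of the eviction
// subresource: a 429 from the handler above (the eviction would violate the
// pod's disruption budget) surfaces here as a TooManyRequests API error.
func evictPod(clientset kubernetes.Interface, namespace, name string) error {
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	err := clientset.CoreV1().Pods(namespace).Evict(eviction)
	if apierrors.IsTooManyRequests(err) {
		// Disallowed by the PodDisruptionBudget right now; retry later.
		return err
	}
	return err
}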