// WaitUntilPodIsGone waits until the named Pod will disappear func WaitUntilPodIsGone(c kcoreclient.PodInterface, podName string, timeout time.Duration) error { return wait.Poll(1*time.Second, timeout, func() (bool, error) { _, err := c.Get(podName) if err != nil { if strings.Contains(err.Error(), "not found") { return true, nil } return true, err } return false, nil }) }
// GetPodNamesByFilter looks up pods that satisfy the predicate and returns their names. func GetPodNamesByFilter(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool) (podNames []string, err error) { podList, err := c.List(kapi.ListOptions{LabelSelector: label}) if err != nil { return nil, err } for _, pod := range podList.Items { if predicate(pod) { podNames = append(podNames, pod.Name) } } return podNames, nil }
// UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored.
// The returned bool value can be used to tell if the pod is actually updated.
//
// The update is retried for up to one minute (polling every 10ms): on each
// attempt the pod is re-fetched, applyUpdate mutates it, and the result is
// pushed to the apiserver. A conflicting concurrent write causes a retry; a
// Get or applyUpdate failure aborts immediately. Two error classes are
// deliberately swallowed (returned as nil) after the loop: "pod not found"
// and precondition-violated — in both cases podUpdated stays false.
func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, bool, error) {
	var err error
	var podUpdated bool
	// Keep the caller's original pod around for error messages and for the
	// name used on each re-fetch; `pod` itself is overwritten inside the loop.
	oldPod := pod
	if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		// Re-fetch the latest version so the update is applied against the
		// current resourceVersion.
		pod, err = podClient.Get(oldPod.Name)
		if err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		if err = applyUpdate(pod); err != nil {
			return false, err
		}
		if pod, err = podClient.Update(pod); err == nil {
			// Update successful.
			return true, nil
		}
		// TODO: don't retry on perm-failed errors and handle them gracefully
		// Update could have failed due to conflict error. Try again.
		return false, nil
	}); err == nil {
		// When there's no error, we've updated this pod.
		podUpdated = true
	}

	// Handle returned error from wait poll
	if err == wait.ErrWaitTimeout {
		// Replace the generic timeout error with one that identifies the pod.
		err = fmt.Errorf("timed out trying to update pod: %#v", oldPod)
	}
	// Ignore the pod not found error, but the pod isn't updated.
	if errors.IsNotFound(err) {
		glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name)
		err = nil
	}
	// Ignore the precondition violated error, but the pod isn't updated.
	if err == errorsutil.ErrPreconditionViolated {
		glog.V(4).Infof("%s %s/%s precondition doesn't hold, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name)
		err = nil
	}

	// If the error is non-nil the returned pod cannot be trusted; if podUpdated is false, the pod isn't updated;
	// if the error is nil and podUpdated is true, the returned pod contains the applied update.
	return pod, podUpdated, err
}
// NewPodWatch creates a pod watching function which is backed by a
// FIFO/reflector pair. This avoids managing watches directly.
// A stop channel to close the watch's reflector is also returned.
// It is the caller's responsibility to defer closing the stop channel to prevent leaking resources.
//
// Each call to the returned function blocks until the next observed state of
// the named pod is available and returns it.
//
// NOTE(review): the namespace and resourceVersion parameters are not used in
// this body — presumably the client is already namespaced by the caller;
// confirm whether resourceVersion was meant to seed the reflector.
func NewPodWatch(client kcoreclient.PodInterface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
	// Restrict both List and Watch to the single pod with this name.
	fieldSelector := fields.OneTermEqualSelector("metadata.name", name)
	podLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return client.List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return client.Watch(options)
		},
	}
	// The reflector feeds observed pod states into the FIFO until the stop
	// channel is closed.
	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *kapi.Pod {
		// Pop blocks until an item is available.
		obj := cache.Pop(queue)
		// NOTE(review): this assertion panics if the queue ever yields a
		// non-Pod item (e.g. a deleted-state tombstone) — confirm the FIFO
		// here can only contain *kapi.Pod.
		return obj.(*kapi.Pod)
	}
}
func installationStarted(c kcoreclient.PodInterface, name string, s kcoreclient.SecretInterface) wait.ConditionFunc { return func() (bool, error) { pod, err := c.Get(name) if err != nil { return false, err } if pod.Status.Phase == kapi.PodPending { return false, nil } // delete a secret named the same as the pod if it exists if secret, err := s.Get(name); err == nil { if secret.Annotations[newcmd.GeneratedForJob] == "true" && secret.Annotations[newcmd.GeneratedForJobFor] == pod.Annotations[newcmd.GeneratedForJobFor] { if err := s.Delete(name, nil); err != nil { glog.V(4).Infof("Failed to delete install secret %s: %v", name, err) } } } return true, nil } }
func installationComplete(c kcoreclient.PodInterface, name string, out io.Writer) wait.ConditionFunc { return func() (bool, error) { pod, err := c.Get(name) if err != nil { if kapierrors.IsNotFound(err) { return false, fmt.Errorf("installation pod was deleted; unable to determine whether it completed successfully") } return false, nil } switch pod.Status.Phase { case kapi.PodSucceeded: fmt.Fprintf(out, "--> Success\n") if err := c.Delete(name, nil); err != nil { glog.V(4).Infof("Failed to delete install pod %s: %v", name, err) } return true, nil case kapi.PodFailed: return true, fmt.Errorf("installation of %q did not complete successfully", name) default: return false, nil } } }
"k8s.io/kubernetes/test/e2e/framework" ) const ( gcePDDetachTimeout = 10 * time.Minute gcePDDetachPollTime = 10 * time.Second nodeStatusTimeout = 1 * time.Minute nodeStatusPollTime = 1 * time.Second gcePDRetryTimeout = 5 * time.Minute gcePDRetryPollTime = 5 * time.Second ) var _ = framework.KubeDescribe("Pod Disks", func() { var ( podClient unversionedcore.PodInterface nodeClient unversionedcore.NodeInterface host0Name types.NodeName host1Name types.NodeName ) f := framework.NewDefaultFramework("pod-disks") BeforeEach(func() { framework.SkipUnlessNodeCountIsAtLeast(2) podClient = f.ClientSet.Core().Pods(f.Namespace.Name) nodeClient = f.ClientSet.Core().Nodes() nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name) host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)