// WaitForAuthorizationUpdate checks whether the given user can perform the named verb on the named resource.
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned.
func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
	review := &authorizationv1beta1.SubjectAccessReview{
		Spec: authorizationv1beta1.SubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Group:     resource.Group,
				Verb:      verb,
				Resource:  resource.Resource,
				Namespace: namespace,
			},
			User: user,
		},
	}
	err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
		response, err := c.SubjectAccessReviews().Create(review)
		// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
		// has adjusted as expected. In this case, simply wait one second and hope it's up to date.
		if apierrors.IsNotFound(err) {
			fmt.Printf("SubjectAccessReview endpoint is missing\n")
			time.Sleep(1 * time.Second)
			return true, nil
		}
		if err != nil {
			return false, err
		}
		if response.Status.Allowed != allowed {
			return false, nil
		}
		return true, nil
	})
	return err
}
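// Illustrative caller (not part of the original source): after creating a
// RoleBinding in a test, block until the authorizer caches observe the grant
// before exercising the API as that user. The clientset `c`, the
// service-account user string, and the namespace are hypothetical
// placeholders; only WaitForAuthorizationUpdate comes from the code above.
func exampleWaitForRBACPropagation(c clientset.Interface) error {
	return WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
		"system:serviceaccount:e2e-test:default", // hypothetical user
		"e2e-test",                               // hypothetical namespace
		"get",
		schema.GroupResource{Resource: "pods"},
		true) // wait until access is reported as allowed
}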
func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""

	// First delete the oldName RC and orphan its pods.
	trueVar := true
	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar})
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName, metav1.GetOptions{})
		if err == nil {
			return false, nil
		} else if errors.IsNotFound(err) {
			return true, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return err
	}

	// Then create the same RC with the new name.
	_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
	return err
}
func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient fedclientset.Interface) error {
	service := cachedService.lastState
	glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
	var err error
	for i := 0; i < clientRetryCount; i++ {
		// Note: assign with '=' rather than ':=' so the error is not shadowed
		// and survives the loop to be returned after retries are exhausted.
		_, err = fedClient.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		_, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
		if err == nil {
			glog.V(2).Infof("Successfully updated service %s/%s to federation apiserver", service.Namespace, service.Name)
			return nil
		}
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
			return err
		}
		time.Sleep(cachedService.nextFedUpdateDelay())
	}
	return err
}
func (f *Framework) deleteFederationNs() {
	ns := f.FederationNamespace
	By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}

	clientset := f.FederationClientset
	// First delete the namespace from the federation apiserver.
	// Also delete the corresponding namespaces from underlying clusters.
	orphanDependents := false
	if err := clientset.Core().Namespaces().Delete(ns.Name, &v1.DeleteOptions{OrphanDependents: &orphanDependents}); err != nil {
		framework.Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
	}

	// Verify that it got deleted.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := clientset.Core().Namespaces().Get(ns.Name, metav1.GetOptions{}); err != nil {
			if apierrors.IsNotFound(err) {
				return true, nil
			}
			framework.Logf("Error while waiting for namespace to be terminated: %v", err)
		}
		return false, nil
	})
	if err != nil {
		// The only error wait.PollImmediate can return here is a timeout; the
		// NotFound case is already handled inside the condition function.
		framework.Failf("Couldn't delete ns %q: %s", ns.Name, err)
	}
}
// Ensure that when a deploymentRollback is created for a deployment that has already been deleted
// by the API server, the API server returns a not-found error.
func TestEtcdCreateDeploymentRollbackNoDeployment(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Deployment.Store.DestroyFunc()
	rollbackStorage := storage.Rollback
	ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), namespace)

	_, err := rollbackStorage.Create(ctx, &extensions.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: map[string]string{},
		RollbackTo:         extensions.RollbackConfig{Revision: 1},
	})
	if err == nil {
		t.Fatalf("Expected not-found error but got nothing")
	}
	if !errors.IsNotFound(storeerr.InterpretGetError(err, extensions.Resource("deployments"), name)) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}

	_, err = storage.Deployment.Get(ctx, name, &metav1.GetOptions{})
	if err == nil {
		t.Fatalf("Expected not-found error but got nothing")
	}
	if !errors.IsNotFound(storeerr.InterpretGetError(err, extensions.Resource("deployments"), name)) {
		t.Fatalf("Unexpected error: %v", err)
	}
}
func TestStoreDelete(t *testing.T) {
	podA := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "foo"},
		Spec:       api.PodSpec{NodeName: "machine"},
	}

	testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test")
	destroyFunc, registry := NewTestGenericStoreRegistry(t)
	defer destroyFunc()

	// test failure condition
	_, err := registry.Delete(testContext, podA.Name, nil)
	if !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}

	// create pod
	_, err = registry.Create(testContext, podA)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// delete object
	_, err = registry.Delete(testContext, podA.Name, nil)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// try to get an item which should be deleted
	_, err = registry.Get(testContext, podA.Name, &metav1.GetOptions{})
	if !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
	// Get the groupVersions exposed at /api
	v := &metav1.APIVersions{}
	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
	apiGroup := metav1.APIGroup{}
	if err == nil {
		apiGroup = apiVersionsToAPIGroup(v)
	}
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}

	// Get the groupVersions exposed at /apis
	apiGroupList = &metav1.APIGroupList{}
	err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}
	// To be compatible with a v1.0 server: if it's a 403 or 404, ignore it and return whatever we got from /api.
	if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
		apiGroupList = &metav1.APIGroupList{}
	}

	// Append the group retrieved from /api to the list.
	apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
	return apiGroupList, nil
}
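// A minimal usage sketch (assumed, not from the original source): list every
// advertised group and its preferred version. The *DiscoveryClient `d` is a
// hypothetical, already-constructed client.
func printServerGroups(d *DiscoveryClient) error {
	groups, err := d.ServerGroups()
	if err != nil {
		return err
	}
	for _, g := range groups.Groups {
		fmt.Printf("group %q, preferred version %q\n", g.Name, g.PreferredVersion.GroupVersion)
	}
	return nil
}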
// verifyCascadingDeletionForIngress verifies that ingresses are deleted from
// underlying clusters when orphan dependents is false and that they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
	ingress := createIngressOrFail(clientset, nsName)
	ingressName := ingress.Name

	// Check subclusters if the ingress was created there.
	By(fmt.Sprintf("Waiting for ingress %s to be created in all underlying clusters", ingressName))
	waitForIngressShardsOrFail(nsName, ingress, clusters)

	By(fmt.Sprintf("Deleting ingress %s", ingressName))
	deleteIngressOrFail(clientset, nsName, ingressName, orphanDependents)

	By(fmt.Sprintf("Verifying ingresses %s in underlying clusters", ingressName))
	errMessages := []string{}
	// The ingress should be present in underlying clusters unless orphanDependents is false.
	shouldExist := orphanDependents == nil || *orphanDependents
	for clusterName, clusterClientset := range clusters {
		_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
		if shouldExist && errors.IsNotFound(err) {
			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
		} else if !shouldExist && !errors.IsNotFound(err) {
			errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for ingress %s in cluster %s, got error: %v", ingressName, clusterName, err))
		}
	}
	if len(errMessages) != 0 {
		framework.Failf("%s", strings.Join(errMessages, "; "))
	}
}
// Ensure that when the scheduler creates a binding for a pod that has already been deleted
// by the API server, the API server returns a not-found error.
func TestEtcdCreateBindingNoPod(t *testing.T) {
	storage, bindingStorage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	ctx := genericapirequest.NewDefaultContext()

	// Assume that a pod has undergone the following:
	// - Create (apiserver)
	// - Schedule (scheduler)
	// - Delete (apiserver)
	_, err := bindingStorage.Create(ctx, &api.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"},
		Target:     api.ObjectReference{Name: "machine"},
	})
	if err == nil {
		t.Fatalf("Expected not-found error but got nothing")
	}
	if !errors.IsNotFound(storeerr.InterpretGetError(err, api.Resource("pods"), "foo")) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}

	_, err = storage.Get(ctx, "foo", &metav1.GetOptions{})
	if err == nil {
		t.Fatalf("Expected not-found error but got nothing")
	}
	if !errors.IsNotFound(storeerr.InterpretGetError(err, api.Resource("pods"), "foo")) {
		t.Fatalf("Unexpected error: %v", err)
	}
}
func TestStoreDeleteCollection(t *testing.T) {
	podA := &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
	podB := &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}

	testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test")
	destroyFunc, registry := NewTestGenericStoreRegistry(t)
	defer destroyFunc()

	if _, err := registry.Create(testContext, podA); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if _, err := registry.Create(testContext, podB); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// Delete all pods.
	deleted, err := registry.DeleteCollection(testContext, nil, &api.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	deletedPods := deleted.(*api.PodList)
	if len(deletedPods.Items) != 2 {
		t.Errorf("Unexpected number of pods deleted: %d, expected: 2", len(deletedPods.Items))
	}

	if _, err := registry.Get(testContext, podA.Name, &metav1.GetOptions{}); !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}
	if _, err := registry.Get(testContext, podB.Name, &metav1.GetOptions{}); !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}
}
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.dClient.Deployments(namespace)
	replicaSets := reaper.rsClient.ReplicaSets(namespace)
	rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout}

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// Set the deployment's history and scale to 0.
		// TODO: replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
		return deployments.Get(name, metav1.GetOptions{})
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Do not cascade deletion for overlapping deployments.
	if len(deployment.Annotations[deploymentutil.OverlapAnnotation]) > 0 {
		return deployments.Delete(name, nil)
	}

	// Stop all replica sets.
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}
	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rs := range rsList.Items {
		if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(ScaleError)
			if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) {
				continue
			}
			errList = append(errList, err)
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete the deployment at the end.
	// Note: we delete the deployment last so that if removing the RSs fails, we at least have the deployment to retry.
	var falseVar = false
	nonOrphanOption := api.DeleteOptions{OrphanDependents: &falseVar}
	return deployments.Delete(name, &nonOrphanOption)
}
func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{})
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != v1.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[v1.ServiceAccountNameKey]
			uid := secret.Annotations[v1.ServiceAccountUIDKey]
			tokenData := secret.Data[v1.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
// Get retrieves the object from the Namespace and Name fields.
func (i *Info) Get() (err error) {
	obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export)
	if err != nil {
		if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != api.NamespaceDefault && i.Namespace != api.NamespaceAll {
			// If the object was not found, check whether the namespace itself is
			// missing; a missing namespace is the more useful error to surface.
			err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do().Error()
			if err2 != nil && errors.IsNotFound(err2) {
				return err2
			}
		}
		return err
	}
	i.Object = obj
	i.ResourceVersion, _ = i.Mapping.MetadataAccessor.ResourceVersion(obj)
	return nil
}
func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) error {
	err := wait.Poll(10*time.Second, FederatedDeploymentTimeout, func() (bool, error) {
		fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		specReplicas, statusReplicas := int32(0), int32(0)
		for _, cluster := range clusters {
			dep, err := cluster.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
			if err != nil && !errors.IsNotFound(err) {
				By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.name, namespace, deploymentName, err))
				return false, err
			}
			if err == nil {
				if !verifyDeployment(fdep, dep) {
					By(fmt.Sprintf("Deployment meta or spec does not match for cluster %q:\n	federation: %v\n	cluster: %v", cluster.name, fdep, dep))
					return false, nil
				}
				specReplicas += *dep.Spec.Replicas
				statusReplicas += dep.Status.Replicas
			}
		}
		if statusReplicas == fdep.Status.Replicas && specReplicas >= *fdep.Spec.Replicas {
			return true, nil
		}
		By(fmt.Sprintf("Replica counts do not match, federation replicas: %v/%v, cluster replicas: %v/%v\n", *fdep.Spec.Replicas, fdep.Status.Replicas, specReplicas, statusReplicas))
		return false, nil
	})
	return err
}
// getOrCreateTargetControllerWithClient looks for an existing controller with
// sourceId. If found, the existing controller is returned with true
// indicating that the controller already exists. If the controller isn't
// found, a new one is created and returned along with false indicating the
// controller was created.
//
// Existing controllers are validated to ensure their sourceIdAnnotation
// matches sourceId; if there's a mismatch, an error is returned.
func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
	existingRc, err := r.existingController(controller)
	if err != nil {
		if !errors.IsNotFound(err) {
			// There was an error trying to find the controller; don't assume we
			// should create it.
			return nil, false, err
		}
		if controller.Spec.Replicas <= 0 {
			return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d", controller.Name, controller.Spec.Replicas)
		}
		// The controller wasn't found, so create it.
		if controller.Annotations == nil {
			controller.Annotations = map[string]string{}
		}
		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas)
		controller.Annotations[sourceIdAnnotation] = sourceId
		controller.Spec.Replicas = 0
		newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller)
		return newRc, false, err
	}
	// Validate and use the existing controller.
	annotations := existingRc.Annotations
	source := annotations[sourceIdAnnotation]
	_, ok := annotations[desiredReplicasAnnotation]
	if source != sourceId || !ok {
		return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations)
	}
	return existingRc, true, nil
}
// isNotFound checks if the given error is a NotFound status error,
// unwrapping a *url.Error first if the transport wrapped the status error.
func isNotFound(err error) bool {
	statusErr := err
	if urlErr, ok := err.(*url.Error); ok {
		statusErr = urlErr.Err
	}
	return errors.IsNotFound(statusErr)
}
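// Why the unwrap above matters, as a sketch (illustrative only, not from the
// original source): transport helpers can wrap the API status error in a
// *url.Error, which hides it from a bare errors.IsNotFound check.
func exampleIsNotFoundUnwrap() {
	notFound := errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "missing")
	wrapped := &url.Error{Op: "Get", URL: "/api/v1/pods/missing", Err: notFound}
	fmt.Println(errors.IsNotFound(wrapped)) // false: the wrapper hides the status error
	fmt.Println(isNotFound(wrapped))        // true: the status error is unwrapped first
}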
func (o *DrainOptions) waitForDelete(pods []api.Pod, interval, timeout time.Duration, usingEviction bool, getPodFn func(string, string) (*api.Pod, error)) ([]api.Pod, error) {
	var verbStr string
	if usingEviction {
		verbStr = "evicted"
	} else {
		verbStr = "deleted"
	}
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pendingPods := []api.Pod{}
		for i, pod := range pods {
			p, err := getPodFn(pod.Namespace, pod.Name)
			if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
				cmdutil.PrintSuccess(o.mapper, false, o.out, "pod", pod.Name, false, verbStr)
				continue
			} else if err != nil {
				return false, err
			} else {
				pendingPods = append(pendingPods, pods[i])
			}
		}
		pods = pendingPods
		if len(pendingPods) > 0 {
			return false, nil
		}
		return true, nil
	})
	return pods, err
}
func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*v1.Secret, error) {
	// Look up in the cache.
	obj, exists, err := e.secrets.GetByKey(makeCacheKey(ns, name))
	if err != nil {
		return nil, err
	}
	if exists {
		secret, ok := obj.(*v1.Secret)
		if !ok {
			// Report the object we actually got, not the nil result of the failed assertion.
			return nil, fmt.Errorf("expected *v1.Secret, got %#v", obj)
		}
		// Ensure the UID matches if given.
		if len(uid) == 0 || uid == secret.UID {
			return secret, nil
		}
	}

	if !fetchOnCacheMiss {
		return nil, nil
	}

	// Live lookup.
	secret, err := e.client.Core().Secrets(ns).Get(name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// Ensure the UID matches if given.
	if len(uid) == 0 || uid == secret.UID {
		return secret, nil
	}
	return nil, nil
}
func (s *ServiceController) persistUpdate(service *v1.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)
		if err == nil {
			return nil
		}
		// If the object no longer exists, we don't want to recreate it. Just bail
		// out so that we can process the delete, which we should soon be receiving
		// if we haven't already.
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		// TODO: Try to resolve the conflict if the change was unrelated to load
		// balancer status. For now, just pass it up the stack.
		if errors.IsConflict(err) {
			return fmt.Errorf("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
		}
		glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
			service.Namespace, service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}
func TestDeleteObjectNotFound(t *testing.T) {
	f, tf, _, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.Client = &fake.RESTClient{
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE":
				return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("")}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf, errBuf := bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})

	options := &DeleteOptions{
		FilenameOptions: resource.FilenameOptions{
			Filenames: []string{"../../../examples/guestbook/legacy/redis-master-controller.yaml"},
		},
		GracePeriod: -1,
		Cascade:     false,
		Output:      "name",
	}
	err := options.Complete(f, buf, errBuf, []string{})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	err = options.RunDelete()
	if err == nil || !errors.IsNotFound(err) {
		t.Errorf("unexpected error: expected NotFound, got %v", err)
	}
}
func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster) error {
	err := wait.Poll(10*time.Second, FederatedReplicaSetTimeout, func() (bool, error) {
		frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		specReplicas, statusReplicas := int32(0), int32(0)
		for _, cluster := range clusters {
			rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
			if err != nil && !errors.IsNotFound(err) {
				By(fmt.Sprintf("Failed getting replicaset: %q/%q/%q, err: %v", cluster.name, namespace, replicaSetName, err))
				return false, err
			}
			if err == nil {
				if !equivalentReplicaSet(frs, rs) {
					By(fmt.Sprintf("Replicaset meta or spec does not match for cluster %q:\n	federation: %v\n	cluster: %v", cluster.name, frs, rs))
					return false, nil
				}
				specReplicas += *rs.Spec.Replicas
				statusReplicas += rs.Status.Replicas
			}
		}
		if statusReplicas == frs.Status.Replicas && specReplicas >= *frs.Spec.Replicas {
			return true, nil
		}
		By(fmt.Sprintf("Replica counts do not match, federation replicas: %v/%v, cluster replicas: %v/%v\n", *frs.Spec.Replicas, frs.Status.Replicas, specReplicas, statusReplicas))
		return false, nil
	})
	return err
}
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
	_, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return nil
	}
	if err != nil {
		// Surface unexpected API errors instead of masking them.
		return err
	}
	return goerrors.New("pod did not disappear")
}
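// Illustrative wrapper (assumed, not from the original source): tests usually
// poll this check rather than call it once, since kubelet needs time to remove
// the mirror pod. The interval and timeout are placeholder values.
func waitForMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		if err := checkMirrorPodDisappear(cl, name, namespace); err != nil {
			return false, nil // keep polling; transient errors are retried too
		}
		return true, nil
	})
}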
// GetAPIGroupResources uses the provided discovery client to gather
// discovery information and populate a slice of APIGroupResources.
func GetAPIGroupResources(cl DiscoveryInterface) ([]*APIGroupResources, error) {
	apiGroups, err := cl.ServerGroups()
	if err != nil {
		return nil, err
	}
	var result []*APIGroupResources
	for _, group := range apiGroups.Groups {
		groupResources := &APIGroupResources{
			Group:              group,
			VersionedResources: make(map[string][]metav1.APIResource),
		}
		for _, version := range group.Versions {
			resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion)
			if err != nil {
				if errors.IsNotFound(err) {
					continue // ignore as this can race with deletion of 3rd party APIs
				}
				return nil, err
			}
			groupResources.VersionedResources[version.Version] = resources.APIResources
		}
		result = append(result, groupResources)
	}
	return result, nil
}
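// Illustrative consumer (assumed, not from the original source): flatten the
// discovery results into a list of namespaced resource identifiers. Only
// GetAPIGroupResources and the metav1 types come from the code above.
func listNamespacedResources(cl DiscoveryInterface) ([]string, error) {
	groups, err := GetAPIGroupResources(cl)
	if err != nil {
		return nil, err
	}
	var out []string
	for _, g := range groups {
		for version, resources := range g.VersionedResources {
			for _, r := range resources {
				if r.Namespaced {
					out = append(out, g.Group.Name+"/"+version+"/"+r.Name)
				}
			}
		}
	}
	return out, nil
}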
// Visit implements Visitor.
func (r *Selector) Visit(fn VisitorFunc) error {
	list, err := NewHelper(r.Client, r.Mapping).List(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector, r.Export)
	if err != nil {
		if errors.IsBadRequest(err) || errors.IsNotFound(err) {
			if se, ok := err.(*errors.StatusError); ok {
				// Modify the message without hiding that this is an API error.
				if r.Selector.Empty() {
					se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message)
				} else {
					se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, se.ErrStatus.Message)
				}
				return se
			}
			if r.Selector.Empty() {
				return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err)
			}
			return fmt.Errorf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, err)
		}
		return err
	}
	accessor := r.Mapping.MetadataAccessor
	resourceVersion, _ := accessor.ResourceVersion(list)
	info := &Info{
		Client:          r.Client,
		Mapping:         r.Mapping,
		Namespace:       r.Namespace,
		Object:          list,
		ResourceVersion: resourceVersion,
	}
	return fn(info, nil)
}
// Clean up both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	podClient := f.PodClient()

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing the test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to the logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait for client pod to terminate: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
// Delete the PVC and wait for the PV to become Available again. Validate that the PV
// has recycled (an assumption here about the reclaimPolicy). The caller tells this func
// which phase value to expect for the pv bound to the to-be-deleted claim.
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) {
	pvname := pvc.Spec.VolumeName
	framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname)
	deletePersistentVolumeClaim(c, pvc.Name, ns)

	// Check that the PVC is really deleted.
	pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
	Expect(apierrs.IsNotFound(err)).To(BeTrue())

	// Wait for the PV's phase to return to Available.
	framework.Logf("Waiting for recycling process to complete.")
	err = framework.WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Examine the pv's ClaimRef and UID and compare to expected values.
	pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	cr := pv.Spec.ClaimRef
	if expectPVPhase == v1.VolumeAvailable {
		if cr != nil { // may be ok if cr != nil
			Expect(len(cr.UID)).To(BeZero())
		}
	} else if expectPVPhase == v1.VolumeBound {
		Expect(cr).NotTo(BeNil())
		Expect(len(cr.UID)).NotTo(BeZero())
	}

	framework.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
}
// DeleteSync deletes the pod and waits for the pod to disappear for `timeout`. If the pod
// doesn't disappear before the timeout, it fails the test.
func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout time.Duration) {
	err := c.Delete(name, options)
	if err != nil && !errors.IsNotFound(err) {
		Failf("Failed to delete pod %q: %v", name, err)
	}
	Expect(WaitForPodToDisappear(c.f.ClientSet, c.f.Namespace.Name, name, labels.Everything(),
		2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
}
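// Usage sketch (illustrative, not from the original source): tests typically
// pair DeleteSync with a zero grace period so the pod is force-deleted and
// confirmed gone before the next step. The pod name and timeout below are
// placeholders.
//
//	gracePeriod := int64(0)
//	podClient.DeleteSync("test-pod",
//		&v1.DeleteOptions{GracePeriodSeconds: &gracePeriod},
//		2*time.Minute)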
// Delete the passed-in pod.
func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *v1.Pod) {
	if c != nil {
		if pod != nil && len(pod.Name) > 0 {
			framework.Logf("Deleting pod %v", pod.Name)
			err := c.Core().Pods(ns).Delete(pod.Name, nil)
			if err != nil && !apierrs.IsNotFound(err) {
				Expect(err).NotTo(HaveOccurred())
			}

			// Wait for the pod to terminate; a NotFound error here is expected
			// and means the pod is already gone.
			err = f.WaitForPodTerminated(pod.Name, "")
			if err != nil && !apierrs.IsNotFound(err) {
				Expect(err).NotTo(HaveOccurred())
			}
			framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
		}
	}
}
// Delete deletes the pet in the pcb from the apiserver.
func (p *apiServerPetClient) Delete(pet *pcb) error {
	err := p.c.Core().Pods(pet.parent.Namespace).Delete(pet.pod.Name, nil)
	if errors.IsNotFound(err) {
		// Treat an already-deleted pod as success.
		err = nil
	}
	p.event(pet.parent, "Delete", fmt.Sprintf("pet: %v", pet.pod.Name), err)
	return err
}
func TestGracefulStoreHandleFinalizers(t *testing.T) {
	initialGeneration := int64(1)
	podWithFinalizer := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Finalizers: []string{"foo.com/x"}, Generation: initialGeneration},
		Spec:       api.PodSpec{NodeName: "machine"},
	}

	testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test")
	destroyFunc, registry := NewTestGenericStoreRegistry(t)
	registry.EnableGarbageCollection = true
	defaultDeleteStrategy := testRESTStrategy{api.Scheme, names.SimpleNameGenerator, true, false, true}
	registry.DeleteStrategy = testGracefulStrategy{defaultDeleteStrategy}
	defer destroyFunc()

	// create pod
	_, err := registry.Create(testContext, podWithFinalizer)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// delete the pod with grace period=0; the pod should still exist because it has a finalizer
	_, err = registry.Delete(testContext, podWithFinalizer.Name, api.NewDeleteOptions(0))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = registry.Get(testContext, podWithFinalizer.Name, &metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	updatedPodWithFinalizer := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Finalizers: []string{"foo.com/x"}, ResourceVersion: podWithFinalizer.ObjectMeta.ResourceVersion},
		Spec:       api.PodSpec{NodeName: "machine"},
	}
	_, _, err = registry.Update(testContext, updatedPodWithFinalizer.ObjectMeta.Name, rest.DefaultUpdatedObjectInfo(updatedPodWithFinalizer, api.Scheme))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// the object should still exist, because it still has a finalizer
	_, err = registry.Get(testContext, podWithFinalizer.Name, &metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	podWithNoFinalizer := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", ResourceVersion: podWithFinalizer.ObjectMeta.ResourceVersion},
		Spec:       api.PodSpec{NodeName: "anothermachine"},
	}
	_, _, err = registry.Update(testContext, podWithFinalizer.ObjectMeta.Name, rest.DefaultUpdatedObjectInfo(podWithNoFinalizer, api.Scheme))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// the pod should be removed, because its finalizer is removed
	_, err = registry.Get(testContext, podWithFinalizer.Name, &metav1.GetOptions{})
	if !errors.IsNotFound(err) {
		t.Fatalf("Unexpected error: %v", err)
	}
}