// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) {
	// Get the groupVersions exposed at /api
	v := &unversioned.APIVersions{}
	err = d.Get().AbsPath("/api").Do().Into(v)
	apiGroup := unversioned.APIGroup{}
	if err == nil {
		apiGroup = apiVersionsToAPIGroup(v)
	}
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}

	// Get the groupVersions exposed at /apis
	apiGroupList = &unversioned.APIGroupList{}
	err = d.Get().AbsPath("/apis").Do().Into(apiGroupList)
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}
	// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
	if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
		apiGroupList = &unversioned.APIGroupList{}
	}

	// append the group retrieved from /api to the list
	apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
	return apiGroupList, nil
}
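// printServerGroups is a hypothetical caller of ServerGroups, included here only to sketch
// how the discovery result above might be consumed. It assumes a ready *DiscoveryClient and
// the Name field on unversioned.APIGroup; neither is defined in this snippet.
func printServerGroups(d *DiscoveryClient) error {
	groupList, err := d.ServerGroups()
	if err != nil {
		return err
	}
	for _, group := range groupList.Groups {
		// Log each group advertised by the server, whether it came from /api or /apis.
		glog.Infof("supported API group: %q", group.Name)
	}
	return nil
}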
// Ensure that when scheduler creates a binding for a pod that has already been deleted
// by the API server, API server returns not-found error.
func TestEtcdCreateBindingNoPod(t *testing.T) {
	storage, bindingStorage, _, fakeClient := newStorage(t)
	ctx := api.NewDefaultContext()
	fakeClient.TestIndex = true

	key, _ := storage.KeyFunc(ctx, "foo")
	key = etcdtest.AddPrefix(key)
	fakeClient.ExpectNotFoundGet(key)
	// Assume that a pod has undergone the following:
	// - Create (apiserver)
	// - Schedule (scheduler)
	// - Delete (apiserver)
	_, err := bindingStorage.Create(ctx, &api.Binding{
		ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"},
		Target:     api.ObjectReference{Name: "machine"},
	})
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(etcderrors.InterpretGetError(err, "Pod", "foo")) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}

	_, err = storage.Get(ctx, "foo")
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(etcderrors.InterpretGetError(err, "Pod", "foo")) {
		t.Fatalf("Unexpected error: %v", err)
	}
}
func getReferencedServiceAccountToken(c *client.Client, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Secrets(ns).Get(ref.Name)
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != api.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[api.ServiceAccountNameKey]
			uid := secret.Annotations[api.ServiceAccountUIDKey]
			tokenData := secret.Data[api.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
// syncNamespace orchestrates deletion of a Namespace and its associated content.
func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersions, namespace *api.Namespace) (err error) {
	if namespace.DeletionTimestamp == nil {
		return nil
	}
	glog.V(4).Infof("Syncing namespace %s", namespace.Name)

	// ensure that the status is up to date on the namespace
	// if we get a not found error, we assume the namespace is truly gone
	namespace, err = retryOnConflictError(kubeClient, namespace, updateNamespaceStatusFunc)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}

	// if the namespace is already finalized, delete it
	if finalized(namespace) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		return nil
	}

	// there may still be content for us to remove
	estimate, err := deleteAllContent(kubeClient, versions, namespace.Name, *namespace.DeletionTimestamp)
	if err != nil {
		return err
	}
	if estimate > 0 {
		return &contentRemainingError{estimate}
	}

	// we have removed content, so mark it finalized by us
	result, err := retryOnConflictError(kubeClient, namespace, finalizeNamespaceFunc)
	if err != nil {
		return err
	}

	// now check if all finalizers have reported that we delete now
	if finalized(result) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}

	return nil
}
func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
func TestEtcdDelete(t *testing.T) {
	podA := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"},
		Spec:       api.PodSpec{NodeName: "machine"},
	}

	nodeWithPodA := tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value:         runtime.EncodeOrDie(testapi.Default.Codec(), podA),
				ModifiedIndex: 1,
				CreatedIndex:  1,
			},
		},
		E: nil,
	}

	emptyNode := tools.EtcdResponseWithError{
		R: &etcd.Response{},
		E: tools.EtcdErrorNotFound,
	}

	testContext := api.WithNamespace(api.NewContext(), "test")
	key := "foo"

	table := map[string]struct {
		existing tools.EtcdResponseWithError
		expect   tools.EtcdResponseWithError
		errOK    func(error) bool
	}{
		"normal": {
			existing: nodeWithPodA,
			expect:   emptyNode,
			errOK:    func(err error) bool { return err == nil },
		},
		"notExisting": {
			existing: emptyNode,
			expect:   emptyNode,
			errOK:    func(err error) bool { return errors.IsNotFound(err) },
		},
	}

	for name, item := range table {
		fakeClient, registry := NewTestGenericEtcdRegistry(t)
		path := etcdtest.AddPrefix("pods/foo")
		fakeClient.Data[path] = item.existing
		obj, err := registry.Delete(testContext, key, nil)
		if !item.errOK(err) {
			t.Errorf("%v: unexpected error: %v (%#v)", name, err, obj)
		}

		if item.expect.E != nil {
			item.expect.E.(*etcd.EtcdError).Index = fakeClient.ChangeIndex
		}
		if e, a := item.expect, fakeClient.Data[path]; !api.Semantic.DeepDerivative(e, a) {
			t.Errorf("%v:\n%s", name, util.ObjectDiff(e, a))
		}
	}
}
func (rs *REST) Delete(ctx api.Context, id string) (runtime.Object, error) {
	service, err := rs.registry.GetService(ctx, id)
	if err != nil {
		return nil, err
	}

	err = rs.registry.DeleteService(ctx, id)
	if err != nil {
		return nil, err
	}

	// TODO: can leave dangling endpoints, and potentially return incorrect
	// endpoints if a new service is created with the same name
	err = rs.endpoints.DeleteEndpoints(ctx, id)
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}

	if api.IsServiceIPSet(service) {
		rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
	}

	for _, nodePort := range CollectServiceNodePorts(service) {
		err := rs.serviceNodePorts.Release(nodePort)
		if err != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("Error releasing service %s node port %d: %v", service.Name, nodePort, err)
		}
	}

	return &unversioned.Status{Status: unversioned.StatusSuccess}, nil
}
func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	t.setObjectMeta(foo, "foo4")
	if err := setFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	objectMeta := t.getObjectMetaOrFail(foo)
	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := getFn(ctx, foo); err != nil {
		t.Fatalf("did not gracefully delete resource: %v", err)
	}
	// second delete is immediate, resource is deleted
	out, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(0))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	_, err = t.storage.(rest.Getter).Get(ctx, objectMeta.Name)
	if !errors.IsNotFound(err) {
		t.Errorf("unexpected error, object should be deleted immediately: %v", err)
	}
	objectMeta = t.getObjectMetaOrFail(out)
	if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != 0 {
		t.Errorf("unexpected deleted meta: %#v", objectMeta)
	}
}
func TestDeleteObjectNotFound(t *testing.T) {
	f, tf, codec := NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.Client = &fake.RESTClient{
		Codec: codec,
		Client: fake.HTTPClientFunc(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE":
				return &http.Response{StatusCode: 404, Body: stringBody("")}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})

	cmd := NewCmdDelete(f, buf)
	options := &DeleteOptions{
		Filenames: []string{"../../../examples/guestbook/redis-master-controller.yaml"},
	}
	cmd.Flags().Set("cascade", "false")
	cmd.Flags().Set("output", "name")
	err := RunDelete(f, buf, cmd, []string{}, options)
	if err == nil || !errors.IsNotFound(err) {
		t.Errorf("unexpected error: expected NotFound, got %v", err)
	}
}
func (s *ServiceController) persistUpdate(service *api.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Services(service.Namespace).Update(service)
		if err == nil {
			return nil
		}
		// If the object no longer exists, we don't want to recreate it. Just bail
		// out so that we can process the delete, which we should soon be receiving
		// if we haven't already.
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service that no longer exists: %v", err)
			return nil
		}
		// TODO: Try to resolve the conflict if the change was unrelated to load
		// balancer status. For now, just rely on the fact that we'll
		// also process the update that caused the resource version to change.
		if errors.IsConflict(err) {
			glog.Infof("Not persisting update to service that has been changed since we received it: %v", err)
			return nil
		}
		glog.Warningf("Failed to persist updated LoadBalancerStatus to service %s after creating its external load balancer: %v",
			service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}
// getOrCreateTargetControllerWithClient looks for an existing controller with
// sourceId. If found, the existing controller is returned with true
// indicating that the controller already exists. If the controller isn't
// found, a new one is created and returned along with false indicating the
// controller was created.
//
// Existing controllers are validated to ensure their sourceIdAnnotation
// matches sourceId; if there's a mismatch, an error is returned.
func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
	existingRc, err := r.existingController(controller)
	if err != nil {
		if !errors.IsNotFound(err) {
			// There was an error trying to find the controller; don't assume we
			// should create it.
			return nil, false, err
		}
		if controller.Spec.Replicas <= 0 {
			return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec.Replicas)
		}
		// The controller wasn't found, so create it.
		if controller.Annotations == nil {
			controller.Annotations = map[string]string{}
		}
		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas)
		controller.Annotations[sourceIdAnnotation] = sourceId
		controller.Spec.Replicas = 0
		newRc, err := r.c.ReplicationControllers(r.ns).Create(controller)
		return newRc, false, err
	}
	// Validate and use the existing controller.
	annotations := existingRc.Annotations
	source := annotations[sourceIdAnnotation]
	_, ok := annotations[desiredReplicasAnnotation]
	if source != sourceId || !ok {
		return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations)
	}
	return existingRc, true, nil
}
// setEndpoints sets the endpoints for the given service.
// in a multi-master scenario only the master will be publishing an endpoint.
// see SchedulerServer.bootstrap.
func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int) error {
	// The setting we want to find.
	want := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{{IP: ip.String()}},
		Ports:     []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}},
	}}

	ctx := api.NewDefaultContext()
	e, err := m.client.Endpoints(api.NamespaceValue(ctx)).Get(serviceName)
	createOrUpdate := m.client.Endpoints(api.NamespaceValue(ctx)).Update
	if err != nil {
		if errors.IsNotFound(err) {
			createOrUpdate = m.client.Endpoints(api.NamespaceValue(ctx)).Create
		}
		e = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:      serviceName,
				Namespace: api.NamespaceDefault,
			},
		}
	}
	if !reflect.DeepEqual(e.Subsets, want) {
		e.Subsets = want
		glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
		_, err = createOrUpdate(e)
		return err
	}
	// We didn't make any changes, no need to actually call update.
	return nil
}
// this pod may be out of sync with respect to the API server registry:
//      this pod   |  apiserver registry
//    -------------|----------------------
//      host=.*    |  404             ; pod was deleted
//      host=.*    |  5xx             ; failed to sync, try again later?
//      host=""    |  host=""         ; perhaps no updates to process?
//      host=""    |  host="..."      ; pod has been scheduled and assigned, is there a task assigned? (check TaskIdKey in binding?)
//      host="..." |  host=""         ; pod is no longer scheduled, does it need to be re-queued?
//      host="..." |  host="..."      ; perhaps no updates to process?
//
// TODO(jdef) this needs an integration test
func (s *schedulingPlugin) reconcileTask(t *podtask.T) {
	log.V(1).Infof("reconcile pod %v, assigned to slave %q", t.Pod.Name, t.Spec.AssignedSlave)
	ctx := api.WithNamespace(api.NewDefaultContext(), t.Pod.Namespace)
	pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(t.Pod.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			// attempt to delete
			if err = s.deleter.deleteOne(&Pod{Pod: &t.Pod}); err != nil && err != noSuchPodErr && err != noSuchTaskErr {
				log.Errorf("failed to delete pod: %v: %v", t.Pod.Name, err)
			}
		} else {
			//TODO(jdef) other errors should probably trigger a retry (w/ backoff).
			//For now, drop the pod on the floor
			log.Warningf("aborting reconciliation for pod %v: %v", t.Pod.Name, err)
		}
		return
	}

	log.Infof("pod %v scheduled on %q according to apiserver", pod.Name, pod.Spec.NodeName)
	if t.Spec.AssignedSlave != pod.Spec.NodeName {
		if pod.Spec.NodeName == "" {
			// pod is unscheduled.
			// it's possible that we dropped the pod in the scheduler error handler
			// because of task misalignment with the pod (task.Has(podtask.Launched) == true)

			podKey, err := podtask.MakePodKey(ctx, pod.Name)
			if err != nil {
				log.Error(err)
				return
			}

			s.api.Lock()
			defer s.api.Unlock()

			if _, state := s.api.tasks().ForPod(podKey); state != podtask.StateUnknown {
				//TODO(jdef) reconcile the task
				log.Errorf("task already registered for pod %v", pod.Name)
				return
			}

			now := time.Now()
			log.V(3).Infof("reoffering pod %v", podKey)
			s.qr.reoffer(&Pod{
				Pod:      pod,
				deadline: &now,
			})
		} else {
			// pod is scheduled.
			// not sure how this happened behind our backs. attempt to reconstruct
			// at least a partial podtask.T record.
			//TODO(jdef) reconcile the task
			log.Errorf("pod already scheduled: %v", pod.Name)
		}
	} else {
		//TODO(jdef) for now, ignore the fact that the rest of the spec may be different
		//and assume that our knowledge of the pod aligns with that of the apiserver
		log.Error("pod reconciliation does not support updates; not yet implemented")
	}
}
func (t *Tester) TestUpdateFailsOnNotFound(valid runtime.Object) {
	_, _, err := t.storage.(rest.Updater).Update(t.TestContext(), valid)
	if err == nil {
		t.Errorf("Expected an error, but we didn't get one")
	} else if !errors.IsNotFound(err) {
		t.Errorf("Expected NotFound error, got '%v'", err)
	}
}
func (t *Tester) testDeleteNonExist(obj runtime.Object) {
	objectMeta := t.getObjectMetaOrFail(obj)

	t.withStorageError(tools.EtcdErrorNotFound, func() {
		_, err := t.storage.(rest.GracefulDeleter).Delete(t.TestContext(), objectMeta.Name, nil)
		if err == nil || !errors.IsNotFound(err) {
			t.Errorf("unexpected error: %v", err)
		}
	})
}
func LoadExistingNextReplicationController(c client.ReplicationControllersNamespacer, namespace, newName string) (*api.ReplicationController, error) {
	if len(newName) == 0 {
		return nil, nil
	}
	newRc, err := c.ReplicationControllers(namespace).Get(newName)
	if err != nil && errors.IsNotFound(err) {
		return nil, nil
	}
	return newRc, err
}
func (t *Tester) testGetNotFound(obj runtime.Object) {
	ctx := t.TestContext()
	t.setObjectMeta(obj, "foo2")
	_, err := t.storage.(rest.Creater).Create(ctx, obj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	_, err = t.storage.(rest.Getter).Get(ctx, "foo3")
	if !errors.IsNotFound(err) {
		t.Errorf("unexpected error returned: %#v", err)
	}
}
func (l *lifecycle) Admit(a admission.Attributes) (err error) {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind() == "Namespace" && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetKind(), a.GetName(), fmt.Errorf("namespace can never be deleted"))
	}

	defaultVersion, kind, err := api.RESTMapper.VersionAndKindForResource(a.GetResource())
	if err != nil {
		return errors.NewInternalError(err)
	}
	mapping, err := api.RESTMapper.RESTMapping(kind, defaultVersion)
	if err != nil {
		return errors.NewInternalError(err)
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}

	// refuse to operate on non-existent namespaces
	if !exists {
		// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
		namespaceObj, err = l.client.Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}
		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
func deleteResourceQuotas(kubeClient client.Interface, ns string) error {
	resourceQuotas, err := kubeClient.ResourceQuotas(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range resourceQuotas.Items {
		err := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
func deleteIngress(expClient client.ExtensionsInterface, ns string) error {
	items, err := expClient.Ingress(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := expClient.Ingress(ns).Delete(items.Items[i].Name, nil)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
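// deleteCollection is a hypothetical helper, not part of the source above, that sketches
// the list-then-delete shape shared by deleteResourceQuotas, deleteReplicationControllers,
// deleteIngress, and deletePersistentVolumeClaims: enumerate every item in the namespace,
// delete each one, and tolerate NotFound so a concurrent deletion does not fail the sweep.
func deleteCollection(list func() ([]string, error), del func(name string) error) error {
	names, err := list()
	if err != nil {
		return err
	}
	for _, name := range names {
		if err := del(name); err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}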
// syncBatch syncs pods statuses with the apiserver.
func (m *manager) syncBatch() error {
	syncRequest := <-m.podStatusChannel
	pod := syncRequest.pod
	status := syncRequest.status

	var err error
	statusPod := &api.Pod{
		ObjectMeta: pod.ObjectMeta,
	}
	// TODO: make me easier to express from client code
	statusPod, err = m.kubeClient.Pods(statusPod.Namespace).Get(statusPod.Name)
	if errors.IsNotFound(err) {
		glog.V(3).Infof("Pod %q was deleted on the server", pod.Name)
		return nil
	}
	if err == nil {
		if len(pod.UID) > 0 && statusPod.UID != pod.UID {
			glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", kubeletUtil.FormatPodName(pod))
			return nil
		}
		statusPod.Status = status
		// TODO: handle conflict as a retry, make that easier too.
		statusPod, err = m.kubeClient.Pods(pod.Namespace).UpdateStatus(statusPod)
		if err == nil {
			glog.V(3).Infof("Status for pod %q updated successfully", kubeletUtil.FormatPodName(pod))

			if pod.DeletionTimestamp == nil {
				return nil
			}
			if !notRunning(pod.Status.ContainerStatuses) {
				glog.V(3).Infof("Pod %q is terminated, but some containers are still running", pod.Name)
				return nil
			}
			if err := m.kubeClient.Pods(statusPod.Namespace).Delete(statusPod.Name, api.NewDeleteOptions(0)); err == nil {
				glog.V(3).Infof("Pod %q fully terminated and removed from etcd", statusPod.Name)
				m.DeletePodStatus(pod.UID)
				return nil
			}
		}
	}

	// We failed to update status. In order to make sure we retry next time
	// we delete cached value. This may result in an additional update, but
	// this is ok.
	// Doing this synchronously will lead to a deadlock if the podStatusChannel
	// is full, and the pod worker holding the lock is waiting on this method
	// to clear the channel. Even if this delete never runs, subsequent container
	// changes on the node should trigger updates.
	go m.DeletePodStatus(pod.UID)
	return fmt.Errorf("error updating status for pod %q: %v", kubeletUtil.FormatPodName(pod), err)
}
func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *DescribeOptions) error {
	selector := cmdutil.GetFlagString(cmd, "selector")
	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	if len(args) == 0 && len(options.Filenames) == 0 {
		fmt.Fprint(out, "You must specify the type of resource to describe. ", valid_resources)
		return cmdutil.UsageError(cmd, "Required resource not specified.")
	}

	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		FilenameParam(enforceNamespace, options.Filenames...).
		SelectorParam(selector).
		ResourceTypeOrNameArgs(true, args...).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}

	allErrs := []error{}
	infos, err := r.Infos()
	if err != nil {
		if apierrors.IsNotFound(err) && len(args) == 2 {
			return DescribeMatchingResources(mapper, typer, f, cmdNamespace, args[0], args[1], out, err)
		}
		allErrs = append(allErrs, err)
	}

	for _, info := range infos {
		mapping := info.ResourceMapping()
		describer, err := f.Describer(mapping)
		if err != nil {
			allErrs = append(allErrs, err)
			continue
		}
		s, err := describer.Describe(info.Namespace, info.Name)
		if err != nil {
			allErrs = append(allErrs, err)
			continue
		}
		fmt.Fprintf(out, "%s\n\n", s)
	}

	return utilerrors.NewAggregate(allErrs)
}
func Rename(c client.ReplicationControllersNamespacer, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""

	_, err := c.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return err
	}
	err = c.ReplicationControllers(rc.Namespace).Delete(oldName)
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	return nil
}
func (t *Tester) TestDeleteNonExist(createFn func() runtime.Object) {
	existing := createFn()
	objectMeta, err := api.ObjectMetaFor(existing)
	if err != nil {
		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
	}
	context := t.TestContext()

	t.withStorageError(&etcd.EtcdError{ErrorCode: tools.EtcdErrorCodeNotFound}, func() {
		_, err := t.storage.(rest.GracefulDeleter).Delete(context, objectMeta.Name, nil)
		if err == nil || !errors.IsNotFound(err) {
			t.Fatalf("Unexpected error: %v", err)
		}
	})
}
func (t *Tester) TestDeleteGracefulUsesZeroOnNil(existing runtime.Object, expectedGrace int64) {
	objectMeta, err := api.ObjectMetaFor(existing)
	if err != nil {
		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
	}

	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
	_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name); !errors.IsNotFound(err) {
		t.Errorf("unexpected error, object should not exist: %v", err)
	}
}
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		if pod.Spec.NodeName == "" {
			return false, nil
		}
		return true, nil
	}
}
func (t *Tester) testDeleteGracefulUsesZeroOnNil(obj runtime.Object, setFn SetFunc, expectedGrace int64) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	t.setObjectMeta(foo, "foo5")
	if err := setFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	objectMeta := t.getObjectMetaOrFail(foo)
	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name); !errors.IsNotFound(err) {
		t.Errorf("unexpected error, object should not exist: %v", err)
	}
}
func podRunning(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry, but log the error.
			glog.Errorf("Error when reading pod %q: %v", podName, err)
			return false, nil
		}
		if pod.Status.Phase != api.PodRunning {
			return false, nil
		}
		return true, nil
	}
}
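// waitForPodRunning is a hypothetical wrapper showing how the wait.ConditionFunc values
// returned by podScheduled and podRunning are typically consumed; it mirrors the
// wait.Poll call used in getReferencedServiceAccountToken above. The one-second interval
// and two-minute timeout are illustrative choices, not values taken from the source.
func waitForPodRunning(c *client.Client, podNamespace, podName string) error {
	// Poll once per second, for up to two minutes, until the pod reports Running.
	return wait.Poll(time.Second, 2*time.Minute, podRunning(c, podNamespace, podName))
}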