// makeImageStreamTagAdmissionUsageFunc returns a function that computes the resource usage of a given image
// stream tag during admission.
func makeImageStreamTagAdmissionUsageFunc(isNamespacer osclient.ImageStreamsNamespacer) generic.UsageFunc {
	return func(object runtime.Object) kapi.ResourceList {
		ist, ok := object.(*imageapi.ImageStreamTag)
		if !ok {
			return kapi.ResourceList{}
		}

		res := map[kapi.ResourceName]resource.Quantity{
			imageapi.ResourceImageStreams: *resource.NewQuantity(0, resource.BinarySI),
		}

		isName, _, err := imageapi.ParseImageStreamTagName(ist.Name)
		if err != nil {
			utilruntime.HandleError(err)
			return kapi.ResourceList{}
		}

		is, err := isNamespacer.ImageStreams(ist.Namespace).Get(isName)
		if err != nil && !kerrors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("failed to get image stream %s/%s: %v", ist.Namespace, isName, err))
		}
		if is == nil || kerrors.IsNotFound(err) {
			res[imageapi.ResourceImageStreams] = *resource.NewQuantity(1, resource.BinarySI)
		}

		return res
	}
}
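Most of the examples on this page share one idiom: issue a Get, treat a NotFound error as "the object is absent" rather than as a failure, and surface every other error. A minimal sketch of that idiom, assuming kerrors is the apimachinery errors package (the helper name is illustrative, not part of the example above):

// absentOrError reports whether a Get result means the object does not exist.
// kerrors is assumed to be k8s.io/apimachinery/pkg/api/errors (older trees import
// k8s.io/kubernetes/pkg/api/errors, as the examples here do).
func absentOrError(getErr error) (bool, error) {
	if kerrors.IsNotFound(getErr) {
		// NotFound is an expected outcome, not a failure.
		return true, nil
	}
	if getErr != nil {
		// Anything else is a real error and is handed back to the caller.
		return false, getErr
	}
	return false, nil
}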
Example #2
// CancelBuild updates a build's status to Cancelled after its associated pod is deleted.
func (bc *BuildController) CancelBuild(build *buildapi.Build) error {
	if !isBuildCancellable(build) {
		glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase)
		return nil
	}

	glog.V(4).Infof("Cancelling Build %s/%s.", build.Namespace, build.Name)

	pod, err := bc.PodManager.GetPod(build.Namespace, buildutil.GetBuildPodName(build))
	if err != nil {
		if !errors.IsNotFound(err) {
			return fmt.Errorf("Failed to get Pod for build %s/%s: %v", build.Namespace, build.Name, err)
		}
	} else {
		err := bc.PodManager.DeletePod(build.Namespace, pod)
		if err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("Couldn't delete Build Pod %s/%s: %v", build.Namespace, pod.Name, err)
		}
	}

	build.Status.Phase = buildapi.BuildPhaseCancelled
	now := util.Now()
	build.Status.CompletionTimestamp = &now
	if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
		return fmt.Errorf("Failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
	}

	glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name)
	return nil
}
Example #3
// Ensure that when a deploymentRollback is created for a deployment that has already been deleted
// by the API server, the API server returns a not-found error.
func TestEtcdCreateDeploymentRollbackNoDeployment(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	rollbackStorage := storage.Rollback
	ctx := api.WithNamespace(api.NewContext(), namespace)

	key, _ := storage.Deployment.KeyFunc(ctx, name)
	key = etcdtest.AddPrefix(key)
	_, err := rollbackStorage.Create(ctx, &extensions.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: map[string]string{},
		RollbackTo:         extensions.RollbackConfig{Revision: 1},
	})
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(etcderrors.InterpretGetError(err, extensions.Resource("deployments"), name)) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}

	_, err = storage.Deployment.Get(ctx, name)
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(etcderrors.InterpretGetError(err, extensions.Resource("deployments"), name)) {
		t.Fatalf("Unexpected error: %v", err)
	}
}
Example #4
func (f *Framework) deleteFederationNs() {
	ns := f.FederationNamespace
	By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}

	clientset := f.FederationClientset_1_4
	// First delete the namespace from federation apiserver.
	if err := clientset.Core().Namespaces().Delete(ns.Name, &api.DeleteOptions{}); err != nil {
		Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
	}
	// Verify that it got deleted.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := clientset.Core().Namespaces().Get(ns.Name); err != nil {
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			Logf("Error while waiting for namespace to be terminated: %v", err)
			return false, nil
		}
		return false, nil
	})
	if err != nil {
		if !apierrs.IsNotFound(err) {
			Failf("Couldn't delete ns %q: %s", ns.Name, err)
		} else {
			Logf("Namespace %v was already deleted", ns.Name)
		}
	}

	// TODO: Delete the namespace from underlying clusters.
}
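The wait loop above is another recurring pattern: poll the Get until it reports NotFound, which confirms the delete went through. A generic sketch of that pattern, assuming the apimachinery wait and errors packages (the helper and its parameters are illustrative, not part of the suite above):

// waitForNotFound polls get until it returns a NotFound error or the timeout expires.
func waitForNotFound(get func() error, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		err := get()
		if apierrs.IsNotFound(err) {
			// The object is gone; stop polling.
			return true, nil
		}
		// Still present, or a transient error; keep polling until the timeout.
		return false, nil
	})
}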
// ReplaceChangedRoles will reconcile all the changed roles back to the recommended bootstrap policy
func (o *ReconcileClusterRolesOptions) ReplaceChangedRoles(changedRoles []*authorizationapi.ClusterRole) error {
	for i := range changedRoles {
		role, err := o.RoleClient.Get(changedRoles[i].Name)
		if err != nil && !kapierrors.IsNotFound(err) {
			return err
		}

		if kapierrors.IsNotFound(err) {
			createdRole, err := o.RoleClient.Create(changedRoles[i])
			if err != nil {
				return err
			}

			fmt.Fprintf(o.Out, "clusterrole/%s\n", createdRole.Name)
			continue
		}

		role.Rules = changedRoles[i].Rules
		updatedRole, err := o.RoleClient.Update(role)
		if err != nil {
			return err
		}

		fmt.Fprintf(o.Out, "clusterrole/%s\n", updatedRole.Name)
	}

	return nil
}
Example #6
// Ensure that when the scheduler creates a binding for a pod that has already been deleted
// by the API server, the API server returns a not-found error.
func TestEtcdCreateBindingNoPod(t *testing.T) {
	storage, bindingStorage, _, server := newStorage(t)
	defer server.Terminate(t)
	ctx := api.NewDefaultContext()

	key, _ := storage.KeyFunc(ctx, "foo")
	key = etcdtest.AddPrefix(key)
	// Assume that a pod has undergone the following:
	// - Create (apiserver)
	// - Schedule (scheduler)
	// - Delete (apiserver)
	_, err := bindingStorage.Create(ctx, &api.Binding{
		ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"},
		Target:     api.ObjectReference{Name: "machine"},
	})
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(etcderrors.InterpretGetError(err, "Pod", "foo")) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}

	_, err = storage.Get(ctx, "foo")
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(etcderrors.InterpretGetError(err, "Pod", "foo")) {
		t.Fatalf("Unexpected error: %v", err)
	}
}
Example #7
func TestStoreDeleteCollection(t *testing.T) {
	podA := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	podB := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}

	testContext := api.WithNamespace(api.NewContext(), "test")
	server, registry := NewTestGenericStoreRegistry(t)
	defer server.Terminate(t)

	if _, err := registry.Create(testContext, podA); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if _, err := registry.Create(testContext, podB); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// Delete all pods.
	deleted, err := registry.DeleteCollection(testContext, nil, &api.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	deletedPods := deleted.(*api.PodList)
	if len(deletedPods.Items) != 2 {
		t.Errorf("Unexpected number of pods deleted: %d, expected: 2", len(deletedPods.Items))
	}

	if _, err := registry.Get(testContext, podA.Name); !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}
	if _, err := registry.Get(testContext, podB.Name); !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}
}
Example #8
// HandleBuildDeletion deletes a build pod if the corresponding build has been deleted
func (bc *BuildDeleteController) HandleBuildDeletion(build *buildapi.Build) error {
	glog.V(4).Infof("Handling deletion of build %s", build.Name)
	if build.Spec.Strategy.JenkinsPipelineStrategy != nil {
		glog.V(4).Infof("Ignoring build with jenkins pipeline strategy")
		return nil
	}
	podName := buildapi.GetBuildPodName(build)
	pod, err := bc.PodManager.GetPod(build.Namespace, podName)
	if err != nil && !errors.IsNotFound(err) {
		glog.V(2).Infof("Failed to find pod with name %s for build %s in namespace %s due to error: %v", podName, build.Name, build.Namespace, err)
		return err
	}
	if pod == nil {
		glog.V(2).Infof("Did not find pod with name %s for build %s in namespace %s", podName, build.Name, build.Namespace)
		return nil
	}
	if buildName := buildapi.GetBuildName(pod); buildName != build.Name {
		glog.V(2).Infof("Not deleting pod %s/%s because the build label %s does not match the build name %s", pod.Namespace, podName, buildName, build.Name)
		return nil
	}
	err = bc.PodManager.DeletePod(build.Namespace, pod)
	if err != nil && !errors.IsNotFound(err) {
		glog.V(2).Infof("Failed to delete pod %s/%s for build %s due to error: %v", build.Namespace, podName, build.Name, err)
		return err
	}
	return nil
}
Example #9
// CancelBuild updates a build's status to Cancelled after its associated pod is deleted.
func (bc *BuildController) CancelBuild(build *buildapi.Build) error {
	if !isBuildCancellable(build) {
		glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase)
		return nil
	}

	glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name)

	pod, err := bc.PodManager.GetPod(build.Namespace, buildapi.GetBuildPodName(build))
	if err != nil {
		if !errors.IsNotFound(err) {
			return fmt.Errorf("Failed to get pod for build %s/%s: %v", build.Namespace, build.Name, err)
		}
	} else {
		err := bc.PodManager.DeletePod(build.Namespace, pod)
		if err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("Couldn't delete build pod %s/%s: %v", build.Namespace, pod.Name, err)
		}
	}

	build.Status.Phase = buildapi.BuildPhaseCancelled
	now := unversioned.Now()
	build.Status.CompletionTimestamp = &now
	// set the status details for the cancelled build before updating the build
	// object.
	build.Status.Reason = buildapi.StatusReasonCancelledBuild
	build.Status.Message = buildapi.StatusMessageCancelledBuild
	if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
		return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err)
	}

	glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name)
	return nil
}
// makeImageStreamImportAdmissionUsageFunc returns a function for computing the usage of an image stream import.
func makeImageStreamImportAdmissionUsageFunc(isNamespacer osclient.ImageStreamsNamespacer) generic.UsageFunc {
	return func(object runtime.Object) kapi.ResourceList {
		isi, ok := object.(*imageapi.ImageStreamImport)
		if !ok {
			return kapi.ResourceList{}
		}

		usage := map[kapi.ResourceName]resource.Quantity{
			imageapi.ResourceImageStreams: *resource.NewQuantity(0, resource.DecimalSI),
		}

		if !isi.Spec.Import || (len(isi.Spec.Images) == 0 && isi.Spec.Repository == nil) {
			return usage
		}

		is, err := isNamespacer.ImageStreams(isi.Namespace).Get(isi.Name)
		if err != nil && !kerrors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("failed to list image streams: %v", err))
		}
		if is == nil || kerrors.IsNotFound(err) {
			usage[imageapi.ResourceImageStreams] = *resource.NewQuantity(1, resource.DecimalSI)
		}

		return usage
	}
}
Example #11
func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""
	// First delete the oldName RC and orphan its pods.
	trueVar := true
	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar})
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName)
		if err == nil {
			return false, nil
		} else if errors.IsNotFound(err) {
			return true, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return err
	}
	// Then create the same RC with the new name.
	_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return err
	}
	return nil
}
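A hypothetical call site for Rename, assuming client satisfies coreclient.ReplicationControllersGetter and rc is the controller being renamed (both names are illustrative, not part of the example above):

if err := Rename(client, rc, "frontend-v2"); err != nil {
	return fmt.Errorf("failed to rename replication controller: %v", err)
}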
Example #12
// verifyCascadingDeletionForIngress verifies that ingresses are deleted from
// underlying clusters when orphan dependents is false and they are not deleted
// when orphan dependents is true.
func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
	ingress := createIngressOrFail(clientset, nsName)
	ingressName := ingress.Name
	// Check subclusters if the ingress was created there.
	By(fmt.Sprintf("Waiting for ingress %s to be created in all underlying clusters", ingressName))
	waitForIngressShardsOrFail(nsName, ingress, clusters)

	By(fmt.Sprintf("Deleting ingress %s", ingressName))
	deleteIngressOrFail(clientset, nsName, ingressName, orphanDependents)

	By(fmt.Sprintf("Verifying ingresses %s in underlying clusters", ingressName))
	errMessages := []string{}
	// ingress should be present in underlying clusters unless orphanDependents is false.
	shouldExist := orphanDependents == nil || *orphanDependents == true
	for clusterName, clusterClientset := range clusters {
		_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName)
		if shouldExist && errors.IsNotFound(err) {
			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
		} else if !shouldExist && !errors.IsNotFound(err) {
			errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for ingress %s in cluster %s, got error: %v", ingressName, clusterName, err))
		}
	}
	if len(errMessages) != 0 {
		framework.Failf("%s", strings.Join(errMessages, "; "))
	}
}
Example #13
// Clean up PVs and PVCs in multi-pv-pvc test cases. All entries found in the pv and
// claims maps are deleted.
// Note: this is the only code that deletes PV objects.
func pvPvcCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {

	if c != nil && len(ns) > 0 {
		for pvcKey := range claims {
			_, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name)
			if !apierrs.IsNotFound(err) {
				Expect(err).NotTo(HaveOccurred())
				framework.Logf("   deleting PVC %v ...", pvcKey)
				err = c.Core().PersistentVolumeClaims(pvcKey.Namespace).Delete(pvcKey.Name, nil)
				Expect(err).NotTo(HaveOccurred())
				framework.Logf("   deleted  PVC %v", pvcKey)
			}
			delete(claims, pvcKey)
		}

		for name := range pvols {
			_, err := c.Core().PersistentVolumes().Get(name)
			if !apierrs.IsNotFound(err) {
				Expect(err).NotTo(HaveOccurred())
				framework.Logf("   deleting PV %v ...", name)
				err = c.Core().PersistentVolumes().Delete(name, nil)
				Expect(err).NotTo(HaveOccurred())
				framework.Logf("   deleted  PV %v", name)
			}
			delete(pvols, name)
		}
	}
}
Example #14
// Ensure that when the scheduler creates a binding for a pod that has already been deleted
// by the API server, the API server returns a not-found error.
func TestEtcdCreateBindingNoPod(t *testing.T) {
	storage, bindingStorage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	ctx := api.NewDefaultContext()

	// Assume that a pod has undergone the following:
	// - Create (apiserver)
	// - Schedule (scheduler)
	// - Delete (apiserver)
	_, err := bindingStorage.Create(ctx, &api.Binding{
		ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"},
		Target:     api.ObjectReference{Name: "machine"},
	})
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(storeerr.InterpretGetError(err, api.Resource("pods"), "foo")) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}

	_, err = storage.Get(ctx, "foo", &metav1.GetOptions{})
	if err == nil {
		t.Fatalf("Expected not-found-error but got nothing")
	}
	if !errors.IsNotFound(storeerr.InterpretGetError(err, api.Resource("pods"), "foo")) {
		t.Fatalf("Unexpected error: %v", err)
	}
}
Example #15
func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient federation_release_1_3.Interface) error {
	service := cachedService.lastState
	glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err := fedClient.Core().Services(service.Namespace).Get(service.Name)
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		_, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
		if err == nil {
			glog.V(2).Infof("Successfully update service %s/%s to federation apiserver", service.Namespace, service.Name)
			return nil
		}
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
			return err
		}
		time.Sleep(cachedService.nextFedUpdateDelay())
	}
	return err
}
Example #16
// Delete the passed in pod.
func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) error {

	framework.Logf("Deleting pod %v", pod.Name)
	err := c.Pods(ns).Delete(pod.Name, nil)
	if err != nil {
		return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err)
	}

	// Wait for pod to terminate
	err = f.WaitForPodTerminated(pod.Name, "")
	if err != nil && !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v will not teminate: %v", pod.Name, err)
	}

	// Re-get the pod to double check that it has been deleted; expect err
	// Note: Get() writes a log error if the pod is not found
	_, err = c.Pods(ns).Get(pod.Name)
	if err == nil {
		return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name)
	}
	if !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v has been deleted but still exists: %v", pod.Name, err)
	}

	framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
	return nil
}
Example #17
func TestEtcdDelete(t *testing.T) {
	podA := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Spec:       api.PodSpec{NodeName: "machine"},
	}

	testContext := api.WithNamespace(api.NewContext(), "test")
	server, registry := NewTestGenericEtcdRegistry(t)
	defer server.Terminate(t)

	// test failure condition
	_, err := registry.Delete(testContext, podA.Name, nil)
	if !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}

	// create pod
	_, err = registry.Create(testContext, podA)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// delete object
	_, err = registry.Delete(testContext, podA.Name, nil)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// try to get an item which should be deleted
	_, err = registry.Get(testContext, podA.Name)
	if !errors.IsNotFound(err) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) {
	// Get the groupVersions exposed at /api
	v := &unversioned.APIVersions{}
	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
	apiGroup := unversioned.APIGroup{}
	if err == nil {
		apiGroup = apiVersionsToAPIGroup(v)
	}
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}

	// Get the groupVersions exposed at /apis
	apiGroupList = &unversioned.APIGroupList{}
	err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}
	// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
	if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
		apiGroupList = &unversioned.APIGroupList{}
	}

	// append the group retrieved from /api to the list
	apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
	return apiGroupList, nil
}
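A sketch of how a caller might consume ServerGroups, assuming a discovery client constructed via client-go (NewDiscoveryClientForConfig, config, and the loop are assumptions, not part of the example above):

disco, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
	return err
}
groups, err := disco.ServerGroups()
if err != nil {
	return err
}
for _, g := range groups.Groups {
	// Print each group along with the version the server prefers for it.
	fmt.Printf("group %q, preferred version %q\n", g.Name, g.PreferredVersion.GroupVersion)
}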
Example #19
func (o RolloutLatestOptions) RunRolloutLatest() error {
	info := o.infos[0]
	config, ok := info.Object.(*deployapi.DeploymentConfig)
	if !ok {
		return fmt.Errorf("%s is not a deployment config", info.Name)
	}

	// TODO: Consider allowing one-off deployments for paused configs
	// See https://github.com/openshift/origin/issues/9903
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}

	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kc.ReplicationControllers(config.Namespace).Get(deploymentName)
	switch {
	case err == nil:
		// Reject attempts to start a concurrent deployment.
		if !deployutil.IsTerminatedDeployment(deployment) {
			status := deployutil.DeploymentStatusFor(deployment)
			return fmt.Errorf("#%d is already in progress (%s).", config.Status.LatestVersion, status)
		}
	case !kerrors.IsNotFound(err):
		return err
	}

	dc := config
	if !o.DryRun {
		request := &deployapi.DeploymentRequest{
			Name:   config.Name,
			Latest: !o.again,
			Force:  true,
		}

		dc, err = o.oc.DeploymentConfigs(config.Namespace).Instantiate(request)

		// Pre-1.4 servers don't support the instantiate endpoint. Fall back to incrementing
		// latestVersion on them.
		if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
			config.Status.LatestVersion++
			dc, err = o.oc.DeploymentConfigs(config.Namespace).Update(config)
		}

		if err != nil {
			return err
		}

		info.Refresh(dc, true)
	}

	if o.output == "revision" {
		fmt.Fprintf(o.out, fmt.Sprintf("%d", dc.Status.LatestVersion))
		return nil
	}

	kcmdutil.PrintSuccess(o.mapper, o.output == "name", o.out, info.Mapping.Resource, info.Name, o.DryRun, "rolled out")
	return nil
}
Example #20
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.dClient.Deployments(namespace)
	replicaSets := reaper.rsClient.ReplicaSets(namespace)
	rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout}

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
		return deployments.Get(name, metav1.GetOptions{})
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Do not cascade deletion for overlapping deployments.
	if len(deployment.Annotations[deploymentutil.OverlapAnnotation]) > 0 {
		return deployments.Delete(name, nil)
	}

	// Stop all replica sets.
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rc := range rsList.Items {
		if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(ScaleError)
			if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) {
				continue
			}
			errList = append(errList, err)
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
	return deployments.Delete(name, nil)
}
Example #21
// syncNamespace makes namespace life-cycle decisions
func syncNamespace(kubeClient client.Interface, experimentalMode bool, namespace api.Namespace) (err error) {
	if namespace.DeletionTimestamp == nil {
		return nil
	}
	glog.V(4).Infof("Syncing namespace %s", namespace.Name)

	// if there is a deletion timestamp, and the status is not terminating, then update status
	if !namespace.DeletionTimestamp.IsZero() && namespace.Status.Phase != api.NamespaceTerminating {
		newNamespace := api.Namespace{}
		newNamespace.ObjectMeta = namespace.ObjectMeta
		newNamespace.Status = namespace.Status
		newNamespace.Status.Phase = api.NamespaceTerminating
		result, err := kubeClient.Namespaces().Status(&newNamespace)
		if err != nil {
			return err
		}
		// work with the latest copy so we can proceed to clean up right away without another interval
		namespace = *result
	}

	// if the namespace is already finalized, delete it
	if finalized(namespace) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		return nil
	}

	// there may still be content for us to remove
	estimate, err := deleteAllContent(kubeClient, experimentalMode, namespace.Name, *namespace.DeletionTimestamp)
	if err != nil {
		return err
	}
	if estimate > 0 {
		return &contentRemainingError{estimate}
	}

	// we have removed content, so mark it finalized by us
	result, err := finalize(kubeClient, namespace)
	if err != nil {
		return err
	}

	// now check if all finalizers have reported that we delete now
	if finalized(*result) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}

	return nil
}
Example #22
func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.Core().ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Core().Secrets(ns).Get(ref.Name)
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != api.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[api.ServiceAccountNameKey]
			uid := secret.Annotations[api.ServiceAccountUIDKey]
			tokenData := secret.Data[api.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
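A hypothetical call, assuming c is a configured clientset, ns is the test namespace, and t is the surrounding *testing.T (all three names are assumptions, not part of the example above):

tokenName, token, err := getReferencedServiceAccountToken(c, ns, "default", true)
if err != nil {
	t.Fatalf("failed to find a token for the default service account: %v", err)
}
t.Logf("found token %s (%d bytes)", tokenName, len(token))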
// ReplaceChangedRoleBindings will reconcile all the changed system role bindings back to the recommended bootstrap policy
func (o *ReconcileClusterRoleBindingsOptions) ReplaceChangedRoleBindings(changedRoleBindings []*authorizationapi.ClusterRoleBinding) error {
	errs := []error{}
	for i := range changedRoleBindings {
		roleBinding, err := o.RoleBindingClient.Get(changedRoleBindings[i].Name)
		if err != nil && !kapierrors.IsNotFound(err) {
			errs = append(errs, err)
			continue
		}

		if kapierrors.IsNotFound(err) {
			createdRoleBinding, err := o.RoleBindingClient.Create(changedRoleBindings[i])
			if err != nil {
				errs = append(errs, err)
				continue
			}
			fmt.Fprintf(o.Out, "clusterrolebinding/%s\n", createdRoleBinding.Name)
			continue
		}

		// RoleRef is immutable, to reset this, we have to delete/recreate
		if !kapi.Semantic.DeepEqual(roleBinding.RoleRef, changedRoleBindings[i].RoleRef) {
			roleBinding.RoleRef = changedRoleBindings[i].RoleRef
			roleBinding.Subjects = changedRoleBindings[i].Subjects

			// TODO: for extra credit, determine whether the right to delete/create this rolebinding for the current user came from this rolebinding before deleting it
			err := o.RoleBindingClient.Delete(roleBinding.Name)
			if err != nil {
				errs = append(errs, err)
				continue
			}
			createdRoleBinding, err := o.RoleBindingClient.Create(changedRoleBindings[i])
			if err != nil {
				errs = append(errs, err)
				continue
			}
			fmt.Fprintf(o.Out, "clusterrolebinding/%s\n", createdRoleBinding.Name)
			continue
		}

		roleBinding.Subjects = changedRoleBindings[i].Subjects
		updatedRoleBinding, err := o.RoleBindingClient.Update(roleBinding)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		fmt.Fprintf(o.Out, "clusterrolebinding/%s\n", updatedRoleBinding.Name)
	}

	return kutilerrors.NewAggregate(errs)
}
Example #24
// syncNamespace orchestrates deletion of a Namespace and its associated content.
func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersions, namespace *api.Namespace) (err error) {
	if namespace.DeletionTimestamp == nil {
		return nil
	}
	glog.V(4).Infof("Syncing namespace %s", namespace.Name)

	// ensure that the status is up to date on the namespace
	// if we get a not found error, we assume the namespace is truly gone
	namespace, err = retryOnConflictError(kubeClient, namespace, updateNamespaceStatusFunc)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}

	// if the namespace is already finalized, delete it
	if finalized(namespace) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		return nil
	}

	// there may still be content for us to remove
	estimate, err := deleteAllContent(kubeClient, versions, namespace.Name, *namespace.DeletionTimestamp)
	if err != nil {
		return err
	}
	if estimate > 0 {
		return &contentRemainingError{estimate}
	}

	// we have removed content, so mark it finalized by us
	result, err := retryOnConflictError(kubeClient, namespace, finalizeNamespaceFunc)
	if err != nil {
		return err
	}

	// now check if all finalizers have reported that we delete now
	if finalized(result) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}

	return nil
}
Example #25
// Get retrieves the object from the Namespace and Name fields
func (i *Info) Get() (err error) {
	obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export)
	if err != nil {
		if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != api.NamespaceDefault && i.Namespace != api.NamespaceAll {
			err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do().Error()
			if err2 != nil && errors.IsNotFound(err2) {
				return err2
			}
		}
		return err
	}
	i.Object = obj
	i.ResourceVersion, _ = i.Mapping.MetadataAccessor.ResourceVersion(obj)
	return nil
}
Example #26
func (plugin *OsdnNode) getLocalSubnet() (string, error) {
	var subnet *osapi.HostSubnet
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2,
		Steps:    8,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		var err error
		subnet, err = plugin.osClient.HostSubnets().Get(plugin.hostName)
		if err == nil {
			return true, nil
		} else if kapierrors.IsNotFound(err) {
			glog.Warningf("Could not find an allocated subnet for node: %s, Waiting...", plugin.hostName)
			return false, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return "", fmt.Errorf("Failed to get subnet for this host: %s, error: %v", plugin.hostName, err)
	}

	if err = plugin.networkInfo.validateNodeIP(subnet.HostIP); err != nil {
		return "", fmt.Errorf("Failed to validate own HostSubnet: %v", err)
	}

	return subnet.Subnet, nil
}
Example #27
func (e *exists) Admit(a admission.Attributes) (err error) {
	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		return nil
	}

	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := e.store.Get(namespace)
	if err != nil {
		return errors.NewInternalError(err)
	}
	if exists {
		return nil
	}

	// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
	_, err = e.client.Core().Namespaces().Get(a.GetNamespace())
	if err != nil {
		if errors.IsNotFound(err) {
			return err
		}
		return errors.NewInternalError(err)
	}

	return nil
}
Example #28
func TestUpdateError(t *testing.T) {
	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})

	storage := makeTestStorage()
	obj, err := storage.Create(ctx, &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-different"},
		RoleRef:    kapi.ObjectReference{Name: "admin"},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	original := obj.(*authorizationapi.RoleBinding)

	roleBinding := &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-roleBinding", ResourceVersion: original.ResourceVersion},
		RoleRef:    kapi.ObjectReference{Name: "admin"},
	}

	_, _, err = storage.Update(ctx, roleBinding)
	if err == nil {
		t.Errorf("Missing expected error")
		return
	}
	if !kapierrors.IsNotFound(err) {
		t.Errorf("Unexpected error %v", err)
	}
}
// listCollection will list the items in the specified namespace
// it returns the following:
//  the list of items in the collection (if found)
//  a boolean indicating whether the operation is supported
//  an error if the operation is supported but could not be completed.
func listCollection(
	dynamicClient *dynamic.Client,
	opCache operationNotSupportedCache,
	gvr unversioned.GroupVersionResource,
	namespace string,
) (*runtime.UnstructuredList, bool, error) {
	glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)

	key := operationKey{op: operationList, gvr: gvr}
	if !opCache.isSupported(key) {
		glog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
		return nil, false, nil
	}

	apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
	unstructuredList, err := dynamicClient.Resource(&apiResource, namespace).List(v1.ListOptions{})
	if err == nil {
		return unstructuredList, true, nil
	}

	// this is strange, but we need to special case for both MethodNotSupported and NotFound errors
	// TODO: https://github.com/kubernetes/kubernetes/issues/22413
	// we have a resource returned in the discovery API that supports no top-level verbs:
	//  /apis/extensions/v1beta1/namespaces/default/replicationcontrollers
	// when working with this resource type, we will get a literal not found error rather than expected method not supported
	// remember next time that this resource does not support delete collection...
	if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
		glog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
		opCache[key] = true
		return nil, false, nil
	}

	return nil, true, err
}
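A hypothetical caller, showing how the three return values are meant to be used (the surrounding names are illustrative, not part of the example above):

list, supported, err := listCollection(dynamicClient, opCache, gvr, namespace)
if err != nil {
	return err
}
if !supported {
	// The resource has no usable list verb; fall back to another cleanup strategy.
	return nil
}
glog.V(5).Infof("namespace controller - listCollection returned %d items for %v", len(list.Items), gvr)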
Example #30
func (s *ServiceController) persistUpdate(service *api.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)

		switch {
		case err == nil:
			return nil
		case errors.IsNotFound(err):
			// If the object no longer exists, we don't want to recreate it. Just bail
			// out so that we can process the delete, which we should soon be receiving
			// if we haven't already.
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		case errors.IsConflict(err):
			// TODO: Try to resolve the conflict if the change was unrelated to load
			// balancer status. For now, just rely on the fact that we'll
			// also process the update that caused the resource version to change.
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
			return nil
		}

		glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
			service.Namespace, service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}
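In newer client-go trees the same conflict-tolerant status update is usually written with the retry helper rather than a hand-rolled loop. A sketch under that assumption (retry is k8s.io/client-go/util/retry; the other names reuse the example above):

err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
	// A real caller would re-read the service here so each retry uses a fresh resourceVersion.
	_, updateErr := s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)
	return updateErr
})
if errors.IsNotFound(err) {
	// The service is gone; there is nothing left to persist.
	return nil
}
return err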