Example 1
func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c, Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c, Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c, Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c, Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c, Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
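
A minimal usage sketch for the factory above (the wrapper function, timeout, and error handling are illustrative assumptions, not part of the original):

// Hypothetical caller: look up a reaper for the kind, then stop the resource.
// Stop scales the workload down and deletes it once its pods are gone.
func stopReplicationController(c client.Interface, namespace, name string) error {
	reaper, err := ReaperFor(api.Kind("ReplicationController"), c)
	if err != nil {
		return err // e.g. *NoSuchReaperError for unsupported kinds
	}
	return reaper.Stop(namespace, name, 1*time.Minute, api.NewDeleteOptions(0))
}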
Example 2
func cleanupDensityTest(dtc DensityTestConfig) {
	defer GinkgoRecover()
	By("Deleting created Collections")
	// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
	for i := range dtc.Configs {
		name := dtc.Configs[i].GetName()
		namespace := dtc.Configs[i].GetNamespace()
		kind := dtc.Configs[i].GetKind()
		// TODO: Remove Deployment guard once GC is implemented for Deployments.
		if framework.TestContext.GarbageCollectorEnabled && kind != extensions.Kind("Deployment") {
			By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
			err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
			framework.ExpectNoError(err)
		} else {
			By(fmt.Sprintf("Cleaning up the %v and pods", kind))
			err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
			framework.ExpectNoError(err)
		}
	}

	// Delete all secrets
	for i := range dtc.SecretConfigs {
		dtc.SecretConfigs[i].Stop()
	}

	for i := range dtc.DaemonConfigs {
		framework.ExpectNoError(framework.DeleteResourceAndPods(
			dtc.ClientSet,
			dtc.InternalClientset,
			extensions.Kind("DaemonSet"),
			dtc.DaemonConfigs[i].Namespace,
			dtc.DaemonConfigs[i].Name,
		))
	}
}
Example 3
func generateConfigsForGroup(
	nss []*v1.Namespace, groupName string, size, count int, image string, command []string, kind schema.GroupKind) []testutils.RunObjectConfig {
	configs := make([]testutils.RunObjectConfig, 0, count)
	for i := 1; i <= count; i++ {
		baseConfig := &testutils.RCConfig{
			Client:         nil, // this will be overwritten later
			InternalClient: nil, // this will be overwritten later
			Name:           groupName + "-" + strconv.Itoa(i),
			Namespace:      nss[i%len(nss)].Name,
			Timeout:        10 * time.Minute,
			Image:          image,
			Command:        command,
			Replicas:       size,
			CpuRequest:     10,       // 0.01 core
			MemRequest:     26214400, // 25MB
		}
		var config testutils.RunObjectConfig
		switch kind {
		case api.Kind("ReplicationController"):
			config = baseConfig
		case extensions.Kind("ReplicaSet"):
			config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
		case extensions.Kind("Deployment"):
			config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
		default:
			framework.Failf("Unsupported kind for config creation: %v", kind)
		}
		configs = append(configs, config)
	}
	return configs
}
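
A hedged call-site sketch for the helper above (the namespace slice, group name, and image are placeholder values):

// Sketch: build ten 5-replica Deployment configs, spread round-robin
// across the given namespaces.
configs := generateConfigsForGroup(
	namespaces, "density", 5, 10,
	"gcr.io/google_containers/pause:2.0", nil, extensions.Kind("Deployment"))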
Example 4
func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c.Core()}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c.Core()}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil

	case apps.Kind("StatefulSet"):
		return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
Example 5
func generateConfigsForGroup(
	nss []*v1.Namespace,
	groupName string,
	size, count int,
	image string,
	command []string,
	kind schema.GroupKind,
	secretsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
	configs := make([]testutils.RunObjectConfig, 0, count)
	secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
	for i := 1; i <= count; i++ {
		namespace := nss[i%len(nss)].Name
		secretNames := make([]string, 0, secretsPerPod)

		for j := 0; j < secretsPerPod; j++ {
			secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
			secretConfigs = append(secretConfigs, &testutils.SecretConfig{
				Content:   map[string]string{"foo": "bar"},
				Client:    nil, // this will be overwritten later
				Name:      secretName,
				Namespace: namespace,
				LogFunc:   framework.Logf,
			})
			secretNames = append(secretNames, secretName)
		}

		baseConfig := &testutils.RCConfig{
			Client:         nil, // this will be overwritten later
			InternalClient: nil, // this will be overwritten later
			Name:           groupName + "-" + strconv.Itoa(i),
			Namespace:      namespace,
			Timeout:        10 * time.Minute,
			Image:          image,
			Command:        command,
			Replicas:       size,
			CpuRequest:     10,       // 0.01 core
			MemRequest:     26214400, // 25MB
			SecretNames:    secretNames,
		}

		var config testutils.RunObjectConfig
		switch kind {
		case api.Kind("ReplicationController"):
			config = baseConfig
		case extensions.Kind("ReplicaSet"):
			config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
		case extensions.Kind("Deployment"):
			config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
		case batch.Kind("Job"):
			config = &testutils.JobConfig{RCConfig: *baseConfig}
		default:
			framework.Failf("Unsupported kind for config creation: %v", kind)
		}
		configs = append(configs, config)
	}
	return configs, secretConfigs
}
Example 6
func (f *ring0Factory) CanBeAutoscaled(kind schema.GroupKind) error {
	switch kind {
	case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
		// nothing to do here
	default:
		return fmt.Errorf("cannot autoscale a %v", kind)
	}
	return nil
}
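
For reference, the api.Kind and extensions.Kind helpers these switches rely on are thin constructors over each group's SchemeGroupVersion; a sketch from memory of the upstream definition:

// Defined once per API group package (pkg/api, pkg/apis/extensions, ...).
// Kind takes an unqualified kind and returns a group-qualified GroupKind,
// which is what makes the plain switch on GroupKind values above work.
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}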
Example 7
func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c}, nil
	case extensions.Kind("Job"):
		return &JobScaler{c.Extensions()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
Example 8
func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"):
		return &JobScaler{c.Extensions()}, nil
		// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
		// case extensions.Kind("Deployment"):
		// 	return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
Example 9
func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c.Core()}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobScaler{c.Batch()}, nil // Either kind of Job can be scaled via the Batch interface.
	case apps.Kind("StatefulSet"):
		return &StatefulSetScaler{c.Apps()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
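
A hedged usage sketch, mirroring the job-scaling e2e test further down this list (the wrapper function, intervals, and target size are illustrative):

// Sketch: scale a ReplicaSet to three replicas and wait for the count.
func scaleReplicaSet(c internalclientset.Interface, namespace, name string) error {
	scaler, err := ScalerFor(extensions.Kind("ReplicaSet"), c)
	if err != nil {
		return err
	}
	retry := NewRetryParams(5*time.Second, 1*time.Minute)
	waitForReplicas := NewRetryParams(5*time.Second, 5*time.Minute)
	// A nil precondition means "scale regardless of the current size".
	return scaler.Scale(namespace, name, 3, nil, retry, waitForReplicas)
}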
Example 10
// Update scales the DeploymentConfig for the given Scale subresource, returning the updated Scale.
func (r *ScaleREST) Update(ctx kapi.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	deploymentConfig, err := r.registry.GetDeploymentConfig(ctx, name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("scale"), name)
	}

	old := api.ScaleFromConfig(deploymentConfig)
	obj, err := objInfo.UpdatedObject(ctx, old)
	if err != nil {
		return nil, false, err
	}

	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	deploymentConfig.Spec.Replicas = scale.Spec.Replicas
	if err := r.registry.UpdateDeploymentConfig(ctx, deploymentConfig); err != nil {
		return nil, false, err
	}

	return scale, false, nil
}
Example 11
func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) {
	switch kind {
	case extensions.Kind("Deployment"):
		return &DeploymentStatusViewer{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}
Example 12
func (r *ScaleREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	rc, err := (*r.registry).GetController(ctx, name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("replicationcontrollers/scale"), name)
	}
	oldScale := scaleFromRC(rc)

	obj, err := objInfo.UpdatedObject(ctx, oldScale)
	if err != nil {
		return nil, false, err
	}
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	rc.Spec.Replicas = scale.Spec.Replicas
	rc.ResourceVersion = scale.ResourceVersion
	rc, err = (*r.registry).UpdateController(ctx, rc)
	if err != nil {
		return nil, false, errors.NewConflict(extensions.Resource("replicationcontrollers/scale"), scale.Name, err)
	}
	return scaleFromRC(rc), false, nil
}
Example 13
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("expected input object type to be Scale, but %T", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	deployment, err := r.registry.GetDeployment(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("deployments/scale"), scale.Name)
	}
	deployment.Spec.Replicas = scale.Spec.Replicas
	deployment.ResourceVersion = scale.ResourceVersion
	deployment, err = r.registry.UpdateDeployment(ctx, deployment)
	if err != nil {
		return nil, false, err
	}
	newScale, err := scaleFromDeployment(deployment)
	if err != nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return newScale, false, nil
}
Example 14
func RollbackerFor(kind unversioned.GroupKind, c client.Interface) (Rollbacker, error) {
	switch kind {
	case extensions.Kind("Deployment"):
		return &DeploymentRollbacker{c}, nil
	}
	return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind)
}
Example 15
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	rs, err := r.registry.GetReplicaSet(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("replicasets/scale"), scale.Name)
	}
	rs.Spec.Replicas = scale.Spec.Replicas
	rs.ResourceVersion = scale.ResourceVersion
	rs, err = r.registry.UpdateReplicaSet(ctx, rs)
	if err != nil {
		return nil, false, err
	}
	newScale, err := scaleFromReplicaSet(rs)
	if err != nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return newScale, false, err
}
Example 16
func HistoryViewerFor(kind unversioned.GroupKind, c clientset.Interface) (HistoryViewer, error) {
	switch kind {
	case extensions.Kind("Deployment"):
		return &DeploymentHistoryViewer{c}, nil
	}
	return nil, fmt.Errorf("no history viewer has been implemented for %q", kind)
}
Example 17
func (f *ring0Factory) CanBeExposed(kind schema.GroupKind) error {
	switch kind {
	case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
		// nothing to do here
	default:
		return fmt.Errorf("cannot expose a %s", kind)
	}
	return nil
}
Example 18
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.Extensions().Deployments(namespace)
	replicaSets := reaper.Extensions().ReplicaSets(namespace)
	rsReaper, err := ReaperFor(extensions.Kind("ReplicaSet"), reaper)
	if err != nil {
		return err
	}

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
		return deployments.Get(name)
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Stop all replica sets.
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rs := range rsList.Items {
		if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(*ScaleError)
			if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) {
				continue
			}
			errList = append(errList, err)
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
	return deployments.Delete(name, nil)
}
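
updateDeploymentWithRetries is not shown in this snippet; one plausible shape, assuming the conflict-retry idiom of this codebase (wait.Poll plus errors.IsConflict), is:

// Sketch only: re-fetch and re-apply the mutation while the apiserver
// reports a write conflict; surface any other error immediately.
func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate func(*extensions.Deployment)) (*extensions.Deployment, error) {
	deployments := reaper.Extensions().Deployments(namespace)
	var deployment *extensions.Deployment
	err := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		var err error
		if deployment, err = deployments.Get(name); err != nil {
			return false, err
		}
		applyUpdate(deployment)
		if deployment, err = deployments.Update(deployment); err == nil {
			return true, nil
		}
		if errors.IsConflict(err) {
			return false, nil // retry
		}
		return false, err
	})
	return deployment, err
}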
Example 19
func (r *RollbackREST) Create(ctx api.Context, obj runtime.Object) (out runtime.Object, err error) {
	rollback, ok := obj.(*extensions.DeploymentRollback)
	if !ok {
		return nil, fmt.Errorf("expected input object type to be DeploymentRollback, but %T", obj)
	}

	if errs := extvalidation.ValidateDeploymentRollback(rollback); len(errs) != 0 {
		return nil, errors.NewInvalid(extensions.Kind("DeploymentRollback"), rollback.Name, errs)
	}

	// Update the Deployment with information in DeploymentRollback to trigger rollback
	err = r.rollbackDeployment(ctx, rollback.Name, &rollback.RollbackTo, rollback.UpdatedAnnotations)
	return
}
Example 20
func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) {
	defer GinkgoRecover()
	defer wg.Done()

	sleepUpTo(deletingTime)
	if framework.TestContext.GarbageCollectorEnabled && config.GetKind() != extensions.Kind("Deployment") {
		framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
			config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
			fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
	} else {
		framework.ExpectNoError(framework.DeleteResourceAndPods(
			config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
			fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
	}
}
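
sleepUpTo is not shown either; a minimal sketch consistent with its use here (stagger the deletions by sleeping a random duration within the window) would be:

// Hypothetical helper: sleep a uniformly random duration in [0, d).
func sleepUpTo(d time.Duration) {
	if d > 0 {
		time.Sleep(time.Duration(rand.Int63n(int64(d))))
	}
}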
Example 21
// Update scales the DeploymentConfig for the given Scale subresource, returning the updated Scale.
func (r *ScaleREST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	deploymentConfig, err := r.registry.GetDeploymentConfig(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("scale"), scale.Name)
	}

	scaleRet := &extensions.Scale{
		ObjectMeta: kapi.ObjectMeta{
			Name:              deploymentConfig.Name,
			Namespace:         deploymentConfig.Namespace,
			CreationTimestamp: deploymentConfig.CreationTimestamp,
		},
		Spec: extensions.ScaleSpec{
			Replicas: scale.Spec.Replicas,
		},
		Status: extensions.ScaleStatus{
			Selector: &unversioned.LabelSelector{MatchLabels: deploymentConfig.Spec.Selector},
		},
	}

	// TODO(directxman12): this is going to be a bit out of sync, since we are calculating it
	// here and not as part of the deploymentconfig loop -- is there a better way of doing it?
	totalReplicas, err := r.replicasForDeploymentConfig(deploymentConfig.Namespace, deploymentConfig.Name)
	if err != nil {
		return nil, false, err
	}

	oldReplicas := deploymentConfig.Spec.Replicas
	deploymentConfig.Spec.Replicas = scale.Spec.Replicas
	if err := r.registry.UpdateDeploymentConfig(ctx, deploymentConfig); err != nil {
		return nil, false, err
	}
	scaleRet.Status.Replicas = totalReplicas + (scale.Spec.Replicas - oldReplicas)

	return scaleRet, false, nil
}
Example 22
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	jobs := reaper.Extensions().Jobs(namespace)
	pods := reaper.Pods(namespace)
	scaler, err := ScalerFor(extensions.Kind("Job"), *reaper)
	if err != nil {
		return err
	}
	job, err := jobs.Get(name)
	if err != nil {
		return err
	}
	if timeout == 0 {
		// we will never have more active pods than job.Spec.Parallelism
		parallelism := *job.Spec.Parallelism
		timeout = Timeout + time.Duration(10*parallelism)*time.Second
	}

	// TODO: handle overlapping jobs
	retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
	waitForJobs := NewRetryParams(reaper.pollInterval, timeout)
	if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil {
		return err
	}
	// at this point only dead pods are left, that should be removed
	selector, err := extensions.LabelSelectorAsSelector(job.Spec.Selector)
	if err != nil {
		return err
	}
	options := api.ListOptions{LabelSelector: selector}
	podList, err := pods.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, pod := range podList.Items {
		if err := pods.Delete(pod.Name, gracePeriod); err != nil {
			// ignores the error when the pod isn't found
			if !errors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}
	// once we have all the pods removed we can safely remove the job itself
	if err := jobs.Delete(name, gracePeriod); err != nil {
		return err
	}
	return nil
}
Example 23
func ResourcesWithPodSpecs() []*meta.RESTMapping {
	restMaps := []*meta.RESTMapping{}
	resourcesWithTemplates := []string{"ReplicationController", "Deployment", "DaemonSet", "Job", "ReplicaSet"}
	mapper, _ := NewFactory(nil).Object()

	for _, resource := range resourcesWithTemplates {
		restmap, err := mapper.RESTMapping(unversioned.GroupKind{Kind: resource})
		if err == nil {
			restMaps = append(restMaps, restmap)
		} else if mapping, err := mapper.RESTMapping(extensions.Kind(resource)); err == nil {
			// Fall back to the extensions group; skip kinds that map in neither group.
			restMaps = append(restMaps, mapping)
		}
	}
	return restMaps
}
Example 24
func (r *RollbackREST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	rollback, ok := obj.(*extensions.DeploymentRollback)
	if !ok {
		return nil, errors.NewBadRequest(fmt.Sprintf("not a DeploymentRollback: %#v", obj))
	}

	if errs := extvalidation.ValidateDeploymentRollback(rollback); len(errs) != 0 {
		return nil, errors.NewInvalid(extensions.Kind("DeploymentRollback"), rollback.Name, errs)
	}

	// Update the Deployment with information in DeploymentRollback to trigger rollback
	err := r.rollbackDeployment(ctx, rollback.Name, &rollback.RollbackTo, rollback.UpdatedAnnotations)
	if err != nil {
		return nil, err
	}
	return &metav1.Status{
		Message: fmt.Sprintf("rollback request for deployment %q succeeded", rollback.Name),
		Code:    http.StatusOK,
	}, nil
}
Example 25
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	rc, err := (*r.registry).GetController(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("replicationcontrollers/scale"), scale.Name)
	}
	rc.Spec.Replicas = scale.Spec.Replicas
	rc, err = (*r.registry).UpdateController(ctx, rc)
	if err != nil {
		return nil, false, errors.NewConflict(extensions.Resource("replicationcontrollers/scale"), scale.Name, err)
	}
	return &extensions.Scale{
		ObjectMeta: api.ObjectMeta{
			Name:              rc.Name,
			Namespace:         rc.Namespace,
			CreationTimestamp: rc.CreationTimestamp,
		},
		Spec: extensions.ScaleSpec{
			Replicas: rc.Spec.Replicas,
		},
		Status: extensions.ScaleStatus{
			Replicas: rc.Status.Replicas,
			Selector: rc.Spec.Selector,
		},
	}, false, nil
}
Example 26
func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	Logf("deleting deployment %s", deploymentName)
	reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
	Expect(err).NotTo(HaveOccurred())
	timeout := 1 * time.Minute
	err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	Logf("ensuring deployment %s was deleted", deploymentName)
	_, err = c.Extensions().Deployments(ns).Get(deployment.Name)
	Expect(err).To(HaveOccurred())
	Expect(errors.IsNotFound(err)).To(BeTrue())
	Logf("ensuring deployment %s rcs were deleted", deploymentName)
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := api.ListOptions{LabelSelector: selector}
	rss, err := c.Extensions().ReplicaSets(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(rss.Items).Should(HaveLen(0))
	Logf("ensuring deployment %s pods were deleted", deploymentName)
	var pods *api.PodList
	if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		pods, err = c.Core().Pods(ns).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		if len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	}); err != nil {
		Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
	}
}
Example 27
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	deployment, err := (*r.registry).GetDeployment(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("deployments/scale"), scale.Name)
	}
	deployment.Spec.Replicas = scale.Spec.Replicas
	deployment, err = (*r.registry).UpdateDeployment(ctx, deployment)
	if err != nil {
		return nil, false, errors.NewConflict(extensions.Resource("deployments/scale"), scale.Name, err)
	}
	return extensions.ScaleFromDeployment(deployment), false, nil
}
Esempio n. 28
0
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}

			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}

			// eventually this should allow me choose a group priority based on the order of the discovery doc, for now hardcode a given order
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}

			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch mapping.GroupVersionKind.Group {
			case api.GroupName:
				return client.RESTClient, nil
			case autoscaling.GroupName:
				return client.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return client.BatchClient.RESTClient, nil
			case extensions.GroupName:
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return "", fmt.Errorf("invalid label selector: %v", err)
				}
				return selector.String(), nil
			case *extensions.ReplicaSet:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
				}
				return selector.String(), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			clientset := clientset.FromUnversionedClient(client)
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *extensions.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
	}
}
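
A hedged sketch of how such a factory is consumed by a kubectl command (the wrapper function and error handling are illustrative):

// Hypothetical command wiring: resolve a REST mapping for Deployments,
// then build a scaler from the factory's Scaler hook.
func scalerForDeployments(f *Factory) (kubectl.Scaler, error) {
	mapper, _ := f.Object() // second return value is the ObjectTyper
	mapping, err := mapper.RESTMapping(extensions.Kind("Deployment"))
	if err != nil {
		return nil, err
	}
	return f.Scaler(mapping)
}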
Example 29
	})

	It("should scale a job up", func() {
		startParallelism := 1
		endParallelism := 2
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(extensions.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := 2
		endParallelism := 1
		By("Creating a job")
Example 30
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig;
// if it is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		// If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that
		// have been dynamically added to the apiserver
		Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			if discoverDynamicAPIs {
				client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"})
				CheckErr(err)

				versions, gvks, err := GetThirdPartyGroupVersions(client.Discovery())
				CheckErr(err)
				if len(versions) > 0 {
					priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper))
						return nil, nil
					}
					multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper))
						return nil, nil
					}
					groupsMap := map[string][]unversioned.GroupVersion{}
					for _, version := range versions {
						groupsMap[version.Group] = append(groupsMap[version.Group], version)
					}
					for group, versionList := range groupsMap {
						preferredExternalVersion := versionList[0]

						thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group))
						CheckErr(err)
						accessor := meta.NewAccessor()
						groupMeta := apimachinery.GroupMeta{
							GroupVersion:  preferredExternalVersion,
							GroupVersions: versionList,
							RESTMapper:    thirdPartyMapper,
							SelfLinker:    runtime.SelfLinker(accessor),
							InterfacesFor: makeInterfacesFor(versionList),
						}

						CheckErr(registered.RegisterGroup(groupMeta))
						registered.AddThirdPartyAPIGroupVersions(versionList...)
						multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
					}
					priorityMapper.Delegate = multiMapper
					// Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we
			// don't re-assign, the above assignment won't actually update mapper.RESTMapper
					mapper.RESTMapper = priorityMapper
				}
			}
			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}
			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			gvk := mapping.GroupVersionKind
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			c, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch gvk.Group {
			case api.GroupName:
				return c.RESTClient, nil
			case autoscaling.GroupName:
				return c.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return c.BatchClient.RESTClient, nil
			case apps.GroupName:
				return c.AppsClient.RESTClient, nil
			case extensions.GroupName:
				return c.ExtensionsClient.RESTClient, nil
			case api.SchemeGroupVersion.Group:
				return c.RESTClient, nil
			case extensions.SchemeGroupVersion.Group:
				return c.ExtensionsClient.RESTClient, nil
			default:
				if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
					return nil, fmt.Errorf("unknown api group/version: %s", gvk.String())
				}
				cfg, err := clientConfig.ClientConfig()
				if err != nil {
					return nil, err
				}
				gv := gvk.GroupVersion()
				cfg.GroupVersion = &gv
				cfg.APIPath = "/apis"
				cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk.Kind)
				return restclient.RESTClientFor(cfg)
			}
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			clientset := clientset.FromUnversionedClient(client)
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}
			}
		},
	}
}