Example #1
func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""
	// First delete the oldName RC and orphan its pods.
	trueVar := true
	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar})
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName)
		if err == nil {
			return false, nil
		} else if errors.IsNotFound(err) {
			return true, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return err
	}
	// Then create the same RC with the new name.
	_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return err
	}
	return nil
}
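A short caller sketch for the example above. The client value, namespace, and controller names are illustrative assumptions; the imports are the ones the example already relies on.
// renameController is a hypothetical caller: it fetches an existing RC and
// hands it to Rename, which deletes it (orphaning its pods) and recreates it
// under the new name, so the pods keep running throughout.
func renameController(c coreclient.ReplicationControllersGetter) error {
	rc, err := c.ReplicationControllers("default").Get("frontend")
	if err != nil {
		return err
	}
	return Rename(c, rc, "frontend-v2")
}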
Example #2
func Rename(c client.ReplicationControllersNamespacer, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""

	_, err := c.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return err
	}
	err = c.ReplicationControllers(rc.Namespace).Delete(oldName)
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	return nil
}
Example #3
func Rename(c RollingUpdaterClient, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""

	_, err := c.CreateReplicationController(rc.Namespace, rc)
	if err != nil {
		return err
	}
	err = c.DeleteReplicationController(rc.Namespace, oldName)
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	return nil
}
Example #4
// getOrCreateTargetControllerWithClient looks for an existing controller with
// sourceId. If found, the existing controller is returned with true
// indicating that the controller already exists. If the controller isn't
// found, a new one is created and returned along with false indicating the
// controller was created.
//
// Existing controllers are validated to ensure their sourceIdAnnotation
// matches sourceId; if there's a mismatch, an error is returned.
func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
	existingRc, err := r.existingController(controller)
	if err != nil {
		if !errors.IsNotFound(err) {
			// There was an error trying to find the controller; don't assume we
			// should create it.
			return nil, false, err
		}
		if controller.Spec.Replicas <= 0 {
			return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec)
		}
		// The controller wasn't found, so create it.
		if controller.Annotations == nil {
			controller.Annotations = map[string]string{}
		}
		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas)
		controller.Annotations[sourceIdAnnotation] = sourceId
		controller.Spec.Replicas = 0
		newRc, err := r.c.ReplicationControllers(r.ns).Create(controller)
		return newRc, false, err
	}
	// Validate and use the existing controller.
	annotations := existingRc.Annotations
	source := annotations[sourceIdAnnotation]
	_, ok := annotations[desiredReplicasAnnotation]
	if source != sourceId || !ok {
		return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations)
	}
	return existingRc, true, nil
}
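A standalone sketch (assumed helper name, not part of the updater) of the annotation contract described above: an existing target controller must carry the source's identity and the desired replica count, and a mismatch is treated as an error.
// validateTargetAnnotations mirrors the check performed on an existing target
// controller: its sourceIdAnnotation must match the expected sourceId and the
// desiredReplicasAnnotation must be present.
func validateTargetAnnotations(rc *api.ReplicationController, sourceId string) error {
	annotations := rc.Annotations
	if annotations[sourceIdAnnotation] != sourceId {
		return fmt.Errorf("controller %s belongs to a different update (%s)", rc.Name, annotations[sourceIdAnnotation])
	}
	if _, ok := annotations[desiredReplicasAnnotation]; !ok {
		return fmt.Errorf("controller %s is missing the desired-replicas annotation", rc.Name)
	}
	return nil
}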
Example #5
func deployerPod(deployment *kapi.ReplicationController, alternateName string, related bool) *kapi.Pod {
	deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
	if len(alternateName) > 0 {
		deployerPodName = alternateName
	}

	deployment.Namespace = "test"

	pod := &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{
			Name:      deployerPodName,
			Namespace: deployment.Namespace,
			Labels: map[string]string{
				deployapi.DeployerPodForDeploymentLabel: deployment.Name,
			},
			Annotations: map[string]string{
				deployapi.DeploymentAnnotation: deployment.Name,
			},
		},
	}

	if !related {
		delete(pod.Annotations, deployapi.DeploymentAnnotation)
	}

	return pod
}
Example #6
func (dc *DeploymentController) updateRCRevision(rc api.ReplicationController, revision string) error {
	if rc.Annotations == nil {
		rc.Annotations = make(map[string]string)
	}
	rc.Annotations[deploymentutil.RevisionAnnotation] = revision
	_, err := dc.client.Legacy().ReplicationControllers(rc.ObjectMeta.Namespace).Update(&rc)
	return err
}
Example #7
func validateRC(rc *kube.ReplicationController) error {
	errList := validation.ValidateReplicationController(rc).Filter(
		// remove errors about missing template
		func(e error) bool {
			return e.Error() == "spec.template: Required value"
		},
	)

	meta := rc.GetObjectMeta()
	if len(meta.GetName()) == 0 && len(meta.GetGenerateName()) > 0 {
		errList = errList.Filter(func(e error) bool {
			return e.Error() == "metadata.name: Required value: name or generateName is required"
		})
	}

	return errList.ToAggregate()
}
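For context, a minimal object that the filter above is designed to let through might look like the following sketch: an RC with a name, a namespace, and a selector, but no pod template yet. Field requirements beyond the ones the example explicitly filters are assumptions about this API version's validation rules.
// minimalRC builds a template-less ReplicationController that validateRC is
// intended to accept: the only expected complaint, the missing pod template,
// is filtered out above.
func minimalRC() *kube.ReplicationController {
	return &kube.ReplicationController{
		ObjectMeta: kube.ObjectMeta{
			Name:      "partial-rc",
			Namespace: "default",
		},
		Spec: kube.ReplicationControllerSpec{
			Replicas: 1,
			Selector: map[string]string{"app": "partial-rc"},
		},
	}
}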
Example #8
func TestWatchControllers(t *testing.T) {
	fakeWatch := watch.NewFake()
	c := &fake.Clientset{}
	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	var testControllerSpec api.ReplicationController
	received := make(chan string)

	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler. The handler validates the received controller
	// and closes the received channel to indicate that the test can finish.
	manager.syncHandler = func(key string) error {

		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := *obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the rc watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.rcController.Run(stopCh)
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

	testControllerSpec.Name = "foo"
	fakeWatch.Add(&testControllerSpec)

	select {
	case <-received:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}
Example #9
File: main.go Project: aclisp/kubecon
func createReplicationController(c *gin.Context) {
	namespace := c.Param("ns")
	rcjson := c.PostForm("json")

	var rc api.ReplicationController
	err := json.Unmarshal([]byte(rcjson), &rc)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}

	if rc.Spec.Selector == nil {
		rc.Spec.Selector = make(map[string]string)
	}
	rc.Spec.Selector["managed-by"] = rc.Name
	if rc.Spec.Template.Labels == nil {
		rc.Spec.Template.Labels = make(map[string]string)
	}
	rc.Spec.Template.Labels["managed-by"] = rc.Name
	rc.Spec.Template.Spec.Containers[0].Name = rc.Name

	var meta api.ObjectMeta // clean metadata
	meta.Name = rc.Name
	meta.GenerateName = rc.GenerateName
	meta.Labels = rc.Labels
	meta.Annotations = rc.Annotations
	if meta.Labels != nil {
		meta.Labels["managed-by"] = rc.Name
	}
	rc.ObjectMeta = meta

	_, err = kubeclient.Get().ReplicationControllers(namespace).Create(&rc)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}

	c.Redirect(http.StatusMovedPermanently, fmt.Sprintf("/namespaces/%s", namespace))
}
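A sketch of a request body that the handler above can process. Since the handler indexes Spec.Template.Spec.Containers[0], the payload must carry a pod template with at least one container; the concrete names and image are placeholders.
// buildRCPayload marshals a minimal ReplicationController for the "json" form
// field. The handler rewrites the selector, template labels, container name,
// and metadata, so only the name, replica count, and container image matter.
func buildRCPayload() ([]byte, error) {
	rc := api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: "demo"},
		Spec: api.ReplicationControllerSpec{
			Replicas: 2,
			Template: &api.PodTemplateSpec{
				Spec: api.PodSpec{
					Containers: []api.Container{{Name: "demo", Image: "nginx"}},
				},
			},
		},
	}
	return json.Marshal(&rc)
}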
Example #10
// setNewRCAnnotations sets the new rc's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if the rc's annotations changed.
func setNewRCAnnotations(deployment *extensions.Deployment, rc *api.ReplicationController, newRevision string) bool {
	// First, copy deployment's annotations
	annotationChanged := copyDeploymentAnnotationsToRC(deployment, rc)
	// Then, update RC's revision annotation
	if rc.Annotations == nil {
		rc.Annotations = make(map[string]string)
	}
	if rc.Annotations[deploymentutil.RevisionAnnotation] != newRevision {
		rc.Annotations[deploymentutil.RevisionAnnotation] = newRevision
		annotationChanged = true
		glog.V(4).Infof("updating RC %q's revision to %s - %+v\n", rc.Name, newRevision)
	}
	return annotationChanged
}
Example #11
// copyDeploymentAnnotationsToRC copies the deployment's annotations to the rc's annotations,
// and returns true if the rc's annotations changed.
func copyDeploymentAnnotationsToRC(deployment *extensions.Deployment, rc *api.ReplicationController) bool {
	rcAnnotationsChanged := false
	if rc.Annotations == nil {
		rc.Annotations = make(map[string]string)
	}
	for k, v := range deployment.Annotations {
		// Skip apply annotations
		// TODO: How to decide which annotations should / should not be copied?
		// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
		if k == kubectl.LastAppliedConfigAnnotation || rc.Annotations[k] == v {
			continue
		}
		rc.Annotations[k] = v
		rcAnnotationsChanged = true
	}
	return rcAnnotationsChanged
}
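A small illustrative check of the copy semantics above (the annotation keys and values are assumed): annotations already present with the same value, and the kubectl apply annotation, are skipped, so the returned flag only reports real changes.
// copyAnnotationsExample shows the change-detection behaviour: only the new
// "team" annotation is copied, so the function reports a change exactly once.
func copyAnnotationsExample(deployment *extensions.Deployment, rc *api.ReplicationController) {
	deployment.Annotations = map[string]string{"team": "search", "owner": "a"}
	rc.Annotations = map[string]string{"owner": "a"}
	changed := copyDeploymentAnnotationsToRC(deployment, rc)      // true: "team" copied
	changedAgain := copyDeploymentAnnotationsToRC(deployment, rc) // false: nothing new
	glog.V(4).Infof("changed=%v changedAgain=%v", changed, changedAgain)
}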
Example #12
File: k8s.go Project: fabric8io/kansible
// GetOrCreatePodSpec returns the PodSpec for this ReplicationController
// lazily creating structures as required
func GetOrCreatePodSpec(rc *api.ReplicationController) *api.PodSpec {
	spec := &rc.Spec
	if spec == nil {
		rc.Spec = api.ReplicationControllerSpec{}
		spec = &rc.Spec
	}
	template := spec.Template
	if template == nil {
		spec.Template = &api.PodTemplateSpec{}
		template = spec.Template
	}
	podSpec := &template.Spec
	if podSpec == nil {
		template.Spec = api.PodSpec{}
		podSpec = &template.Spec
	}
	return podSpec
}
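A brief usage sketch for the helper above (the container name and image are placeholders): because the helper returns a pointer into the RC, appending to its Containers slice mutates the controller in place.
// addAgentContainer demonstrates GetOrCreatePodSpec: the returned *api.PodSpec
// points into rc, so the appended container ends up on rc's pod template.
func addAgentContainer(rc *api.ReplicationController) {
	podSpec := GetOrCreatePodSpec(rc)
	podSpec.Containers = append(podSpec.Containers, api.Container{
		Name:  "agent",
		Image: "fabric8/kansible",
	})
}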
Example #13
// updateReplicationControllerStatus attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerInterface, rc api.ReplicationController, newStatus api.ReplicationControllerStatus) (updateErr error) {
	// This is the steady state. It happens when the rc doesn't have any expectations, since
	// we do a periodic relist every 30s. If the generations differ but the replicas are
	// the same, a caller might've resized to the same replica count.
	if rc.Status.Replicas == newStatus.Replicas &&
		rc.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas &&
		rc.Status.ReadyReplicas == newStatus.ReadyReplicas &&
		rc.Status.AvailableReplicas == newStatus.AvailableReplicas &&
		rc.Generation == rc.Status.ObservedGeneration &&
		reflect.DeepEqual(rc.Status.Conditions, newStatus.Conditions) {
		return nil
	}
	// Save the generation number we acted on, otherwise we might wrongfully indicate
	// that we've seen a spec update when we retry.
	// TODO: This can clobber an update if we allow multiple agents to write to the
	// same status.
	newStatus.ObservedGeneration = rc.Generation

	var getErr error
	for i, rc := 0, &rc; ; i++ {
		glog.V(4).Infof(fmt.Sprintf("Updating replica count for rc: %s/%s, ", rc.Namespace, rc.Name) +
			fmt.Sprintf("replicas %d->%d (need %d), ", rc.Status.Replicas, newStatus.Replicas, rc.Spec.Replicas) +
			fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rc.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
			fmt.Sprintf("readyReplicas %d->%d, ", rc.Status.ReadyReplicas, newStatus.ReadyReplicas) +
			fmt.Sprintf("availableReplicas %d->%d, ", rc.Status.AvailableReplicas, newStatus.AvailableReplicas) +
			fmt.Sprintf("sequence No: %v->%v", rc.Status.ObservedGeneration, newStatus.ObservedGeneration))

		rc.Status = newStatus
		_, updateErr = c.UpdateStatus(rc)
		if updateErr == nil || i >= statusUpdateRetries {
			return updateErr
		}
		// Update the controller with the latest resource version for the next poll
		if rc, getErr = c.Get(rc.Name); getErr != nil {
			// If the GET fails we can't trust status.Replicas anymore. This error
			// is bound to be more interesting than the update failure.
			return getErr
		}
	}
}
Example #14
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *RollingUpdateOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	err := validateArguments(cmd, options.Filenames, args)
	if err != nil {
		return err
	}

	deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key")
	filename := ""
	image := cmdutil.GetFlagString(cmd, "image")
	pullPolicy := cmdutil.GetFlagString(cmd, "image-pull-policy")
	oldName := args[0]
	rollback := cmdutil.GetFlagBool(cmd, "rollback")
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")
	outputFormat := cmdutil.GetFlagString(cmd, "output")
	container := cmdutil.GetFlagString(cmd, "container")

	if len(options.Filenames) > 0 {
		filename = options.Filenames[0]
	}

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(client, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(client, newRc, oldName)
	}

	var keepOldName bool
	var replicasDefaulted bool

	mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd))

	if len(filename) != 0 {
		schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir"))
		if err != nil {
			return err
		}

		request := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, false, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if gvk, err := typer.ObjectKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %v not a ReplicationController", filename, gvk)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, with a suffix to provide uniqueness for
	// same-image updates.
	if len(image) != 0 {
		codec := api.Codecs.LegacyCodec(client.APIVersion())
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			if inProgressImage := newRc.Spec.Template.Spec.Containers[0].Image; inProgressImage != image {
				return cmdutil.UsageError(cmd, "Found existing in-progress update to image (%s).\nEither continue in-progress update with --image=%s or rollback with --rollback", inProgressImage, inProgressImage)
			}
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			config := &kubectl.NewControllerConfig{
				Namespace:     cmdNamespace,
				OldName:       oldName,
				NewName:       newName,
				Image:         image,
				Container:     container,
				DeploymentKey: deploymentKey,
			}
			if oldRc.Spec.Template.Spec.Containers[0].Image == image {
				if len(pullPolicy) == 0 {
					return cmdutil.UsageError(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image")
				}
				config.PullPolicy = api.PullPolicy(pullPolicy)
			}
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, config)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller,
		// and add the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, codec)
		if err != nil {
			return err
		}
		// If new image is same as old, the hash may not be distinct, so add a suffix.
		oldHash += "-orig"
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}

	if rollback {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}

		if newRc == nil {
			return cmdutil.UsageError(cmd, "Could not find %s to rollback.\n", newName)
		}
	}

	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, client)

	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		newRcData := &bytes.Buffer{}
		if outputFormat == "" {
			oldRcData.WriteString(oldRc.Name)
			newRcData.WriteString(newRc.Name)
		} else {
			if err := f.PrintObject(cmd, mapper, oldRc, oldRcData); err != nil {
				return err
			}
			if err := f.PrintObject(cmd, mapper, newRc, newRcData); err != nil {
				return err
			}
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		MaxUnavailable: intstr.FromInt(0),
		MaxSurge:       intstr.FromInt(1),
	}
	if rollback {
		err = kubectl.AbortRollingUpdate(config)
		if err != nil {
			return err
		}
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}

	message := "rolling updated"
	if keepOldName {
		newRc.Name = oldName
	} else {
		message = fmt.Sprintf("rolling updated to %q", newRc.Name)
	}
	newRc, err = client.ReplicationControllers(cmdNamespace).Get(newRc.Name)
	if err != nil {
		return err
	}
	if outputFormat != "" {
		return f.PrintObject(cmd, mapper, newRc, out)
	}
	kind, err := api.Scheme.ObjectKind(newRc)
	if err != nil {
		return err
	}
	_, res := meta.KindToResource(kind)
	cmdutil.PrintSuccess(mapper, false, out, res.Resource, oldName, message)
	return nil
}
Example #15
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *RollingUpdateOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	deploymentKey, filename, image, oldName, err := validateArguments(cmd, options.Filenames, args)
	if err != nil {
		return err
	}
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")
	outputFormat := cmdutil.GetFlagString(cmd, "output")

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(client, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(client, newRc, oldName)
	}

	var keepOldName bool
	var replicasDefaulted bool

	mapper, typer := f.Object()

	if len(filename) != 0 {
		schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"))
		if err != nil {
			return err
		}

		request := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if _, kind, err := typer.ObjectVersionAndKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %s not a ReplicationController", filename, kind)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
	// different image.
	if len(image) != 0 {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, cmdNamespace, oldName, newName, image, deploymentKey)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller,
		// and add the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, client.Codec)
		if err != nil {
			return err
		}
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}
	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, client)

	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		newRcData := &bytes.Buffer{}
		if outputFormat == "" {
			oldRcData.WriteString(oldRc.Name)
			newRcData.WriteString(newRc.Name)
		} else {
			if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil {
				return err
			}
			if err := f.PrintObject(cmd, newRc, newRcData); err != nil {
				return err
			}
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		MaxUnavailable: util.NewIntOrStringFromInt(0),
		MaxSurge:       util.NewIntOrStringFromInt(1),
	}
	if cmdutil.GetFlagBool(cmd, "rollback") {
		kubectl.AbortRollingUpdate(config)
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}

	message := "rolling updated"
	if keepOldName {
		newRc.Name = oldName
	} else {
		message = fmt.Sprintf("rolling updated to %q", newRc.Name)
	}
	newRc, err = client.ReplicationControllers(cmdNamespace).Get(newRc.Name)
	if err != nil {
		return err
	}
	if outputFormat != "" {
		return f.PrintObject(cmd, newRc, out)
	}
	_, kind, err := api.Scheme.ObjectVersionAndKind(newRc)
	if err != nil {
		return err
	}
	_, res := meta.KindToResource(kind, false)
	cmdutil.PrintSuccess(mapper, false, out, res, oldName, message)
	return nil
}
Example #16
func withCreated(item *kapi.ReplicationController, creationTimestamp unversioned.Time) *kapi.ReplicationController {
	item.CreationTimestamp = creationTimestamp
	return item
}
Example #17
func SetNextControllerAnnotation(rc *api.ReplicationController, name string) {
	if rc.Annotations == nil {
		rc.Annotations = map[string]string{}
	}
	rc.Annotations[nextControllerAnnotation] = name
}
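A short usage sketch (controller names assumed): the annotation links the old controller to the controller that is replacing it during a rolling update, and can be read straight back out of the annotation map.
// linkControllers records on oldRc which controller succeeds it, then reads
// the link back for illustration.
func linkControllers(oldRc, newRc *api.ReplicationController) string {
	SetNextControllerAnnotation(oldRc, newRc.Name)
	return oldRc.Annotations[nextControllerAnnotation]
}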
Example #18
func (self *realKubeFramework) CreateRC(ns string, rc *api.ReplicationController) (*api.ReplicationController, error) {
	rc.Namespace = ns
	return self.kubeClient.ReplicationControllers(ns).Create(rc)
}
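A caller sketch for CreateRC above; all names and the image are illustrative. The framework overwrites the namespace on the object, so the caller only supplies the controller itself.
// startBusyboxRC builds a one-replica controller and hands it to the test
// framework; the namespace argument overrides whatever is set on the object.
func startBusyboxRC(fw *realKubeFramework, ns string) (*api.ReplicationController, error) {
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: "busybox-rc"},
		Spec: api.ReplicationControllerSpec{
			Replicas: 1,
			Selector: map[string]string{"run": "busybox-rc"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: map[string]string{"run": "busybox-rc"}},
				Spec: api.PodSpec{
					Containers: []api.Container{{Name: "busybox", Image: "busybox", Command: []string{"sleep", "3600"}}},
				},
			},
		},
	}
	return fw.CreateRC(ns, rc)
}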
Example #19
// WaitForADeployment waits for a deployment to fulfill either isOK or isFailed.
// When isOK returns true, WaitForADeployment returns nil, when isFailed returns
// true, WaitForADeployment returns an error including the deployment status.
// WaitForADeployment waits for at most a certain timeout (non-configurable).
func WaitForADeployment(client kclient.ReplicationControllerInterface, name string, isOK, isFailed func(*kapi.ReplicationController) bool, oc *CLI) error {
	timeout := 15 * time.Minute

	// closing done signals that any pending operation should be aborted.
	done := make(chan struct{})
	defer close(done)

	// okOrFailed returns whether a replication controller matches either of
	// the predicates isOK or isFailed, and the associated error in case of
	// failure.
	okOrFailed := func(rc *kapi.ReplicationController) (err error, matched bool) {
		if isOK(rc) {
			return nil, true
		}
		if isFailed(rc) {
			return fmt.Errorf("The deployment %q status is %q", name, rc.Annotations[deployapi.DeploymentStatusAnnotation]), true
		}
		return nil, false
	}

	// waitForDeployment waits until okOrFailed returns true or the done
	// channel is closed.
	waitForDeployment := func() (err error, retry bool) {
		requirement, err := labels.NewRequirement(deployapi.DeploymentConfigAnnotation, selection.Equals, sets.NewString(name))
		if err != nil {
			return fmt.Errorf("unexpected error generating label selector: %v", err), false
		}
		list, err := client.List(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement)})
		if err != nil {
			return err, false
		}
		// multiple deployments are conceivable, so we look at how the latest deployment is doing
		var lastRC *kapi.ReplicationController
		for _, rc := range list.Items {
			if lastRC == nil {
				lastRC = &rc
				continue
			}
			// assuming we won't have to deal with more than 9 deployments
			if lastRC.GetName() <= rc.GetName() {
				lastRC = &rc
			}
		}

		if lastRC != nil {
			err, matched := okOrFailed(lastRC)
			if matched {
				return err, false
			}
		}

		w, err := client.Watch(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement), ResourceVersion: list.ResourceVersion})
		if err != nil {
			return err, false
		}
		defer w.Stop()
		for {
			select {
			case val, ok := <-w.ResultChan():
				if !ok {
					// watcher error, re-get and re-watch
					return nil, true
				}
				if rc, ok := val.Object.(*kapi.ReplicationController); ok {
					if lastRC == nil {
						lastRC = rc
					}
					// multiple deployments are conceivable, so we look at how the latest deployment is doing
					if lastRC.GetName() <= rc.GetName() {
						lastRC = rc
						err, matched := okOrFailed(rc)
						if matched {
							return err, false
						}
					}
				}
			case <-done:
				// no more time left, stop what we were doing,
				// do not retry.
				return nil, false
			}
		}
	}

	// errCh is buffered so the goroutine below never blocks on sending,
	// preventing a goroutine leak if we reach the timeout.
	errCh := make(chan error, 1)

	go func() {
		defer close(errCh)
		err, retry := waitForDeployment()
		for retry {
			err, retry = waitForDeployment()
		}
		errCh <- err
	}()

	select {
	case err := <-errCh:
		if err != nil {
			DumpDeploymentLogs(name, oc)
		}
		return err
	case <-time.After(timeout):
		DumpDeploymentLogs(name, oc)
		// also covers timing issues where we miss watch updates
		return fmt.Errorf("timed out waiting for deployment %q after %v", name, timeout)
	}
}
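A sketch of the two predicates the helper above expects, built from the deployment status annotation already used elsewhere in these examples; treating Complete as success and Failed as failure is an assumption about the caller's intent.
// deploymentComplete and deploymentFailed are example predicates for
// WaitForADeployment, keyed off the deployment status annotation.
func deploymentComplete(rc *kapi.ReplicationController) bool {
	return rc.Annotations[deployapi.DeploymentStatusAnnotation] == string(deployapi.DeploymentStatusComplete)
}

func deploymentFailed(rc *kapi.ReplicationController) bool {
	return rc.Annotations[deployapi.DeploymentStatusAnnotation] == string(deployapi.DeploymentStatusFailed)
}
A call such as WaitForADeployment(client, "frontend", deploymentComplete, deploymentFailed, oc) would then block until one of the two fires or the internal timeout elapses (the deployment name is, again, a placeholder).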
Example #20
// Handle processes deployment and either creates a deployer pod or responds
// to a terminal deployment status. Since this controller started using caches,
// the provided rc MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	// Copy all the annotations from the deployment.
	updatedAnnotations := make(map[string]string)
	for key, value := range deployment.Annotations {
		updatedAnnotations[key] = value
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
	deployer, deployerErr := c.getPod(deployment.Namespace, deployerPodName)
	if deployerErr == nil {
		nextStatus = c.nextStatus(deployer, deployment, updatedAnnotations)
	}

	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod.
		// Instead try to delete any deployer pods found and transition the
		// deployment to Pending so that the deployment config controller
		// continues to see the deployment as in-flight. Eventually the deletion
		// of the deployer pod should cause a requeue of this deployment and
		// then it can be transitioned to Failed by this controller.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusPending
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
			break
		}

		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're re-
		// entering. Ensure that the pod is the one we created by verifying the
		// annotation on it, and throw a retryable error.
		if deployerErr != nil && !kerrors.IsNotFound(deployerErr) {
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), deployerErr)
		}
		if deployerErr == nil && deployer != nil {
			// Do a stronger check to validate that the existing deployer pod is
			// actually for this deployment, and if not, fail this deployment.
			//
			// TODO: Investigate checking the container image of the running pod and
			// comparing with the intended deployer pod image. If we do so, we'll need
			// to ensure that changes to 'unrelated' pods don't result in updates to
			// the deployment. So, the image check will have to be done in other areas
			// of the code as well.
			if deployutil.DeploymentNameFor(deployer) != deployment.Name {
				nextStatus = deployapi.DeploymentStatusFailed
				updatedAnnotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "FailedCreate", fmt.Sprintf("Error creating deployer pod since another pod with the same name (%q) exists", deployer.Name))
				glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), deployer.Name)
			} else {
				// Update to pending or to the appropriate status relative to the existing validated deployer pod.
				updatedAnnotations[deployapi.DeploymentPodAnnotation] = deployer.Name
				nextStatus = nextStatusComp(nextStatus, deployapi.DeploymentStatusPending)
				glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", deployer.Name, deployutil.LabelForDeployment(deployment))
			}
			// Don't try and re-create the deployer pod.
			break
		}

		if _, ok := deployment.Annotations[deployapi.DeploymentIgnorePodAnnotation]; ok {
			return nil
		}

		// Generate a deployer pod spec.
		deployerPod, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		// Create the deployer pod.
		deploymentPod, err := c.pn.Pods(deployment.Namespace).Create(deployerPod)
		// Retry on error.
		if err != nil {
			return actionableError(fmt.Sprintf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		updatedAnnotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Created deployer pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))

	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		switch {
		case kerrors.IsNotFound(deployerErr):
			nextStatus = deployapi.DeploymentStatusFailed
			// If the deployment is cancelled here then we deleted the deployer in a previous
			// resync of the deployment.
			if !deployutil.IsDeploymentCancelled(deployment) {
				updatedAnnotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "Failed", fmt.Sprintf("Deployer pod %q has gone missing", deployerPodName))
				deployerErr = fmt.Errorf("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
				utilruntime.HandleError(deployerErr)
			}

		case deployerErr != nil:
			// We'll try again later on resync. Continue to process cancellations.
			deployerErr = fmt.Errorf("Error getting deployer pod %q for deployment %q: %v", deployerPodName, deployutil.LabelForDeployment(deployment), deployerErr)
			utilruntime.HandleError(deployerErr)

		default: /* deployerErr == nil */
			// If the deployment has been cancelled, delete any deployer pods
			// found and transition the deployment to Pending so that the
			// deployment config controller continues to see the deployment
			// as in-flight. Eventually the deletion of the deployer pod should
			// cause a requeue of this deployment and then it can be transitioned
			// to Failed by this controller.
			if deployutil.IsDeploymentCancelled(deployment) {
				if err := c.cleanupDeployerPods(deployment); err != nil {
					return err
				}
			}
		}

	case deployapi.DeploymentStatusFailed:
		// Try to cleanup once more a cancelled deployment in case hook pods
		// were created just after we issued the first cleanup request.
		if deployutil.IsDeploymentCancelled(deployment) {
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
		}

	case deployapi.DeploymentStatusComplete:
		if err := c.cleanupDeployerPods(deployment); err != nil {
			return err
		}
	}

	// Update only if we need to transition to a new phase.
	if deployutil.CanTransitionPhase(currentStatus, nextStatus) {
		deployment, err := deployutil.DeploymentDeepCopy(deployment)
		if err != nil {
			return err
		}

		updatedAnnotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		deployment.Annotations = updatedAnnotations

		// if we are going to transition to failed or complete and scale is non-zero, we'll check one more
		// time to see if we are a test deployment to guarantee that we maintain the test invariant.
		if deployment.Spec.Replicas != 0 && deployutil.IsTerminatedDeployment(deployment) {
			if config, err := deployutil.DecodeDeploymentConfig(deployment, c.codec); err == nil && config.Spec.Test {
				deployment.Spec.Replicas = 0
			}
		}

		if _, err := c.rn.ReplicationControllers(deployment.Namespace).Update(deployment); err != nil {
			return fmt.Errorf("couldn't update deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s (scale: %d)", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus, deployment.Spec.Replicas)
	}
	return nil
}