func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc))
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
func (k *Krud) update(h *Webhook) error {
	h.UpdateAttempt = true
	h.UpdateStart = time.Now()
	defer func() {
		h.UpdateEnd = time.Now()
	}()
	conf := &client.Config{
		Host: k.Endpoint,
	}
	client, err := client.New(conf)
	if err != nil {
		return err
	}
	rcs := client.ReplicationControllers(k.Namespace)
	oldRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	newRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	hash, err := api.HashObject(oldRc, client.Codec)
	if err != nil {
		return err
	}
	h.UpdateID = hash
	newRc.Name = fmt.Sprintf("%s-%s", k.ControllerName, hash)
	newRc.ResourceVersion = ""
	apply := func(key, value string, ms ...map[string]string) {
		for _, m := range ms {
			m[key] = value
		}
	}
	apply(k.DeploymentKey, hash, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	apply("run", k.ControllerName, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	ruconf := kubectl.RollingUpdaterConfig{
		Out: &lockBuffer{
			k: k,
			h: h,
		},
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   time.Second * 3, // todo: change to time.Minute
		Timeout:        time.Minute * 5,
		Interval:       time.Second * 3,
		UpdateAcceptor: kubectl.DefaultUpdateAcceptor,
		CleanupPolicy:  kubectl.RenameRollingUpdateCleanupPolicy,
	}
	ruc := kubectl.NewRollingUpdaterClient(client)
	println("doing rolling update")
	err = kubectl.NewRollingUpdater(k.Namespace, ruc).Update(&ruconf)
	println("done")
	k.Lock()
	h.UpdateSuccess = err == nil
	k.Unlock()
	return err
}
func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
	containerIndex := 0
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName)
	if err != nil {
		return nil, err
	}

	if len(cfg.Container) != 0 {
		containerFound := false
		for i, c := range newRc.Spec.Template.Spec.Containers {
			if c.Name == cfg.Container {
				containerIndex = i
				containerFound = true
				break
			}
		}
		if !containerFound {
			return nil, fmt.Errorf("container %s not found in pod", cfg.Container)
		}
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 {
		return nil, goerrors.New("Must specify container to update when updating a multi-container pod")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc))
	}
	newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image
	if len(cfg.PullPolicy) != 0 {
		newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy
	}

	newHash, err := api.HashObject(newRc, codec)
	if err != nil {
		return nil, err
	}

	if len(cfg.NewName) == 0 {
		cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = cfg.NewName

	newRc.Spec.Selector[cfg.DeploymentKey] = newHash
	newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
func TestRollingUpdater_multipleContainersInPod(t *testing.T) {
	tests := []struct {
		oldRc         *api.ReplicationController
		newRc         *api.ReplicationController
		container     string
		image         string
		deploymentKey string
	}{
		{
			oldRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{Name: "foo"},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{"dk": "old"},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{"dk": "old"},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{Name: "container1", Image: "image1"},
								{Name: "container2", Image: "image2"},
							},
						},
					},
				},
			},
			newRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{Name: "foo"},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{"dk": "old"},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{"dk": "old"},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{Name: "container1", Image: "newimage"},
								{Name: "container2", Image: "image2"},
							},
						},
					},
				},
			},
			container:     "container1",
			image:         "newimage",
			deploymentKey: "dk",
		},
		{
			oldRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{Name: "bar"},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{"dk": "old"},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{"dk": "old"},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{Name: "container1", Image: "image1"},
							},
						},
					},
				},
			},
			newRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{Name: "bar"},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{"dk": "old"},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{"dk": "old"},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{Name: "container1", Image: "newimage"},
							},
						},
					},
				},
			},
			container:     "container1",
			image:         "newimage",
			deploymentKey: "dk",
		},
	}

	for _, test := range tests {
		fake := &testclient.Fake{}
		fake.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
			switch action.(type) {
			case testclient.GetAction:
				return true, test.oldRc, nil
			}
			return false, nil, nil
		})
		codec := testapi.Default.Codec()

		deploymentHash, err := api.HashObject(test.newRc, codec)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		test.newRc.Spec.Selector[test.deploymentKey] = deploymentHash
		test.newRc.Spec.Template.Labels[test.deploymentKey] = deploymentHash
		test.newRc.Name = fmt.Sprintf("%s-%s", test.newRc.Name, deploymentHash)

		config := &NewControllerConfig{
			OldName:       test.oldRc.ObjectMeta.Name,
			NewName:       test.newRc.ObjectMeta.Name,
			Image:         test.image,
			Container:     test.container,
			DeploymentKey: test.deploymentKey,
		}
		updatedRc, err := CreateNewControllerFromCurrentController(fake, codec, config)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(updatedRc, test.newRc) {
			t.Errorf("expected:\n%#v\ngot:\n%#v\n", test.newRc, updatedRc)
		}
	}
}
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if len(os.Args) > 1 && os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	deploymentKey, filename, image, oldName, err := validateArguments(cmd, args)
	if err != nil {
		return err
	}
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}
	updaterClient := kubectl.NewRollingUpdaterClient(client)

	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(updaterClient, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(kubectl.NewRollingUpdaterClient(client), newRc, oldName)
	}

	var keepOldName bool
	var replicasDefaulted bool

	mapper, typer := f.Object()

	if len(filename) != 0 {
		schema, err := f.Validator()
		if err != nil {
			return err
		}
		request := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if _, kind, err := typer.ObjectVersionAndKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %s not a ReplicationController", filename, kind)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
	// different image.
	if len(image) != 0 {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, cmdNamespace, oldName, newName, image, deploymentKey)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller
		// and adding the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, client.Codec)
		if err != nil {
			return err
		}
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}
	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s", filename, oldName)
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, updaterClient)

	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s", filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil {
			return err
		}
		newRcData := &bytes.Buffer{}
		if err := f.PrintObject(cmd, newRc, newRcData); err != nil {
			return err
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		UpdateAcceptor: kubectl.DefaultUpdateAcceptor,
	}
	if cmdutil.GetFlagBool(cmd, "rollback") {
		kubectl.AbortRollingUpdate(config)
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}

	if keepOldName {
		fmt.Fprintf(out, "%s\n", oldName)
	} else {
		fmt.Fprintf(out, "%s\n", newRc.Name)
	}
	return nil
}
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *RollingUpdateOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	err := validateArguments(cmd, options.Filenames, args)
	if err != nil {
		return err
	}
	deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key")
	filename := ""
	image := cmdutil.GetFlagString(cmd, "image")
	pullPolicy := cmdutil.GetFlagString(cmd, "image-pull-policy")
	oldName := args[0]
	rollback := cmdutil.GetFlagBool(cmd, "rollback")
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")
	outputFormat := cmdutil.GetFlagString(cmd, "output")
	container := cmdutil.GetFlagString(cmd, "container")

	if len(options.Filenames) > 0 {
		filename = options.Filenames[0]
	}

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(client, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(client, newRc, oldName)
	}

	var keepOldName bool
	var replicasDefaulted bool

	mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd))

	if len(filename) != 0 {
		schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir"))
		if err != nil {
			return err
		}
		request := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, false, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if gvk, err := typer.ObjectKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %v not a ReplicationController", filename, gvk)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, with a suffix to provide uniqueness for
	// same-image updates.
	if len(image) != 0 {
		codec := api.Codecs.LegacyCodec(client.APIVersion())
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			if inProgressImage := newRc.Spec.Template.Spec.Containers[0].Image; inProgressImage != image {
				return cmdutil.UsageError(cmd, "Found existing in-progress update to image (%s).\nEither continue in-progress update with --image=%s or rollback with --rollback", inProgressImage, inProgressImage)
			}
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			config := &kubectl.NewControllerConfig{
				Namespace:     cmdNamespace,
				OldName:       oldName,
				NewName:       newName,
				Image:         image,
				Container:     container,
				DeploymentKey: deploymentKey,
			}
			if oldRc.Spec.Template.Spec.Containers[0].Image == image {
				if len(pullPolicy) == 0 {
					return cmdutil.UsageError(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image")
				}
				config.PullPolicy = api.PullPolicy(pullPolicy)
			}
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, config)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller
		// and adding the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, codec)
		if err != nil {
			return err
		}
		// If new image is same as old, the hash may not be distinct, so add a suffix.
		oldHash += "-orig"
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}

	if rollback {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc == nil {
			return cmdutil.UsageError(cmd, "Could not find %s to rollback.\n", newName)
		}
	}

	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s", filename, oldName)
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, client)

	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s", filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		newRcData := &bytes.Buffer{}
		if outputFormat == "" {
			oldRcData.WriteString(oldRc.Name)
			newRcData.WriteString(newRc.Name)
		} else {
			if err := f.PrintObject(cmd, mapper, oldRc, oldRcData); err != nil {
				return err
			}
			if err := f.PrintObject(cmd, mapper, newRc, newRcData); err != nil {
				return err
			}
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		MaxUnavailable: intstr.FromInt(0),
		MaxSurge:       intstr.FromInt(1),
	}
	if rollback {
		err = kubectl.AbortRollingUpdate(config)
		if err != nil {
			return err
		}
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}

	message := "rolling updated"
	if keepOldName {
		newRc.Name = oldName
	} else {
		message = fmt.Sprintf("rolling updated to %q", newRc.Name)
	}
	newRc, err = client.ReplicationControllers(cmdNamespace).Get(newRc.Name)
	if err != nil {
		return err
	}
	if outputFormat != "" {
		return f.PrintObject(cmd, mapper, newRc, out)
	}
	kind, err := api.Scheme.ObjectKind(newRc)
	if err != nil {
		return err
	}
	_, res := meta.KindToResource(kind)
	cmdutil.PrintSuccess(mapper, false, out, res.Resource, oldName, message)
	return nil
}