Example #1
// Complete verifies command line arguments and loads data from the command environment
func (p *AttachOptions) Complete(f *cmdutil.Factory, cmd *cobra.Command, argsIn []string) error {
	if len(argsIn) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for attach")
	}
	if len(argsIn) > 1 {
		return cmdutil.UsageError(cmd, fmt.Sprintf("expected a single argument: POD, saw %d: %s", len(argsIn), argsIn))
	}
	p.PodName = argsIn[0]

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	p.Namespace = namespace

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}
	p.Config = config

	client, err := f.Client()
	if err != nil {
		return err
	}
	p.Client = client

	return nil
}
func (o *AddSecretOptions) Complete(f *cmdutil.Factory, args []string, typeFlag string) error {
	if len(args) < 2 {
		return errors.New("must have service account name and at least one secret name")
	}
	o.TargetName = args[0]
	o.SecretNames = args[1:]

	loweredTypeFlag := strings.ToLower(typeFlag)
	switch loweredTypeFlag {
	case string(PullType):
		o.Type = PullType
	case string(MountType):
		o.Type = MountType
	default:
		return fmt.Errorf("unknown for: %v", typeFlag)
	}

	var err error
	o.ClientInterface, err = f.Client()
	if err != nil {
		return err
	}

	o.Namespace, err = f.DefaultNamespace()
	if err != nil {
		return err
	}

	o.Mapper, o.Typer = f.Object()
	o.ClientMapper = f.ClientMapperForCommand()

	return nil
}
func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}

	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Get().
		Prefix("proxy").
		Resource("nodes").
		Name(pod.Spec.Host).
		Suffix("portForward", namespace, podName)

	pf, err := portforward.New(req, config, args, stopCh)
	if err != nil {
		return err
	}

	return pf.ForwardPorts()
}
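The interrupt handling above (a buffered signal channel feeding a stop channel that the forwarder watches) can be exercised in isolation. A minimal standalone sketch using only the standard library, with a ticker loop standing in for the blocking pf.ForwardPorts call:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"time"
)

func main() {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	// stopCh is closed exactly once, when the first interrupt arrives.
	stopCh := make(chan struct{})
	go func() {
		<-signals
		close(stopCh)
	}()

	// Stand-in for the blocking ForwardPorts call: run until stopped.
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			fmt.Println("interrupted, shutting down")
			return
		case <-ticker.C:
			fmt.Println("forwarding...")
		}
	}
}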
Example #4
func Run(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if len(os.Args) > 1 && os.Args[1] == "run-container" {
		printDeprecationWarning("run", "run-container")
	}

	if len(args) != 1 {
		return cmdutil.UsageError(cmd, "NAME is required for run")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	generatorName := cmdutil.GetFlagString(cmd, "generator")
	generator, found := f.Generator(generatorName)
	if !found {
		return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not found.", generator))
	}
	names := generator.ParamNames()
	params := kubectl.MakeParams(cmd, names)
	params["name"] = args[0]

	err = kubectl.ValidateParams(names, params)
	if err != nil {
		return err
	}

	controller, err := generator.Generate(params)
	if err != nil {
		return err
	}

	inline := cmdutil.GetFlagString(cmd, "overrides")
	if len(inline) > 0 {
		controller, err = cmdutil.Merge(controller, inline, "ReplicationController")
		if err != nil {
			return err
		}
	}

	// TODO: extract this flag to a central location, when such a location exists.
	if !cmdutil.GetFlagBool(cmd, "dry-run") {
		controller, err = client.ReplicationControllers(namespace).Create(controller.(*api.ReplicationController))
		if err != nil {
			return err
		}
	}

	return f.PrintObject(cmd, controller, out)
}
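The --overrides flag above layers inline JSON on top of the generated controller via cmdutil.Merge. A rough standalone sketch of that flow, assuming a hypothetical mergeInline helper that does only a shallow, top-level merge (the real cmdutil.Merge performs a typed deep merge):

package main

import (
	"encoding/json"
	"fmt"
)

// mergeInline is a hypothetical helper: it unmarshals both documents into
// generic maps and lets top-level keys from the inline override win. This
// only illustrates the shape of the --overrides flow, not its semantics.
func mergeInline(base, inline []byte) (map[string]interface{}, error) {
	var dst, patch map[string]interface{}
	if err := json.Unmarshal(base, &dst); err != nil {
		return nil, err
	}
	if err := json.Unmarshal(inline, &patch); err != nil {
		return nil, err
	}
	for k, v := range patch {
		dst[k] = v
	}
	return dst, nil
}

func main() {
	base := []byte(`{"kind":"ReplicationController","spec":{"replicas":1}}`)
	inline := []byte(`{"spec":{"replicas":3}}`)
	merged, err := mergeInline(base, inline)
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(merged)
	fmt.Println(string(out)) // "spec" is replaced wholesale in this shallow sketch
}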
Example #5
func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for log")
	}

	if len(args) > 2 {
		return cmdutil.UsageError(cmd, "log POD [CONTAINER]")
	}

	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, err := f.Client()
	if err != nil {
		return err
	}

	podID := args[0]

	pod, err := client.Pods(namespace).Get(podID)
	if err != nil {
		return err
	}

	var container string
	if len(args) == 1 {
		if len(pod.Spec.Containers) != 1 {
			return fmt.Errorf("POD %s has more than one container; please specify the container to print logs for", pod.ObjectMeta.Name)
		}
		container = pod.Spec.Containers[0].Name
	} else {
		container = args[1]
	}

	follow := cmdutil.GetFlagBool(cmd, "follow")

	readCloser, err := client.RESTClient.Get().
		Prefix("proxy").
		Resource("nodes").
		Name(pod.Spec.Host).
		Suffix("containerLogs", namespace, podID, container).
		Param("follow", strconv.FormatBool(follow)).
		Stream()
	if err != nil {
		return err
	}

	defer readCloser.Close()
	_, err = io.Copy(out, readCloser)
	return err
}
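The tail of RunLog is a plain stream-and-copy: open the response body, defer its Close, and io.Copy it to the output writer. A self-contained sketch of the same shape against a toy HTTP server (the real call goes through client.RESTClient to the node's containerLogs endpoint):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
)

func main() {
	// A toy server standing in for the log endpoint.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "line 1")
		fmt.Fprintln(w, "line 2")
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	// Same shape as the end of RunLog: defer the close, stream the body out.
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		panic(err)
	}
}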
Example #6
func RunApiVersions(f *cmdutil.Factory, out io.Writer) error {
	if len(os.Args) > 1 && os.Args[1] == "apiversions" {
		printDeprecationWarning("api-versions", "apiversions")
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	kubectl.GetApiVersions(out, client)
	return nil
}
Example #7
func RunVersion(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command) error {
	if cmdutil.GetFlagBool(cmd, "client") {
		kubectl.GetClientVersion(out)
		return nil
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	kubectl.GetVersion(out, client)
	return nil
}
Example #8
func (o *CreateDockerConfigOptions) Complete(f *cmdutil.Factory, args []string) error {
	if len(args) != 1 {
		return errors.New("must have exactly one argument: secret name")
	}
	o.SecretName = args[0]

	client, err := f.Client()
	if err != nil {
		return err
	}
	o.SecretNamespace, err = f.DefaultNamespace()
	if err != nil {
		return err
	}

	o.SecretsInterface = client.Secrets(o.SecretNamespace)

	return nil
}
Example #9
func (o *AddSecretOptions) Complete(f *cmdutil.Factory, args []string, typeFlags []string) error {
	if len(args) < 2 {
		return errors.New("must have service account name and at least one secret name")
	}
	o.TargetName = args[0]
	o.SecretNames = args[1:]

	if len(typeFlags) == 0 {
		o.ForMount = true
	} else {
		for _, flag := range typeFlags {
			loweredValue := strings.ToLower(flag)
			switch loweredValue {
			case "pull":
				o.ForPull = true
			case "mount":
				o.ForMount = true
			default:
				return fmt.Errorf("unknown for: %v", flag)
			}
		}
	}

	var err error
	o.ClientInterface, err = f.Client()
	if err != nil {
		return err
	}

	o.Namespace, err = f.DefaultNamespace()
	if err != nil {
		return err
	}

	o.Mapper, o.Typer = f.Object()
	o.ClientMapper = f.ClientMapperForCommand()

	return nil
}
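The --for handling above is a simple normalize-and-dispatch loop. A standalone sketch, with a stripped-down options struct standing in for AddSecretOptions:

package main

import (
	"fmt"
	"strings"
)

// options mirrors just the two booleans the loop above sets; the real
// AddSecretOptions carries much more state.
type options struct {
	ForPull, ForMount bool
}

// applyForFlags lower-cases each --for value and flips the matching flag,
// defaulting to mount when nothing was passed, as in the Complete method above.
func applyForFlags(o *options, typeFlags []string) error {
	if len(typeFlags) == 0 {
		o.ForMount = true
		return nil
	}
	for _, flag := range typeFlags {
		switch strings.ToLower(flag) {
		case "pull":
			o.ForPull = true
		case "mount":
			o.ForMount = true
		default:
			return fmt.Errorf("unknown for: %v", flag)
		}
	}
	return nil
}

func main() {
	var o options
	if err := applyForFlags(&o, []string{"Pull", "mount"}); err != nil {
		panic(err)
	}
	fmt.Printf("pull=%v mount=%v\n", o.ForPull, o.ForMount)
}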
Example #10
// Complete verifies command line arguments and loads data from the command environment
func (p *ExecOptions) Complete(f *cmdutil.Factory, cmd *cobra.Command, argsIn []string) error {
	if len(p.PodName) == 0 && len(argsIn) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}
	if len(p.PodName) != 0 {
		printDeprecationWarning("exec POD", "-p POD")
		if len(argsIn) < 1 {
			return cmdutil.UsageError(cmd, "COMMAND is required for exec")
		}
		p.Command = argsIn
	} else {
		p.PodName = argsIn[0]
		p.Command = argsIn[1:]
		if len(p.Command) < 1 {
			return cmdutil.UsageError(cmd, "COMMAND is required for exec")
		}
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	p.Namespace = namespace

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}
	p.Config = config

	client, err := f.Client()
	if err != nil {
		return err
	}
	p.Client = client

	return nil
}
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}

	filename := cmdutil.GetFlagString(cmd, "filename")
	if len(filename) == 0 {
		return cmdutil.UsageError(cmd, "Must specify filename for new controller")
	}
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	if len(args) != 1 {
		return cmdutil.UsageError(cmd, "Must specify the controller to update")
	}
	oldName := args[0]

	cmdNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	// TODO: use resource.Builder instead
	obj, err := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
		NamespaceParam(cmdNamespace).RequireNamespace().
		FilenameParam(filename).
		Do().
		Object()
	if err != nil {
		return err
	}
	newRc, ok := obj.(*api.ReplicationController)
	if !ok {
		return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
	}
	newName := newRc.Name
	if oldName == newName {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, kubectl.NewRollingUpdaterClient(client))

	// fetch rc
	oldRc, err := client.ReplicationControllers(newRc.Namespace).Get(oldName)
	if err != nil {
		return err
	}

	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle resizes during rolling update
	if newRc.Spec.Replicas == 0 {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	err = updater.Update(&kubectl.RollingUpdaterConfig{
		Out:           out,
		OldRc:         oldRc,
		NewRc:         newRc,
		UpdatePeriod:  period,
		Interval:      interval,
		Timeout:       timeout,
		CleanupPolicy: kubectl.DeleteRollingUpdateCleanupPolicy,
	})
	if err != nil {
		return err
	}

	fmt.Fprintf(out, "%s\n", newName)
	return nil
}
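The hasLabel loop encodes the precondition for a rolling update: the new selector must override at least one key of the old selector with a different value. A small standalone sketch of that check:

package main

import "fmt"

// selectorsDiffer reports whether the new selector overrides at least one key
// of the old selector with a different value, which is what the hasLabel loop
// above requires before a rolling update may proceed.
func selectorsDiffer(oldSel, newSel map[string]string) bool {
	for key, oldValue := range oldSel {
		if newValue, ok := newSel[key]; ok && newValue != oldValue {
			return true
		}
	}
	return false
}

func main() {
	oldSel := map[string]string{"app": "web", "deployment": "v1"}
	newSel := map[string]string{"app": "web", "deployment": "v2"}
	fmt.Println(selectorsDiffer(oldSel, newSel)) // true: "deployment" differs
	fmt.Println(selectorsDiffer(oldSel, oldSel)) // false: nothing differs
}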
Example #12
func RunExec(f *cmdutil.Factory, cmd *cobra.Command, cmdIn io.Reader, cmdOut, cmdErr io.Writer, p *execParams, argsIn []string, re remoteExecutor) error {
	podName, containerName, args, err := extractPodAndContainer(cmd, argsIn, p)
	if err != nil {
		return err
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod %s is not running. Current status=%v", podName, pod.Status.Phase)
	}

	if len(containerName) == 0 {
		glog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name)
		containerName = pod.Spec.Containers[0].Name
	}

	var stdin io.Reader
	tty := p.tty
	if p.stdin {
		stdin = cmdIn
		if tty {
			if file, ok := cmdIn.(*os.File); ok {
				inFd := file.Fd()
				if term.IsTerminal(inFd) {
					oldState, err := term.SetRawTerminal(inFd)
					if err != nil {
						glog.Fatal(err)
					}
					// this handles a clean exit, where the command finished
					defer term.RestoreTerminal(inFd, oldState)

					// SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens
					// for SIGINT and restores the terminal before exiting)

					// this handles SIGTERM
					sigChan := make(chan os.Signal, 1)
					signal.Notify(sigChan, syscall.SIGTERM)
					go func() {
						<-sigChan
						term.RestoreTerminal(inFd, oldState)
						os.Exit(0)
					}()
				} else {
					glog.Warning("Stdin is not a terminal")
				}
			} else {
				tty = false
				glog.Warning("Unable to use a TTY")
			}
		}
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	req := client.RESTClient.Post().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)

	postErr := re.Execute(req, config, args, stdin, cmdOut, cmdErr, tty)

	// if we don't have an error, return.  If we did get an error, try a GET because v3.0.0 shipped with exec running as a GET.
	if postErr == nil {
		return nil
	}

	// only try the get if the error is either a forbidden or method not supported, otherwise trying with a GET probably won't help
	if !apierrors.IsForbidden(postErr) && !apierrors.IsMethodNotSupported(postErr) {
		return postErr
	}

	getReq := client.RESTClient.Get().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)
	getErr := re.Execute(getReq, config, args, stdin, cmdOut, cmdErr, tty)
	if getErr == nil {
		return nil
	}

	// if we got a getErr, return the postErr because it's more likely to be correct.  GET is legacy
	return postErr
}
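The POST-then-GET dance at the end exists because v3.0.0 served exec over GET; the fallback only fires on forbidden or method-not-supported errors. A generic sketch of the same fallback shape using net/http status codes against a toy server (not the real exec subresource):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// doWithGetFallback tries POST first and falls back to GET only when the
// server answers 403 or 405, mirroring the postErr/getErr handling above.
func doWithGetFallback(url string) (*http.Response, error) {
	resp, err := http.Post(url, "application/json", nil)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusForbidden && resp.StatusCode != http.StatusMethodNotAllowed {
		return resp, nil
	}
	resp.Body.Close()
	return http.Get(url)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		fmt.Fprintln(w, "ok via GET")
	}))
	defer srv.Close()

	resp, err := doWithGetFallback(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}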
Example #13
func RunExec(f *cmdutil.Factory, cmd *cobra.Command, cmdIn io.Reader, cmdOut, cmdErr io.Writer, p *execParams, args []string, re remoteExecutor) error {
	if len(p.podName) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}

	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "COMMAND is required for exec")
	}
	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(p.podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	containerName := p.containerName
	if len(containerName) == 0 {
		containerName = pod.Spec.Containers[0].Name
	}

	var stdin io.Reader
	tty := p.tty
	if p.stdin {
		stdin = cmdIn
		if tty {
			if file, ok := cmdIn.(*os.File); ok {
				inFd := file.Fd()
				if term.IsTerminal(inFd) {
					oldState, err := term.SetRawTerminal(inFd)
					if err != nil {
						glog.Fatal(err)
					}
					// this handles a clean exit, where the command finished
					defer term.RestoreTerminal(inFd, oldState)

					// SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens
					// for SIGINT and restores the terminal before exiting)

					// this handles SIGTERM
					sigChan := make(chan os.Signal, 1)
					signal.Notify(sigChan, syscall.SIGTERM)
					go func() {
						<-sigChan
						term.RestoreTerminal(inFd, oldState)
						os.Exit(0)
					}()
				} else {
					glog.Warning("Stdin is not a terminal")
				}
			} else {
				tty = false
				glog.Warning("Unable to use a TTY")
			}
		}
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	req := client.RESTClient.Get().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)

	return re.Execute(req, config, args, stdin, cmdOut, cmdErr, tty)
}
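The terminal-restore pattern above has two exit paths: the deferred RestoreTerminal on a clean return, and the SIGTERM goroutine that restores and exits. A standalone sketch of the same idea; the sync.Once is an addition here to make the run-at-most-once intent explicit, and the restore func merely stands in for term.RestoreTerminal:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

func main() {
	// restore stands in for term.RestoreTerminal; sync.Once guarantees it runs
	// at most once whether we exit normally or via SIGTERM.
	var once sync.Once
	restore := func() { once.Do(func() { fmt.Println("terminal state restored") }) }
	defer restore()

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM)
	go func() {
		<-sigChan
		restore()
		os.Exit(0)
	}()

	fmt.Println("running command...")
	// Normal return path: the deferred restore fires here.
}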
Example #14
func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string, fw portForwarder) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}
	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")

	postErr := fw.ForwardPorts(req, config, args, stopCh)

	// if we don't have an error, return.  If we did get an error, try a GET because v3.0.0 shipped with port-forward running as a GET.
	if postErr == nil {
		return nil
	}

	// only try the get if the error is either a forbidden or method not supported, otherwise trying with a GET probably won't help
	if !apierrors.IsForbidden(postErr) && !apierrors.IsMethodNotSupported(postErr) {
		return postErr
	}

	getReq := client.RESTClient.Get().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")
	getErr := fw.ForwardPorts(getReq, config, args, stopCh)
	if getErr == nil {
		return nil
	}

	// if we got a getErr, return the postErr because it's more likely to be correct.  GET is legacy
	return postErr
}
Example #15
// RunLog retrieves a pod log
func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, p *logParams) error {
	if len(os.Args) > 1 && os.Args[1] == "log" {
		printDeprecationWarning("logs", "log")
	}

	if len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for log")
	}

	if len(args) > 2 {
		return cmdutil.UsageError(cmd, "log POD [CONTAINER]")
	}

	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, err := f.Client()
	if err != nil {
		return err
	}

	podID := args[0]

	pod, err := client.Pods(namespace).Get(podID)
	if err != nil {
		return err
	}

	var container string
	if cmdutil.GetFlagString(cmd, "container") != "" {
		// [-c CONTAINER]
		container = p.containerName
	} else {
		// [CONTAINER] (container as arg not flag) is supported as legacy behavior. See PR #10519 for more details.
		if len(args) == 1 {
			if len(pod.Spec.Containers) != 1 {
				return fmt.Errorf("POD %s has more than one container; please specify the container to print logs for", pod.ObjectMeta.Name)
			}
			container = pod.Spec.Containers[0].Name
		} else {
			container = args[1]
		}
	}

	follow := cmdutil.GetFlagBool(cmd, "follow")
	previous := cmdutil.GetFlagBool(cmd, "previous")

	readCloser, err := client.RESTClient.Get().
		Namespace(namespace).
		Name(podID).
		Resource("pods").
		SubResource("log").
		Param("follow", strconv.FormatBool(follow)).
		Param("container", container).
		Param("previous", strconv.FormatBool(previous)).
		Stream()
	if err != nil {
		return err
	}

	defer readCloser.Close()
	_, err = io.Copy(out, readCloser)
	return err
}
Example #16
func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	var name, resource string
	switch l := len(args); {
	case l == 2:
		resource, name = args[0], args[1]
	default:
		return cmdutil.UsageError(cmd, "the type and name of a resource to expose are required arguments")
	}

	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, err := f.Client()
	if err != nil {
		return err
	}

	generatorName := cmdutil.GetFlagString(cmd, "generator")

	generator, found := kubectl.Generators[generatorName]
	if !found {
		return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generator))
	}
	names := generator.ParamNames()
	params := kubectl.MakeParams(cmd, names)
	if len(cmdutil.GetFlagString(cmd, "service-name")) == 0 {
		params["name"] = name
	} else {
		params["name"] = cmdutil.GetFlagString(cmd, "service-name")
	}
	if s, found := params["selector"]; !found || len(s) == 0 || cmdutil.GetFlagInt(cmd, "port") < 1 {
		mapper, _ := f.Object()
		v, k, err := mapper.VersionAndKindForResource(resource)
		if err != nil {
			return err
		}
		mapping, err := mapper.RESTMapping(k, v)
		if err != nil {
			return err
		}
		if len(s) == 0 {
			s, err := f.PodSelectorForResource(mapping, namespace, name)
			if err != nil {
				return err
			}
			params["selector"] = s
		}
		if cmdutil.GetFlagInt(cmd, "port") < 0 {
			ports, err := f.PortsForResource(mapping, namespace, name)
			if err != nil {
				return err
			}
			if len(ports) == 0 {
				return cmdutil.UsageError(cmd, "couldn't find a suitable port via --port flag or introspection")
			}
			if len(ports) > 1 {
				return cmdutil.UsageError(cmd, "more than one port to choose from, please explicitly specify a port using the --port flag.")
			}
			params["port"] = ports[0]
		}
	}
	if cmdutil.GetFlagBool(cmd, "create-external-load-balancer") {
		params["create-external-load-balancer"] = "true"
	}

	err = kubectl.ValidateParams(names, params)
	if err != nil {
		return err
	}

	service, err := generator.Generate(params)
	if err != nil {
		return err
	}

	inline := cmdutil.GetFlagString(cmd, "overrides")
	if len(inline) > 0 {
		service, err = cmdutil.Merge(service, inline, "Service")
		if err != nil {
			return err
		}
	}

	// TODO: extract this flag to a central location, when such a location exists.
	if !cmdutil.GetFlagBool(cmd, "dry-run") {
		service, err = client.Services(namespace).Create(service.(*api.Service))
		if err != nil {
			return err
		}
	}

	return f.PrintObject(cmd, service, out)
}
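When --port is not given, RunExpose introspects the resource's ports and insists on exactly one candidate. A small standalone sketch of that selection rule, with ports as plain strings for brevity:

package main

import (
	"errors"
	"fmt"
)

// pickPort encodes the rule RunExpose applies when --port is absent: the
// introspected resource must expose exactly one port, otherwise the caller
// is asked to be explicit.
func pickPort(ports []string) (string, error) {
	switch len(ports) {
	case 0:
		return "", errors.New("couldn't find a suitable port via --port flag or introspection")
	case 1:
		return ports[0], nil
	default:
		return "", errors.New("more than one port to choose from, please explicitly specify a port using the --port flag")
	}
}

func main() {
	port, err := pickPort([]string{"8080"})
	fmt.Println(port, err) // 8080 <nil>
	_, err = pickPort([]string{"80", "443"})
	fmt.Println(err) // asks for an explicit --port
}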
Example #17
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	deploymentKey, filename, image, oldName, err := validateArguments(cmd, args)
	if err != nil {
		return err
	}
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	updaterClient := kubectl.NewRollingUpdaterClient(client)

	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(updaterClient, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(kubectl.NewRollingUpdaterClient(client), newRc, oldName)
	}

	var keepOldName bool
	var replicasDefaulted bool

	mapper, typer := f.Object()

	if len(filename) != 0 {
		schema, err := f.Validator()
		if err != nil {
			return err
		}

		request := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if _, kind, err := typer.ObjectVersionAndKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %s not a ReplicationController", filename, kind)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
	// different image.
	if len(image) != 0 {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, cmdNamespace, oldName, newName, image, deploymentKey)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller
		// and adding the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, client.Codec)
		if err != nil {
			return err
		}
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}
	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, updaterClient)

	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil {
			return err
		}
		newRcData := &bytes.Buffer{}
		if err := f.PrintObject(cmd, newRc, newRcData); err != nil {
			return err
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:           out,
		OldRc:         oldRc,
		NewRc:         newRc,
		UpdatePeriod:  period,
		Interval:      interval,
		Timeout:       timeout,
		CleanupPolicy: updateCleanupPolicy,
	}
	if cmdutil.GetFlagBool(cmd, "rollback") {
		kubectl.AbortRollingUpdate(config)
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}

	if keepOldName {
		fmt.Fprintf(out, "%s\n", oldName)
	} else {
		fmt.Fprintf(out, "%s\n", newRc.Name)
	}
	return nil
}