It("should scale a job up", func() { startParallelism := 1 endParallelism := 2 By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor("Job", f.Client) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := 2 endParallelism := 1 By("Creating a job") job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job)
// RunScale executes the scale operation on the resources selected by the
// command-line arguments and flags.
func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool, options *ScaleOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "resize" {
		printDeprecationWarning("scale", "resize")
	}

	count := cmdutil.GetFlagInt(cmd, "replicas")
	if count < 0 {
		return cmdutil.UsageError(cmd, "--replicas=COUNT is required, and COUNT must be greater than or equal to 0")
	}

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	// Resolve the target resources from the args and/or --filename flags.
	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		FilenameParam(enforceNamespace, options.Filenames...).
		ResourceTypeOrNameArgs(false, args...).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}
	infos, err := r.Infos()
	if err != nil {
		return err
	}

	info := infos[0]
	mapping := info.ResourceMapping()
	scaler, err := f.Scaler(mapping)
	if err != nil {
		return err
	}

	// Preconditions only make sense against a single resource.
	resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
	if len(resourceVersion) != 0 && len(infos) > 1 {
		return fmt.Errorf("cannot use --resource-version with multiple controllers")
	}
	currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
	if currentSize != -1 && len(infos) > 1 {
		return fmt.Errorf("cannot use --current-replicas with multiple controllers")
	}
	precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
	retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)

	var waitForReplicas *kubectl.RetryParams
	if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
		waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
	}

	// Scale each resolved resource, collecting failures into a single aggregate error.
	errs := []error{}
	for _, info := range infos {
		if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
			errs = append(errs, err)
			continue
		}
		cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
	}
	return utilerrors.NewAggregate(errs)
}
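// The sketch below is illustrative only and is not part of the sources above.
// It shows the kubectl.Scaler contract that both the e2e test and RunScale
// depend on, using only the calls already visible in this section
// (ScalerFor, NewRetryParams, ScalePrecondition, Scale). The function name
// scaleJobExample and the assumption that c is a pre-configured *client.Client
// (like f.Client in the test) are placeholders for this example.
func scaleJobExample(c *client.Client, namespace, name string, newSize uint) error {
	// Resolve a Scaler for the Job kind, as the e2e test does.
	scaler, err := kubectl.ScalerFor("Job", c)
	if err != nil {
		return err
	}
	// A nil precondition scales unconditionally; pass &kubectl.ScalePrecondition{...}
	// to require a specific current size or resource version, as RunScale does.
	retry := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)           // retry window for the scale update itself
	waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) // how long to wait for the replica count to converge
	return scaler.Scale(namespace, name, newSize, nil, retry, waitForReplicas)
}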