// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor("ReplicationController", restClient)
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := restClient.ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
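// Illustrative usage sketch, not part of the original source: how a caller
// might drive ScaleRC end to end. The apiserver address and the "frontend"
// controller name are assumptions for the example, and it relies on the
// surrounding package's imports (client, fmt).
func exampleScaleRC() {
	// Build a client against a locally running apiserver (assumed address).
	c, err := client.New(&client.Config{Host: "http://127.0.0.1:8080"})
	if err != nil {
		return
	}
	// Scale the hypothetical "frontend" rc in the "default" namespace to 3
	// replicas; ScaleRC blocks (via RetryParams) until the scale is observed.
	rc, err := ScaleRC("frontend", "default", 3, c)
	if err != nil {
		return
	}
	fmt.Printf("rc %s now has %d replicas\n", rc.Name, rc.Spec.Replicas)
}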
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":                          kubectl.BasicReplicationController{},
		"run-pod/v1":                      kubectl.BasicPod{},
		"service/v1":                      kubectl.ServiceGeneratorV1{},
		"service/v2":                      kubectl.ServiceGeneratorV2{},
		"horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify ports via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			if kind != "ReplicationController" && kind != "Service" && kind != "Pod" {
				return fmt.Errorf("invalid resource provided: %v, only a replication controller, service or pod is accepted", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind string) error {
			// TODO: support autoscale for deployments
			if kind != "ReplicationController" {
				return fmt.Errorf("invalid resource provided: %v, only a replication controller is accepted", kind)
			}
			return nil
		},
	}
}
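// Illustrative usage sketch, not part of the original source: how a command
// might obtain a Factory and exercise a few of its hooks. Only fields defined
// in NewFactory above are used; error handling is simplified for brevity.
func exampleFactoryUsage() error {
	// Passing nil binds the factory's flags to a fresh clientcmd.ClientConfig.
	f := NewFactory(nil)

	// Look up a generator registered in the factory's generator map.
	if gen, ok := f.Generator("run/v1"); ok {
		_ = gen // e.g. hand it to `kubectl run` to build an rc spec
	}

	// Guard rail used by `kubectl expose`: only rc, service, or pod qualify.
	if err := f.CanBeExposed("Pod"); err != nil {
		return err
	}

	// A client negotiated for the default API version.
	c, err := f.Client()
	if err != nil {
		return err
	}
	_ = c
	return nil
}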
}) It("should scale a job up", func() { startParallelism := 1 endParallelism := 2 By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor("Job", f.Client) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := 2 endParallelism := 1 By("Creating a job")