// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &Deployer{
		out:    out,
		errOut: errOut,
		until:  until,
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Spec.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
			}
		},
	}
}
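A minimal sketch of how this constructor might be wired up by a deployer process, assuming the kube and OpenShift clients are built elsewhere; the Deploy method and the namespace/rcName parameters are assumptions for illustration, not shown in the snippet above.

// runDeployment is a hypothetical helper showing one way NewDeployer could be used.
// The Deploy method and its (namespace, rcName) signature are assumed here.
func runDeployment(kubeClient kclient.Interface, openshiftClient client.Interface, namespace, rcName, until string) error {
	deployer := NewDeployer(kubeClient, openshiftClient, os.Stdout, os.Stderr, until)
	// Drive the deployment of the named replication controller to completion.
	return deployer.Deploy(namespace, rcName)
}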
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, tagClient client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		out:         out,
		errOut:      errOut,
		events:      events,
		until:       until,
		rcClient:    client,
		eventClient: client,
		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client, timeout, AcceptorInterval, minReadySeconds)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client, tagClient, client, os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(oldClient kclient.Interface, tagClient client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), oldClient)
	// TODO internalclientset: get rid of oldClient after next rebase
	client := adapter.FromUnversionedClient(oldClient.(*kclient.Client))
	return &RecreateDeploymentStrategy{
		out:         out,
		errOut:      errOut,
		events:      events,
		until:       until,
		rcClient:    client.Core(),
		eventClient: client.Core(),
		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
			return stratsupport.NewAcceptAvailablePods(out, client.Core(), timeout, acceptorInterval, minReadySeconds)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client.Core(), tagClient, client.Core(), os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
func (f *ring1Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
	mappingVersion := mapping.GroupVersionKind.GroupVersion()
	clientset, err := f.clientAccessFactory.ClientSetForVersion(&mappingVersion)
	if err != nil {
		return nil, err
	}
	return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset)
}
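A minimal sketch of driving a Scaler obtained from this factory. The retry and timeout values are illustrative assumptions; the Scale call shape mirrors the ScaleRC and e2e snippets elsewhere in this section.

// scaleWithRetries is a hypothetical helper showing how a Scaler returned by the
// factory might be invoked; the interval/timeout values are placeholders.
func scaleWithRetries(f *ring1Factory, mapping *meta.RESTMapping, namespace, name string, replicas uint) error {
	scaler, err := f.Scaler(mapping)
	if err != nil {
		return err
	}
	retry := kubectl.NewRetryParams(1*time.Second, 30*time.Second)
	waitForReplicas := kubectl.NewRetryParams(1*time.Second, 5*time.Minute)
	// Scale to the desired replica count and wait until the new size is observed.
	return scaler.Scale(namespace, name, replicas, nil, retry, waitForReplicas)
}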
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler:       scaler,
		codec:        codec,
		hookExecutor: stratsupport.NewHookExecutor(client, os.Stdout, codec),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, tagClient client.ImageStreamTagsNamespacer, decoder runtime.Decoder) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client, tagClient, os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(restClient))
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := restClient.ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
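A brief usage sketch of the clientset-based ScaleRC above, assuming a clientset is already available to the caller; the "frontend" and "e2e-tests" values are placeholders for illustration.

// scaleFrontendRC is a hypothetical caller of ScaleRC; the rc name and namespace
// below are illustrative only.
func scaleFrontendRC(clientset clientset.Interface) error {
	rc, err := ScaleRC("frontend", "e2e-tests", 3, clientset)
	if err != nil {
		return err
	}
	fmt.Printf("scaled rc %s/%s to %d replicas\n", rc.Namespace, rc.Name, rc.Spec.Replicas)
	return nil
}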
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor("ReplicationController", client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface) *Deployer {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &Deployer{
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Template.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, latest.Codec), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, latest.Codec)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, latest.Codec, recreate), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Template.Strategy.Type)
			}
		},
	}
}
}) It("should scale a job up", func() { startParallelism := int32(1) endParallelism := int32(2) By("Creating a job") job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := int32(2) endParallelism := int32(1) By("Creating a job")
}) It("should scale a job up", func() { startParallelism := 1 endParallelism := 2 By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor("Job", f.Client) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := 2 endParallelism := 1 By("Creating a job")
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, Object: func() (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} // eventually this should allow me choose a group priority based on the order of the discovery doc, for now hardcode a given order priorityRESTMapper := meta.PriorityRESTMapper{ Delegate: outputRESTMapper, ResourcePriority: []unversioned.GroupVersionResource{ {Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, {Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, {Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, }, KindPriority: []unversioned.GroupVersionKind{ {Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, {Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, {Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, }, } return priorityRESTMapper, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion(nil) }, ClientConfig: func() (*restclient.Config, error) { return clients.ClientConfigForVersion(nil) }, ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } switch mapping.GroupVersionKind.Group { case api.GroupName: return client.RESTClient, nil case autoscaling.GroupName: return client.AutoscalingClient.RESTClient, nil case batch.GroupName: return client.BatchClient.RESTClient, nil case extensions.GroupName: return client.ExtensionsClient.RESTClient, nil } return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) }, Decoder: func(toInternal bool) runtime.Decoder { if toInternal { return api.Codecs.UniversalDecoder() } return api.Codecs.UniversalDeserializer() }, JSONEncoder: func() runtime.Encoder { return api.Codecs.LegacyCodec(registered.EnabledVersions()...) 
}, Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil }, PodSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil case *extensions.Deployment: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return "", fmt.Errorf("invalid label selector: %v", err) } return selector.String(), nil case *extensions.ReplicaSet: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return "", fmt.Errorf("failed to convert label selector to selector: %v", err) } return selector.String(), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %v", gvk) } }, MapBasedSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil case *extensions.Deployment: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format") } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil case *extensions.ReplicaSet: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. 
if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format") } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %v", gvk) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil case *extensions.Deployment: return getPorts(t.Spec.Template.Spec), nil case *extensions.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %v", gvk) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { c, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.Pod: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil case *api.ReplicationController: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector := labels.SelectorFromSet(t.Spec.Selector) pod, numPods, err := GetFirstPod(c, t.Namespace, selector) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil case *extensions.ReplicaSet: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } pod, numPods, err := GetFirstPod(c, t.Namespace, selector) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot get the logs from %v", gvk) } }, PauseObject: func(object runtime.Object) (bool, error) { c, err := clients.ClientForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if t.Spec.Paused { return true, nil } t.Spec.Paused = true _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return false, err } return false, fmt.Errorf("cannot pause %v", gvk) } }, ResumeObject: func(object runtime.Object) (bool, error) { c, err := clients.ClientForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if !t.Spec.Paused { return true, nil } t.Spec.Paused = false _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return false, err } 
return false, fmt.Errorf("cannot resume %v", gvk) } }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client) }, HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) clientset := clientset.FromUnversionedClient(client) if err != nil { return nil, err } return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := client.ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } return &clientSwaggerSchema{ c: client, cacheDir: dir, mapper: api.RESTMapper, }, nil } return validation.NullSchema{}, nil }, SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { version := gvk.GroupVersion() client, err := clients.ClientForVersion(&version) if err != nil { return nil, err } return client.Discovery().SwaggerSchema(version) }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generators: func(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) }, CanBeExposed: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) } return nil }, CanBeAutoscaled: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) } return nil }, AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.ReplicationController: selector := labels.SelectorFromSet(t.Spec.Selector) pod, _, err := GetFirstPod(client, t.Namespace, selector) return pod, err case *extensions.Deployment: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } pod, _, err := GetFirstPod(client, t.Namespace, selector) return pod, err case *extensions.Job: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } pod, _, err := GetFirstPod(client, 
t.Namespace, selector) return pod, err case *api.Pod: return t, nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk) } }, EditorEnvs: func() []string { return []string{"KUBE_EDITOR", "EDITOR"} }, } }
}) It("should scale a job up", func() { startParallelism := int32(1) endParallelism := int32(2) By("Creating a job") job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createV1Job(f.Client, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := int32(2) endParallelism := int32(1) By("Creating a job")
}) It("should scale a job up", func() { startParallelism := 1 endParallelism := 2 By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor(extensions.Kind("Job"), f.Client) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := 2 endParallelism := 1 By("Creating a job")
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, // If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that // have been dynamically added to the apiserver Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() checkErrWithPrefix("failed to get client config: ", err) cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } if discoverDynamicAPIs { client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"}) checkErrWithPrefix("failed to find client for version v1: ", err) var versions []unversioned.GroupVersion var gvks []unversioned.GroupVersionKind retries := 3 for i := 0; i < retries; i++ { versions, gvks, err = GetThirdPartyGroupVersions(client.Discovery()) // Retry if we got a NotFound error, because user may delete // a thirdparty group when the GetThirdPartyGroupVersions is // running. if err == nil || !apierrors.IsNotFound(err) { break } } checkErrWithPrefix("failed to get third-party group versions: ", err) if len(versions) > 0 { priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper) if !ok { CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper)) return nil, nil } multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper) if !ok { CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper)) return nil, nil } groupsMap := map[string][]unversioned.GroupVersion{} for _, version := range versions { groupsMap[version.Group] = append(groupsMap[version.Group], version) } for group, versionList := range groupsMap { preferredExternalVersion := versionList[0] thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group)) checkErrWithPrefix("failed to create third party resource mapper: ", err) accessor := meta.NewAccessor() groupMeta := apimachinery.GroupMeta{ GroupVersion: preferredExternalVersion, GroupVersions: versionList, RESTMapper: thirdPartyMapper, SelfLinker: runtime.SelfLinker(accessor), InterfacesFor: makeInterfacesFor(versionList), } checkErrWithPrefix("failed to register group: ", registered.RegisterGroup(groupMeta)) registered.AddThirdPartyAPIGroupVersions(versionList...) multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...) 
} priorityMapper.Delegate = multiMapper // Reassign to the RESTMapper here because priorityMapper is actually a copy, so if we // don't reassign, the above assignement won't actually update mapper.RESTMapper mapper.RESTMapper = priorityMapper } } outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} priorityRESTMapper := meta.PriorityRESTMapper{ Delegate: outputRESTMapper, } // TODO: this should come from registered versions groups := []string{api.GroupName, autoscaling.GroupName, extensions.GroupName, federation.GroupName, batch.GroupName} // set a preferred version for _, group := range groups { gvs := registered.EnabledVersionsForGroup(group) if len(gvs) == 0 { continue } priorityRESTMapper.ResourcePriority = append(priorityRESTMapper.ResourcePriority, unversioned.GroupVersionResource{Group: group, Version: gvs[0].Version, Resource: meta.AnyResource}) priorityRESTMapper.KindPriority = append(priorityRESTMapper.KindPriority, unversioned.GroupVersionKind{Group: group, Version: gvs[0].Version, Kind: meta.AnyKind}) } for _, group := range groups { priorityRESTMapper.ResourcePriority = append(priorityRESTMapper.ResourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) priorityRESTMapper.KindPriority = append(priorityRESTMapper.KindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) } return priorityRESTMapper, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion(nil) }, ClientConfig: func() (*restclient.Config, error) { return clients.ClientConfigForVersion(nil) }, ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { cfg, err := clientConfig.ClientConfig() if err != nil { return nil, err } if err := client.SetKubernetesDefaults(cfg); err != nil { return nil, err } gvk := mapping.GroupVersionKind switch gvk.Group { case federation.GroupName: mappingVersion := mapping.GroupVersionKind.GroupVersion() return clients.FederationClientForVersion(&mappingVersion) case api.GroupName: cfg.APIPath = "/api" default: cfg.APIPath = "/apis" } gv := gvk.GroupVersion() cfg.GroupVersion = &gv if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv) } return restclient.RESTClientFor(cfg) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() if mapping.GroupVersionKind.Group == federation.GroupName { fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion) if err != nil { return nil, err } if mapping.GroupVersionKind.Kind == "Cluster" { return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil } } client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) }, Decoder: func(toInternal bool) runtime.Decoder { var decoder runtime.Decoder if toInternal { decoder = api.Codecs.UniversalDecoder() } else { decoder = api.Codecs.UniversalDeserializer() } return thirdpartyresourcedata.NewDecoder(decoder, "") }, JSONEncoder: func() runtime.Encoder { return api.Codecs.LegacyCodec(registered.EnabledVersions()...) 
}, Printer: func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(options), nil }, MapBasedSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil case *extensions.Deployment: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil case *extensions.ReplicaSet: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil case *extensions.Deployment: return getPorts(t.Spec.Template.Spec), nil case *extensions.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) } }, ProtocolsForObject: func(object runtime.Object) (map[string]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getProtocols(t.Spec.Template.Spec), nil case *api.Pod: return getProtocols(t.Spec), nil case *api.Service: return getServiceProtocols(t.Spec), nil case *extensions.Deployment: return getProtocols(t.Spec.Template.Spec), nil case *extensions.ReplicaSet: return getProtocols(t.Spec.Template.Spec), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { c, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.Pod: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, 
errors.New("provided options object is not a PodLogOptions") } return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil case *api.ReplicationController: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector := labels.SelectorFromSet(t.Spec.Selector) sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil case *extensions.ReplicaSet: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) } }, PauseObject: func(object runtime.Object) (bool, error) { c, err := clients.ClientForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if t.Spec.Paused { return true, nil } t.Spec.Paused = true _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return false, err } return false, fmt.Errorf("cannot pause %v", gvks[0]) } }, ResumeObject: func(object runtime.Object) (bool, error) { c, err := clients.ClientForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if !t.Spec.Paused { return true, nil } t.Spec.Paused = false _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return false, err } return false, fmt.Errorf("cannot resume %v", gvks[0]) } }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client) }, HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) clientset := clientset.FromUnversionedClient(client) if err != nil { return nil, err } return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := 
clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client) }, StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), client) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := client.ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } fedClient, err := clients.FederationClientForVersion(nil) if err != nil { return nil, err } return &clientSwaggerSchema{ c: client, fedc: fedClient, cacheDir: dir, mapper: api.RESTMapper, }, nil } return validation.NullSchema{}, nil }, SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { version := gvk.GroupVersion() client, err := clients.ClientForVersion(&version) if err != nil { return nil, err } return client.Discovery().SwaggerSchema(version) }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generators: func(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) }, CanBeExposed: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) } return nil }, CanBeAutoscaled: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) } return nil }, AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.ReplicationController: selector := labels.SelectorFromSet(t.Spec.Selector) sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *extensions.Deployment: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *batch.Job: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *api.Pod: return t, nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0]) } }, // UpdatePodSpecForObject update the pod specification for the provided object UpdatePodSpecForObject: func(obj runtime.Object, fn 
func(*api.PodSpec) error) (bool, error) { // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) switch t := obj.(type) { case *api.Pod: return true, fn(&t.Spec) case *api.ReplicationController: if t.Spec.Template == nil { t.Spec.Template = &api.PodTemplateSpec{} } return true, fn(&t.Spec.Template.Spec) case *extensions.Deployment: return true, fn(&t.Spec.Template.Spec) case *extensions.DaemonSet: return true, fn(&t.Spec.Template.Spec) case *extensions.ReplicaSet: return true, fn(&t.Spec.Template.Spec) case *apps.PetSet: return true, fn(&t.Spec.Template.Spec) case *batch.Job: return true, fn(&t.Spec.Template.Spec) default: return false, fmt.Errorf("the object is not a pod or does not have a pod template") } }, EditorEnvs: func() []string { return []string{"KUBE_EDITOR", "EDITOR"} }, PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) { switch obj := obj.(type) { case *api.Service: if obj.Spec.Type == api.ServiceTypeNodePort { msg := fmt.Sprintf( `You have exposed your service on an external port on all nodes in your cluster. If you want to expose this service to the external internet, you may need to set up firewall rules for the service port(s) (%s) to serve traffic. See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. `, makePortsString(obj.Spec.Ports, true)) out.Write([]byte(msg)) } if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok { msg := fmt.Sprintf( `You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges]. It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future. Please use the loadBalancerSourceRanges field instead. See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. `) out.Write([]byte(msg)) } } }, } }
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, // If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that // have been dynamically added to the apiserver Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } if discoverDynamicAPIs { client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"}) CheckErr(err) versions, gvks, err := GetThirdPartyGroupVersions(client.Discovery()) CheckErr(err) if len(versions) > 0 { priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper) if !ok { CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper)) return nil, nil } multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper) if !ok { CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper)) return nil, nil } groupsMap := map[string][]unversioned.GroupVersion{} for _, version := range versions { groupsMap[version.Group] = append(groupsMap[version.Group], version) } for group, versionList := range groupsMap { preferredExternalVersion := versionList[0] thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group)) CheckErr(err) accessor := meta.NewAccessor() groupMeta := apimachinery.GroupMeta{ GroupVersion: preferredExternalVersion, GroupVersions: versionList, RESTMapper: thirdPartyMapper, SelfLinker: runtime.SelfLinker(accessor), InterfacesFor: makeInterfacesFor(versionList), } CheckErr(registered.RegisterGroup(groupMeta)) registered.AddThirdPartyAPIGroupVersions(versionList...) multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...) 
} priorityMapper.Delegate = multiMapper // Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we // don't re-assign, the above assignement won't actually update mapper.RESTMapper mapper.RESTMapper = priorityMapper } } outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} priorityRESTMapper := meta.PriorityRESTMapper{ Delegate: outputRESTMapper, ResourcePriority: []unversioned.GroupVersionResource{ {Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, {Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, {Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, }, KindPriority: []unversioned.GroupVersionKind{ {Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, {Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, {Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, }, } return priorityRESTMapper, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion(nil) }, ClientConfig: func() (*restclient.Config, error) { return clients.ClientConfigForVersion(nil) }, ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { gvk := mapping.GroupVersionKind mappingVersion := mapping.GroupVersionKind.GroupVersion() c, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } switch gvk.Group { case api.GroupName: return c.RESTClient, nil case autoscaling.GroupName: return c.AutoscalingClient.RESTClient, nil case batch.GroupName: return c.BatchClient.RESTClient, nil case apps.GroupName: return c.AppsClient.RESTClient, nil case extensions.GroupName: return c.ExtensionsClient.RESTClient, nil case api.SchemeGroupVersion.Group: return c.RESTClient, nil case extensions.SchemeGroupVersion.Group: return c.ExtensionsClient.RESTClient, nil default: if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { return nil, fmt.Errorf("unknown api group/version: %s", gvk.String()) } cfg, err := clientConfig.ClientConfig() if err != nil { return nil, err } gv := gvk.GroupVersion() cfg.GroupVersion = &gv cfg.APIPath = "/apis" cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk.Kind) return restclient.RESTClientFor(cfg) } }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) }, Decoder: func(toInternal bool) runtime.Decoder { if toInternal { return api.Codecs.UniversalDecoder() } return api.Codecs.UniversalDeserializer() }, JSONEncoder: func() runtime.Encoder { return api.Codecs.LegacyCodec(registered.EnabledVersions()...) 
}, Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil }, MapBasedSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil case *extensions.Deployment: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil case *extensions.ReplicaSet: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %v", gvk) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil case *extensions.Deployment: return getPorts(t.Spec.Template.Spec), nil case *extensions.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %v", gvk) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { c, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.Pod: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil case *api.ReplicationController: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector := labels.SelectorFromSet(t.Spec.Selector) sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } if numPods > 1 { 
fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil case *extensions.ReplicaSet: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot get the logs from %v", gvk) } }, PauseObject: func(object runtime.Object) (bool, error) { c, err := clients.ClientForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if t.Spec.Paused { return true, nil } t.Spec.Paused = true _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return false, err } return false, fmt.Errorf("cannot pause %v", gvk) } }, ResumeObject: func(object runtime.Object) (bool, error) { c, err := clients.ClientForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if !t.Spec.Paused { return true, nil } t.Spec.Paused = false _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return false, err } return false, fmt.Errorf("cannot resume %v", gvk) } }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client) }, HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) clientset := clientset.FromUnversionedClient(client) if err != nil { return nil, err } return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := client.ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } return &clientSwaggerSchema{ c: client, cacheDir: dir, mapper: api.RESTMapper, }, nil } return 
validation.NullSchema{}, nil }, SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { version := gvk.GroupVersion() client, err := clients.ClientForVersion(&version) if err != nil { return nil, err } return client.Discovery().SwaggerSchema(version) }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generators: func(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) }, CanBeExposed: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) } return nil }, CanBeAutoscaled: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) } return nil }, AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.ReplicationController: selector := labels.SelectorFromSet(t.Spec.Selector) sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *extensions.Deployment: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *batch.Job: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *api.Pod: return t, nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk) } }, EditorEnvs: func() []string { return []string{"KUBE_EDITOR", "EDITOR"} }, PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) { switch obj := obj.(type) { case *api.Service: if obj.Spec.Type == api.ServiceTypeNodePort { msg := fmt.Sprintf( `You have exposed your service on an external port on all nodes in your cluster. If you want to expose this service to the external internet, you may need to set up firewall rules for the service port(s) (%s) to serve traffic. See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. `, makePortsString(obj.Spec.Ports, true)) out.Write([]byte(msg)) } } }, } }
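The factory above wires every kubectl operation (printing, selector extraction, logs, pause/resume, scaling, validation) as a closure over a shared client cache, so commands depend on small function fields rather than on concrete client types. A minimal, self-contained sketch of that pattern, using hypothetical types rather than the real kubectl API:

// Illustrative sketch only: hypothetical names, not the real kubectl Factory.
package main

import "fmt"

// fakeClientCache stands in for the per-version client cache the factory closes over.
type fakeClientCache struct{ server string }

func (c *fakeClientCache) clientFor(version string) (string, error) {
	if version == "" {
		version = "default"
	}
	return c.server + "/" + version, nil
}

// miniFactory mirrors the pattern above: behavior is injected as closures that
// share one client cache, so callers depend on narrow function fields.
type miniFactory struct {
	Client    func() (string, error)
	Describer func(kind string) (string, error)
}

func newMiniFactory(cache *fakeClientCache) *miniFactory {
	return &miniFactory{
		Client: func() (string, error) { return cache.clientFor("") },
		Describer: func(kind string) (string, error) {
			c, err := cache.clientFor("v1")
			if err != nil {
				return "", err
			}
			return fmt.Sprintf("describer for %s using %s", kind, c), nil
		},
	}
}

func main() {
	f := newMiniFactory(&fakeClientCache{server: "https://example.invalid"})
	d, _ := f.Describer("Pod")
	fmt.Println(d)
}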
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags generators := map[string]kubectl.Generator{ "run/v1": kubectl.BasicReplicationController{}, "run-pod/v1": kubectl.BasicPod{}, "service/v1": kubectl.ServiceGeneratorV1{}, "service/v2": kubectl.ServiceGeneratorV2{}, } clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, generators: generators, Object: func() (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := cfg.Version return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion("") }, ClientConfig: func() (*client.Config, error) { return clients.ClientConfigForVersion("") }, RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } switch group { case "": return client.RESTClient, nil case "extensions": return client.ExtensionsClient.RESTClient, nil } return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind) }, Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil }, PodSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %s", kind) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return 
getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %s", kind) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, LogsForObject: func(object, options runtime.Object) (*client.Request, error) { c, err := clients.ClientForVersion("") if err != nil { return nil, err } switch t := object.(type) { case *api.Pod: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } return c.PodLogs(t.Namespace).Get(t.Name, opts) default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot get the logs from %s", kind) } }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.Kind, client) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.Kind, client) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion("") if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := client.ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } return &clientSwaggerSchema{ c: client, cacheDir: dir, mapper: api.RESTMapper, }, nil } return validation.NullSchema{}, nil }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generator: func(name string) (kubectl.Generator, bool) { generator, ok := generators[name] return generator, ok }, CanBeExposed: func(kind string) error { if kind != "ReplicationController" && kind != "Service" && kind != "Pod" { return fmt.Errorf("invalid resource provided: %v, only a replication controller, service or pod is accepted", kind) } return nil }, } }
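Several of these callbacks (PodSelectorForObject, PortsForObject, LogsForObject) share one shape: a type switch that knows where each supported kind keeps its selector or ports, with an explicit error for everything else. A small sketch of that dispatch, with made-up object types standing in for the api.* structs and a helper in the spirit of kubectl.MakeLabels:

// Illustrative sketch only: hypothetical object types, not the real api.* types.
package main

import "fmt"

type pod struct{ labels map[string]string }
type service struct{ selector map[string]string }

// makeLabels renders a map as comma-separated k=v pairs.
func makeLabels(m map[string]string) string {
	out := ""
	for k, v := range m {
		if out != "" {
			out += ","
		}
		out += k + "=" + v
	}
	return out
}

// selectorFor mirrors the PodSelectorForObject type switch above: each supported
// kind knows where its selector lives, and everything else is an explicit error.
func selectorFor(obj interface{}) (string, error) {
	switch t := obj.(type) {
	case *pod:
		if len(t.labels) == 0 {
			return "", fmt.Errorf("the pod has no labels and cannot be exposed")
		}
		return makeLabels(t.labels), nil
	case *service:
		if t.selector == nil {
			return "", fmt.Errorf("the service has no pod selector set")
		}
		return makeLabels(t.selector), nil
	default:
		return "", fmt.Errorf("cannot extract pod selector from %T", obj)
	}
}

func main() {
	s, err := selectorFor(&service{selector: map[string]string{"app": "demo"}})
	fmt.Println(s, err)
	_, err = selectorFor(42)
	fmt.Println(err)
}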
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, Object: func() (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion(nil) }, ClientConfig: func() (*client.Config, error) { return clients.ClientConfigForVersion(nil) }, RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } switch mapping.GroupVersionKind.Group { case api.GroupName: return client.RESTClient, nil case extensions.GroupName: return client.ExtensionsClient.RESTClient, nil } return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) }, Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, absoluteTimestamps, columnLabels), nil }, PodSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %v", gvk) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return 
getServicePorts(t.Spec), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %v", gvk) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, LogsForObject: func(object, options runtime.Object) (*client.Request, error) { c, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.Pod: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot get the logs from %v", gvk) } }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := client.ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } return &clientSwaggerSchema{ c: client, cacheDir: dir, mapper: api.RESTMapper, }, nil } return validation.NullSchema{}, nil }, SwaggerSchema: func(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { client, err := clients.ClientForVersion(&version) if err != nil { return nil, err } return client.SwaggerSchema(version) }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generators: func(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) }, CanBeExposed: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) } return nil }, CanBeAutoscaled: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), extensions.Kind("Deployment"): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) } return nil }, AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { client, err := clients.ClientForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.ReplicationController: return GetFirstPod(client, t.Namespace, t.Spec.Selector) case *extensions.Deployment: return GetFirstPod(client, t.Namespace, t.Spec.Selector) case *extensions.Job: return GetFirstPod(client, t.Namespace, t.Spec.Selector.MatchLabels) case *api.Pod: return t, nil default: gvk, err := api.Scheme.ObjectKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk) } }, EditorEnvs: func() []string { return []string{"KUBE_EDITOR", "EDITOR"} }, } }
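CanBeExposed and CanBeAutoscaled in this version are plain whitelists over group/kind pairs. A compact sketch of the same idea with a hypothetical groupKind type (not the real unversioned.GroupKind):

// Illustrative sketch only: a hypothetical group/kind whitelist.
package main

import "fmt"

type groupKind struct{ Group, Kind string }

// exposable mirrors the CanBeExposed switch above: only these kinds pass.
var exposable = map[groupKind]bool{
	{"", "ReplicationController"}: true,
	{"", "Service"}:               true,
	{"", "Pod"}:                   true,
}

func canBeExposed(gk groupKind) error {
	if exposable[gk] {
		return nil
	}
	return fmt.Errorf("cannot expose %s in group %q", gk.Kind, gk.Group)
}

func main() {
	fmt.Println(canBeExposed(groupKind{"", "Pod"}))
	fmt.Println(canBeExposed(groupKind{"extensions", "Ingress"}))
}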
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, Object: func() (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() checkErrWithPrefix("failed to get client config: ", err) cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } mapper := registered.RESTMapper() discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) // if we can find the server version and it's current enough to have discovery information, use it. Otherwise, // fallback to our hardcoded list if err == nil { if serverVersion, err := discoveryClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) { // register third party resources with the api machinery groups. This probably should be done, but // its consistent with old code, so we'll start with it. if err := registerThirdPartyResources(discoveryClient); err != nil { fmt.Fprintf(os.Stderr, "Unable to register third party resources: %v\n", err) } // ThirdPartyResourceData is special. It's not discoverable, but needed for thirdparty resource listing // TODO eliminate this once we're truly generic. thirdPartyResourceDataMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{extensionsv1beta1.SchemeGroupVersion}, registered.InterfacesFor) thirdPartyResourceDataMapper.Add(extensionsv1beta1.SchemeGroupVersion.WithKind("ThirdPartyResourceData"), meta.RESTScopeNamespace) mapper = meta.FirstHitRESTMapper{ MultiRESTMapper: meta.MultiRESTMapper{ discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, registered.InterfacesFor), thirdPartyResourceDataMapper, }, } } } // wrap with shortcuts mapper = NewShortcutExpander(mapper, discoveryClient) // wrap with output preferences mapper = kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} return mapper, api.Scheme }, UnstructuredObject: func() (meta.RESTMapper, runtime.ObjectTyper, error) { cfg, err := clients.ClientConfigForVersion(nil) if err != nil { return nil, nil, err } dc, err := discovery.NewDiscoveryClientForConfig(cfg) if err != nil { return nil, nil, err } groupResources, err := discovery.GetAPIGroupResources(dc) if err != nil { return nil, nil, err } // Register unknown APIs as third party for now to make // validation happy. TODO perhaps make a dynamic schema // validator to avoid this. 
for _, group := range groupResources { for _, version := range group.Group.Versions { gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version} if !registered.IsRegisteredVersion(gv) { registered.AddThirdPartyAPIGroupVersions(gv) } } } mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured) typer := discovery.NewUnstructuredObjectTyper(groupResources) return NewShortcutExpander(mapper, dc), typer, nil }, RESTClient: func() (*restclient.RESTClient, error) { clientConfig, err := clients.ClientConfigForVersion(nil) if err != nil { return nil, err } return restclient.RESTClientFor(clientConfig) }, ClientSet: func() (*internalclientset.Clientset, error) { return clients.ClientSetForVersion(nil) }, ClientConfig: func() (*restclient.Config, error) { return clients.ClientConfigForVersion(nil) }, ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { cfg, err := clientConfig.ClientConfig() if err != nil { return nil, err } if err := client.SetKubernetesDefaults(cfg); err != nil { return nil, err } gvk := mapping.GroupVersionKind switch gvk.Group { case federation.GroupName: mappingVersion := mapping.GroupVersionKind.GroupVersion() return clients.FederationClientForVersion(&mappingVersion) case api.GroupName: cfg.APIPath = "/api" default: cfg.APIPath = "/apis" } gv := gvk.GroupVersion() cfg.GroupVersion = &gv if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv) } return restclient.RESTClientFor(cfg) }, UnstructuredClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { cfg, err := clientConfig.ClientConfig() if err != nil { return nil, err } if err := restclient.SetKubernetesDefaults(cfg); err != nil { return nil, err } cfg.APIPath = "/apis" if mapping.GroupVersionKind.Group == api.GroupName { cfg.APIPath = "/api" } gv := mapping.GroupVersionKind.GroupVersion() cfg.ContentConfig = dynamic.ContentConfig() cfg.GroupVersion = &gv return restclient.RESTClientFor(cfg) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() if mapping.GroupVersionKind.Group == federation.GroupName { fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion) if err != nil { return nil, err } if mapping.GroupVersionKind.Kind == "Cluster" { return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil } } clientset, err := clients.ClientSetForVersion(&mappingVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientset); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) }, Decoder: func(toInternal bool) runtime.Decoder { var decoder runtime.Decoder if toInternal { decoder = api.Codecs.UniversalDecoder() } else { decoder = api.Codecs.UniversalDeserializer() } return thirdpartyresourcedata.NewDecoder(decoder, "") }, JSONEncoder: func() runtime.Encoder { return api.Codecs.LegacyCodec(registered.EnabledVersions()...) 
}, Printer: func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(options), nil }, MapBasedSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil case *extensions.Deployment: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil case *extensions.ReplicaSet: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil case *extensions.Deployment: return getPorts(t.Spec.Template.Spec), nil case *extensions.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) } }, ProtocolsForObject: func(object runtime.Object) (map[string]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getProtocols(t.Spec.Template.Spec), nil case *api.Pod: return getProtocols(t.Spec), nil case *api.Service: return getServiceProtocols(t.Spec), nil case *extensions.Deployment: return getProtocols(t.Spec.Template.Spec), nil case *extensions.ReplicaSet: return getProtocols(t.Spec.Template.Spec), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { clientset, err := clients.ClientSetForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.Pod: opts, ok := options.(*api.PodLogOptions) if !ok { 
return nil, errors.New("provided options object is not a PodLogOptions") } return clientset.Core().Pods(t.Namespace).GetLogs(t.Name, opts), nil case *api.ReplicationController: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector := labels.SelectorFromSet(t.Spec.Selector) sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil case *extensions.ReplicaSet: opts, ok := options.(*api.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") } selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } if numPods > 1 { fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) } return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) } }, PauseObject: func(object runtime.Object) (bool, error) { clientset, err := clients.ClientSetForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if t.Spec.Paused { return true, nil } t.Spec.Paused = true _, err := clientset.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return false, err } return false, fmt.Errorf("cannot pause %v", gvks[0]) } }, ResumeObject: func(object runtime.Object) (bool, error) { clientset, err := clients.ClientSetForVersion(nil) if err != nil { return false, err } switch t := object.(type) { case *extensions.Deployment: if !t.Spec.Paused { return true, nil } t.Spec.Paused = false _, err := clientset.Extensions().Deployments(t.Namespace).Update(t) return false, err default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return false, err } return false, fmt.Errorf("cannot resume %v", gvks[0]) } }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() clientset, err := clients.ClientSetForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() clientset, err := clients.ClientSetForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset) }, HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() clientset, err := clients.ClientSetForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, Rollbacker: func(mapping *meta.RESTMapping) 
(kubectl.Rollbacker, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() clientset, err := clients.ClientSetForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() clientset, err := clients.ClientSetForVersion(&mappingVersion) if err != nil { return nil, err } return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { clientConfig, err := clients.ClientConfigForVersion(nil) if err != nil { return nil, err } restclient, err := restclient.RESTClientFor(clientConfig) if err != nil { return nil, err } clientset, err := clients.ClientSetForVersion(nil) if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := clientset.Discovery().ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } fedClient, err := clients.FederationClientForVersion(nil) if err != nil { return nil, err } return &clientSwaggerSchema{ c: restclient, fedc: fedClient, cacheDir: dir, }, nil } return validation.NullSchema{}, nil }, SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { version := gvk.GroupVersion() clientset, err := clients.ClientSetForVersion(&version) if err != nil { return nil, err } return clientset.Discovery().SwaggerSchema(version) }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generators: func(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) }, CanBeExposed: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) } return nil }, CanBeAutoscaled: func(kind unversioned.GroupKind) error { switch kind { case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) } return nil }, AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { clientset, err := clients.ClientSetForVersion(nil) if err != nil { return nil, err } switch t := object.(type) { case *api.ReplicationController: selector := labels.SelectorFromSet(t.Spec.Selector) sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *extensions.Deployment: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *batch.Job: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, 
sortBy) return pod, err case *api.Pod: return t, nil default: gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0]) } }, // UpdatePodSpecForObject update the pod specification for the provided object UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) switch t := obj.(type) { case *api.Pod: return true, fn(&t.Spec) case *api.ReplicationController: if t.Spec.Template == nil { t.Spec.Template = &api.PodTemplateSpec{} } return true, fn(&t.Spec.Template.Spec) case *extensions.Deployment: return true, fn(&t.Spec.Template.Spec) case *extensions.DaemonSet: return true, fn(&t.Spec.Template.Spec) case *extensions.ReplicaSet: return true, fn(&t.Spec.Template.Spec) case *apps.PetSet: return true, fn(&t.Spec.Template.Spec) case *batch.Job: return true, fn(&t.Spec.Template.Spec) default: return false, fmt.Errorf("the object is not a pod or does not have a pod template") } }, EditorEnvs: func() []string { return []string{"KUBE_EDITOR", "EDITOR"} }, PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) { switch obj := obj.(type) { case *api.Service: if obj.Spec.Type == api.ServiceTypeNodePort { msg := fmt.Sprintf( `You have exposed your service on an external port on all nodes in your cluster. If you want to expose this service to the external internet, you may need to set up firewall rules for the service port(s) (%s) to serve traffic. See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. `, makePortsString(obj.Spec.Ports, true)) out.Write([]byte(msg)) } if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok { msg := fmt.Sprintf( `You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges]. It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future. Please use the loadBalancerSourceRanges field instead. See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. `) out.Write([]byte(msg)) } } }, } }
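This later factory prefers a discovery-backed RESTMapper and only falls back to the hardcoded list when the server is too old, then layers the shortcut expander and output-version mapper on top. The sketch below shows that fallback-plus-decorator flow with stand-in interfaces, not the real meta.RESTMapper:

// Illustrative sketch only: hypothetical mapper interfaces, not the real meta.RESTMapper.
package main

import (
	"errors"
	"fmt"
)

// mapper is a minimal stand-in for a RESTMapper: resource name -> kind.
type mapper interface {
	KindFor(resource string) (string, error)
}

type staticMapper map[string]string

func (m staticMapper) KindFor(resource string) (string, error) {
	if k, ok := m[resource]; ok {
		return k, nil
	}
	return "", errors.New("no match for " + resource)
}

// shortcutExpander mirrors the wrapping above: it rewrites shortcuts, then delegates.
type shortcutExpander struct {
	shortcuts map[string]string
	delegate  mapper
}

func (s shortcutExpander) KindFor(resource string) (string, error) {
	if full, ok := s.shortcuts[resource]; ok {
		resource = full
	}
	return s.delegate.KindFor(resource)
}

// newMapper mirrors the "prefer discovery, fall back to the hardcoded list" flow:
// if the dynamic source fails, the static default is kept, and the result is
// wrapped with the shortcut decorator either way.
func newMapper(discover func() (mapper, error)) mapper {
	base := mapper(staticMapper{"pods": "Pod", "services": "Service"})
	if m, err := discover(); err == nil {
		base = m
	}
	return shortcutExpander{shortcuts: map[string]string{"po": "pods", "svc": "services"}, delegate: base}
}

func main() {
	m := newMapper(func() (mapper, error) { return nil, errors.New("discovery unavailable") })
	fmt.Println(m.KindFor("po"))
}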
}) It("should scale a job up", func() { startParallelism := int32(1) endParallelism := int32(2) By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client)) Expect(err).NotTo(HaveOccurred()) waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should scale a job down", func() { startParallelism := int32(2) endParallelism := int32(1) By("Creating a job")
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags generators := map[string]kubectl.Generator{ "run/v1": kubectl.BasicReplicationController{}, "run-pod/v1": kubectl.BasicPod{}, "service/v1": kubectl.ServiceGeneratorV1{}, "service/v2": kubectl.ServiceGeneratorV2{}, } clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) expClients := NewExperimentalClientCache(clientConfig) noClientErr := errors.New("could not get client") getBothClients := func(group string, version string) (*client.Client, *client.ExperimentalClient, error) { switch group { case "api": client, err := clients.ClientForVersion(version) return client, nil, err case "experimental": client, err := clients.ClientForVersion(version) if err != nil { return nil, nil, err } expClient, err := expClients.Client() if err != nil { return nil, nil, err } return client, expClient, err } return nil, nil, noClientErr } return &Factory{ clients: clients, flags: flags, generators: generators, Object: func() (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := cfg.Version return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion("") }, ExperimentalClient: func() (*client.ExperimentalClient, error) { return expClients.Client() }, ClientConfig: func() (*client.Config, error) { return clients.ClientConfigForVersion("") }, RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } switch group { case "api": client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } return client.RESTClient, nil case "experimental": client, err := expClients.Client() if err != nil { return nil, err } return client.RESTClient, nil } return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, expClient, err := getBothClients(group, mapping.APIVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(mapping.Kind, client, expClient); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind) }, Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil }, PodSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), 
nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %s", kind) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %s", kind) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, _, err := getBothClients(group, mapping.APIVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.Kind, kubectl.NewScalerClient(client)) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, expClient, err := getBothClients(group, mapping.APIVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.Kind, client, expClient) }, Validator: func(validate bool) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion("") if err != nil { return nil, err } expClient, _ := expClients.Client() return &clientSwaggerSchema{client, expClient, api.Scheme}, nil } return validation.NullSchema{}, nil }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generator: func(name string) (kubectl.Generator, bool) { generator, ok := generators[name] return generator, ok }, } }
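getBothClients hides which client cache a command needs behind the API group name: core commands get only the main client, experimental ones get both, and unknown groups fail early. A minimal sketch of that dispatch with hypothetical client types:

// Illustrative sketch only: hypothetical client types, not the real core/experimental clients.
package main

import (
	"errors"
	"fmt"
)

type coreClient struct{ version string }
type expClient struct{}

var errNoClient = errors.New("could not get client")

// clientsForGroup mirrors getBothClients above: the core client is always needed,
// and the experimental client is only constructed for the experimental group.
func clientsForGroup(group, version string) (*coreClient, *expClient, error) {
	switch group {
	case "api":
		return &coreClient{version: version}, nil, nil
	case "experimental":
		return &coreClient{version: version}, &expClient{}, nil
	}
	return nil, nil, errNoClient
}

func main() {
	c, e, err := clientsForGroup("experimental", "v1")
	fmt.Println(c != nil, e != nil, err)
	_, _, err = clientsForGroup("unknown", "v1")
	fmt.Println(err)
}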
// NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper} flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags generators := map[string]kubectl.Generator{ "run/v1": kubectl.BasicReplicationController{}, "run-pod/v1": kubectl.BasicPod{}, "service/v1": kubectl.ServiceGeneratorV1{}, "service/v2": kubectl.ServiceGeneratorV2{}, "horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{}, } clientConfig := optionalClientConfig if optionalClientConfig == nil { clientConfig = DefaultClientConfig(flags) } clients := NewClientCache(clientConfig) return &Factory{ clients: clients, flags: flags, generators: generators, Object: func() (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := cfg.Version return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme }, Client: func() (*client.Client, error) { return clients.ClientForVersion("") }, ClientConfig: func() (*client.Config, error) { return clients.ClientConfigForVersion("") }, RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } switch group { case "": return client.RESTClient, nil case "extensions": return client.ExtensionsClient.RESTClient, nil } return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource) }, Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) if err != nil { return nil, err } client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind) }, Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil }, PodSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { case *api.ReplicationController: return kubectl.MakeLabels(t.Spec.Selector), nil case *api.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } return kubectl.MakeLabels(t.Labels), nil case *api.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } return kubectl.MakeLabels(t.Spec.Selector), nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return "", err } return "", fmt.Errorf("cannot extract pod selector from %s", kind) } }, PortsForObject: func(object runtime.Object) ([]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema 
introspection) switch t := object.(type) { case *api.ReplicationController: return getPorts(t.Spec.Template.Spec), nil case *api.Pod: return getPorts(t.Spec), nil case *api.Service: return getServicePorts(t.Spec), nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot extract ports from %s", kind) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { return meta.NewAccessor().Labels(object) }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } return kubectl.ScalerFor(mapping.Kind, client) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } return kubectl.ReaperFor(mapping.Kind, client) }, Validator: func(validate bool, cacheDir string) (validation.Schema, error) { if validate { client, err := clients.ClientForVersion("") if err != nil { return nil, err } dir := cacheDir if len(dir) > 0 { version, err := client.ServerVersion() if err != nil { return nil, err } dir = path.Join(cacheDir, version.String()) } return &clientSwaggerSchema{ c: client, cacheDir: dir, mapper: api.RESTMapper, }, nil } return validation.NullSchema{}, nil }, DefaultNamespace: func() (string, bool, error) { return clientConfig.Namespace() }, Generator: func(name string) (kubectl.Generator, bool) { generator, ok := generators[name] return generator, ok }, CanBeExposed: func(kind string) error { switch kind { case "ReplicationController", "Service", "Pod": // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) } return nil }, CanBeAutoscaled: func(kind string) error { switch kind { // TODO: support autoscale for deployments case "ReplicationController": // nothing to do here default: return fmt.Errorf("cannot autoscale a %s", kind) } return nil }, AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { client, err := clients.ClientForVersion("") if err != nil { return nil, err } switch t := object.(type) { case *api.ReplicationController: var pods *api.PodList for pods == nil || len(pods.Items) == 0 { var err error if pods, err = client.Pods(t.Namespace).List(labels.SelectorFromSet(t.Spec.Selector), fields.Everything()); err != nil { return nil, err } if len(pods.Items) == 0 { time.Sleep(2 * time.Second) } } pod := &pods.Items[0] return pod, nil case *api.Pod: return t, nil default: _, kind, err := api.Scheme.ObjectVersionAndKind(object) if err != nil { return nil, err } return nil, fmt.Errorf("cannot attach to %s: not implemented", kind) } }, } }
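The Generator callback here is a plain registry lookup: a map from generator name to implementation, returning an ok flag so callers can tell "unknown generator" apart from a generation failure. A small sketch with a made-up generator interface (not the real kubectl.Generator):

// Illustrative sketch only: a hypothetical generator registry.
package main

import "fmt"

// generator is a minimal stand-in for kubectl.Generator: it turns parameters into an object.
type generator interface {
	Generate(params map[string]string) (string, error)
}

type basicPod struct{}

func (basicPod) Generate(params map[string]string) (string, error) {
	return "Pod/" + params["name"], nil
}

// registry mirrors the generators map in the factory: lookups return (generator, ok).
var registry = map[string]generator{
	"run-pod/v1": basicPod{},
}

func main() {
	g, ok := registry["run-pod/v1"]
	if !ok {
		fmt.Println("no such generator")
		return
	}
	obj, err := g.Generate(map[string]string{"name": "demo"})
	fmt.Println(obj, err)
}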