// StopRC stops the rc via kubectl's stop library
func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
	reaper, err := kubectl.ReaperFor("ReplicationController", restClient)
	if err != nil || reaper == nil {
		return err
	}
	_, err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
	if err != nil {
		return err
	}
	return nil
}
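// The following is a minimal usage sketch, not part of the original utilities:
// it shows how a test might fetch an existing controller and tear it down via
// StopRC. The name "my-rc" and the exampleStopRC helper are assumptions for
// illustration only.
func exampleStopRC(restClient *client.Client, ns string) error {
	rc, err := restClient.ReplicationControllers(ns).Get("my-rc")
	if err != nil {
		return err
	}
	// Tear the controller down through the same reaper path "kubectl stop" uses.
	return StopRC(rc, restClient)
}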
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":                          kubectl.BasicReplicationController{},
		"run-pod/v1":                      kubectl.BasicPod{},
		"service/v1":                      kubectl.ServiceGeneratorV1{},
		"service/v2":                      kubectl.ServiceGeneratorV2{},
		"horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			if kind != "ReplicationController" && kind != "Service" && kind != "Pod" {
				return fmt.Errorf("invalid resource provided: %v, only a replication controller, service or pod is accepted", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind string) error {
			// TODO: support autoscale for deployments
			if kind != "ReplicationController" {
				return fmt.Errorf("invalid resource provided: %v, only a replication controller is accepted", kind)
			}
			return nil
		},
	}
}
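// A minimal sketch (an assumption for illustration, not part of factory.go)
// of how a command typically consumes the Factory: resolve a version-matched
// client, then look up the reaper for a REST mapping. exampleUseFactory and
// the "default"/"my-rc" arguments are hypothetical.
func exampleUseFactory(f *Factory, mapping *meta.RESTMapping) error {
	// Client() returns a client for the default negotiated API version.
	c, err := f.Client()
	if err != nil {
		return err
	}
	_ = c // e.g. c.Pods("default").List(...)

	// Reaper() wires the same version-matched client into kubectl.ReaperFor.
	reaper, err := f.Reaper(mapping)
	if err != nil {
		return err
	}
	_, err = reaper.Stop("default", "my-rc", 0, nil)
	return err
}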
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should stop a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job down")
		reaper, err := kubectl.ReaperFor("Job", f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		_, err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})
})

// newTestJob returns a job which does one of several testing behaviors.
func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int) *extensions.Job {
	job := &extensions.Job{
func testDaemonSets(f *Framework) {
	ns := f.Namespace.Name
	c := f.Client
	simpleDSName := "simple-daemon-set"
	image := "gcr.io/google_containers/serve_hostname:1.1"
	label := map[string]string{daemonsetNameLabel: simpleDSName}
	retryTimeout := 1 * time.Minute
	retryInterval := 5 * time.Second

	Logf("Creating simple daemon set %s", simpleDSName)
	_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: simpleDSName,
		},
		Spec: extensions.DaemonSetSpec{
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: label,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  simpleDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
		dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
		Expect(err).NotTo(HaveOccurred())
		_, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
	}()

	By("Check that daemon pods launch on every node of the cluster.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

	By("Stop a daemon pod, check that the daemon pod is revived.")
	podClient := c.Pods(ns)
	podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(podList.Items)).To(BeNumerically(">", 0))
	pod := podList.Items[0]
	err = podClient.Delete(pod.Name, nil)
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")

	complexDSName := "complex-daemon-set"
	complexLabel := map[string]string{daemonsetNameLabel: complexDSName}
	nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
	Logf("Creating daemon with a node selector %s", complexDSName)
	_, err = c.DaemonSets(ns).Create(&extensions.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: complexDSName,
		},
		Spec: extensions.DaemonSetSpec{
			Selector: complexLabel,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: complexLabel,
				},
				Spec: api.PodSpec{
					NodeSelector: nodeSelector,
					Containers: []api.Container{
						{
							Name:  complexDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	By("Initially, daemon pods should not be running on any nodes.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

	By("Change label of node, check that daemon pod is launched.")
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
	Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
	daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
	Expect(len(daemonSetLabels)).To(Equal(1))
	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

	By("remove the node selector and wait for daemons to be unscheduled")
	_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
	Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

	By("We should now be able to delete the daemon set.")
	Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred())
}
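// A rough sketch of the shape of the poll conditions used above (this is an
// assumption for illustration; the real checkRunningOnNoNodes helper lives
// elsewhere in this file and inspects node state). wait.Poll expects a
// wait.ConditionFunc, i.e. func() (done bool, err error).
func exampleCheckRunningOnNoNodes(f *Framework, selector map[string]string) wait.ConditionFunc {
	return func() (bool, error) {
		podList, err := f.Client.Pods(f.Namespace.Name).List(labels.Set(selector).AsSelector(), fields.Everything())
		if err != nil {
			return false, nil // tolerate transient list errors and keep polling
		}
		// Done once no daemon pods with this label remain on any node.
		return len(podList.Items) == 0, nil
	}
}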