// StopRC stops the rc via kubectl's stop library
func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
	reaper, err := kubectl.ReaperFor("ReplicationController", restClient)
	if err != nil || reaper == nil {
		return err
	}
	_, err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
	if err != nil {
		return err
	}
	return nil
}
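// A minimal usage sketch for StopRC, not part of the library above: the helper
// name stopRCExample is hypothetical, and `rc` and `restClient` are assumed to
// come from an earlier Create call and a configured client.
func stopRCExample(rc *api.ReplicationController, restClient *client.Client) {
	if err := StopRC(rc, restClient); err != nil {
		Failf("unable to stop rc %v: %v", rc.Name, err)
	}
}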
// Stop scales a replication controller via its deployment configuration down to
// zero replicas, waits for all of them to get deleted and then deletes both the
// replication controller and its deployment configuration.
func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) (string, error) {
	// If the config is already deleted, it may still have associated
	// deployments which didn't get cleaned up during prior calls to Stop. If
	// the config can't be found, still make an attempt to clean up the
	// deployments.
	//
	// It's important to delete the config first to avoid an undesirable side
	// effect which can cause the deployment to be re-triggered upon the
	// config's deletion. See https://github.com/openshift/origin/issues/2721
	// for more details.
	err := reaper.oc.DeploymentConfigs(namespace).Delete(name)
	configNotFound := kerrors.IsNotFound(err)
	if err != nil && !configNotFound {
		return "", err
	}

	// Clean up deployments related to the config.
	rcList, err := reaper.kc.ReplicationControllers(namespace).List(util.ConfigSelector(name))
	if err != nil {
		return "", err
	}
	rcReaper, err := kubectl.ReaperFor("ReplicationController", reaper.kc)
	if err != nil {
		return "", err
	}

	// If there is neither a config nor any deployments, we can return NotFound.
	deployments := rcList.Items
	if configNotFound && len(deployments) == 0 {
		return "", kerrors.NewNotFound("DeploymentConfig", name)
	}
	for _, rc := range deployments {
		if _, err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			// Better not error out here...
			glog.Infof("Cannot delete ReplicationController %s/%s: %v", rc.Namespace, rc.Name, err)
		}
	}
	return fmt.Sprintf("%s stopped", name), nil
}
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{latest.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":     kubectl.BasicReplicationController{},
		"service/v1": kubectl.ServiceGenerator{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{mapper, cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			describer, ok := kubectl.DescriberFor(mapping.Kind, client)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
			}
			return describer, nil
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				kind, err := meta.NewAccessor().Kind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("it is not possible to get a pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify ports via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			default:
				kind, err := meta.NewAccessor().Kind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("it is not possible to get ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, kubectl.NewScalerClient(client))
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func() (validation.Schema, error) {
			if flags.Lookup("validate").Value.String() == "true" {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{client, api.Scheme}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
	}
}
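// A minimal sketch of how a command might consume the factory above; the
// function name exampleFactoryUse is hypothetical, and error handling defers
// to the CheckErr helper already used by the factory.
func exampleFactoryUse() {
	f := NewFactory(nil) // nil binds flags to a fresh clientcmd.ClientConfig

	c, err := f.Client()
	CheckErr(err)
	_ = c // e.g. issue REST calls against the default API version

	ns, err := f.DefaultNamespace()
	CheckErr(err)
	_ = ns

	if generator, ok := f.Generator("run/v1"); ok {
		_ = generator // used by commands like `kubectl run` to build objects
	}
}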
// NewFactory creates a factory with the default Kubernetes resources defined
func NewFactory() *Factory {
	mapper := kubectl.ShortcutExpander{latest.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	clientConfig := DefaultClientConfig(flags)

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		loader:  clientConfig,
	}

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func(cmd *cobra.Command) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			checkErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{mapper, cmdApiVersion}, api.Scheme
		},
		Client: func(cmd *cobra.Command) (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func(cmd *cobra.Command) (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(cmd *cobra.Command, mapping *meta.RESTMapping) (resource.RESTClient, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		},
		Describer: func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Describer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			describer, ok := kubectl.DescriberFor(mapping.Kind, client)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
			}
			return describer, nil
		},
		Printer: func(cmd *cobra.Command, mapping *meta.RESTMapping, noHeaders bool) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders), nil
		},
		Resizer: func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Resizer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ResizerFor(mapping.Kind, client)
		},
		Reaper: func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(cmd *cobra.Command) (validation.Schema, error) {
			if GetFlagBool(cmd, "validate") {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{client, api.Scheme}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func(cmd *cobra.Command) (string, error) {
			return clientConfig.Namespace()
		},
	}
}
						Ports: []api.ContainerPort{{ContainerPort: 8080}},
					},
				},
			},
		},
	},
})
if err != nil {
	Fail(fmt.Sprintf("unable to create test rc: %v", err))
}
// Clean up rc
defer func() {
	defer GinkgoRecover()
	By("Cleaning up the replication controller")
	// Resize the replication controller to zero to get rid of pods.
	rcReaper, err := kubectl.ReaperFor("ReplicationController", c)
	if err != nil {
		Fail(fmt.Sprintf("unable to stop rc %v: %v", rc.Name, err))
	}
	if _, err = rcReaper.Stop(ns, rc.Name); err != nil {
		Fail(fmt.Sprintf("unable to stop rc %v: %v", rc.Name, err))
	}
}()

By("Waiting for connectivity to be verified")
const maxAttempts = 60
passed := false
var body []byte
for i := 0; i < maxAttempts && !passed; i++ {
	time.Sleep(2 * time.Second)
	body, err = c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("status").Do().Raw()
// ReaperFor returns the appropriate Reaper client depending on the provided
// kind of resource (Replication controllers, pods, services, and deploymentConfigs
// supported)
func ReaperFor(kind string, oc *client.Client, kc *kclient.Client) (kubectl.Reaper, error) {
	if kind != "DeploymentConfig" {
		return kubectl.ReaperFor(kind, kc)
	}
	return &DeploymentConfigReaper{oc: oc, kc: kc, pollInterval: kubectl.Interval, timeout: kubectl.Timeout}, nil
}
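// A minimal usage sketch for ReaperFor, assuming configured OpenShift and
// Kubernetes clients `oc` and `kc`; the namespace/name values and the function
// name reaperExample are hypothetical.
func reaperExample(oc *client.Client, kc *kclient.Client) (string, error) {
	reaper, err := ReaperFor("DeploymentConfig", oc, kc)
	if err != nil {
		return "", err
	}
	// Stop deletes the config first, then reaps its deployments (see Stop above);
	// on success it returns a message like "frontend stopped".
	return reaper.Stop("default", "frontend", 1*time.Minute, nil)
}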
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
	ns := api.NamespaceDefault
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Clean up the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := kubectl.ReaperFor("ReplicationController", c)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(ns, controller.Name, 0, nil); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		err = waitForPodRunning(c, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{latest.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		loader:  clientConfig,
	}

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{mapper, cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			describer, ok := kubectl.DescriberFor(mapping.Kind, client)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
			}
			return describer, nil
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders bool) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders), nil
		},
		PodSelectorForResource: func(mapping *meta.RESTMapping, namespace, name string) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			client, err := clients.ClientForVersion("")
			if err != nil {
				return "", err
			}
			switch mapping.Kind {
			case "ReplicationController":
				rc, err := client.ReplicationControllers(namespace).Get(name)
				if err != nil {
					return "", err
				}
				return kubectl.MakeLabels(rc.Spec.Selector), nil
			case "Pod":
				pod, err := client.Pods(namespace).Get(name)
				if err != nil {
					return "", err
				}
				if len(pod.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(pod.Labels), nil
			case "Service":
				svc, err := client.Services(namespace).Get(name)
				if err != nil {
					return "", err
				}
				if svc.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(svc.Spec.Selector), nil
			default:
				return "", fmt.Errorf("it is not possible to get a pod selector from %s", mapping.Kind)
			}
		},
		PortsForResource: func(mapping *meta.RESTMapping, namespace, name string) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify ports via schema introspection)
			client, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch mapping.Kind {
			case "ReplicationController":
				rc, err := client.ReplicationControllers(namespace).Get(name)
				if err != nil {
					return nil, err
				}
				return getPorts(rc.Spec.Template.Spec), nil
			case "Pod":
				pod, err := client.Pods(namespace).Get(name)
				if err != nil {
					return nil, err
				}
				return getPorts(pod.Spec), nil
			default:
				return nil, fmt.Errorf("it is not possible to get ports from %s", mapping.Kind)
			}
		},
		Resizer: func(mapping *meta.RESTMapping) (kubectl.Resizer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ResizerFor(mapping.Kind, kubectl.NewResizerClient(client))
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func() (validation.Schema, error) {
			if flags.Lookup("validate").Value.String() == "true" {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{client, api.Scheme}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, error) {
			return clientConfig.Namespace()
		},
	}
}
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
	ns := api.NamespaceDefault
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Clean up the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := kubectl.ReaperFor("ReplicationController", c)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(ns, controller.Name); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label)
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		err = waitForPodRunning(c, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t = time.Now()
	for i, pod := range pods.Items {
		for {
			p, err := c.Pods(ns).Get(pod.Name)
			Expect(err).NotTo(HaveOccurred())
			if p.Status.HostIP != "" {
				Logf("Controller %s: Replica %d has hostIP: %s", name, i+1, p.Status.HostIP)
				break
			}
			if time.Since(t) >= hostIPTimeout {
				Failf("Controller %s: Gave up waiting for hostIP of replica %d after %v seconds",
					name, i, time.Since(t).Seconds())
			}
			Logf("Controller %s: Retrying to get the hostIP of replica %d", name, i+1)
			time.Sleep(5 * time.Second)
		}
	}

	// Re-fetch the pod information to update the host port information.
	pods, err = c.Pods(ns).List(label)
	Expect(err).NotTo(HaveOccurred())

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	for i, pod := range pods.Items {
		body, err := c.Get().
			Prefix("proxy").
			Resource("pods").
			Name(string(pod.Name)).
			Do().
			Raw()
		if err != nil {
			Failf("Controller %s: Failed to GET from replica %d: %v", name, i+1, err)
		}
		// The body should be the pod name.
		if string(body) != pod.Name {
			Failf("Controller %s: Replica %d expected response %s but got %s", name, i+1, pod.Name, string(body))
		}
		Logf("Controller %s: Got expected result from replica %d: %s", name, i+1, string(body))
	}
}