func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
	// is running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		deploymentList, err := c.Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		if (len(rcList.Items) + len(deploymentList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find one RC or deployment with label %s but got %d", rcLabel, len(rcList.Items)+len(deploymentList.Items))
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, d := range deploymentList.Items {
			selector := labels.Set(d.Spec.Selector.MatchLabels).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
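// Hedged usage sketch, not part of the original source: one plausible way the
// helper above could be used is to record the expected system pod UIDs, run a
// disruptive action, and then verify the same number of pods is expected again.
// The function name checkSystemPodsSurvive and the disrupt callback are hypothetical.
func checkSystemPodsSurvive(c *client.Client, disrupt func() error) error {
	before, err := verifyExpectedRcsExistAndGetExpectedPods(c)
	if err != nil {
		return err
	}
	if err := disrupt(); err != nil {
		return err
	}
	after, err := verifyExpectedRcsExistAndGetExpectedPods(c)
	if err != nil {
		return err
	}
	if len(after) != len(before) {
		return fmt.Errorf("expected %d system pods after disruption, got %d", len(before), len(after))
	}
	return nil
}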
func validateConsoleDeployment(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	d, err := c.Deployments(ns).Get("fabric8")
	if d != nil {
		return Success, err
	}
	return Failure, err
}
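// Hedged sketch, not from the original source: a hypothetical wrapper showing how
// the validation above might be reported. Result, Success, and Failure are the
// types/values already referenced by validateConsoleDeployment; reportConsoleStatus
// and the out parameter are illustrative.
func reportConsoleStatus(c *k8sclient.Client, f *cmdutil.Factory, out io.Writer) {
	result, err := validateConsoleDeployment(c, f)
	if err != nil {
		fmt.Fprintf(out, "console deployment check failed: %v\n", err)
		return
	}
	if result == Success {
		fmt.Fprintln(out, "fabric8 console deployment found")
	} else {
		fmt.Fprintln(out, "fabric8 console deployment not found")
	}
}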
// checkDeploymentRevision checks if the input deployment's and its new RC's revision and images are as expected.
func checkDeploymentRevision(c *client.Client, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *api.ReplicationController) {
	deployment, err := c.Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Check revision of the new RC of this deployment.
	newRC, err := deploymentutil.GetNewRC(*deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRC.Annotations).NotTo(BeNil())
	Expect(newRC.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	// Check revision of this deployment.
	Expect(deployment.Annotations).NotTo(BeNil())
	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	if len(imageName) > 0 {
		// Check the image the new RC creates.
		Expect(newRC.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(newRC.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
		// Check the image the deployment creates.
		Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
	}
	return deployment, newRC
}
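// Hedged usage sketch (hypothetical helper, not part of the original tests):
// after updating a deployment's pod template, an e2e test could assert that the
// rollout produced revision "2" with the new image. The deployment name and the
// nginx image values here are illustrative only.
func expectRolloutToRevisionTwo(c *client.Client, ns string) {
	deployment, newRC := checkDeploymentRevision(c, ns, "nginx-deployment", "2", "nginx", "nginx:1.9.1")
	// The deployment and its new RC should agree on the updated pod template image.
	Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal(newRC.Spec.Template.Spec.Containers[0].Image))
}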
func dumpClusterInfo(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
	var c *unversioned.Client
	var err error
	if c, err = f.Client(); err != nil {
		return err
	}
	printer, _, err := kubectl.GetPrinter("json", "")
	if err != nil {
		return err
	}

	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return err
	}
	if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
		return err
	}

	var namespaces []string
	if cmdutil.GetFlagBool(cmd, "all-namespaces") {
		namespaceList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			return err
		}
		for ix := range namespaceList.Items {
			namespaces = append(namespaces, namespaceList.Items[ix].Name)
		}
	} else {
		namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
		if len(namespaces) == 0 {
			cmdNamespace, _, err := f.DefaultNamespace()
			if err != nil {
				return err
			}
			namespaces = []string{
				api.NamespaceSystem,
				cmdNamespace,
			}
		}
	}
	for _, namespace := range namespaces {
		// TODO: this is repetitive in the extreme. Use reflection or
		// something to make this a for loop.
		events, err := c.Events(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil {
			return err
		}

		rcs, err := c.ReplicationControllers(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil {
			return err
		}

		svcs, err := c.Services(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil {
			return err
		}

		sets, err := c.DaemonSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil {
			return err
		}

		deps, err := c.Deployments(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil {
			return err
		}

		rps, err := c.ReplicaSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil {
			return err
		}

		pods, err := c.Pods(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil {
			return err
		}

		for ix := range pods.Items {
			pod := &pods.Items[ix]
			writer := setupOutputWriter(cmd, out, path.Join(namespace, pod.Name, "logs.txt"))
			writer.Write([]byte(fmt.Sprintf("==== START logs for %s/%s ====\n", pod.Namespace, pod.Name)))
			request, err := f.LogsForObject(pod, &api.PodLogOptions{})
			if err != nil {
				return err
			}
			data, err := request.DoRaw()
			if err != nil {
				return err
			}
			writer.Write(data)
			writer.Write([]byte(fmt.Sprintf("==== END logs for %s/%s ====\n", pod.Namespace, pod.Name)))
		}
	}
	dir := cmdutil.GetFlagString(cmd, "output-directory")
	if len(dir) == 0 {
		dir = "."
	}
	if dir != "-" {
		fmt.Fprintf(out, "Cluster info dumped to %s", dir)
	}
	return nil
}
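// Hedged sketch, not the actual kubectl constructor: one way a cobra command could
// be wired to call dumpClusterInfo with the flags it reads ("all-namespaces",
// "namespaces", "output-directory"). The function name newClusterInfoDumpCommand
// and the flag help strings are illustrative.
func newClusterInfoDumpCommand(f *cmdutil.Factory, out io.Writer) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "dump",
		Short: "Dump relevant cluster state for debugging",
		Run: func(cmd *cobra.Command, args []string) {
			cmdutil.CheckErr(dumpClusterInfo(f, cmd, args, out))
		},
	}
	cmd.Flags().String("output-directory", "", "Directory to write the dump to; '-' or empty writes to stdout.")
	cmd.Flags().StringSlice("namespaces", []string{}, "Comma-separated list of namespaces to dump.")
	cmd.Flags().Bool("all-namespaces", false, "If true, dump all namespaces instead of the listed ones.")
	return cmd
}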