// If a apiserver client is passed in, the function will try to get kubelet metrics from metrics grabber; // or else, the function will try to get kubelet metrics directly from the node. func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) { if c == nil { return metrics.GrabKubeletMetricsWithoutProxy(nodeName) } grabber, err := metrics.NewMetricsGrabber(c, true, false, false, false) if err != nil { return metrics.KubeletMetrics{}, err } return grabber.GrabFromKubelet(nodeName) }
func gatherMetrics(f *framework.Framework) { By("Gathering metrics") var summary framework.TestDataSummary grabber, err := metrics.NewMetricsGrabber(f.Client, false, false, true, false) if err != nil { framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { received, err := grabber.Grab() if err != nil { framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.") } else { summary = (*framework.MetricsForE2E)(&received) framework.Logf(summary.PrintHumanReadable()) } } }
"k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = framework.KubeDescribe("MetricsGrabber", func() { f := framework.NewDefaultFramework("metrics-grabber") var c clientset.Interface var grabber *metrics.MetricsGrabber BeforeEach(func() { var err error c = f.ClientSet framework.ExpectNoError(err) grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true) framework.ExpectNoError(err) }) It("should grab all metrics from API server.", func() { By("Connecting to /metrics endpoint") response, err := grabber.GrabFromApiServer() framework.ExpectNoError(err) Expect(response).NotTo(BeEmpty()) }) It("should grab all metrics from a Kubelet.", func() { By("Proxying to Node through the API server") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) Expect(nodes.Items).NotTo(BeEmpty()) response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
// afterEach deletes the namespace, after reading its events. func (f *Framework) afterEach() { RemoveCleanupAction(f.cleanupHandle) // DeleteNamespace at the very end in defer, to avoid any // expectation failures preventing deleting the namespace. defer func() { if testContext.DeleteNamespace { for _, ns := range f.namespacesToDelete { By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) timeout := 5 * time.Minute if f.NamespaceDeletionTimeout != 0 { timeout = f.NamespaceDeletionTimeout } if err := deleteNS(f.Client, ns.Name, timeout); err != nil { if !apierrs.IsNotFound(err) { Failf("Couldn't delete ns %q: %s", ns.Name, err) } else { Logf("Namespace %v was already deleted", ns.Name) } } } f.namespacesToDelete = nil } else { Logf("Found DeleteNamespace=false, skipping namespace deletion!") } // Paranoia-- prevent reuse! f.Namespace = nil f.Client = nil }() // Print events if the test failed. if CurrentGinkgoTestDescription().Failed { dumpAllNamespaceInfo(f.Client, f.Namespace.Name) } summaries := make([]TestDataSummary, 0) if testContext.GatherKubeSystemResourceUsageData && f.gatherer != nil { By("Collecting resource usage data") summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99}, f.addonResourceConstraints)) } if testContext.GatherLogsSizes { By("Gathering log sizes data") close(f.logsSizeCloseChannel) f.logsSizeWaitGroup.Wait() summaries = append(summaries, f.logsSizeVerifier.GetSummary()) } if testContext.GatherMetricsAfterTest { By("Gathering metrics") // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered. grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true) if err != nil { Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { received, err := grabber.Grab(nil) if err != nil { Logf("MetricsGrabber failed grab metrics. 
Skipping metrics gathering.") } else { summaries = append(summaries, (*MetricsForE2E)(&received)) } } } outputTypes := strings.Split(testContext.OutputPrintType, ",") for _, printType := range outputTypes { switch printType { case "hr": for i := range summaries { Logf(summaries[i].PrintHumanReadable()) } case "json": for i := range summaries { typeName := reflect.TypeOf(summaries[i]).String() Logf("%v JSON\n%v", typeName[strings.LastIndex(typeName, ".")+1:len(typeName)], summaries[i].PrintJSON()) Logf("Finished") } default: Logf("Unknown output type: %v. Skipping.", printType) } } // Check whether all nodes are ready after the test. // This is explicitly done at the very end of the test, to avoid // e.g. not removing namespace in case of this failure. if err := allNodesReady(f.Client, time.Minute); err != nil { Failf("All nodes should be ready after test, %v", err) } }
// AfterEach deletes the namespace, after reading its events.
// It additionally: dumps namespace/pod information when the test failed
// (and DumpLogsOnFailure is set), tears down federation clusters when the
// framework is federated, gathers the summaries enabled by TestContext
// flags, prints them in the requested output formats, and finally checks
// that all nodes are still ready.
func (f *Framework) AfterEach() {
	RemoveCleanupAction(f.cleanupHandle)

	// DeleteNamespace at the very end in defer, to avoid any
	// expectation failures preventing deleting the namespace.
	defer func() {
		if TestContext.DeleteNamespace {
			for _, ns := range f.namespacesToDelete {
				By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
				// Per-framework override of the default 5 minute deletion timeout.
				timeout := 5 * time.Minute
				if f.NamespaceDeletionTimeout != 0 {
					timeout = f.NamespaceDeletionTimeout
				}
				if err := deleteNS(f.Client, ns.Name, timeout); err != nil {
					// An already-deleted namespace is not a failure.
					if !apierrs.IsNotFound(err) {
						Failf("Couldn't delete ns %q: %s", ns.Name, err)
					} else {
						Logf("Namespace %v was already deleted", ns.Name)
					}
				}
			}
			f.namespacesToDelete = nil
		} else {
			Logf("Found DeleteNamespace=false, skipping namespace deletion!")
		}
		// Paranoia-- prevent reuse!
		f.Namespace = nil
		f.Client = nil
	}()

	// Federated frameworks also delete the registered clusters; deferred so
	// it runs even if a step below fails (and before the namespace defer).
	if f.federated {
		defer func() {
			if f.FederationClient == nil {
				Logf("Warning: framework is marked federated, but has no federation client")
				return
			}
			if f.FederationClientset == nil {
				Logf("Warning: framework is marked federated, but has no federation clientset")
				return
			}
			if err := f.FederationClient.Clusters().DeleteCollection(nil, api.ListOptions{}); err != nil {
				Logf("Error: failed to delete Clusters: %+v", err)
			}
		}()
	}

	// Print events if the test failed.
	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
		DumpAllNamespaceInfo(f.Client, f.Namespace.Name)
		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
		LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")
		if f.federated {
			// Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
			LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"})
			// Print logs of kube-dns pod
			LogPodsWithLabels(f.Client, "kube-system", map[string]string{"k8s-app": "kube-dns"})
		}
	}

	summaries := make([]TestDataSummary, 0)
	// GatherKubeSystemResourceUsageData is a string flag here; anything
	// other than "false"/"none" enables gathering.
	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
		By("Collecting resource usage data")
		summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints))
	}
	if TestContext.GatherLogsSizes {
		By("Gathering log sizes data")
		// Closing the channel stops the log-size workers; wait for them
		// before reading the summary.
		close(f.logsSizeCloseChannel)
		f.logsSizeWaitGroup.Wait()
		summaries = append(summaries, f.logsSizeVerifier.GetSummary())
	}
	if TestContext.GatherMetricsAfterTest {
		By("Gathering metrics")
		// TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered.
		grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true)
		if err != nil {
			Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
		} else {
			received, err := grabber.Grab()
			if err != nil {
				Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
			} else {
				summaries = append(summaries, (*MetricsForE2E)(&received))
			}
		}
	}

	// Print every gathered summary once per requested output type.
	outputTypes := strings.Split(TestContext.OutputPrintType, ",")
	for _, printType := range outputTypes {
		switch printType {
		case "hr":
			for i := range summaries {
				Logf(summaries[i].PrintHumanReadable())
			}
		case "json":
			for i := range summaries {
				// Strip the package qualifier from the reflected type name.
				typeName := reflect.TypeOf(summaries[i]).String()
				Logf("%v JSON\n%v", typeName[strings.LastIndex(typeName, ".")+1:], summaries[i].PrintJSON())
				Logf("Finished")
			}
		default:
			Logf("Unknown output type: %v. Skipping.", printType)
		}
	}

	// Check whether all nodes are ready after the test.
	// This is explicitly done at the very end of the test, to avoid
	// e.g. not removing namespace in case of this failure.
	if err := AllNodesReady(f.Client, time.Minute); err != nil {
		Failf("All nodes should be ready after test, %v", err)
	}
}
// afterEach deletes the namespace, after reading its events. func (f *Framework) afterEach() { // Print events if the test failed. if CurrentGinkgoTestDescription().Failed { By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name)) events, err := f.Client.Events(f.Namespace.Name).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, e := range events.Items { Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message) } // Note that we don't wait for any cleanup to propagate, which means // that if you delete a bunch of pods right before ending your test, // you may or may not see the killing/deletion/cleanup events. dumpAllPodInfo(f.Client) dumpAllNodeInfo(f.Client) } // Check whether all nodes are ready after the test. if err := allNodesReady(f.Client, time.Minute); err != nil { Failf("All nodes should be ready after test, %v", err) } summaries := make([]TestDataSummary, 0) if testContext.GatherKubeSystemResourceUsageData { summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99}, f.addonResourceConstraints)) } if testContext.GatherLogsSizes { close(f.logsSizeCloseChannel) f.logsSizeWaitGroup.Wait() summaries = append(summaries, f.logsSizeVerifier.GetSummary()) } if testContext.GatherMetricsAfterTest { // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered. grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true) if err != nil { Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { received, err := grabber.Grab(nil) if err != nil { Logf("MetricsGrabber failed grab metrics. 
Skipping metrics gathering.") } else { summaries = append(summaries, (*MetricsForE2E)(&received)) } } } if testContext.DeleteNamespace { By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name)) timeout := 5 * time.Minute if f.NamespaceDeletionTimeout != 0 { timeout = f.NamespaceDeletionTimeout } if err := deleteNS(f.Client, f.Namespace.Name, timeout); err != nil { Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err) } } else { Logf("Found DeleteNamespace=false, skipping namespace deletion!") } outputTypes := strings.Split(testContext.OutputPrintType, ",") for _, printType := range outputTypes { switch printType { case "hr": for i := range summaries { Logf(summaries[i].PrintHumanReadable()) } case "json": for i := range summaries { typeName := reflect.TypeOf(summaries[i]).String() Logf("%v JSON\n%v", typeName[strings.LastIndex(typeName, ".")+1:len(typeName)], summaries[i].PrintJSON()) Logf("Finished") } default: Logf("Unknown ouptut type: %v. Skipping.", printType) } } // Paranoia-- prevent reuse! f.Namespace = nil f.Client = nil }
// AfterEach deletes the namespace, after reading its events. func (f *Framework) AfterEach() { RemoveCleanupAction(f.cleanupHandle) // DeleteNamespace at the very end in defer, to avoid any // expectation failures preventing deleting the namespace. defer func() { nsDeletionErrors := map[string]error{} // Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result // if delete-namespace set to false, namespace will always be preserved. // if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed. if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !CurrentGinkgoTestDescription().Failed) { for _, ns := range f.namespacesToDelete { By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) timeout := 5 * time.Minute if f.NamespaceDeletionTimeout != 0 { timeout = f.NamespaceDeletionTimeout } if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil { if !apierrs.IsNotFound(err) { nsDeletionErrors[ns.Name] = err } else { Logf("Namespace %v was already deleted", ns.Name) } } } // Delete the federation namespace. f.deleteFederationNs() } else { if TestContext.DeleteNamespace { Logf("Found DeleteNamespace=false, skipping namespace deletion!") } else if TestContext.DeleteNamespaceOnFailure { Logf("Found DeleteNamespaceOnFailure=false, skipping namespace deletion!") } } // Paranoia-- prevent reuse! f.Namespace = nil f.FederationNamespace = nil f.ClientSet = nil f.namespacesToDelete = nil // if we had errors deleting, report them now. 
if len(nsDeletionErrors) != 0 { messages := []string{} for namespaceKey, namespaceErr := range nsDeletionErrors { messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr)) } Failf(strings.Join(messages, ",")) } }() if f.federated { defer func() { if f.FederationClientset_1_5 == nil { Logf("Warning: framework is marked federated, but has no federation 1.4 clientset") return } if err := f.FederationClientset_1_5.Federation().Clusters().DeleteCollection(nil, v1.ListOptions{}); err != nil { Logf("Error: failed to delete Clusters: %+v", err) } }() } // Print events if the test failed. if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure { // Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client. DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) By(fmt.Sprintf("Dumping a list of prepulled images on each node")) LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf) if f.federated { // Dump federation events in federation namespace. 
DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) { return f.FederationClientset_1_5.Core().Events(ns).List(opts) }, f.FederationNamespace.Name) // Print logs of federation control plane pods (federation-apiserver and federation-controller-manager) LogPodsWithLabels(f.ClientSet, "federation", map[string]string{"app": "federated-cluster"}, Logf) // Print logs of kube-dns pod LogPodsWithLabels(f.ClientSet, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf) } } summaries := make([]TestDataSummary, 0) if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil { By("Collecting resource usage data") summary, resourceViolationError := f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints) defer ExpectNoError(resourceViolationError) summaries = append(summaries, summary) } if TestContext.GatherLogsSizes { By("Gathering log sizes data") close(f.logsSizeCloseChannel) f.logsSizeWaitGroup.Wait() summaries = append(summaries, f.logsSizeVerifier.GetSummary()) } if TestContext.GatherMetricsAfterTest { By("Gathering metrics") // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered. grabber, err := metrics.NewMetricsGrabber(f.ClientSet, true, false, false, true) if err != nil { Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { received, err := grabber.Grab() if err != nil { Logf("MetricsGrabber failed grab metrics. 
Skipping metrics gathering.") } else { summaries = append(summaries, (*MetricsForE2E)(&received)) } } } outputTypes := strings.Split(TestContext.OutputPrintType, ",") for _, printType := range outputTypes { switch printType { case "hr": for i := range summaries { Logf(summaries[i].PrintHumanReadable()) } case "json": for i := range summaries { typeName := reflect.TypeOf(summaries[i]).String() Logf("%v JSON\n%v", typeName[strings.LastIndex(typeName, ".")+1:], summaries[i].PrintJSON()) Logf("Finished") } default: Logf("Unknown output type: %v. Skipping.", printType) } } // Check whether all nodes are ready after the test. // This is explicitly done at the very end of the test, to avoid // e.g. not removing namespace in case of this failure. if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil { Failf("All nodes should be ready after test, %v", err) } }