Example #1
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting node addresses")
	nodeList := framework.ListSchedulableNodesOrDie(config.f.Client)
	config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), "At least two nodes are required to have an ExternalIP or LegacyHostIP")
	config.nodes = nodeList.Items

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
}
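
For context, every example on this page calls framework.ListSchedulableNodesOrDie. The sketch below shows what such a helper might look like; it is an assumption inferred from how the call is used here (list all nodes, drop unschedulable ones, fail the test on error), not the framework's actual source.

func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
	// Assumed behavior: select only nodes that are not marked unschedulable
	// and abort the test immediately if the API call fails.
	nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
		"spec.unschedulable": "false",
	}.AsSelector()})
	ExpectNoError(err)
	return nodes
}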
Example #2
func testReboot(c *client.Client, rebootCmd string) {
	// Get all nodes, and kick off the test on each.
	nodelist := framework.ListSchedulableNodesOrDie(c)
	result := make([]bool, len(nodelist.Items))
	wg := sync.WaitGroup{}
	wg.Add(len(nodelist.Items))

	for ix := range nodelist.Items {
		go func(ix int) {
			defer wg.Done()
			n := nodelist.Items[ix]
			result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
		}(ix)
	}

	// Wait for all to finish and check the final result.
	wg.Wait()

	// Evaluate the results only after every goroutine has finished, so the
	// failed flag is not written concurrently by multiple goroutines.
	failed := false
	for ix := range nodelist.Items {
		if !result[ix] {
			failed = true
		}
	}

	if failed {
		for ix := range nodelist.Items {
			n := nodelist.Items[ix]
			if !result[ix] {
				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
			}
		}
		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
	}
}
Example #3
func pickNode(c *client.Client) (string, error) {
	// TODO: investigate why it doesn't work on master Node.
	nodes := framework.ListSchedulableNodesOrDie(c)
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no nodes exist, can't test node proxy")
	}
	return nodes.Items[0].Name, nil
}
Example #4
func clearDaemonSetNodeLabels(c *client.Client) error {
	nodeList := framework.ListSchedulableNodesOrDie(c)
	for _, node := range nodeList.Items {
		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
		if err != nil {
			return err
		}
	}
	return nil
}
Example #5
func checkNodesVersions(c *client.Client, want string) error {
	l := framework.ListSchedulableNodesOrDie(c)
	for _, n := range l.Items {
		// We do prefix trimming and then matching because:
		// want   looks like:  0.19.3-815-g50e67d4
		// kv/kpv look like: v0.19.3-815-g50e67d4034e858-dirty
		kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
			strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
		if !strings.HasPrefix(kv, want) {
			return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
				n.ObjectMeta.Name, kv, want)
		}
		if !strings.HasPrefix(kpv, want) {
			return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
				n.ObjectMeta.Name, kpv, want)
		}
	}
	return nil
}
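
As a standalone illustration of the trim-then-prefix comparison above (the version strings come from the comment in checkNodesVersions; this snippet is not part of the test suite):

package main

import (
	"fmt"
	"strings"
)

func main() {
	want := "0.19.3-815-g50e67d4"                   // expected version, without a leading "v"
	reported := "v0.19.3-815-g50e67d4034e858-dirty" // version as reported by the kubelet
	trimmed := strings.TrimPrefix(reported, "v")
	// Prints true: after trimming the "v", the reported version starts with the wanted one.
	fmt.Println(strings.HasPrefix(trimmed, want))
}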
Example #6
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
	nodes := framework.ListSchedulableNodesOrDie(config.f.Client)

	// create pods, one for each node
	createdPods := make([]*api.Pod, 0, len(nodes.Items))
	for i, n := range nodes.Items {
		podName := fmt.Sprintf("%s-%d", podName, i)
		pod := config.createNetShellPodSpec(podName, n.Name)
		pod.ObjectMeta.Labels = selector
		createdPod := config.createPod(pod)
		createdPods = append(createdPods, createdPod)
	}

	// wait until all of them are up
	runningPods := make([]*api.Pod, 0, len(nodes.Items))
	for _, p := range createdPods {
		framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
		rp, err := config.getPodClient().Get(p.Name)
		framework.ExpectNoError(err)
		runningPods = append(runningPods, rp)
	}

	return runningPods
}
Example #7
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())

		By("Initially, daemon pods should not be running on any nodes.")
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

		By("Change label of node, check that daemon pod is launched.")
		nodeList := framework.ListSchedulableNodesOrDie(f.Client)
		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
		Expect(len(daemonSetLabels)).To(Equal(1))
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

		By("remove the node selector and wait for daemons to be unscheduled")
		_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
		Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
		Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
			NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

		By("We should now be able to delete the daemon set.")
Example #8
		framework.ExpectNoError(err)
	})

	It("should grab all metrics from API server.", func() {
		By("Connecting to /metrics endpoint")
		unknownMetrics := sets.NewString()
		response, err := grabber.GrabFromApiServer(unknownMetrics)
		framework.ExpectNoError(err)
		Expect(unknownMetrics).To(BeEmpty())

		checkMetrics(metrics.Metrics(response), metrics.KnownApiServerMetrics)
	})

	It("should grab all metrics from a Kubelet.", func() {
		By("Proxying to Node through the API server")
		nodes := framework.ListSchedulableNodesOrDie(c)
		Expect(nodes.Items).NotTo(BeEmpty())
		response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
		framework.ExpectNoError(err)
		checkNecessaryMetrics(metrics.Metrics(response), metrics.NecessaryKubeletMetrics)
	})

	It("should grab all metrics from a Scheduler.", func() {
		By("Proxying to Pod through the API server")
		// Check if master Node is registered
		nodes, err := c.Nodes().List(api.ListOptions{})
		framework.ExpectNoError(err)

		var masterRegistered = false
		for _, node := range nodes.Items {
			if strings.HasSuffix(node.Name, "master") {
Example #9
func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int, maxBandwidthBits int64) {
	// TODO: Make this a function parameter, once we distribute iperf endpoints, possibly via session affinity.
	numServer := 1

	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
		nodes := framework.ListSchedulableNodesOrDie(f.Client)
		totalPods := len(nodes.Items)
		// For a single service, we expect the total bandwidth to be divided among all the nodes in the network. Very crude estimate.
		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
		Expect(totalPods).NotTo(Equal(0))
		appName := "iperf-e2e"
		err, _ := f.CreateServiceForSimpleAppWithPods(
			8001,
			8002,
			appName,
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{{
						Name:  "iperf-server",
						Image: "gcr.io/google_containers/iperf:e2e",
						Args: []string{
							"/bin/sh",
							"-c",
							"/usr/local/bin/iperf -s -p 8001 ",
						},
						Ports: []api.ContainerPort{{ContainerPort: 8001}},
					}},
					NodeName:      n.Name,
					RestartPolicy: api.RestartPolicyOnFailure,
				}
			},
			// this will be used to generate the -service name which all iperf clients point at.
			numServer, // Generally should be 1 server unless we do affinity or use a version of iperf that supports LB
			true,      // Make sure we wait, otherwise all the clients will die and need to restart.
		)

		if err != nil {
			framework.Failf("Fatal error waiting for iperf server endpoint : %v", err)
		}

		iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
			"iperf-e2e-cli",
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "iperf-client",
							Image: "gcr.io/google_containers/iperf:e2e",
							Args: []string{
								"/bin/sh",
								"-c",
								"/usr/local/bin/iperf -c service-for-" + appName + " -p 8002 --reportstyle C && sleep 5",
							},
						},
					},
					RestartPolicy: api.RestartPolicyOnFailure, // let them successfully die.
				}
			},
			numClient,
		)

		framework.Logf("Reading all perf results to stdout.")
		framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")

		// Calculate expected number of clients based on total nodes.
		expectedCli := func() int {
			nodes, err := framework.GetReadyNodes(f)
			framework.ExpectNoError(err)
			return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
		}()

		// Extra 1/10 second per client.
		iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
		iperfResults := &IPerfResults{}

		iperfClusterVerification := f.NewClusterVerification(
			framework.PodStateVerification{
				Selectors:   iperfClientPodLabels,
				ValidPhases: []api.PodPhase{api.PodSucceeded},
			},
		)

		pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
		if err2 != nil {
			framework.Failf("Error in wait...")
		} else if len(pods) < expectedCli {
			framework.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
		} else {
			// ForEach builds up a collection of IPerfRecords
			iperfClusterVerification.ForEach(
				func(p api.Pod) {
					resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
					if err == nil {
						framework.Logf(resultS)
						iperfResults.Add(NewIPerf(resultS))
					} else {
						framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
					}
				})
		}
		fmt.Println("[begin] Node,Bandwith CSV")
		fmt.Println(iperfResults.ToTSV())
		fmt.Println("[end] Node,Bandwith CSV")

		for ipClient, bandwidth := range iperfResults.BandwidthMap {
			framework.Logf("%v had bandwidth %v.  Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
		}
	})
}
Example #10
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging.
func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
	// graceTime is how long to keep retrying requests for status information.
	const graceTime = 5 * time.Minute
	// ingestionTimeout is how long to keep retrying to wait for all the
	// logs to be ingested.
	const ingestionTimeout = 10 * time.Minute

	// Check for the existence of the Elasticsearch service.
	By("Checking the Elasticsearch service exists.")
	s := f.Client.Services(api.NamespaceSystem)
	// Make a few attempts to connect. This makes the test robust against
	// being run as the first e2e test just after the e2e cluster has been created.
	var err error
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		if _, err = s.Get("elasticsearch-logging"); err == nil {
			break
		}
		framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
	}
	Expect(err).NotTo(HaveOccurred())

	// Wait for the Elasticsearch pods to enter the running state.
	By("Checking to make sure the Elasticsearch pods are running")
	label := labels.SelectorFromSet(labels.Set(map[string]string{k8sAppKey: esValue}))
	options := api.ListOptions{LabelSelector: label}
	pods, err := f.Client.Pods(api.NamespaceSystem).List(options)
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Checking to make sure we are talking to an Elasticsearch service.")
	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
	var statusCode float64
	var esResponse map[string]interface{}
	err = nil
	var body []byte
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
		if errProxy != nil {
			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
			continue
		}
		// Query against the root URL for Elasticsearch.
		body, err = proxyRequest.Namespace(api.NamespaceSystem).
			Name("elasticsearch-logging").
			DoRaw()
		if err != nil {
			framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
			continue
		}
		esResponse, err = bodyToJSON(body)
		if err != nil {
			framework.Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
			continue
		}
		statusIntf, ok := esResponse["status"]
		if !ok {
			framework.Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
			continue
		}
		statusCode, ok = statusIntf.(float64)
		if !ok {
			// Assume this is a string returning Failure. Retry.
			framework.Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
			continue
		}
		if int(statusCode) != 200 {
			framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
			continue
		}
		break
	}
	Expect(err).NotTo(HaveOccurred())
	if int(statusCode) != 200 {
		framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
	}
	// Check to see if the response has a cluster_name field.
	clusterName, ok := esResponse["cluster_name"]
	if !ok {
		framework.Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
	}
	if clusterName != "kubernetes-logging" {
		framework.Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
	}

	// Now assume we really are talking to an Elasticsearch instance.
	// Check the cluster health.
	By("Checking health of Elasticsearch service.")
	healthy := false
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
		if errProxy != nil {
			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
			continue
		}
		body, err = proxyRequest.Namespace(api.NamespaceSystem).
			Name("elasticsearch-logging").
			Suffix("_cluster/health").
			Param("level", "indices").
			DoRaw()
		if err != nil {
			continue
		}
		health, err := bodyToJSON(body)
		if err != nil {
			framework.Logf("Bad json response from elasticsearch: %v", err)
			continue
		}
		statusIntf, ok := health["status"]
		if !ok {
			framework.Logf("No status field found in cluster health response: %v", health)
			continue
		}
		status := statusIntf.(string)
		if status != "green" && status != "yellow" {
			framework.Logf("Cluster health has bad status: %v", health)
			continue
		}
		if err == nil && ok {
			healthy = true
			break
		}
	}
	if !healthy {
		framework.Failf("After %v elasticsearch cluster is not healthy", graceTime)
	}

	// Obtain a list of nodes so we can place one synthetic logger on each node.
	nodes := framework.ListSchedulableNodesOrDie(f.Client)
	nodeCount := len(nodes.Items)
	if nodeCount == 0 {
		framework.Failf("Failed to find any nodes")
	}
	framework.Logf("Found %d nodes.", len(nodes.Items))

	// Filter out unhealthy nodes.
	// Previous tests may have caused failures of some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	framework.FilterNodes(nodes, func(node api.Node) bool {
		return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
	})
	if len(nodes.Items) < 2 {
		framework.Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
	}
	framework.Logf("Found %d healthy nodes.", len(nodes.Items))

	// Wait for the Fluentd pods to enter the running state.
	By("Checking to make sure the Fluentd pod are running on each healthy node")
	label = labels.SelectorFromSet(labels.Set(map[string]string{k8sAppKey: fluentdValue}))
	options = api.ListOptions{LabelSelector: label}
	fluentdPods, err := f.Client.Pods(api.NamespaceSystem).List(options)
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range fluentdPods.Items {
		if nodeInNodeList(pod.Spec.NodeName, nodes) {
			err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
			Expect(err).NotTo(HaveOccurred())
		}
	}

	// Check if each healthy node has fluentd running on it
	for _, node := range nodes.Items {
		exists := false
		for _, pod := range fluentdPods.Items {
			if pod.Spec.NodeName == node.Name {
				exists = true
				break
			}
		}
		if !exists {
			framework.Failf("Node %v does not have fluentd pod running on it.", node.Name)
		}
	}

	// Create a unique root name for the resources in this test to permit
	// parallel executions of this test.
	// Use a unique namespace for the resources created in this test.
	ns := f.Namespace.Name
	name := "synthlogger"
	// Form a unique name to taint log lines to be collected.
	// Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
	taintName := strings.Replace(ns+name, "-", "_", -1)
	framework.Logf("Tainting log lines with %v", taintName)
	// podNames records the names of the synthetic logging pods that are created in the
	// loop below.
	var podNames []string
	// countTo is the number of log lines emitted (and checked) for each synthetic logging pod.
	const countTo = 100
	// Instantiate a synthetic logger pod on each node.
	for i, node := range nodes.Items {
		podName := fmt.Sprintf("%s-%d", name, i)
		_, err := f.Client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "synth-logger",
						Image: "gcr.io/google_containers/ubuntu:14.04",
						// notice: the subshell syntax is escaped with `$$`
						Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$$(($i+1)); done", countTo, i, taintName, podName)},
					},
				},
				NodeName:      node.Name,
				RestartPolicy: api.RestartPolicyNever,
			},
		})
		Expect(err).NotTo(HaveOccurred())
		podNames = append(podNames, podName)
	}

	// Cleanup the pods when we are done.
	defer func() {
		for _, pod := range podNames {
			if err = f.Client.Pods(ns).Delete(pod, nil); err != nil {
				framework.Logf("Failed to delete pod %s: %v", pod, err)
			}
		}
	}()

	// Wait for the synthetic logging pods to finish.
	By("Waiting for the pods to succeed.")
	for _, pod := range podNames {
		err = framework.WaitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
		Expect(err).NotTo(HaveOccurred())
	}

	// Wait a bit for the log information to make it into Elasticsearch.
	time.Sleep(30 * time.Second)

	// Make several attempts to observe the logs ingested into Elasticsearch.
	By("Checking all the log lines were ingested into Elasticsearch")
	totalMissing := 0
	expected := nodeCount * countTo
	missingPerNode := []int{}
	for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(25 * time.Second) {

		// Debugging code to report the status of the elasticsearch logging endpoints.
		selector := labels.Set{k8sAppKey: esValue}.AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		esPods, err := f.Client.Pods(api.NamespaceSystem).List(options)
		if err != nil {
			framework.Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
			continue
		} else {
			for i, pod := range esPods.Items {
				framework.Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase,
					pod.Status.Conditions)
			}
		}

		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
		if errProxy != nil {
			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
			continue
		}
		// Ask Elasticsearch to return all the log lines that were tagged with the underscore
		// version of the name. Ask for twice as many log lines as we expect to check for
		// duplication bugs.
		body, err = proxyRequest.Namespace(api.NamespaceSystem).
			Name("elasticsearch-logging").
			Suffix("_search").
			Param("q", fmt.Sprintf("log:%s", taintName)).
			Param("size", strconv.Itoa(2*expected)).
			DoRaw()
		if err != nil {
			framework.Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err)
			continue
		}

		response, err := bodyToJSON(body)
		if err != nil {
			framework.Logf("After %v failed to unmarshal response: %v", time.Since(start), err)
			framework.Logf("Body: %s", string(body))
			continue
		}
		hits, ok := response["hits"].(map[string]interface{})
		if !ok {
			framework.Logf("response[hits] not of the expected type: %T", response["hits"])
			continue
		}
		totalF, ok := hits["total"].(float64)
		if !ok {
			framework.Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"])
			continue
		}
		total := int(totalF)
		if total != expected {
			framework.Logf("After %v expecting to find %d log lines but saw %d", time.Since(start), expected, total)
		}
		h, ok := hits["hits"].([]interface{})
		if !ok {
			framework.Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"])
			continue
		}
		// Initialize data-structure for observing counts.
		observed := make([][]int, nodeCount)
		for i := range observed {
			observed[i] = make([]int, countTo)
		}
		// Iterate over the hits and populate the observed array.
		for _, e := range h {
			l, ok := e.(map[string]interface{})
			if !ok {
				framework.Logf("element of hit not of expected type: %T", e)
				continue
			}
			source, ok := l["_source"].(map[string]interface{})
			if !ok {
				framework.Logf("_source not of the expected type: %T", l["_source"])
				continue
			}
			msg, ok := source["log"].(string)
			if !ok {
				framework.Logf("log not of the expected type: %T", source["log"])
				continue
			}
			words := strings.Split(msg, " ")
			if len(words) != 4 {
				framework.Logf("Malformed log line: %s", msg)
				continue
			}
			n, err := strconv.ParseUint(words[0], 10, 0)
			if err != nil {
				framework.Logf("Expecting numer of node as first field of %s", msg)
				continue
			}
			if n < 0 || int(n) >= nodeCount {
				framework.Logf("Node count index out of range: %d", nodeCount)
				continue
			}
			index, err := strconv.ParseUint(words[2], 10, 0)
			if err != nil {
				framework.Logf("Expecting number as third field of %s", msg)
				continue
			}
			if index < 0 || index >= countTo {
				framework.Logf("Index value out of range: %d", index)
				continue
			}
			if words[1] != taintName {
				framework.Logf("Elasticsearch query return unexpected log line: %s", msg)
				continue
			}
			// Record the observation of a log line from node n at the given index.
			observed[n][index]++
		}
		// Make sure we correctly observed the expected log lines from each node.
		totalMissing = 0
		missingPerNode = make([]int, nodeCount)
		incorrectCount := false
		for n := range observed {
			for i, c := range observed[n] {
				if c == 0 {
					totalMissing++
					missingPerNode[n]++
				}
				if c < 0 || c > 1 {
					framework.Logf("Got incorrect count for node %d index %d: %d", n, i, c)
					incorrectCount = true
				}
			}
		}
		if incorrectCount {
			framework.Logf("After %v es still return duplicated log lines", time.Since(start))
			continue
		}
		if totalMissing != 0 {
			framework.Logf("After %v still missing %d log lines", time.Since(start), totalMissing)
			continue
		}
		framework.Logf("After %s found all %d log lines", time.Since(start), expected)
		return
	}
	for n := range missingPerNode {
		if missingPerNode[n] > 0 {
			framework.Logf("Node %d %s is missing %d logs", n, nodes.Items[n].Name, missingPerNode[n])
			opts := &api.PodLogOptions{}
			body, err = f.Client.Pods(ns).GetLogs(podNames[n], opts).DoRaw()
			if err != nil {
				framework.Logf("Cannot get logs from pod %v", podNames[n])
				continue
			}
			framework.Logf("Pod %s has the following logs: %s", podNames[n], body)

			for _, pod := range fluentdPods.Items {
				if pod.Spec.NodeName == nodes.Items[n].Name {
					body, err = f.Client.Pods(api.NamespaceSystem).GetLogs(pod.Name, opts).DoRaw()
					if err != nil {
						framework.Logf("Cannot get logs from pod %v", pod.Name)
						break
					}
					framework.Logf("Fluentd Pod %s on node %s has the following logs: %s", pod.Name, nodes.Items[n].Name, body)
					break
				}
			}
		}
	}
	framework.Failf("Failed to find all %d log lines", expected)
}