Example #1
// RunLogPodsWithSleepOf creates a pod on every node; each pod logs continuously, pausing with "sleep" between writes. It then
// verifies that the log string was produced in every pod at least once. The final argument is the timeout for verifying that all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {

	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	totalPods := len(nodes.Items)
	Expect(totalPods).NotTo(Equal(0))

	kilobyte := strings.Repeat("logs-123", 128) // "logs-123" is 8 bytes; 8*128 = 1024 bytes = 1KB of text.

	appName := "logging-soak" + podname
	podLabels := f.CreatePodsPerNodeForSimpleApp(
		appName,
		func(n v1.Node) v1.PodSpec {
			return v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "logging-soak",
					Image: "gcr.io/google_containers/busybox:1.24",
					Args: []string{
						"/bin/sh",
						"-c",
						fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
					},
				}},
				NodeName:      n.Name,
				RestartPolicy: v1.RestartPolicyAlways,
			}
		},
		totalPods,
	)

	logSoakVerification := f.NewClusterVerification(
		f.Namespace,
		framework.PodStateVerification{
			Selectors:   podLabels,
			ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
			// we don't validate total log data, since there is no guarantee all logs will be stored forever.
			// instead, we just validate that some logs are being created in stdout.
			Verify: func(p v1.Pod) (bool, error) {
				s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
				return s != "", err
			},
		},
	)

	largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
	pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)

	if err != nil {
		framework.Failf("Error in wait... %v", err)
	} else if len(pods) < totalPods {
		framework.Failf("Only got %v out of %v", len(pods), totalPods)
	}
}
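
A minimal sketch of how RunLogPodsWithSleepOf might be registered in a Ginkgo suite; the Describe text, pod name, log rate, and timeout below are illustrative assumptions rather than values taken from the example above:

var _ = framework.KubeDescribe("Logging soak [Performance] [Slow]", func() {
	f := framework.NewDefaultFramework("logging-soak")

	It("should produce logs from a pod on every node", func() {
		// Hypothetical values: emit 1KB per second and allow up to five minutes for every pod to show output.
		RunLogPodsWithSleepOf(f, 1*time.Second, "logsoak-basic", 5*time.Minute)
	})
})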
Example #2
func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int, maxBandwidthBits int64) {
	// TODO: Make this a function parameter, once we distribute iperf endpoints, possibly via session affinity.
	numServer := 1

	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint with %v servers (maximum of %v clients)", numServer, numClient), func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		totalPods := len(nodes.Items)
		// for a single service, we expect the total bandwidth to be divided across all the nodes on the network.  Very crude estimate.
		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
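		// e.g. with maxBandwidthBits = 10e9 (10 Gbit/s) and 4 schedulable nodes, each client is expected to see ~2.5 Gbit/s.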
		Expect(totalPods).NotTo(Equal(0))
		appName := "iperf-e2e"
		err, _ := f.CreateServiceForSimpleAppWithPods(
			8001,
			8002,
			appName,
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{{
						Name:  "iperf-server",
						Image: "gcr.io/google_containers/iperf:e2e",
						Args: []string{
							"/bin/sh",
							"-c",
							"/usr/local/bin/iperf -s -p 8001 ",
						},
						Ports: []api.ContainerPort{{ContainerPort: 8001}},
					}},
					NodeName:      n.Name,
					RestartPolicy: api.RestartPolicyOnFailure,
				}
			},
			// this will be used to generate the service name which all iperf clients point at.
			numServer, // Generally should be 1 server unless we do affinity or use a version of iperf that supports LB
			true,      // Make sure we wait, otherwise all the clients will die and need to restart.
		)

		if err != nil {
			framework.Failf("Fatal error waiting for iperf server endpoint : %v", err)
		}

		iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
			"iperf-e2e-cli",
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "iperf-client",
							Image: "gcr.io/google_containers/iperf:e2e",
							Args: []string{
								"/bin/sh",
								"-c",
								"/usr/local/bin/iperf -c service-for-" + appName + " -p 8002 --reportstyle C && sleep 5",
							},
						},
					},
					RestartPolicy: api.RestartPolicyOnFailure, // let them successfully die.
				}
			},
			numClient,
		)

		framework.Logf("Reading all perf results to stdout.")
		framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")

		// Calculate expected number of clients based on total nodes.
		expectedCli := func() int {
			nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
			return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
		}()

		// Extra 1/10 second per client.
		iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
		iperfResults := &IPerfResults{}

		iperfClusterVerification := f.NewClusterVerification(
			framework.PodStateVerification{
				Selectors:   iperfClientPodLabels,
				ValidPhases: []api.PodPhase{api.PodSucceeded},
			},
		)

		pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
		if err2 != nil {
			framework.Failf("Error in wait...")
		} else if len(pods) < expectedCli {
			framework.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
		} else {
			// ForEach builds up a collection of IPerfRecords
			iperfClusterVerification.ForEach(
				func(p api.Pod) {
					resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
					if err == nil {
						framework.Logf("%s", resultS)
						iperfResults.Add(NewIPerf(resultS))
					} else {
						framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
					}
				})
		}
		fmt.Println("[begin] Node,Bandwith CSV")
		fmt.Println(iperfResults.ToTSV())
		fmt.Println("[end] Node,Bandwith CSV")

		for ipClient, bandwidth := range iperfResults.BandwidthMap {
			framework.Logf("%v had bandwidth %v.  Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
		}
	})
}
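
A sketch of how runClientServerBandwidthMeasurement could be wired up: since the function registers its own It block, it is called directly from a Describe body. The suite name, framework name, client count, and bandwidth ceiling are assumptions for illustration:

var _ = framework.KubeDescribe("Networking IPerf [Slow] [Feature:Networking-Performance]", func() {
	f := framework.NewDefaultFramework("network-perf")

	// Hypothetical values: a handful of clients, assuming a ~10 Gbit/s ceiling shared among the nodes.
	numClient := 4
	maxBandwidthBits := int64(10e9)
	runClientServerBandwidthMeasurement(f, numClient, maxBandwidthBits)
})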