func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int, maxBandwidthBits int64) {
	// TODO: Make this a function parameter, once we distribute iperf endpoints, possibly via session affinity.
	numServer := 1

	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		totalPods := len(nodes.Items)
		// For a single service, we expect the clients to divide the available bandwidth among themselves. Very crude estimate.
		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
		Expect(totalPods).NotTo(Equal(0))

		appName := "iperf-e2e"
		err, _ := f.CreateServiceForSimpleAppWithPods(
			8001,
			8002,
			appName,
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{{
						Name:  "iperf-server",
						Image: "gcr.io/google_containers/iperf:e2e",
						Args: []string{
							"/bin/sh",
							"-c",
							"/usr/local/bin/iperf -s -p 8001",
						},
						Ports: []api.ContainerPort{{ContainerPort: 8001}},
					}},
					NodeName:      n.Name,
					RestartPolicy: api.RestartPolicyOnFailure,
				}
			},
			// This will be used to generate the service name which all iperf clients point at.
			numServer, // Generally should be 1 server, unless we do affinity or use a version of iperf that supports LB.
			true,      // Make sure we wait, otherwise all the clients will die and need to restart.
		)
		if err != nil {
			framework.Failf("Fatal error waiting for iperf server endpoint: %v", err)
		}

		iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
			"iperf-e2e-cli",
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "iperf-client",
							Image: "gcr.io/google_containers/iperf:e2e",
							Args: []string{
								"/bin/sh",
								"-c",
								"/usr/local/bin/iperf -c service-for-" + appName + " -p 8002 --reportstyle C && sleep 5",
							},
						},
					},
					RestartPolicy: api.RestartPolicyOnFailure, // Let the clients successfully die.
				}
			},
			numClient,
		)

		framework.Logf("Reading all perf results to stdout.")
		framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")

		// Calculate the expected number of clients based on the total number of schedulable nodes.
		expectedCli := func() int {
			nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
			return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
		}()

		// Extra 1/10 of a second per client.
		iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
		iperfResults := &IPerfResults{}

		iperfClusterVerification := f.NewClusterVerification(
			framework.PodStateVerification{
				Selectors:   iperfClientPodLabels,
				ValidPhases: []api.PodPhase{api.PodSucceeded},
			},
		)

		pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
		if err2 != nil {
			framework.Failf("Error waiting for iperf client pods: %v", err2)
		} else if len(pods) < expectedCli {
			framework.Failf("IPerf results: Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
		} else {
			// For each pod, build up a collection of IPerfRecords from its log.
			iperfClusterVerification.ForEach(
				func(p api.Pod) {
					resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
					if err == nil {
						framework.Logf(resultS)
						iperfResults.Add(NewIPerf(resultS))
					} else {
						framework.Failf("Unexpected error %v when running ForEach on the pods.", err)
					}
				})
		}

		fmt.Println("[begin] Node,Bandwidth CSV")
		fmt.Println(iperfResults.ToTSV())
		fmt.Println("[end] Node,Bandwidth CSV")

		for ipClient, bandwidth := range iperfResults.BandwidthMap {
			framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
		}
	})
}
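
// For reference, iperf's "--reportstyle C" flag (used by the client pods above)
// emits one comma-separated line per run, matching the header the test logs:
//   date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits
// The sketch below shows how such a line could be decoded into a record. It is
// a hypothetical illustration, not the framework's actual parser (the test's
// real parsing lives in NewIPerf/IPerfResults), and it assumes "fmt",
// "strconv", and "strings" are among the file's imports.

// iperfCSVRecord mirrors the CSV header logged by the test; hypothetical type.
type iperfCSVRecord struct {
	Date          string
	Client        string
	Server        string
	TransferBits  int64
	BandwidthBits int64
}

// parseIperfCSVLine splits one CSV report line into an iperfCSVRecord;
// hypothetical helper for illustration only.
func parseIperfCSVLine(line string) (*iperfCSVRecord, error) {
	fields := strings.Split(strings.TrimSpace(line), ",")
	if len(fields) != 9 {
		return nil, fmt.Errorf("expected 9 CSV fields, got %d: %q", len(fields), line)
	}
	transfer, err := strconv.ParseInt(fields[7], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("bad transferBits field: %v", err)
	}
	bandwidth, err := strconv.ParseInt(fields[8], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("bad bandwidthBits field: %v", err)
	}
	return &iperfCSVRecord{
		Date:          fields[0],
		Client:        fields[1],
		Server:        fields[3],
		TransferBits:  transfer,
		BandwidthBits: bandwidth,
	}, nil
}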