Example #1
func createRC(wg *sync.WaitGroup, config *testutils.RCConfig, creatingTime time.Duration) {
	defer GinkgoRecover()
	defer wg.Done()

	sleepUpTo(creatingTime)
	framework.ExpectNoError(framework.RunRC(*config), fmt.Sprintf("creating rc %s", config.Name))
}
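The staggering above relies on a sleepUpTo helper that is not shown in the excerpt. A minimal sketch, assuming it sleeps for a uniformly random duration up to the given bound (name and behavior inferred from the call site, not confirmed by the source):

import (
	"math/rand"
	"time"
)

// sleepUpTo pauses for a random duration in [0, d), so goroutines
// started together spread their RC creations over the window d.
func sleepUpTo(d time.Duration) {
	if d <= 0 {
		return // rand.Int63n panics on non-positive arguments
	}
	time.Sleep(time.Duration(rand.Int63n(int64(d))))
}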
Example #2
func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
	cfg := testutils.RCConfig{
		Client:       f.Client,
		Image:        framework.GetPauseImageName(f.Client),
		Name:         "svc-latency-rc",
		Namespace:    f.Namespace.Name,
		Replicas:     1,
		PollInterval: time.Second,
	}
	if err := framework.RunRC(cfg); err != nil {
		return nil, err
	}

	// Run a single watcher, to reduce the number of API calls we have to
	// make; this is to minimize the timing error. It's how kube-proxy
	// consumes the endpoints data, so it seems like the right thing to
	// test.
	endpointQueries := newQuerier()
	startEndpointWatcher(f, endpointQueries)
	defer close(endpointQueries.stop)

	// Run one test and throw the result away; this is just to make sure
	// that the pod's ready status has propagated.
	singleServiceLatency(f, cfg.Name, endpointQueries)

	// These channels are never closed, and each attempt sends on exactly
	// one of these channels, so the sum of the things sent over them will
	// be exactly total.
	errs := make(chan error, total)
	durations := make(chan time.Duration, total)

	blocker := make(chan struct{}, inParallel)
	for i := 0; i < total; i++ {
		go func() {
			defer GinkgoRecover()
			blocker <- struct{}{}
			defer func() { <-blocker }()
			if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {
				errs <- err
			} else {
				durations <- d
			}
		}()
	}

	errCount := 0
	for i := 0; i < total; i++ {
		select {
		case e := <-errs:
			framework.Logf("Got error: %v", e)
			errCount++
		case d := <-durations:
			output = append(output, d)
		}
	}
	if errCount != 0 {
		return output, fmt.Errorf("got %v errors", errCount)
	}
	return output, nil
}
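The blocker channel in this example is a counting semaphore: each attempt acquires a slot by sending into the buffered channel and releases it in a defer, so at most inParallel attempts run at once even though all total goroutines are launched immediately. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const total, inParallel = 10, 3
	sem := make(chan struct{}, inParallel) // counting semaphore
	var wg sync.WaitGroup
	for i := 0; i < total; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot; blocks when full
			defer func() { <-sem }() // release the slot
			fmt.Printf("attempt %d running\n", i)
		}(i)
	}
	wg.Wait()
}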
Example #3
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating an RC of %d pods and waiting until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := time.Until(deadline)
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.ClientSet, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(f.ClientSet, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)

	By("Deleting the RC")
	framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
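The monitoring loop above sleeps for min(timeLeft, reportingPeriod) so that it wakes exactly at the deadline. The same deadline-bounded polling can be written with a timer and a ticker; a minimal sketch (pollUntil and report are illustrative names, not part of the framework):

package main

import (
	"fmt"
	"time"
)

// pollUntil invokes report every period until total has elapsed.
func pollUntil(total, period time.Duration, report func()) {
	deadline := time.After(total)
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-deadline:
			return
		case <-ticker.C:
			report()
		}
	}
}

func main() {
	pollUntil(time.Second, 250*time.Millisecond, func() {
		fmt.Println("still running...")
	})
}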
Example #4
func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
	By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": name,
			},
		},
	})
	framework.ExpectNoError(err)

	rcConfig := framework.RCConfig{
		Client:     c,
		Image:      resourceConsumerImage,
		Name:       name,
		Namespace:  ns,
		Timeout:    timeoutRC,
		Replicas:   replicas,
		CpuRequest: cpuLimitMillis,
		CpuLimit:   cpuLimitMillis,
		MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
		MemLimit:   memLimitMb * 1024 * 1024,
	}

	switch kind {
	case kindRC:
		framework.ExpectNoError(framework.RunRC(rcConfig))
	case kindDeployment:
		dpConfig := framework.DeploymentConfig{
			RCConfig: rcConfig,
		}
		framework.ExpectNoError(framework.RunDeployment(dpConfig))
	case kindReplicaSet:
		rsConfig := framework.ReplicaSetConfig{
			RCConfig: rcConfig,
		}
		framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
	default:
		framework.Failf(invalidKind)
	}

	// Make sure endpoints are propagated.
	// TODO(piosz): replace sleep with endpoints watch.
	time.Sleep(10 * time.Second)
}
Example #5
func ReserveMemory(f *framework.Framework, id string, megabytes int) {
	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    10 * time.Minute,
		Image:      "gcr.io/google_containers/pause:2.0",
		Replicas:   megabytes / 500,
		MemRequest: 500 * 1024 * 1024,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
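As a worked example, calling this with megabytes = 2000 creates 2000/500 = 4 replicas, each requesting 500 MiB (500 * 1024 * 1024 bytes). Note the integer division: any megabytes value below 500 yields zero replicas.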
Example #6
func ReserveCpu(f *framework.Framework, id string, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    10 * time.Minute,
		Image:      "gcr.io/google_containers/pause:2.0",
		Replicas:   millicores / 100,
		CpuRequest: 100,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
Example #7
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	request := int64(millicores / replicas)
	config := &testutils.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		CpuRequest: request,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
Example #8
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	request := int64(millicores / replicas)
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    scaleTimeout,
		Image:      "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:   replicas,
		CpuRequest: request,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
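In the two variants that split the reservation across replicas, the per-pod request is millicores / replicas with integer division, so the total reservation can fall slightly short of the target: with replicas = 3 and millicores = 1000, each pod requests 333m, or 999m in total.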
Example #9
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
	defer GinkgoRecover()

	// Start all replication controllers.
	startTime := time.Now()
	wg := sync.WaitGroup{}
	wg.Add(len(dtc.Configs))
	for i := range dtc.Configs {
		rcConfig := dtc.Configs[i]
		go func() {
			defer GinkgoRecover()
			// Call wg.Done() in defer to avoid blocking whole test
			// in case of error from RunRC.
			defer wg.Done()
			framework.ExpectNoError(framework.RunRC(rcConfig))
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
	wg.Wait()
	startupTime := time.Since(startTime)
	close(logStopCh)
	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))

	// Print some data about Pod to Node allocation
	By("Printing Pod to Node allocation data")
	podList, err := dtc.ClientSet.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
	framework.ExpectNoError(err)
	pausePodAllocation := make(map[string]int)
	systemPodAllocation := make(map[string][]string)
	for _, pod := range podList.Items {
		if pod.Namespace == api.NamespaceSystem {
			systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
		} else {
			pausePodAllocation[pod.Spec.NodeName]++
		}
	}
	nodeNames := make([]string, 0)
	for k := range pausePodAllocation {
		nodeNames = append(nodeNames, k)
	}
	sort.Strings(nodeNames)
	for _, node := range nodeNames {
		framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
	}
	return startupTime
}
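One precision caveat in the throughput line: startupTime/time.Second is integer division on time.Duration values, so any sub-second remainder is discarded before the float32 conversion. A form like float64(dtc.PodCount)/startupTime.Seconds() would keep it, though for multi-minute density runs the difference is negligible.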
Example #10
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By("Running RC which reserves host port")
	config := &testutils.RCConfig{
		Client:    f.Client,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   defaultTimeout,
		Image:     framework.GetPauseImageName(f.Client),
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
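Reserving a host port here is a scheduling device: two pods that request the same host port can never land on the same node, so an RC like this forces at most one replica per node, which is convenient for spreading pods in scaling tests.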
Example #11
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
	request := int64(1024 * 1024 * megabytes / replicas)
	config := &testutils.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		MemRequest: request,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
Example #12
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By("Running RC which reserves host port")
	config := &framework.RCConfig{
		Client:    f.Client,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   scaleTimeout,
		Image:     "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}

}
Example #13
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
	request := int64(1024 * 1024 * megabytes / replicas)
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    scaleTimeout,
		Image:      "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:   replicas,
		MemRequest: request,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
Example #14
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) {
	By("Running RC which reserves host port and defines node selector")

	config := &testutils.RCConfig{
		Client:       f.Client,
		Name:         id,
		Namespace:    f.Namespace.Name,
		Timeout:      defaultTimeout,
		Image:        framework.GetPauseImageName(f.Client),
		Replicas:     replicas,
		HostPorts:    map[string]int{"port1": 4321},
		NodeSelector: nodeSelector,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
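With the selector applied, these pods schedule only onto nodes carrying the matching label (the original excerpt hardcoded cluster-autoscaling-test.special-node=true and ignored its parameters; both now flow through), so a call with expectRunning set to false presumably expects the RC to stay pending until the autoscaler provisions such a node.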
Example #15
func proxyContext(version string) {
	options := framework.FrameworkOptions{
		ClientQPS: -1.0,
	}
	f := framework.NewFramework("proxy", options, nil)
	prefix := "/api/" + version

	// Port here has to be kept in sync with the default kubelet port.
	It("should proxy logs on node with explicit kubelet port [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })
	It("should proxy logs on node [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
	It("should proxy to cadvisor [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })

	It("should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })
	It("should proxy logs on node using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
	It("should proxy to cadvisor using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })

	// Use the porter image to serve content, and access that content
	// both through the service proxy and directly through the pod proxy.
	It("should proxy through a service and a pod [Conformance]", func() {
		start := time.Now()
		labels := map[string]string{"proxy-service-target": "true"}
		service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				GenerateName: "proxy-service-",
			},
			Spec: api.ServiceSpec{
				Selector: labels,
				Ports: []api.ServicePort{
					{
						Name:       "portname1",
						Port:       80,
						TargetPort: intstr.FromString("dest1"),
					},
					{
						Name:       "portname2",
						Port:       81,
						TargetPort: intstr.FromInt(162),
					},
					{
						Name:       "tlsportname1",
						Port:       443,
						TargetPort: intstr.FromString("tlsdest1"),
					},
					{
						Name:       "tlsportname2",
						Port:       444,
						TargetPort: intstr.FromInt(462),
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func(name string) {
			err := f.Client.Services(f.Namespace.Name).Delete(name)
			if err != nil {
				framework.Logf("Failed deleting service %v: %v", name, err)
			}
		}(service.Name)

		// Make an RC with a single pod. The 'porter' image is
		// a simple server which serves the values of the
		// environment variables below.
		By("starting an echo server on multiple ports")
		pods := []*api.Pod{}
		cfg := framework.RCConfig{
			Client:       f.Client,
			Image:        "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
			Name:         service.Name,
			Namespace:    f.Namespace.Name,
			Replicas:     1,
			PollInterval: time.Second,
			Env: map[string]string{
				"SERVE_PORT_80":   `<a href="/rewriteme">test</a>`,
				"SERVE_PORT_1080": `<a href="/rewriteme">test</a>`,
				"SERVE_PORT_160":  "foo",
				"SERVE_PORT_162":  "bar",

				"SERVE_TLS_PORT_443": `<a href="/tlsrewriteme">test</a>`,
				"SERVE_TLS_PORT_460": `tls baz`,
				"SERVE_TLS_PORT_462": `tls qux`,
			},
			Ports: map[string]int{
				"dest1": 160,
				"dest2": 162,

				"tlsdest1": 460,
				"tlsdest2": 462,
			},
			ReadinessProbe: &api.Probe{
				Handler: api.Handler{
					HTTPGet: &api.HTTPGetAction{
						Port: intstr.FromInt(80),
					},
				},
				InitialDelaySeconds: 1,
				TimeoutSeconds:      5,
				PeriodSeconds:       10,
			},
			Labels:      labels,
			CreatedPods: &pods,
		}
		Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)

		Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

		// table constructors
		// Try proxying through the service and directly through the pod.
		svcProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
		}
		subresourceServiceProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
		}
		podProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
		}
		subresourcePodProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
		}
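		// For illustration, with hypothetical names: given version "v1",
		// namespace "e2e-tests-proxy-abcde", and service "proxy-service-xyz",
		// svcProxyURL("http", "portname1") evaluates to
		// "/api/v1/proxy/namespaces/e2e-tests-proxy-abcde/services/http:proxy-service-xyz:portname1",
		// since net.JoinSchemeNamePort appears to join its arguments as
		// "scheme:name:port" (dropping the scheme segment when it is empty).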

		// construct the table
		expectations := map[string]string{
			svcProxyURL("", "portname1") + "/": "foo",
			svcProxyURL("", "80") + "/":        "foo",
			svcProxyURL("", "portname2") + "/": "bar",
			svcProxyURL("", "81") + "/":        "bar",

			svcProxyURL("http", "portname1") + "/": "foo",
			svcProxyURL("http", "80") + "/":        "foo",
			svcProxyURL("http", "portname2") + "/": "bar",
			svcProxyURL("http", "81") + "/":        "bar",

			svcProxyURL("https", "tlsportname1") + "/": "tls baz",
			svcProxyURL("https", "443") + "/":          "tls baz",
			svcProxyURL("https", "tlsportname2") + "/": "tls qux",
			svcProxyURL("https", "444") + "/":          "tls qux",

			subresourceServiceProxyURL("", "portname1") + "/":         "foo",
			subresourceServiceProxyURL("http", "portname1") + "/":     "foo",
			subresourceServiceProxyURL("", "portname2") + "/":         "bar",
			subresourceServiceProxyURL("http", "portname2") + "/":     "bar",
			subresourceServiceProxyURL("https", "tlsportname1") + "/": "tls baz",
			subresourceServiceProxyURL("https", "tlsportname2") + "/": "tls qux",

			podProxyURL("", "1080") + "/": `<a href="` + podProxyURL("", "1080") + `/rewriteme">test</a>`,
			podProxyURL("", "160") + "/":  "foo",
			podProxyURL("", "162") + "/":  "bar",

			podProxyURL("http", "1080") + "/": `<a href="` + podProxyURL("http", "1080") + `/rewriteme">test</a>`,
			podProxyURL("http", "160") + "/":  "foo",
			podProxyURL("http", "162") + "/":  "bar",

			subresourcePodProxyURL("", "") + "/":         `<a href="` + subresourcePodProxyURL("", "") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "1080") + "/":     `<a href="` + subresourcePodProxyURL("", "1080") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("http", "1080") + "/": `<a href="` + subresourcePodProxyURL("http", "1080") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "160") + "/":      "foo",
			subresourcePodProxyURL("http", "160") + "/":  "foo",
			subresourcePodProxyURL("", "162") + "/":      "bar",
			subresourcePodProxyURL("http", "162") + "/":  "bar",

			subresourcePodProxyURL("https", "443") + "/": `<a href="` + subresourcePodProxyURL("https", "443") + `/tlsrewriteme">test</a>`,
			subresourcePodProxyURL("https", "460") + "/": "tls baz",
			subresourcePodProxyURL("https", "462") + "/": "tls qux",

			// TODO: below entries don't work, but I believe we should make them work.
			// podPrefix + ":dest1": "foo",
			// podPrefix + ":dest2": "bar",
		}

		wg := sync.WaitGroup{}
		errs := []string{}
		errLock := sync.Mutex{}
		recordError := func(s string) {
			errLock.Lock()
			defer errLock.Unlock()
			errs = append(errs, s)
		}
		d := time.Since(start)
		framework.Logf("setup took %v, starting test cases", d)
		numberTestCases := len(expectations)
		totalAttempts := numberTestCases * proxyAttempts
		By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts))

		for i := 0; i < proxyAttempts; i++ {
			wg.Add(numberTestCases)
			for path, val := range expectations {
				go func(i int, path, val string) {
					defer wg.Done()
					// this runs the test case
					body, status, d, err := doProxy(f, path, i)

					if err != nil {
						if serr, ok := err.(*errors.StatusError); ok {
							recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v",
								i, status, d, path, serr.Status()))
						} else {
							recordError(fmt.Sprintf("%v: path %v gave error: %v", i, path, err))
						}
						return
					}
					if status != http.StatusOK {
						recordError(fmt.Sprintf("%v: path %v gave status: %v", i, path, status))
					}
					if e, a := val, string(body); e != a {
						recordError(fmt.Sprintf("%v: path %v: wanted %v, got %v", i, path, e, a))
					}
					if d > proxyHTTPCallTimeout {
						recordError(fmt.Sprintf("%v: path %v took %v > %v", i, path, d, proxyHTTPCallTimeout))
					}
				}(i, path, val)
			}
			wg.Wait()
		}

		if len(errs) != 0 {
			body, err := f.Client.Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw()
			if err != nil {
				framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
			} else {
				framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
			}

			Fail(strings.Join(errs, "\n"))
		}
	})
}
Example #16
	BeforeEach(func() {
		// These tests require SSH
		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
		ns = f.Namespace.Name

		// All the restart tests need an RC and a watch on the RC's pods.
		// Additionally, some of them might scale the RC during the test.
		config = framework.RCConfig{
			Client:      f.Client,
			Name:        rcName,
			Namespace:   ns,
			Image:       framework.GetPauseImageName(f.Client),
			Replicas:    numPods,
			CreatedPods: &[]*api.Pod{},
		}
		Expect(framework.RunRC(config)).NotTo(HaveOccurred())
		replacePods(*config.CreatedPods, existingPods)

		stopCh = make(chan struct{})
		tracker = newPodTracker()
		newPods, controller = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					options.LabelSelector = labelSelector
					return f.Client.Pods(ns).List(options)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					options.LabelSelector = labelSelector
					return f.Client.Pods(ns).Watch(options)
				},
			},
Example #17
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
	By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
	_, err := c.Core().Services(ns).Create(&v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": name,
			},
		},
	})
	framework.ExpectNoError(err)

	rcConfig := testutils.RCConfig{
		Client:         c,
		InternalClient: internalClient,
		Image:          resourceConsumerImage,
		Name:           name,
		Namespace:      ns,
		Timeout:        timeoutRC,
		Replicas:       replicas,
		CpuRequest:     cpuLimitMillis,
		CpuLimit:       cpuLimitMillis,
		MemRequest:     memLimitMb * 1024 * 1024, // MemLimit is in bytes
		MemLimit:       memLimitMb * 1024 * 1024,
	}

	switch kind {
	case kindRC:
		framework.ExpectNoError(framework.RunRC(rcConfig))
	case kindDeployment:
		dpConfig := testutils.DeploymentConfig{
			RCConfig: rcConfig,
		}
		framework.ExpectNoError(framework.RunDeployment(dpConfig))
	case kindReplicaSet:
		rsConfig := testutils.ReplicaSetConfig{
			RCConfig: rcConfig,
		}
		By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
		framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
	default:
		framework.Failf(invalidKind)
	}

	By("Running controller")
	controllerName := name + "-ctrl"
	_, err = c.Core().Services(ns).Create(&v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: controllerName,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": controllerName,
			},
		},
	})
	framework.ExpectNoError(err)

	dnsClusterFirst := v1.DNSClusterFirst
	controllerRcConfig := testutils.RCConfig{
		Client:    c,
		Image:     resourceConsumerControllerImage,
		Name:      controllerName,
		Namespace: ns,
		Timeout:   timeoutRC,
		Replicas:  1,
		Command:   []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
		DNSPolicy: &dnsClusterFirst,
	}
	framework.ExpectNoError(framework.RunRC(controllerRcConfig))

	// Wait for endpoints to propagate for the controller service.
	framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
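Unlike the older variants of this function (Examples 4 and 20), which end with a fixed 10-second sleep and a TODO about watching endpoints, this version resolves that TODO by polling with framework.WaitForServiceEndpointsNum until the controller service has an endpoint.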
Example #18
						uLock.Lock()
						defer uLock.Unlock()
						updateCount++
					},
				},
			)
			go updateController.Run(stop)

			// Start all replication controllers.
			startTime := time.Now()
			wg := sync.WaitGroup{}
			wg.Add(len(RCConfigs))
			for i := range RCConfigs {
				rcConfig := RCConfigs[i]
				go func() {
					defer GinkgoRecover()
					// Done in a defer so a failure in RunRC cannot leave
					// wg.Wait() blocked.
					defer wg.Done()
					framework.ExpectNoError(framework.RunRC(rcConfig))
				}()
			}
			logStopCh := make(chan struct{})
			go logPodStartupStatus(c, totalPods, ns, map[string]string{"type": "densityPod"}, itArg.interval, logStopCh)
			wg.Wait()
			e2eStartupTime = time.Since(startTime)
			close(logStopCh)
			framework.Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)
			framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(totalPods)/float32(e2eStartupTime/time.Second))

			By("Waiting for all events to be recorded")
			last := -1
			current := len(events)
			lastCount := -1
Example #19
var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {

	f := framework.NewDefaultFramework("etcd-failure")

	BeforeEach(func() {
		// This test requires:
		// - SSH
		// - master access
		// ... so the provider check should be identical to the intersection of
		// providers that provide those capabilities.
		framework.SkipUnlessProviderIs("gce")

		Expect(framework.RunRC(framework.RCConfig{
			Client:    f.Client,
			Name:      "baz",
			Namespace: f.Namespace.Name,
			Image:     "gcr.io/google_containers/pause:2.0",
			Replicas:  1,
		})).NotTo(HaveOccurred())
	})

	It("should recover from network partition with master", func() {
		etcdFailTest(
			f,
			"sudo iptables -A INPUT -p tcp --destination-port 4001 -j DROP",
			"sudo iptables -D INPUT -p tcp --destination-port 4001 -j DROP",
		)
	})

	It("should recover from SIGKILL", func() {
		etcdFailTest(
Example #20
func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
	By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": name,
			},
		},
	})
	framework.ExpectNoError(err)

	rcConfig := testutils.RCConfig{
		Client:     c,
		Image:      resourceConsumerImage,
		Name:       name,
		Namespace:  ns,
		Timeout:    timeoutRC,
		Replicas:   replicas,
		CpuRequest: cpuLimitMillis,
		CpuLimit:   cpuLimitMillis,
		MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
		MemLimit:   memLimitMb * 1024 * 1024,
	}

	switch kind {
	case kindRC:
		framework.ExpectNoError(framework.RunRC(rcConfig))
	case kindDeployment:
		dpConfig := testutils.DeploymentConfig{
			RCConfig: rcConfig,
		}
		framework.ExpectNoError(framework.RunDeployment(dpConfig))
	case kindReplicaSet:
		rsConfig := testutils.ReplicaSetConfig{
			RCConfig: rcConfig,
		}
		By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
		framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
	default:
		framework.Failf(invalidKind)
	}

	By("Running controller")
	controllerName := name + "-ctrl"
	_, err = c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: controllerName,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": controllerName,
			},
		},
	})
	framework.ExpectNoError(err)

	dnsClusterFirst := api.DNSClusterFirst
	controllerRcConfig := testutils.RCConfig{
		Client:    c,
		Image:     resourceConsumerControllerImage,
		Name:      controllerName,
		Namespace: ns,
		Timeout:   timeoutRC,
		Replicas:  1,
		Command:   []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
		DNSPolicy: &dnsClusterFirst,
	}
	framework.ExpectNoError(framework.RunRC(controllerRcConfig))

	// Make sure endpoints are propagated.
	// TODO(piosz): replace sleep with endpoints watch.
	time.Sleep(10 * time.Second)
}
Example #21
		}
		deleteTests := []DeleteTest{
			{podsPerNode: 10, timeout: 1 * time.Minute},
		}
		for _, itArg := range deleteTests {
			name := fmt.Sprintf(
				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
			It(name, func() {
				totalPods := itArg.podsPerNode * numNodes
				By(fmt.Sprintf("Creating an RC of %d pods and waiting until all pods of this RC are running", totalPods))
				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID()))

				Expect(framework.RunRC(framework.RCConfig{
					Client:    f.Client,
					Name:      rcName,
					Namespace: f.Namespace.Name,
					Image:     framework.GetPauseImageName(f.Client),
					Replicas:  totalPods,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because framework.RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				resourceMonitor.LogLatest()

				By("Deleting the RC")
				framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
				// Check that the pods really are gone by querying /runningpods on the
				// node. The /runningpods handler checks the container runtime (or its
Example #22
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
	defer GinkgoRecover()
	// Create a listener for events.
	// eLock protects the events slice.
	var eLock sync.Mutex
	events := make([]*api.Event, 0)
	_, controller := controllerframework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dtc.Client.Events(dtc.Namespace).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dtc.Client.Events(dtc.Namespace).Watch(options)
			},
		},
		&api.Event{},
		0,
		controllerframework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				eLock.Lock()
				defer eLock.Unlock()
				events = append(events, obj.(*api.Event))
			},
		},
	)
	stop := make(chan struct{})
	go controller.Run(stop)

	// Create a listener for API updates.
	// uLock protects updateCount.
	var uLock sync.Mutex
	updateCount := 0
	label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
	_, updateController := controllerframework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.LabelSelector = label
				return dtc.Client.Pods(dtc.Namespace).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.LabelSelector = label
				return dtc.Client.Pods(dtc.Namespace).Watch(options)
			},
		},
		&api.Pod{},
		0,
		controllerframework.ResourceEventHandlerFuncs{
			UpdateFunc: func(_, _ interface{}) {
				uLock.Lock()
				defer uLock.Unlock()
				updateCount++
			},
		},
	)
	go updateController.Run(stop)

	// Start all replication controllers.
	startTime := time.Now()
	wg := sync.WaitGroup{}
	wg.Add(len(dtc.Configs))
	for i := range dtc.Configs {
		rcConfig := dtc.Configs[i]
		go func() {
			defer GinkgoRecover()
			// Done in a defer so a failure in RunRC cannot leave
			// wg.Wait() blocked.
			defer wg.Done()
			framework.ExpectNoError(framework.RunRC(rcConfig))
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
	wg.Wait()
	startupTime := time.Since(startTime)
	close(logStopCh)
	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))

	By("Waiting for all events to be recorded")
	last := -1
	current := len(events)
	lastCount := -1
	currentCount := updateCount
	for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < dtc.Timeout; time.Sleep(10 * time.Second) {
		func() {
			eLock.Lock()
			defer eLock.Unlock()
			last = current
			current = len(events)
		}()
		func() {
			uLock.Lock()
			defer uLock.Unlock()
			lastCount = currentCount
			currentCount = updateCount
		}()
	}
	close(stop)

	if current != last {
		framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
	}
	framework.Logf("Found %d events", current)
	if currentCount != lastCount {
		framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
	}
	framework.Logf("Found %d updates", currentCount)

	// Tune the threshold for allowed failures.
	badEvents := framework.BadEvents(events)
	Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
	// Print some data about Pod to Node allocation
	By("Printing Pod to Node allocation data")
	podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
	framework.ExpectNoError(err)
	pausePodAllocation := make(map[string]int)
	systemPodAllocation := make(map[string][]string)
	for _, pod := range podList.Items {
		if pod.Namespace == api.NamespaceSystem {
			systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
		} else {
			pausePodAllocation[pod.Spec.NodeName]++
		}
	}
	nodeNames := make([]string, 0)
	for k := range pausePodAllocation {
		nodeNames = append(nodeNames, k)
	}
	sort.Strings(nodeNames)
	for _, node := range nodeNames {
		framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
	}
	return startupTime
}
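The "Waiting for all events to be recorded" loop is a quiescence check: every 10 seconds it snapshots len(events) and updateCount under their respective locks and keeps waiting while either count grew since the previous snapshot, giving up after dtc.Timeout. Only once the counts are stable are the events counted against the 1% bad-event threshold.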
Example #23
		deleteTests := []DeleteTest{
			{podsPerNode: 10, timeout: 1 * time.Minute},
		}
		for _, itArg := range deleteTests {
			name := fmt.Sprintf(
				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
			It(name, func() {
				totalPods := itArg.podsPerNode * numNodes
				By(fmt.Sprintf("Creating an RC of %d pods and waiting until all pods of this RC are running", totalPods))
				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))

				Expect(framework.RunRC(testutils.RCConfig{
					Client:         f.ClientSet,
					InternalClient: f.InternalClientset,
					Name:           rcName,
					Namespace:      f.Namespace.Name,
					Image:          framework.GetPauseImageName(f.ClientSet),
					Replicas:       totalPods,
					NodeSelector:   nodeLabels,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because framework.RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				if resourceMonitor != nil {
					resourceMonitor.LogLatest()
				}

				By("Deleting the RC")
Example #24
		}
		deleteTests := []DeleteTest{
			{podsPerNode: 10, timeout: 1 * time.Minute},
		}
		for _, itArg := range deleteTests {
			name := fmt.Sprintf(
				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
			It(name, func() {
				totalPods := itArg.podsPerNode * numNodes
				By(fmt.Sprintf("Creating an RC of %d pods and waiting until all pods of this RC are running", totalPods))
				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID()))

				Expect(framework.RunRC(framework.RCConfig{
					Client:    f.Client,
					Name:      rcName,
					Namespace: f.Namespace.Name,
					Image:     "gcr.io/google_containers/pause-amd64:3.0",
					Replicas:  totalPods,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because framework.RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				resourceMonitor.LogLatest()

				By("Deleting the RC")
				framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
				// Check that the pods really are gone by querying /runningpods on the
				// node. The /runningpods handler checks the container runtime (or its
Example #25
var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {

	f := framework.NewDefaultFramework("etcd-failure")

	BeforeEach(func() {
		// This test requires:
		// - SSH
		// - master access
		// ... so the provider check should be identical to the intersection of
		// providers that provide those capabilities.
		framework.SkipUnlessProviderIs("gce")

		Expect(framework.RunRC(testutils.RCConfig{
			Client:    f.ClientSet,
			Name:      "baz",
			Namespace: f.Namespace.Name,
			Image:     framework.GetPauseImageName(f.ClientSet),
			Replicas:  1,
		})).NotTo(HaveOccurred())
	})

	It("should recover from network partition with master", func() {
		etcdFailTest(
			f,
			"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
			"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
		)
	})

	It("should recover from SIGKILL", func() {
		etcdFailTest(
Example #26
func proxyContext(version string) {
	f := framework.NewDefaultFramework("proxy")
	prefix := "/api/" + version

	// Port here has to be kept in sync with the default kubelet port.
	It("should proxy logs on node with explicit kubelet port [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })
	It("should proxy logs on node [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
	It("should proxy to cadvisor [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })

	It("should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })
	It("should proxy logs on node using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
	It("should proxy to cadvisor using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })

	It("should proxy through a service and a pod [Conformance]", func() {
		labels := map[string]string{"proxy-service-target": "true"}
		service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				GenerateName: "proxy-service-",
			},
			Spec: api.ServiceSpec{
				Selector: labels,
				Ports: []api.ServicePort{
					{
						Name:       "portname1",
						Port:       80,
						TargetPort: intstr.FromString("dest1"),
					},
					{
						Name:       "portname2",
						Port:       81,
						TargetPort: intstr.FromInt(162),
					},
					{
						Name:       "tlsportname1",
						Port:       443,
						TargetPort: intstr.FromString("tlsdest1"),
					},
					{
						Name:       "tlsportname2",
						Port:       444,
						TargetPort: intstr.FromInt(462),
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func(name string) {
			err := f.Client.Services(f.Namespace.Name).Delete(name)
			if err != nil {
				framework.Logf("Failed deleting service %v: %v", name, err)
			}
		}(service.Name)

		// Make an RC with a single pod.
		pods := []*api.Pod{}
		cfg := framework.RCConfig{
			Client:       f.Client,
			Image:        "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
			Name:         service.Name,
			Namespace:    f.Namespace.Name,
			Replicas:     1,
			PollInterval: time.Second,
			Env: map[string]string{
				"SERVE_PORT_80":  `<a href="/rewriteme">test</a>`,
				"SERVE_PORT_160": "foo",
				"SERVE_PORT_162": "bar",

				"SERVE_TLS_PORT_443": `<a href="/tlsrewriteme">test</a>`,
				"SERVE_TLS_PORT_460": `tls baz`,
				"SERVE_TLS_PORT_462": `tls qux`,
			},
			Ports: map[string]int{
				"dest1": 160,
				"dest2": 162,

				"tlsdest1": 460,
				"tlsdest2": 462,
			},
			ReadinessProbe: &api.Probe{
				Handler: api.Handler{
					HTTPGet: &api.HTTPGetAction{
						Port: intstr.FromInt(80),
					},
				},
				InitialDelaySeconds: 1,
				TimeoutSeconds:      5,
				PeriodSeconds:       10,
			},
			Labels:      labels,
			CreatedPods: &pods,
		}
		Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
		defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name)

		Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

		// Try proxying through the service and directly through the pod.
		svcProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
		}
		subresourceServiceProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
		}
		podProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
		}
		subresourcePodProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
		}
		expectations := map[string]string{
			svcProxyURL("", "portname1") + "/": "foo",
			svcProxyURL("", "80") + "/":        "foo",
			svcProxyURL("", "portname2") + "/": "bar",
			svcProxyURL("", "81") + "/":        "bar",

			svcProxyURL("http", "portname1") + "/": "foo",
			svcProxyURL("http", "80") + "/":        "foo",
			svcProxyURL("http", "portname2") + "/": "bar",
			svcProxyURL("http", "81") + "/":        "bar",

			svcProxyURL("https", "tlsportname1") + "/": "tls baz",
			svcProxyURL("https", "443") + "/":          "tls baz",
			svcProxyURL("https", "tlsportname2") + "/": "tls qux",
			svcProxyURL("https", "444") + "/":          "tls qux",

			subresourceServiceProxyURL("", "portname1") + "/":         "foo",
			subresourceServiceProxyURL("http", "portname1") + "/":     "foo",
			subresourceServiceProxyURL("", "portname2") + "/":         "bar",
			subresourceServiceProxyURL("http", "portname2") + "/":     "bar",
			subresourceServiceProxyURL("https", "tlsportname1") + "/": "tls baz",
			subresourceServiceProxyURL("https", "tlsportname2") + "/": "tls qux",

			podProxyURL("", "80") + "/":  `<a href="` + podProxyURL("", "80") + `/rewriteme">test</a>`,
			podProxyURL("", "160") + "/": "foo",
			podProxyURL("", "162") + "/": "bar",

			podProxyURL("http", "80") + "/":  `<a href="` + podProxyURL("http", "80") + `/rewriteme">test</a>`,
			podProxyURL("http", "160") + "/": "foo",
			podProxyURL("http", "162") + "/": "bar",

			subresourcePodProxyURL("", "") + "/":        `<a href="` + subresourcePodProxyURL("", "") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "80") + "/":      `<a href="` + subresourcePodProxyURL("", "80") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("http", "80") + "/":  `<a href="` + subresourcePodProxyURL("http", "80") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "160") + "/":     "foo",
			subresourcePodProxyURL("http", "160") + "/": "foo",
			subresourcePodProxyURL("", "162") + "/":     "bar",
			subresourcePodProxyURL("http", "162") + "/": "bar",

			subresourcePodProxyURL("https", "443") + "/": `<a href="` + subresourcePodProxyURL("https", "443") + `/tlsrewriteme">test</a>`,
			subresourcePodProxyURL("https", "460") + "/": "tls baz",
			subresourcePodProxyURL("https", "462") + "/": "tls qux",

			// TODO: below entries don't work, but I believe we should make them work.
			// podPrefix + ":dest1": "foo",
			// podPrefix + ":dest2": "bar",
		}

		wg := sync.WaitGroup{}
		errs := []string{}
		errLock := sync.Mutex{}
		recordError := func(s string) {
			errLock.Lock()
			defer errLock.Unlock()
			errs = append(errs, s)
		}
		for i := 0; i < proxyAttempts; i++ {
			for path, val := range expectations {
				wg.Add(1)
				go func(i int, path, val string) {
					defer wg.Done()
					body, status, d, err := doProxy(f, path)
					if err != nil {
						recordError(fmt.Sprintf("%v: path %v gave error: %v", i, path, err))
						return
					}
					if status != http.StatusOK {
						recordError(fmt.Sprintf("%v: path %v gave status: %v", i, path, status))
					}
					if e, a := val, string(body); e != a {
						recordError(fmt.Sprintf("%v: path %v: wanted %v, got %v", i, path, e, a))
					}
					if d > proxyHTTPCallTimeout {
						recordError(fmt.Sprintf("%v: path %v took %v > %v", i, path, d, proxyHTTPCallTimeout))
					}
				}(i, path, val)
				// default QPS is 5
				time.Sleep(200 * time.Millisecond)
			}
		}
		wg.Wait()

		if len(errs) != 0 {
			Fail(strings.Join(errs, "\n"))
		}
	})
}
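Note the pacing difference from the variant in Example 15: there the framework is constructed with ClientQPS: -1.0, which disables client-side throttling, so every case in an attempt runs concurrently; here the default client QPS of 5 applies, hence the 200 ms sleep between launched test cases.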