Code example #1
// setup includes setupCore and also sets up services
func (config *NetworkingTestConfig) setup(selector map[string]string) {
	config.setupCore(selector)

	By("Getting node addresses")
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
	config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.ExternalAddrs) < 2 {
		// fall back to legacy IPs
		config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), "At least two nodes with an ExternalIP or LegacyHostIP are required")
	config.Nodes = nodeList.Items

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(selector)

	for _, p := range config.NodePortService.Spec.Ports {
		switch p.Protocol {
		case api.ProtocolUDP:
			config.NodeUdpPort = int(p.NodePort)
		case api.ProtocolTCP:
			config.NodeHttpPort = int(p.NodePort)
		default:
			continue
		}
	}
	config.ClusterIP = config.NodePortService.Spec.ClusterIP
	config.NodeIP = config.ExternalAddrs[0]
}
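
setupCore is called above but not shown in this listing; here is a minimal sketch of what it plausibly does, inferred from the monolithic setup in code example #5 below (the field and helper names here are assumptions, not the actual implementation):

// Hypothetical sketch of setupCore, reconstructed from code example #5;
// it creates the endpoint pods and test pods that setup builds on.
func (config *NetworkingTestConfig) setupCore(selector map[string]string) {
	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.EndpointPods = config.createNetProxyPods(podName, selector)

	By("Creating test pods")
	config.createTestPods()
}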
Code example #2
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)

	// To make this test work reasonably fast in large clusters,
	// we cap the number of NetProxyPods at maxNetProxyPodsCount,
	// placing them on randomly chosen nodes.
	nodes := shuffleNodes(nodeList.Items)
	if len(nodes) > maxNetProxyPodsCount {
		nodes = nodes[:maxNetProxyPodsCount]
	}

	// create pods, one for each node
	createdPods := make([]*api.Pod, 0, len(nodes))
	for i, n := range nodes {
		podName := fmt.Sprintf("%s-%d", podName, i)
		pod := config.createNetShellPodSpec(podName, n.Name)
		pod.ObjectMeta.Labels = selector
		createdPod := config.createPod(pod)
		createdPods = append(createdPods, createdPod)
	}

	// wait until all of them are up
	runningPods := make([]*api.Pod, 0, len(nodes))
	for _, p := range createdPods {
		framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
		rp, err := config.getPodClient().Get(p.Name)
		framework.ExpectNoError(err)
		runningPods = append(runningPods, rp)
	}

	return runningPods
}
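
shuffleNodes is referenced above but not included in this snippet; a minimal sketch that would satisfy the call site, assuming math/rand is imported (an illustration, not necessarily the project's implementation):

// shuffleNodes returns a randomly permuted copy of the node list, so the
// subsequent cap keeps at most maxNetProxyPodsCount pods on random nodes.
func shuffleNodes(nodes []api.Node) []api.Node {
	shuffled := make([]api.Node, len(nodes))
	for i, j := range rand.Perm(len(nodes)) {
		shuffled[j] = nodes[i]
	}
	return shuffled
}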
Code example #3
File: kubeproxy.go  Project: FlyWings/kubernetes
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)

	// create pods, one for each node
	createdPods := make([]*api.Pod, 0, len(nodes.Items))
	for i, n := range nodes.Items {
		podName := fmt.Sprintf("%s-%d", podName, i)
		pod := config.createNetShellPodSpec(podName, n.Name)
		pod.ObjectMeta.Labels = selector
		createdPod := config.createPod(pod)
		createdPods = append(createdPods, createdPod)
	}

	// wait until all of them are up
	runningPods := make([]*api.Pod, 0, len(nodes.Items))
	for _, p := range createdPods {
		framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
		rp, err := config.getPodClient().Get(p.Name)
		framework.ExpectNoError(err)
		runningPods = append(runningPods, rp)
	}

	return runningPods
}
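
Code example #3 is the older KubeProxyTestConfig variant of example #2: the pod-creation and readiness-wait logic is identical, but it schedules one pod on every ready node, without the shuffle-and-cap step that limits the count to maxNetProxyPodsCount in large clusters.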
Code example #4
File: kubeproxy.go  Project: FlyWings/kubernetes
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting node addresses")
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
	config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), "At least two nodes with an ExternalIP or LegacyHostIP are required")
	config.nodes = nodeList.Items

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
}
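
createNodePortService and createLoadBalancerService are not shown in this listing. As a hedged sketch, the NodePort service built over serviceSelector could look roughly like the following; the service name, port numbers, and target ports are illustrative assumptions, and intstr refers to k8s.io/kubernetes/pkg/util/intstr:

// Illustrative sketch only - not the project's createNodePortService.
svc := &api.Service{
	ObjectMeta: api.ObjectMeta{Name: "node-port-service"},
	Spec: api.ServiceSpec{
		Type:     api.ServiceTypeNodePort,
		Selector: serviceSelector,
		Ports: []api.ServicePort{
			// One TCP and one UDP port, matching the protocol switch
			// in code examples #1 and #5.
			{Name: "http", Protocol: api.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(8080)},
			{Name: "udp", Protocol: api.ProtocolUDP, Port: 90, TargetPort: intstr.FromInt(8081)},
		},
	},
}
createdSvc, err := config.f.Client.Services(config.f.Namespace.Name).Create(svc)
framework.ExpectNoError(err)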
Code example #5
func (config *NetworkingTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(uuid.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting node addresses")
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
	config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), "At least two nodes with an ExternalIP or LegacyHostIP are required")
	config.nodes = nodeList.Items

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
	for _, p := range config.nodePortService.Spec.Ports {
		switch p.Protocol {
		case api.ProtocolUDP:
			config.nodeUdpPort = int(p.NodePort)
		case api.ProtocolTCP:
			config.nodeHttpPort = int(p.NodePort)
		default:
			continue
		}
	}

	epCount := len(config.endpointPods)
	config.maxTries = epCount*epCount + testTries
	config.clusterIP = config.nodePortService.Spec.ClusterIP
	config.nodeIP = config.externalAddrs[0]
}
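
Note the retry budget computed at the end of setup: with epCount endpoint pods behind the service, the test allows epCount squared dial attempts plus a fixed testTries margin, so, for example, 3 endpoints with testTries = 30 yields maxTries = 39. The quadratic term presumably gives the service's load balancing enough attempts to reach every endpoint at least once.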
Code example #6
File: density.go  Project: CodeJuan/kubernetes
		framework.ExpectNoError(framework.VerifySchedulerLatency(c))
	})

	// Explicitly placed here so the namespace is deleted at the end of the
	// test (after latency metrics are measured, etc.).
	f := framework.NewDefaultFramework("density")
	f.NamespaceDeletionTimeout = time.Hour

	BeforeEach(func() {
		c = f.Client
		ns = f.Namespace.Name

		// In large clusters we may get to this point but still have a bunch
		// of nodes without Routes created. Since this would make a node
		// unschedulable, we need to wait until all of them are schedulable.
		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
		masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		if nodeCount == 30 {
			f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
		}

		nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
		nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()

	// Terminating a namespace (deleting the remaining objects from it - which
	// generally means events) can affect the current run. Thus we wait for all
	// terminating namespaces to be fully deleted before starting this test.
		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)
Code example #7
	// lingering resources are left over from a previous test run.
	if framework.TestContext.CleanStart {
		deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, v1.NamespaceDefault})
		if err != nil {
			framework.Failf("Error deleting orphaned namespaces: %v", err)
		}
		glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
		if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
			framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
		}
	}

	// In large clusters we may get to this point but still have a bunch
	// of nodes without Routes created. Since this would make a node
	// unschedulable, we need to wait until all of them are schedulable.
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.NodeSchedulableTimeout))

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
	if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil {
		framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
		framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf)
		framework.RunKubernetesServiceTestContainer(c, v1.NamespaceDefault)
		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
	}

	if err := framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
		// There is no guarantee that the image pulling will succeed in 3 minutes
Code example #8
File: networking.go  Project: juanluisvaladas/origin
		})
		if err != nil {
			framework.Failf("unable to create test service named [%s] %v", svc.Name, err)
		}

		// Clean up service
		defer func() {
			By("Cleaning up the service")
			if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil {
				framework.Failf("unable to delete svc %v: %v", svc.Name, err)
			}
		}()

		By("Creating a webserver (pending) pod on each node")

		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.Client))
		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		// This test is super expensive in terms of network usage - large services
		// result in huge "Endpoint" objects and all underlying pods read them
		// periodically. Moreover, all KubeProxies watch all of them.
		// Thus we limit the maximum number of pods under a service.
		//
		// TODO: Remove this limitation once services, endpoints and data flows
		// between nodes and master are better optimized.
		maxNodeCount := 250
		if len(nodes.Items) > maxNodeCount {
			nodes.Items = nodes.Items[:maxNodeCount]
		}

		if len(nodes.Items) == 1 {
			// in general, the test requires two nodes. But for local development, often a one node cluster