// start creates every replication controller and service listed in the
// manager's config paths inside the given namespace, waiting for each rc's
// pods to be running before proceeding. On success it records the first
// created service's name as the manager's name and the namespace used.
// Returns the first error encountered; on error the already-created
// resources are left behind for the caller/cleanup to handle.
func (s *ingManager) start(namespace string) (err error) {
	// Create rcs
	for _, rcPath := range s.rcCfgPaths {
		rc := rcFromManifest(rcPath)
		rc.Namespace = namespace
		// Make the pod template carry a "name" label matching the rc name so
		// the services created below can select these pods.
		rc.Spec.Template.Labels["name"] = rc.Name
		rc, err = s.client.ReplicationControllers(rc.Namespace).Create(rc)
		if err != nil {
			return
		}
		if err = framework.WaitForRCPodsRunning(s.client, rc.Namespace, rc.Name); err != nil {
			return
		}
	}
	// Create services.
	// Note that it's up to the caller to make sure the service actually matches
	// the pods of the rc.
	for _, svcPath := range s.svcCfgPaths {
		svc := svcFromManifest(svcPath)
		svc.Namespace = namespace
		svc, err = s.client.Services(svc.Namespace).Create(svc)
		if err != nil {
			return
		}
		// TODO: This is short term till we have an Ingress.
		s.svcNames = append(s.svcNames, svc.Name)
	}
	// BUG FIX: indexing svcNames[0] panicked with index-out-of-range when no
	// service config paths were given; return a descriptive error instead.
	if len(s.svcNames) == 0 {
		return fmt.Errorf("no services created for ingManager in namespace %v", namespace)
	}
	s.name = s.svcNames[0]
	s.namespace = namespace
	return nil
}
// start deploys the haproxy controller rc from h.cfg into the given
// namespace, waits for its pods to run, then collects the external addresses
// of the nodes hosting those pods into h.address. Returns an error if the rc
// cannot be created, its pods never run, listing pods fails, or no external
// address is ever found.
func (h *haproxyControllerTester) start(namespace string) (err error) {

	// Create a replication controller with the given configuration.
	rc := rcFromManifest(h.cfg)
	rc.Namespace = namespace
	rc.Spec.Template.Labels["name"] = rc.Name

	// Add the --namespace arg.
	// TODO: Remove this when we have proper namespace support.
	for i, c := range rc.Spec.Template.Spec.Containers {
		rc.Spec.Template.Spec.Containers[i].Args = append(
			c.Args, fmt.Sprintf("--namespace=%v", namespace))
		framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
	}

	rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return
	}
	if err = framework.WaitForRCPodsRunning(h.client, namespace, rc.Name); err != nil {
		return
	}
	h.rcName = rc.Name
	h.rcNamespace = rc.Namespace

	// Find the pods of the rc we just created.
	labelSelector := labels.SelectorFromSet(
		labels.Set(map[string]string{"name": h.rcName}))
	options := api.ListOptions{LabelSelector: labelSelector}
	pods, err := h.client.Pods(h.rcNamespace).List(options)
	if err != nil {
		return err
	}

	// Find the external addresses of the nodes the pods are running on.
	for _, p := range pods.Items {
		// BUG FIX: shadow the loop variable — the closure below takes &p and
		// must not alias a variable that is reused across iterations.
		p := p
		// BUG FIX: the wait.Poll result was previously discarded, silently
		// swallowing timeouts. Keep the original best-effort semantics (the
		// len(h.address) check below is the real gate) but log the failure.
		pollErr := wait.Poll(pollInterval, framework.ServiceRespondingTimeout, func() (bool, error) {
			address, err := framework.GetHostExternalAddress(h.client, &p)
			if err != nil {
				framework.Logf("%v", err)
				return false, nil
			}
			h.address = append(h.address, address)
			return true, nil
		})
		if pollErr != nil {
			framework.Logf("Timed out waiting for external address of pod %v: %v", p.Name, pollErr)
		}
	}
	if len(h.address) == 0 {
		return fmt.Errorf("No external ips found for loadbalancer %v", h.getName())
	}
	return nil
}
Example #3
0
// createRunningPodFromRC creates a single-replica replication controller in
// namespace ns running one container with the given name, image, and
// cpu/memory requests, then blocks until its pod is running. Intended for
// concurrent use: signals wg when done and recovers panics for Ginkgo.
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
	defer GinkgoRecover()
	defer wg.Done()

	// One label set serves three roles: rc labels, rc selector, and pod
	// template labels, so the rc manages exactly the pod it creates.
	podLabels := map[string]string{
		"type": podType,
		"name": name,
	}

	// The single container with the requested resources.
	container := api.Container{
		Name:  name,
		Image: image,
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				api.ResourceCPU:    cpuRequest,
				api.ResourceMemory: memRequest,
			},
		},
	}

	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: podLabels,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: 1,
			Selector: podLabels,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: podLabels},
				Spec: api.PodSpec{
					Containers: []api.Container{container},
					DNSPolicy:  api.DNSDefault,
				},
			},
		},
	}

	_, err := c.ReplicationControllers(ns).Create(rc)
	framework.ExpectNoError(err)
	framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
	framework.Logf("Found pod '%s' running", name)
}
Example #4
0
// createRunningPodFromRC creates a single-replica replication controller in
// namespace ns running one container with the given name, image, and
// cpu/memory requests, then blocks until its pod is running. Intended for
// concurrent use: signals wg when done and recovers panics for Ginkgo.
func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
	defer GinkgoRecover()
	defer wg.Done()
	// Labels serve as rc labels, rc selector, and pod template labels so
	// the rc manages exactly the pod it creates.
	labels := map[string]string{
		"type": podType,
		"name": name,
	}
	// IDIOM FIX: v1 Replicas is *int32; use a named local and take its
	// address instead of the previous inline closure-and-call construct.
	replicas := int32(1)
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicas,
			Selector: labels,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: labels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  name,
							Image: image,
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU:    cpuRequest,
									v1.ResourceMemory: memRequest,
								},
							},
						},
					},
					DNSPolicy: v1.DNSDefault,
				},
			},
		},
	}
	_, err := c.Core().ReplicationControllers(ns).Create(rc)
	framework.ExpectNoError(err)
	framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
	framework.Logf("Found pod '%s' running", name)
}
			var err error
			namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
			Expect(err).NotTo(HaveOccurred())
		}

		for _, ns := range namespaces {
			framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
		}

		for _, ns := range namespaces {
			framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
		}

		// wait for objects
		for _, ns := range namespaces {
			framework.WaitForRCPodsRunning(c, ns.Name, backendRcName)
			framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
		}
		// it is not enough that pods are running because they may be set to running, but
		// the application itself may have not been initialized. Just query the application.
		for _, ns := range namespaces {
			label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
			options := api.ListOptions{LabelSelector: label}
			pods, err := c.Pods(ns.Name).List(options)
			Expect(err).NotTo(HaveOccurred())
			err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
			Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
			framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)

			err = framework.ServiceResponding(c, ns.Name, backendSvcName)
			Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
Example #6
0
// create deploys the GLBC ingress controller into cont.ns, reusing the spec
// of the existing kube-system lb addon rc (so the pod template matches what
// an upgraded cluster actually runs) and appending test-only command line
// flags. Optionally creates a default backend service first when
// cont.defaultSvcPath is set. Fails the test on any error.
func (cont *IngressController) create() {

	// TODO: This cop out is because it would be *more* brittle to duplicate all
	// the name construction logic from the controller cross-repo. We will not
	// need to be so paranoid about leaked resources once we figure out a solution
	// for issues like #16337. Currently, all names should fall within 63 chars.
	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
	if len(testName) > nameLenLimit {
		framework.Failf("Cannot reliably test the given namespace(%v)/uid(%v), too close to GCE limit of %v",
			cont.ns, cont.UID, nameLenLimit)
	}

	if cont.defaultSvcPath != "" {
		svc := svcFromManifest(cont.defaultSvcPath)
		svc.Namespace = cont.ns
		svc.Labels = controllerLabels
		svc.Spec.Selector = controllerLabels
		cont.svc = svc
		_, err := cont.c.Services(cont.ns).Create(cont.svc)
		Expect(err).NotTo(HaveOccurred())
	}
	rc := rcFromManifest(cont.rcPath)

	// There must be exactly one existing lb cluster addon rc to clone the
	// spec from; anything else means the cluster is in an unexpected state.
	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(clusterAddonLBLabels))}
	existingRCs, err := cont.c.ReplicationControllers(api.NamespaceSystem).List(listOpts)
	Expect(err).NotTo(HaveOccurred())
	if len(existingRCs.Items) != 1 {
		framework.Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels)
	}

	// Merge the existing spec and new spec. The modifications should not
	// manifest as functional changes to the controller. Most importantly, the
	// podTemplate shouldn't change (but for the additional test cmd line flags)
	// to ensure we test actual cluster functionality across upgrades.
	rc.Spec = existingRCs.Items[0].Spec
	rc.Name = "glbc"
	rc.Namespace = cont.ns
	rc.Labels = controllerLabels
	rc.Spec.Selector = controllerLabels
	rc.Spec.Template.Labels = controllerLabels
	rc.Spec.Replicas = 1

	// These command line params are only recognized by v0.51 and above.
	// NOTE(review): cont.svc is only set when defaultSvcPath != "" above;
	// the --default-backend-service flag below dereferences it and would
	// panic otherwise — confirm all callers set defaultSvcPath.
	testArgs := []string{
		// Pass namespace uid so the controller will tag resources with it.
		fmt.Sprintf("--cluster-uid=%v", cont.UID),
		// Tell the controller to delete all resources as it quits.
		// BUG FIX (staticcheck S1039): was fmt.Sprintf with no format args.
		"--delete-all-on-quit=true",
		// Don't use the default Service from kube-system.
		fmt.Sprintf("--default-backend-service=%v/%v", cont.svc.Namespace, cont.svc.Name),
	}
	for i, c := range rc.Spec.Template.Spec.Containers {
		if c.Name == lbContainerName {
			rc.Spec.Template.Spec.Containers[i].Args = append(c.Args, testArgs...)
		}
	}
	cont.rc = rc
	_, err = cont.c.ReplicationControllers(cont.ns).Create(cont.rc)
	Expect(err).NotTo(HaveOccurred())
	Expect(framework.WaitForRCPodsRunning(cont.c, cont.ns, cont.rc.Name)).NotTo(HaveOccurred())
}