Example 1
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify those replication controllers. Using a label rather than an
	// explicit name is preferred because names typically carry a version suffix,
	// e.g. heapster-monitoring-v1, which changes after a rolling update, e.g. to
	// heapster-monitoring-v2. A label query also lets us detect the situation
	// where both a heapster-monitoring-v1 and a heapster-monitoring-v2
	// replication controller are running (an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
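The function above relies on a package-level rcLabels slice that is not part of this example. A minimal sketch of what such a declaration could look like (the label values here are illustrative assumptions, not taken from the original source):

// Hypothetical stand-in for the rcLabels referenced above; the actual values
// depend on which system addons the cluster runs.
var rcLabels = []string{"kube-dns", "elasticsearch-logging", "kibana-logging", "monitoring-heapster"}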
Example 2
func validateRouter(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	requirement, err := labels.NewRequirement("router", labels.EqualsOperator, kutil.NewStringSet("router"))
	if err != nil {
		return Failure, err
	}
	label := labels.LabelSelector{*requirement}

	rc, err := c.ReplicationControllers(ns).List(label)
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}
	if rc != nil {
		items := rc.Items
		if len(items) > 0 {
			return Success, err
		}
	}
	//util.Fatalf("No router running in namespace %s\n", ns)
	// TODO: let's create a router
	return Failure, err
}
Example 3
func NewTurboScheduler(kubeClient *client.Client, vmturboMeta *vmtmeta.VMTMeta) *TurboScheduler {
	scheduledPodLister := &cache.StoreToPodLister{}
	podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: podQueue}, scheduledPodLister)

	bindPodsQPS := float32(15.0)
	bindPodsBurst := 20
	rateLimiter := util.NewTokenBucketRateLimiter(bindPodsQPS, bindPodsBurst)

	config := &Config{
		Modeler:             modeler,
		Binder:              &binder{kubeClient},
		BindPodsRateLimiter: rateLimiter,
	}
	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "turboscheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	vmtSched := vmtscheduler.NewVMTScheduler(kubeClient, vmturboMeta)
	glog.V(3).Infof("VMTScheduler is set: %++v", vmtSched)

	defaultSched := defaultscheduler.NewDefaultScheduler(kubeClient)
	glog.V(3).Infof("DefaultScheduler is set: %++v", defaultSched)

	return &TurboScheduler{
		config:           config,
		vmtScheduler:     vmtSched,
		defaultScheduler: defaultSched,
	}
}
Example 4
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it
// starts up a temporary RC, scales it to match numPods, then deletes the RC,
// leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
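A hypothetical invocation of StartPods (the host name is illustrative); note that TestNS, TestRCManifest, and RCFromManifest are package-level fixtures not shown here:

// Ensure five pods exist on the given host, creating them via a temporary RC
// if needed.
if err := StartPods(5, "127.0.0.1", restClient); err != nil {
	glog.Fatalf("failed to start pods: %v", err)
}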
Example 5
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, nil)
	_, err := podClient.Create(pod)
	if err != nil {
		Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err = waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for the pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
Example 6
func createRoutesForDomain(ns string, domain string, c *k8sclient.Client, oc *oclient.Client, fac *cmdutil.Factory) error {
	svcList, err := c.Services(ns).List(labels.Everything())
	if err != nil {
		util.Errorf("Failed to load services in namespace %s with error %v", ns, err)
		return err
	}
	items := svcList.Items
	for _, service := range items {
		// TODO use the external load balancer as a way to know if we should create a route?
		name := service.ObjectMeta.Name
		if name != "kubernetes" {
			routes := oc.Routes(ns)
			_, err = routes.Get(name)
			if err != nil {
				hostName := name + "." + domain
				route := rapi.Route{
					ObjectMeta: kapi.ObjectMeta{
						Name: name,
					},
					Host:        hostName,
					ServiceName: name,
				}
				// let's create the route
				_, err = routes.Create(&route)
				if err != nil {
					util.Errorf("Failed to create the route %s with error %v", name, err)
					return err
				}
			}
		}
	}
	return nil
}
Example 7
func verifyResult(c *client.Client, podName string, ns string, oldNotRunning int) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	_, notRunningPods := getPodsNumbers(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector())
	expectNoError(err)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(notRunningPods).To(Equal(1+oldNotRunning), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
}
Example 8
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
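The retry parameters maxRetries and sleepDuration used above are package-level values defined elsewhere; a hypothetical pair of declarations consistent with this usage:

// Assumed values for illustration; the originals live elsewhere in the package.
const maxRetries = 6

var sleepDuration = 10 * time.Second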
Example 9
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("pod has no containers! (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
Example 10
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
Example 11
// Creates a replication controller that serves its hostname and a service on top of it.
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
	podNames := make([]string, replicas)

	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: util.NewIntOrStringFromInt(9376),
				Protocol:   "TCP",
			}},
			Selector: map[string]string{
				"name": name,
			},
		},
	})
	if err != nil {
		return podNames, "", err
	}

	var createdPods []*api.Pod
	maxContainerFailures := 0
	config := RCConfig{
		Client:               c,
		Image:                "gcr.io/google_containers/serve_hostname:1.1",
		Name:                 name,
		Namespace:            ns,
		PollInterval:         3 * time.Second,
		Timeout:              30 * time.Second,
		Replicas:             replicas,
		CreatedPods:          &createdPods,
		MaxContainerFailures: &maxContainerFailures,
	}
	err = RunRC(config)
	if err != nil {
		return podNames, "", err
	}

	if len(createdPods) != replicas {
		return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods))
	}

	for i := range createdPods {
		podNames[i] = createdPods[i].ObjectMeta.Name
	}
	sort.Strings(podNames)

	service, err := c.Services(ns).Get(name)
	if err != nil {
		return podNames, "", err
	}
	if service.Spec.ClusterIP == "" {
		return podNames, "", fmt.Errorf("Service IP is blank for %v", name)
	}
	serviceIP := service.Spec.ClusterIP
	return podNames, serviceIP, nil
}
Example 12
// Performs a GET on a node proxy endpoint, given the node name and REST client.
func nodeProxyRequest(c *client.Client, node, endpoint string) client.Result {
	return c.Get().
		Prefix("proxy").
		Resource("nodes").
		Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
		Suffix(endpoint).
		Do()
}
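A hypothetical call site for nodeProxyRequest, showing how the returned client.Result is typically consumed (the node name and endpoint are illustrative):

// Execute the proxied GET and read the raw response body, mirroring the
// Do().Raw() pattern used in the cadvisor check above.
body, err := nodeProxyRequest(c, "node-1", "stats/").Raw()
if err != nil {
	Failf("node proxy request failed: %v", err)
}
Logf("got %d bytes of stats", len(body))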
Example 13
func newProjectAuthorizationCache(openshiftClient *osclient.Client, kubeClient *kclient.Client,
	policyClient policyclient.ReadOnlyPolicyClient) *projectauth.AuthorizationCache {
	return projectauth.NewAuthorizationCache(
		projectauth.NewReviewer(openshiftClient),
		kubeClient.Namespaces(),
		policyClient,
	)
}
Example 14
func stopServeHostnameService(c *client.Client, ns, name string) error {
	if err := DeleteRC(c, ns, name); err != nil {
		return err
	}
	if err := c.Services(ns).Delete(name); err != nil {
		return err
	}
	return nil
}
Example 15
// generateSecretsConfig generates any Secret and Volume objects, such
// as SSH private keys, that are necessary for the router container.
func generateSecretsConfig(cfg *RouterConfig, kClient *kclient.Client,
	namespace string) ([]*kapi.Secret, []kapi.Volume, []kapi.VolumeMount,
	error) {
	secrets := []*kapi.Secret{}
	volumes := []kapi.Volume{}
	mounts := []kapi.VolumeMount{}

	if len(cfg.ExternalHostPrivateKey) != 0 {
		privkeyData, err := loadKey(cfg.ExternalHostPrivateKey)
		if err != nil {
			return secrets, volumes, mounts, fmt.Errorf("error reading private key"+
				" for external host: %v", err)
		}

		serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount)
		if err != nil {
			return secrets, volumes, mounts, fmt.Errorf("error looking up"+
				" service account %s: %v", cfg.ServiceAccount, err)
		}

		privkeySecret := &kapi.Secret{
			ObjectMeta: kapi.ObjectMeta{
				Name: privkeySecretName,
				Annotations: map[string]string{
					kapi.ServiceAccountNameKey: serviceAccount.Name,
					kapi.ServiceAccountUIDKey:  string(serviceAccount.UID),
				},
			},
			Data: map[string][]byte{privkeyName: privkeyData},
		}

		secrets = append(secrets, privkeySecret)
	}

	// We need a secrets volume and mount iff we have secrets.
	if len(secrets) != 0 {
		secretsVolume := kapi.Volume{
			Name: secretsVolumeName,
			VolumeSource: kapi.VolumeSource{
				Secret: &kapi.SecretVolumeSource{
					SecretName: privkeySecretName,
				},
			},
		}

		secretsMount := kapi.VolumeMount{
			Name:      secretsVolumeName,
			ReadOnly:  true,
			MountPath: secretsPath,
		}

		volumes = []kapi.Volume{secretsVolume}
		mounts = []kapi.VolumeMount{secretsMount}
	}

	return secrets, volumes, mounts, nil
}
Example 16
func resizeRC(c *client.Client, ns, name string, replicas int) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return err
	}
	rc.Spec.Replicas = replicas
	_, err = c.ReplicationControllers(rc.Namespace).Update(rc)
	return err
}
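Because resizeRC performs a read-modify-write, a concurrent writer can make the Update fail with a conflict. Below is a minimal retry wrapper, sketched under the assumption that the client library's errors package (with errors.IsConflict) is imported; resizeRCWithRetry is a hypothetical helper, not part of the original code:

// Retry the resize a few times when the API server reports a write conflict;
// any other outcome (success or a different error) is returned immediately.
func resizeRCWithRetry(c *client.Client, ns, name string, replicas int) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = resizeRC(c, ns, name, replicas); err == nil || !errors.IsConflict(err) {
			return err
		}
	}
	return err
}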
Example 17
func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
	pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
	if err == nil {
		Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
	} else {
		Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
	}
	return err
}
Example 18
func countRemaining(c *client.Client, withName string) (int, error) {
	cnt := 0
	nsList, err := c.Namespaces().List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	for _, item := range nsList.Items {
		if strings.Contains(item.Name, "nslifetest") {
			cnt++
		}
	}
	return cnt, nil
}
Example 19
func LoadExistingNextReplicationController(c *client.Client, namespace, newName string) (*api.ReplicationController, error) {
	if len(newName) == 0 {
		return nil, nil
	}
	newRc, err := c.ReplicationControllers(namespace).Get(newName)
	if err != nil && errors.IsNotFound(err) {
		return nil, nil
	}
	return newRc, err
}
Example 20
func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: "kubernetes/pause",
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}

	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}

	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
	}

	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
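The podRunning helper passed to wait.Poll above is not shown. A minimal sketch of such a condition function, assuming the same client API (the real helper may differ):

// podRunning returns a wait.ConditionFunc that reports whether the named pod
// has reached the Running phase.
func podRunning(c *client.Client, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(namespace).Get(name)
		if err != nil {
			return false, err
		}
		return pod.Status.Phase == api.PodRunning, nil
	}
}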
Example 21
func getAllNodesInCluster(c *client.Client) ([]string, error) {
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}
	result := []string{}
	for _, node := range nodeList.Items {
		result = append(result, node.Name)
	}
	return result, nil
}
Example 22
func validateTemplateService(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	svc, err := c.Services(ns).Get("templates")
	if svc != nil {
		return Success, err
	}
	return Failure, err
}
Example 23
func validateConsoleDeployment(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rc, err := c.ReplicationControllers(ns).Get("fabric8")
	if rc != nil {
		return Success, err
	}
	return Failure, err
}
Example 24
func validateServiceAccount(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	sa, err := c.ServiceAccounts(ns).Get("fabric8")
	if sa != nil {
		return Success, err
	}
	return Failure, err
}
Example 25
func createSecret(c *k8sclient.Client, f *cmdutil.Factory, flags *flag.FlagSet, secretDataIdentifiers string, secretType string, keysNames []string) (Result, error) {
	newSecret := secret(secretDataIdentifiers, secretType, keysNames, flags)
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rs, err := c.Secrets(ns).Create(&newSecret)
	if rs != nil {
		return Success, err
	}
	return Failure, err
}
Example 26
func getMinionPublicIps(c *client.Client) ([]string, error) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}

	ips := collectAddresses(nodes, api.NodeExternalIP)
	if len(ips) == 0 {
		ips = collectAddresses(nodes, api.NodeLegacyHostIP)
	}
	return ips, nil
}
Example 27
func getReferencedServiceAccountToken(c *client.Client, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Secrets(ns).Get(ref.Name)
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != api.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[api.ServiceAccountNameKey]
			uid := secret.Annotations[api.ServiceAccountUIDKey]
			tokenData := secret.Data[api.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
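A hypothetical call, waiting up to ten seconds for the token controller to populate the default service account's token secret:

// Fetch (and, with shouldWait=true, poll for) the referenced token.
tokenName, token, err := getReferencedServiceAccountToken(c, ns, "default", true)
if err != nil {
	Failf("failed to get service account token: %v", err)
}
Logf("found token secret %s (%d bytes of token data)", tokenName, len(token))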
Example 28
// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's scaler.
func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
	created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
	if err != nil {
		glog.Infof("Rc %v doesn't exist, creating", controller.Name)
		created, err = restClient.ReplicationControllers(controller.Namespace).Create(controller)
		if err != nil {
			return nil, err
		}
	}
	// If we just created an rc, wait till it creates its replicas.
	return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
}
Example 29
func getServiceAccountPullSecret(client *kclient.Client, ns, name string) (string, error) {
	secrets, err := client.Secrets(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	for _, secret := range secrets.Items {
		if secret.Type == api.SecretTypeDockercfg && secret.Annotations[api.ServiceAccountNameKey] == name {
			return string(secret.Data[api.DockerConfigKey]), nil
		}
	}
	return "", nil
}
Example 30
// Cleans up both the server and client pods.
func volumeTestCleanup(client *client.Client, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	podClient := client.Pods(config.namespace)
	serviceClient := client.Services(config.namespace)

	// ignore all errors; the pods may not even have been created
	podClient.Delete(config.prefix+"-client", nil)
	serviceClient.Delete(config.prefix + "-server")
	podClient.Delete(config.prefix+"-server", nil)
}