Code Example #1
File: validate.go Project: gashcrumb/gofabric8
func validateRouter(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	requirement, err := labels.NewRequirement("router", labels.EqualsOperator, sets.NewString("router"))
	if err != nil {
		return Failure, err
	}
	label := labels.NewSelector().Add(*requirement)

	rc, err := c.ReplicationControllers(ns).List(api.ListOptions{LabelSelector: label})
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}
	if rc != nil {
		items := rc.Items
		if len(items) > 0 {
			return Success, err
		}
	}
	//util.Fatalf("No router running in namespace %s\n", ns)
	// TODO lets create a router
	return Failure, err
}
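
The requirement built above expresses the single equality match "router=router"; the same selector can be written more compactly from a label set. A minimal sketch, assuming the labels package from the same Kubernetes release as the example:

	// Equivalent selector built from a labels.Set instead of a hand-rolled Requirement.
	label := labels.Set{"router": "router"}.AsSelector()
	rc, err := c.ReplicationControllers(ns).List(api.ListOptions{LabelSelector: label})
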
Code Example #2
File: monitoring.go Project: pologood/kubernetes
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. The rcLabels slice contains the values for the k8s-app key
	// that identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when heapster-monitoring-v1 and heapster-monitoring-v2 replication controllers
	// are both running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector(), fields.Everything())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
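
The rcLabels slice referenced above is defined elsewhere in the test file. A hypothetical definition, consistent with the heapster example in the comment (the real values depend on which monitoring add-ons are under test):

// Hypothetical: k8s-app label values for the monitoring RCs being verified.
var rcLabels = []string{"heapster", "influxGrafana"}
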
Code Example #3
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc))
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
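
A hedged usage sketch of the function above, with illustrative names only: passing an empty newName lets the function derive one from the hash, and the deployment key ends up in both the selector and the pod template labels.

	newRc, err := CreateNewControllerFromCurrentController(
		c, "default", "frontend", "", "nginx:1.9.1", "deployment")
	if err != nil {
		return err
	}
	// newRc.Name is now "frontend-<hash>" and newRc.ResourceVersion is cleared,
	// so it is ready to be created as the "next" controller of a rolling update.
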
Code Example #4
File: main.go Project: coreos/kscale
func createRC(c *client.Client, nsID, rcID, podNum int) {
	var args []string
	if podMarkerSize != 0 {
		args = []string{string(garbage)}
	}
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: makeRCName(rcID),
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(podNum),
			Selector: makeLabel(nsID, rcID),
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: makeLabel(nsID, rcID),
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "none",
							Image: "none",
							Args:  args,
						},
					},
				},
			},
		},
	}
	if _, err := c.ReplicationControllers(makeNS(nsID)).Create(rc); err != nil {
		ExitError("create rc (%s/%s), failed: %v", makeNS(nsID), makeRCName(rcID), err)
	}
	fmt.Printf("created rc (%s'%s)\n", makeNS(nsID), makeRCName(rcID))
}
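
The helpers makeNS, makeRCName, and makeLabel are not shown above. Plausible sketches, consistent with how they are called (the real kscale implementations may differ; assumes fmt and strconv are imported):

func makeNS(nsID int) string     { return fmt.Sprintf("ns-%d", nsID) }
func makeRCName(rcID int) string { return fmt.Sprintf("rc-%d", rcID) }

func makeLabel(nsID, rcID int) map[string]string {
	// Unique per (namespace, RC) pair so each RC selects only its own pods.
	return map[string]string{"ns": strconv.Itoa(nsID), "rc": strconv.Itoa(rcID)}
}
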
Code Example #5
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
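
A minimal usage sketch, assuming the TestNS constant and a running apiserver from the same test harness:

	// Ensure exactly 10 pods exist on node "127.0.0.1" before the benchmark runs.
	if err := StartPods(10, "127.0.0.1", restClient); err != nil {
		glog.Fatalf("StartPods failed: %v", err)
	}
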
Code Example #6
File: model.go Project: krancour/deis-router
func getRC(kubeClient *client.Client) (*api.ReplicationController, error) {
	rcClient := kubeClient.ReplicationControllers(namespace)
	rc, err := rcClient.Get("deis-router")
	if err != nil {
		return nil, err
	}
	return rc, nil
}
Code Example #7
File: resize_nodes.go Project: remoteur/kubernetes
func resizeRC(c *client.Client, ns, name string, replicas int) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return err
	}
	rc.Spec.Replicas = replicas
	_, err = c.ReplicationControllers(rc.Namespace).Update(rc)
	return err
}
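
Because resizeRC does a read-modify-write, the Update can fail with a conflict if something else changed the controller in between. A hedged retry wrapper, assuming the errors package from the same client library:

func resizeRCWithRetry(c *client.Client, ns, name string, replicas int) error {
	var err error
	for i := 0; i < 3; i++ {
		// Re-reading inside resizeRC picks up the latest resourceVersion.
		if err = resizeRC(c, ns, name, replicas); err == nil || !errors.IsConflict(err) {
			return err
		}
	}
	return err
}
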
Code Example #8
File: rolling_updater.go Project: ngbinh/kubernetes
func LoadExistingNextReplicationController(c *client.Client, namespace, newName string) (*api.ReplicationController, error) {
	if len(newName) == 0 {
		return nil, nil
	}
	newRc, err := c.ReplicationControllers(namespace).Get(newName)
	if err != nil && errors.IsNotFound(err) {
		return nil, nil
	}
	return newRc, err
}
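
A hedged usage sketch, with an illustrative controller name: the nil, nil return is deliberate, letting callers distinguish "no next controller yet" from a real API failure.

	next, err := LoadExistingNextReplicationController(c, "default", "frontend-abc12")
	if err != nil {
		return err // a genuine API error, not just a missing controller
	}
	if next == nil {
		// No existing next RC; the rolling updater will create a fresh one.
	}
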
Code Example #9
File: validate.go Project: gashcrumb/gofabric8
func validateConsoleDeployment(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rc, err := c.ReplicationControllers(ns).Get("fabric8")
	if rc != nil {
		return Success, err
	}
	return Failure, err
}
Code Example #10
// StartRC creates the given rc if it doesn't already exist, then updates it via kubectl's scaler.
func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
	created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
	if err != nil {
		glog.Infof("Rc %v doesn't exist, creating", controller.Name)
		created, err = restClient.ReplicationControllers(controller.Namespace).Create(controller)
		if err != nil {
			return nil, err
		}
	}
	// If we just created an rc, wait till it creates its replicas.
	return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
}
Code Example #11
File: cleanup.go Project: rawlingsj/gofabric8
func deleteReplicationControllers(c *k8sclient.Client, ns string, selector labels.Selector) error {
	rcs, err := c.ReplicationControllers(ns).List(api.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for _, rc := range rcs.Items {
		err := c.ReplicationControllers(ns).Delete(rc.Name)
		if err != nil {
			return errors.Wrap(err, fmt.Sprintf("failed to delete ReplicationController %s", rc.Name))
		}
	}
	return nil
}
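
An illustrative call, with a hypothetical label set; this deletes every RC in the namespace carrying provider=fabric8:

	selector := labels.Set{"provider": "fabric8"}.AsSelector()
	if err := deleteReplicationControllers(c, "default", selector); err != nil {
		return err
	}
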
Code Example #12
// Returns a list of all microservices in the cluster.
func GetMicroserviceList(client *client.Client) (*MicroserviceList, error) {
	list, err := client.ReplicationControllers(api.NamespaceAll).
		List(labels.Everything(), fields.Everything())

	if err != nil {
		return nil, err
	}

	microserviceList := &MicroserviceList{}

	for _, element := range list.Items {
		var containerImages []string

		for _, container := range element.Spec.Template.Spec.Containers {
			containerImages = append(containerImages, container.Image)
		}

		microserviceList.Microservices = append(microserviceList.Microservices, Microservice{
			Name: element.ObjectMeta.Name,
			ReplicaSet: ReplicaSet{
				ContainerImages: containerImages,
				PodsRunning:     element.Status.Replicas,
				PodsDesired:     element.Spec.Replicas,
			},
		})
	}

	return microserviceList, nil
}
Code Example #13
File: events.go Project: lohmander/dashboard
// Gets events associated with pods in the replication controller.
func GetReplicationControllerPodsEvents(client *client.Client, namespace, replicationControllerName string) ([]api.Event,
	error) {
	replicationController, err := client.ReplicationControllers(namespace).Get(replicationControllerName)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(api.ListOptions{
		LabelSelector: labels.SelectorFromSet(replicationController.Spec.Selector),
		FieldSelector: fields.Everything(),
	})

	if err != nil {
		return nil, err
	}

	events, err := GetPodsEvents(client, pods)

	if err != nil {
		return nil, err
	}

	return events, nil
}
Code Example #14
// Returns a list of all Replica Sets in the cluster.
func GetReplicaSetList(client *client.Client) (*ReplicaSetList, error) {
	list, err := client.ReplicationControllers(api.NamespaceAll).
		List(labels.Everything(), fields.Everything())

	if err != nil {
		return nil, err
	}

	replicaSetList := &ReplicaSetList{}

	for _, replicaSet := range list.Items {
		var containerImages []string

		for _, container := range replicaSet.Spec.Template.Spec.Containers {
			containerImages = append(containerImages, container.Image)
		}

		replicaSetList.ReplicaSets = append(replicaSetList.ReplicaSets, ReplicaSet{
			Name:            replicaSet.ObjectMeta.Name,
			ContainerImages: containerImages,
			PodsRunning:     replicaSet.Status.Replicas,
			PodsDesired:     replicaSet.Spec.Replicas,
		})
	}

	return replicaSetList, nil
}
Code Example #15
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(restClient))
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := restClient.ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
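
A hedged example of calling ScaleRC, assuming DefaultTimeout comes from the same test utilities: both retry parameters poll at 50ms, so the call blocks until the controller reports the requested replica count or the timeout expires.

	scaled, err := ScaleRC("frontend", "default", 5, restClient)
	if err != nil {
		return err
	}
	glog.Infof("rc %q scaled to %d replicas", scaled.Name, scaled.Spec.Replicas)
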
Code Example #16
// GetReplicationControllerList returns a list of all Replication Controllers in the cluster.
func GetReplicationControllerList(client *client.Client) (*ReplicationControllerList, error) {
	log.Printf("Getting list of all replication controllers in the cluster")

	listEverything := api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	}

	replicationControllers, err := client.ReplicationControllers(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	services, err := client.Services(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	eventsList, err := client.Events(api.NamespaceAll).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})

	if err != nil {
		return nil, err
	}

	// Anonymous callback function to get pods warnings.
	// The function fulfils the GetPodsEventWarningsFunc type contract:
	// given a list of API pods, it returns the pod-related warning events.
	getPodsEventWarningsFn := func(pods []api.Pod) []Event {
		return GetPodsEventWarnings(eventsList, pods)
	}

	// Anonymous callback function to get nodes by their names.
	getNodeFn := func(nodeName string) (*api.Node, error) {
		return client.Nodes().Get(nodeName)
	}

	result, err := getReplicationControllerList(replicationControllers.Items, services.Items,
		pods.Items, getPodsEventWarningsFn, getNodeFn)

	if err != nil {
		return nil, err
	}

	return result, nil
}
Code Example #17
File: integration.go Project: koori02/kubernetes
func runReplicationControllerTest(c *client.Client) {
	t := time.Now()
	clientAPIVersion := c.APIVersion().String()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done reading config file, took %v", time.Since(t))
	t = time.Now()
	var controller api.ReplicationController
	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers, took %v", time.Since(t))
	t = time.Now()

	// In practice the controller doesn't need 60s to create a handful of pods, but network latencies on CI
	// systems have been observed to vary unpredictably, so give the controller enough time to create pods.
	// Our e2e scalability tests will catch controllers that are *actually* slow.
	if err := wait.Poll(time.Second, longTestTimeout, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}
	glog.Infof("Done creating replicas, took %v", time.Since(t))
	t = time.Now()

	// Poll till we can retrieve the status of all pods matching the given label selector from their nodes.
	// This involves 3 operations:
	//	- The scheduler must assign all pods to a node
	//	- The assignment must reflect in a `List` operation against the apiserver, for labels matching the selector
	//	- We need to be able to query the kubelet on that node for information about the pod
	if err := wait.Poll(
		time.Second, longTestTimeout, podsOnNodes(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods verified on nodes, took %v", time.Since(t))
}
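
The podsOnNodes condition polled above is defined elsewhere in the integration test. A hypothetical sketch of just the first of the three operations listed in the comment, namely checking that the scheduler has assigned every matching pod to a node (assuming the two-argument List signature used in other examples on this page):

func podsScheduled(c *client.Client, ns string, sel labels.Selector) wait.ConditionFunc {
	return func() (bool, error) {
		pods, err := c.Pods(ns).List(sel, fields.Everything())
		if err != nil {
			return false, err
		}
		for _, pod := range pods.Items {
			if pod.Spec.NodeName == "" {
				return false, nil // at least one pod is still unscheduled
			}
		}
		return true, nil
	}
}
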
Code Example #18
File: kubectl.go Project: niniwzw/kubernetes
func forEachReplicationController(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) {
	var rcs *api.ReplicationControllerList
	var err error
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		rcs, err = c.ReplicationControllers(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		if len(rcs.Items) > 0 {
			break
		}
	}

	if rcs == nil || len(rcs.Items) == 0 {
		Failf("No replication controllers found")
	}

	for _, rc := range rcs.Items {
		fn(rc)
	}
}
Code Example #19
File: density.go Project: CodeJuan/kubernetes
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
	defer GinkgoRecover()
	defer wg.Done()
	labels := map[string]string{
		"type": podType,
		"name": name,
	}
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: 1,
			Selector: labels,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: labels,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Resources: api.ResourceRequirements{
								Requests: api.ResourceList{
									api.ResourceCPU:    cpuRequest,
									api.ResourceMemory: memRequest,
								},
							},
						},
					},
					DNSPolicy: api.DNSDefault,
				},
			},
		},
	}
	_, err := c.ReplicationControllers(ns).Create(rc)
	framework.ExpectNoError(err)
	framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
	framework.Logf("Found pod '%s' running", name)
}
Code Example #20
File: ingress.go Project: ethernetdan/kubernetes
// createApp will create a single RC and Svc. The Svc will match pods of the
// RC using the selector: 'name'=<name arg>
func createApp(c *client.Client, ns string, i int) {
	name := fmt.Sprintf("%v%d", appPrefix, i)
	l := map[string]string{}

	Logf("Creating svc %v", name)
	svc := svcByName(name, httpContainerPort)
	svc.Spec.Type = api.ServiceTypeNodePort
	_, err := c.Services(ns).Create(svc)
	Expect(err).NotTo(HaveOccurred())

	Logf("Creating rc %v", name)
	rc := rcByNamePort(name, 1, testImage, httpContainerPort, api.ProtocolTCP, l)
	rc.Spec.Template.Spec.Containers[0].Args = []string{
		"--num=1",
		fmt.Sprintf("--start=%d", i),
		fmt.Sprintf("--prefix=%v", pathPrefix),
		fmt.Sprintf("--port=%d", httpContainerPort),
	}
	_, err = c.ReplicationControllers(ns).Create(rc)
	Expect(err).NotTo(HaveOccurred())
}
Code Example #21
// GetReplicationControllerList returns a list of all Replication Controllers in the cluster.
func GetReplicationControllerList(client *client.Client) (*ReplicationControllerList, error) {
	log.Printf("Getting list of all replication controllers in the cluster")

	listEverything := unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.Everything()},
		FieldSelector: unversioned.FieldSelector{fields.Everything()},
	}

	replicationControllers, err := client.ReplicationControllers(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	services, err := client.Services(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	// Anonymous callback function to get pods warnings.
	// The function fulfils the GetPodsEventWarningsFunc type contract:
	// given a list of API pods, it returns the pod-related warning events.
	getPodsEventWarningsFn := func(pods []api.Pod) ([]PodEvent, error) {
		errors, err := GetPodsEventWarnings(client, pods)

		if err != nil {
			return nil, err
		}

		return errors, nil
	}

	result, err := getReplicationControllerList(replicationControllers.Items, services.Items, pods.Items, getPodsEventWarningsFn)

	if err != nil {
		return nil, err
	}

	return result, nil
}
Code Example #22
File: replicasetdetail.go Project: tacy/dashboard
// DeleteReplicaSetWithPods deletes the replica set with the given name in the given namespace, along with its related pods.
// TODO(floreks): This should be transactional to make sure that the RC will not be deleted without its pods.
// TODO(floreks): Should related services be deleted also?
func DeleteReplicaSetWithPods(client *client.Client, namespace string, name string) error {
	pods, err := getRawReplicaSetPods(client, namespace, name)
	if err != nil {
		return err
	}

	if err := client.ReplicationControllers(namespace).Delete(name); err != nil {
		return err
	}

	for _, pod := range pods.Items {
		if err := client.Pods(namespace).Delete(pod.Name, &api.DeleteOptions{}); err != nil {
			return err
		}
	}

	return nil
}
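
The first TODO above points at a race: between listing the pods and deleting the RC, the controller manager may still be replacing pods. A hedged alternative ordering, assuming the same client API, is to scale the controller to zero first so no new pods are created:

func deleteReplicaSetGracefully(client *client.Client, namespace, name string) error {
	rc, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return err
	}
	rc.Spec.Replicas = 0
	if _, err := client.ReplicationControllers(namespace).Update(rc); err != nil {
		return err
	}
	// A production reaper would wait here for Status.Replicas to reach zero.
	return client.ReplicationControllers(namespace).Delete(name)
}
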
Code Example #23
File: events.go Project: tacy/dashboard
// Gets events associated with pods in the replica set.
func GetReplicaSetPodsEvents(client *client.Client, namespace, replicaSetName string) ([]api.Event,
	error) {
	replicaSet, err := client.ReplicationControllers(namespace).Get(replicaSetName)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.SelectorFromSet(replicaSet.Spec.Selector)},
		FieldSelector: unversioned.FieldSelector{fields.Everything()},
	})

	if err != nil {
		return nil, err
	}

	events := make([]api.Event, 0, 0)

	for _, pod := range pods.Items {
		fieldSelector, err := fields.ParseSelector("involvedObject.name=" + pod.Name)

		if err != nil {
			return nil, err
		}

		list, err := client.Events(namespace).List(unversioned.ListOptions{
			LabelSelector: unversioned.LabelSelector{labels.Everything()},
			FieldSelector: unversioned.FieldSelector{fieldSelector},
		})

		if err != nil {
			return nil, err
		}

		for _, event := range list.Items {
			events = append(events, event)
		}

	}

	return events, nil
}
Code Example #24
File: replicasetlist.go Project: tacy/dashboard
// Returns a list of all Replica Sets in the cluster.
func GetReplicaSetList(client *client.Client) (*ReplicaSetList, error) {
	replicaSets, err := client.ReplicationControllers(api.NamespaceAll).List(
		unversioned.ListOptions{
			LabelSelector: unversioned.LabelSelector{labels.Everything()},
			FieldSelector: unversioned.FieldSelector{fields.Everything()},
		})

	if err != nil {
		return nil, err
	}

	services, err := client.Services(api.NamespaceAll).List(
		unversioned.ListOptions{
			LabelSelector: unversioned.LabelSelector{labels.Everything()},
			FieldSelector: unversioned.FieldSelector{fields.Everything()},
		})

	if err != nil {
		return nil, err
	}

	return getReplicaSetList(replicaSets.Items, services.Items), nil
}
Code Example #25
// Returns detailed information about the given replica set in the given namespace.
func GetReplicaSetDetail(client *client.Client, namespace string, name string) (
	*ReplicaSetDetail, error) {

	replicaSet, err := client.ReplicationControllers(namespace).Get(name)

	if err != nil {
		return nil, err
	}

	replicaSetDetail := &ReplicaSetDetail{
		Name:          replicaSet.Name,
		Namespace:     replicaSet.Namespace,
		Labels:        replicaSet.ObjectMeta.Labels,
		LabelSelector: replicaSet.Spec.Selector,
		PodsRunning:   replicaSet.Status.Replicas,
		PodsDesired:   replicaSet.Spec.Replicas,
	}

	for _, container := range replicaSet.Spec.Template.Spec.Containers {
		replicaSetDetail.ContainerImages = append(replicaSetDetail.ContainerImages, container.Image)
	}

	labelSelector := labels.SelectorFromSet(replicaSet.Spec.Selector)
	pods, err := client.Pods(namespace).List(labelSelector, fields.Everything())
	if err != nil {
		return nil, err
	}

	for _, pod := range pods.Items {
		podDetail := ReplicaSetPod{
			Name:      pod.Name,
			StartTime: pod.Status.StartTime,
			PodIP:     pod.Status.PodIP,
			NodeName:  pod.Spec.NodeName,
		}
		replicaSetDetail.Pods = append(replicaSetDetail.Pods, podDetail)
	}

	return replicaSetDetail, nil
}
Code Example #26
// Returns a list of all Replica Sets in the cluster.
func GetReplicaSetList(client *client.Client) (*ReplicaSetList, error) {
	list, err := client.ReplicationControllers(api.NamespaceAll).
		List(labels.Everything(), fields.Everything())

	if err != nil {
		return nil, err
	}

	replicaSetList := &ReplicaSetList{}

	for _, replicaSet := range list.Items {
		var containerImages []string

		for _, container := range replicaSet.Spec.Template.Spec.Containers {
			containerImages = append(containerImages, container.Image)
		}

		replicaSetList.ReplicaSets = append(replicaSetList.ReplicaSets, ReplicaSet{
			Name:      replicaSet.ObjectMeta.Name,
			Namespace: replicaSet.ObjectMeta.Namespace,
			// TODO(bryk): This field contains test value. Implement it.
			Description: "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " +
				"Nulla metus nibh, iaculis a consectetur vitae, imperdiet pellentesque turpis.",
			Labels:          replicaSet.ObjectMeta.Labels,
			PodsRunning:     replicaSet.Status.Replicas,
			PodsPending:     replicaSet.Spec.Replicas - replicaSet.Status.Replicas,
			ContainerImages: containerImages,
			CreationTime:    replicaSet.ObjectMeta.CreationTimestamp,
			// TODO(bryk): This field contains test value. Implement it.
			InternalEndpoints: []string{"webapp"},
			// TODO(bryk): This field contains test value. Implement it.
			ExternalEndpoints: []string{"81.76.02.198:80"},
		})
	}

	return replicaSetList, nil
}
Code Example #27
File: replicasetcommon.go Project: tacy/dashboard
// Returns structure containing ReplicaSet and Pods for the given replica set.
func getRawReplicaSetWithPods(client *client.Client, namespace string, name string) (
	*ReplicaSetWithPods, error) {
	replicaSet, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	labelSelector := labels.SelectorFromSet(replicaSet.Spec.Selector)
	pods, err := client.Pods(namespace).List(
		unversioned.ListOptions{
			LabelSelector: unversioned.LabelSelector{labelSelector},
			FieldSelector: unversioned.FieldSelector{fields.Everything()},
		})

	if err != nil {
		return nil, err
	}

	replicaSetAndPods := &ReplicaSetWithPods{
		ReplicaSet: replicaSet,
		Pods:       pods,
	}
	return replicaSetAndPods, nil
}
Code Example #28
File: density.go Project: CodeJuan/kubernetes
				framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
				framework.PrintLatencies(e2eLag, "worst e2e total latencies")

				// Test whether e2e pod startup time is acceptable.
				podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
				framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))

				framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
			}

			cleanupDensityTest(dConfig)

			By("Removing additional replication controllers if any")
			for i := 1; i <= nodeCount; i++ {
				name := additionalPodsPrefix + "-" + strconv.Itoa(i)
				c.ReplicationControllers(ns).Delete(name, nil)
			}
		})
	}

	// Calculate the total number of pods from each node's max-pods capacity
	It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
		totalPods = 0
		for _, n := range nodes.Items {
			totalPods += int(n.Status.Capacity.Pods().Value())
		}
		totalPods -= framework.WaitForStableCluster(c, masters)

		fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
		framework.ExpectNoError(err)
		defer fileHndl.Close()
Code Example #29
		nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
		nodeCount = len(nodeList.Items)
		Expect(nodeCount).NotTo(BeZero())

		err = checkTestingNSDeletedExcept(c, "")
		expectNoError(err)

		nsForTesting, err := createTestingNS("sched-pred", c)
		expectNoError(err)
		ns = nsForTesting.Name
		uuid = string(util.NewUUID())
	})

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := DeleteRC(c, ns, RCName)
			expectNoError(err)
		}

		By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
		if err := deleteNS(c, ns, 10*time.Minute /* namespace deletion timeout */); err != nil {
			Failf("Couldn't delete ns %s", err)
		}
	})

	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
	// that max-pods is working, we need to fully saturate the cluster and keep it in that state for a few seconds.
Code Example #30
File: resize_nodes.go Project: remoteur/kubernetes
// newRCByName creates a replication controller whose selector matches the given name.
func newRCByName(c *client.Client, ns, name string, replicas int) (*api.ReplicationController, error) {
	By(fmt.Sprintf("creating replication controller %s", name))
	return c.ReplicationControllers(ns).Create(rcByNamePort(
		name, replicas, serveHostnameImage, 9376, map[string]string{}))
}