Example #1
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
	rcs, err := c.List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}
	var matchingRCs []api.ReplicationController
	rcLabels := labels.Set(rc.Spec.Selector)
	for _, controller := range rcs.Items {
		newRCLabels := labels.Set(controller.Spec.Selector)
		if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
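The check above is deliberately symmetric: a set-based selector matches a label set only when it is a subset of that set, so an RC with selector {app: web} overlaps an RC with selector {app: web, tier: frontend} even though the reverse test alone would fail. A minimal, dependency-free sketch of that subset semantics (subsetOf is a hypothetical helper, not part of the Kubernetes API):
package main

import "fmt"

// subsetOf is a hypothetical helper: it reports whether every key/value pair in
// sel is present in set, which is how a set-based selector "matches" a label set.
func subsetOf(sel, set map[string]string) bool {
	for k, v := range sel {
		if set[k] != v {
			return false
		}
	}
	return true
}

func main() {
	a := map[string]string{"app": "web"}
	b := map[string]string{"app": "web", "tier": "frontend"}

	fmt.Println(subsetOf(a, b)) // true:  a's selector matches b's label set
	fmt.Println(subsetOf(b, a)) // false: b's selector does not match a's label set
	// Checking both directions, as getOverlappingControllers does, catches the overlap either way.
}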
Example #2
func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
	if len(pod.Spec.NodeSelector) == 0 {
		return true
	}
	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
	return selector.Matches(labels.Set(node.Labels))
}
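PodMatchesNodeLabels follows the same subset rule: an empty nodeSelector matches every node, otherwise every selector entry must be present in the node's labels. A small sketch of those calls, assuming the current k8s.io/apimachinery/pkg/labels package (the example above uses an older in-tree import path):
package main

import (
	"fmt"

	// Assumption: the modern apimachinery labels package; the examples above use an older path.
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	nodeSelector := labels.Set{"disktype": "ssd"}
	nodeLabels := labels.Set{"disktype": "ssd", "zone": "us-east-1a"}

	selector := labels.SelectorFromSet(nodeSelector)
	fmt.Println(selector.Matches(nodeLabels))                    // true: every selector entry appears in the node labels
	fmt.Println(selector.Matches(labels.Set{"disktype": "hdd"})) // false: value mismatch
	fmt.Println(labels.Everything().Matches(nodeLabels))         // true: the empty selector matches everything
}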
Example #3
// Get all replication controllers whose selectors would match a given set of
// labels.
// TODO Move this to pkg/client and ideally implement it server-side (instead
// of getting all RC's and searching through them manually).
func getReplicationControllersForLabels(c client.ReplicationControllerInterface, labelsToMatch labels.Labels) string {
	// Get all replication controllers.
	// TODO this needs a namespace scope as argument
	rcs, err := c.List(labels.Everything())
	if err != nil {
		glog.Fatalf("Error getting replication controllers: %v\n", err)
	}

	// Find the ones that match labelsToMatch.
	var matchingRCs []api.ReplicationController
	for _, controller := range rcs.Items {
		selector := labels.SelectorFromSet(controller.Spec.Selector)
		if selector.Matches(labelsToMatch) {
			matchingRCs = append(matchingRCs, controller)
		}
	}

	// Format the matching RC's into strings.
	var rcStrings []string
	for _, controller := range matchingRCs {
		rcStrings = append(rcStrings, fmt.Sprintf("%s (%d/%d replicas created)", controller.Name, controller.Status.Replicas, controller.Spec.Replicas))
	}

	list := strings.Join(rcStrings, ", ")
	if list == "" {
		return "<none>"
	}
	return list
}
Example #4
func TestEtcdWatchEndpoints(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	watching, err := registry.WatchEndpoints(
		labels.Everything(),
		labels.SelectorFromSet(labels.Set{"ID": "foo"}),
		1,
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}
	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example #5
func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	if len(pod.Spec.NodeSelector) == 0 {
		return true, nil
	}
	// check whitelist
	if whitelist, exists := pod.Spec.NodeSelector["whitelist"]; exists {
		for _, hostIP := range strings.Split(whitelist, ",") {
			if hostIP == node {
				return true, nil
			}
		}
		return false, nil
	}

	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
	minion, err := n.info.GetNodeInfo(node)
	if err != nil {
		return false, err
	}
	// check blacklist and model
	active := true
	if val, exists := minion.Labels["active"]; exists {
		if val == "false" {
			active = false
		}
	}

	if _, e1 := pod.Spec.NodeSelector["sriov"]; !e1 {
		if sriov, e2 := minion.Labels["sriov"]; e2 && sriov == "1" {
			return false, nil
		}
	}

	return selector.Matches(labels.Set(minion.Labels)) && active, nil
}
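The predicate above layers three site-specific conventions on top of plain selector matching: a "whitelist" key in the pod's nodeSelector bypasses label matching entirely, a node labeled sriov=1 is reserved for pods that explicitly request sriov, and an active=false label disqualifies the node. A dependency-free sketch of that decision order, under those assumptions (fits and its inputs are hypothetical, not a Kubernetes API):
package main

import (
	"fmt"
	"strings"
)

// fits is a hypothetical helper mirroring the decision order of PodSelectorMatches above:
// whitelist short-circuit, sriov reservation, the "active" gate, then the subset match.
func fits(nodeSelector, nodeLabels map[string]string, nodeName string) bool {
	if len(nodeSelector) == 0 {
		return true
	}
	if whitelist, ok := nodeSelector["whitelist"]; ok {
		for _, host := range strings.Split(whitelist, ",") {
			if host == nodeName {
				return true
			}
		}
		return false
	}
	if _, wantsSriov := nodeSelector["sriov"]; !wantsSriov && nodeLabels["sriov"] == "1" {
		return false // sriov nodes are reserved for pods that request them
	}
	if nodeLabels["active"] == "false" {
		return false
	}
	for k, v := range nodeSelector {
		if nodeLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(fits(map[string]string{"whitelist": "node-a,node-b"}, nil, "node-b"))                                    // true: whitelisted host
	fmt.Println(fits(map[string]string{"zone": "us"}, map[string]string{"zone": "us", "sriov": "1"}, "node-c"))          // false: sriov node is reserved
	fmt.Println(fits(map[string]string{"zone": "us"}, map[string]string{"zone": "us", "active": "false"}, "node-d"))     // false: node marked inactive
}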
Example #6
// CalculateSpreadPriority spreads pods by minimizing the number of pods on the same machine with the same labels.
// Importantly, if there are services in the system that span multiple heterogeneous sets of pods, this spreading priority
// may not provide optimal spreading for the members of that Service.
// TODO: consider if we want to include Service label sets in the scheduling priority.
func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
	pods, err := podLister.List(labels.SelectorFromSet(pod.Labels))
	if err != nil {
		return nil, err
	}
	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	var maxCount int
	var fScore float32 = 10.0
	counts := map[string]int{}
	if len(pods) > 0 {
		for _, pod := range pods {
			counts[pod.Status.Host]++
			// Compute the maximum number of pods hosted on any minion
			if counts[pod.Status.Host] > maxCount {
				maxCount = counts[pod.Status.Host]
			}
		}
	}

	result := []HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 being the highest
	for _, minion := range minions.Items {
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
		}
		result = append(result, HostPriority{host: minion.Name, score: int(fScore)})
	}
	return result, nil
}
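The score per minion works out to 10 * (maxCount - countOnMinion) / maxCount, truncated to an int, so the most crowded minion scores 0 and an empty one scores 10. A self-contained sketch of just that arithmetic with hypothetical counts:
package main

import "fmt"

func main() {
	// Hypothetical: number of matching pods already placed on each minion.
	counts := map[string]int{"node-a": 3, "node-b": 1, "node-c": 0}
	maxCount := 3

	for _, node := range []string{"node-a", "node-b", "node-c"} {
		score := int(10 * float32(maxCount-counts[node]) / float32(maxCount))
		fmt.Printf("%s -> %d\n", node, score) // node-a -> 0, node-b -> 6, node-c -> 10
	}
}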
Example #7
func TestEtcdListImagesFiltered(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	key := "/images"
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(api.Image{
							JSONBase: kubeapi.JSONBase{ID: "foo"},
							Labels:   map[string]string{"env": "prod"},
						}),
					},
					{
						Value: runtime.EncodeOrDie(api.Image{
							JSONBase: kubeapi.JSONBase{ID: "bar"},
							Labels:   map[string]string{"env": "dev"},
						}),
					},
				},
			},
		},
		E: nil,
	}
	registry := NewTestEtcdRegistry(fakeClient)
	images, err := registry.ListImages(labels.SelectorFromSet(labels.Set{"env": "dev"}))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if len(images.Items) != 1 || images.Items[0].ID != "bar" {
		t.Errorf("Unexpected images list: %#v", images)
	}
}
Example #8
func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
	By("trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
}
Example #9
func TestEtcdWatchNodesNotMatch(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, fakeClient := newStorage(t)
	node := validNewNode()

	watching, err := storage.Watch(ctx,
		labels.SelectorFromSet(labels.Set{"name": "bar"}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	nodeBytes, _ := latest.Codec.Encode(node)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(nodeBytes),
		},
	}

	select {
	case <-watching.ResultChan():
		t.Error("unexpected result from result channel")
	case <-time.After(time.Millisecond * 100):
		// expected case
	}
}
Example #10
// CoverServices ensures that a directed edge exists between all deployment configs and the
// services that expose them (via label selectors).
func CoverServices(g Graph) Graph {
	nodes := g.NodeList()
	for _, node := range nodes {
		switch svc := node.(type) {
		case *ServiceNode:
			if svc.Service.Spec.Selector == nil {
				continue
			}
			query := labels.SelectorFromSet(svc.Service.Spec.Selector)
			for _, n := range nodes {
				switch target := n.(type) {
				case *DeploymentConfigNode:
					template := target.DeploymentConfig.Template.ControllerTemplate.Template
					if template == nil {
						continue
					}
					if query.Matches(labels.Set(template.Labels)) {
						g.AddEdge(target, svc, ExposedThroughServiceGraphEdgeKind)
					}
				}
			}
		}
	}
	return g
}
Example #11
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
	var pods []api.Pod

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err = podLister.List(selector)
		if err != nil {
			return nil, err
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	// separate out the minions that have the label from the ones that don't
	otherMinions := []string{}
	labeledMinions := map[string]string{}
	for _, minion := range minions.Items {
		if labels.Set(minion.Labels).Has(s.label) {
			label := labels.Set(minion.Labels).Get(s.label)
			labeledMinions[minion.Name] = label
		} else {
			otherMinions = append(otherMinions, minion.Name)
		}
	}

	podCounts := map[string]int{}
	for _, pod := range pods {
		label, exists := labeledMinions[pod.Status.Host]
		if !exists {
			continue
		}
		podCounts[label]++
	}

	numServicePods := len(pods)
	result := []HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 being the highest
	for minion := range labeledMinions {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if numServicePods > 0 {
			fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
		}
		result = append(result, HostPriority{host: minion, score: int(fScore)})
	}
	// add the open minions with a score of 0
	for _, minion := range otherMinions {
		result = append(result, HostPriority{host: minion, score: 0})
	}

	return result, nil
}
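Unlike CalculateSpreadPriority, this variant buckets the service's pods by the value of a node label (a zone, for example), so every minion sharing that value shares a score, and unlabeled minions are appended with a score of 0. A sketch of the bucketing with made-up zone labels and pod counts:
package main

import "fmt"

func main() {
	// Hypothetical zone label per node, and service pods already placed per node.
	nodeZone := map[string]string{"node-a": "zone-1", "node-b": "zone-1", "node-c": "zone-2"}
	podsOnNode := map[string]int{"node-a": 2, "node-b": 1, "node-c": 1}

	// Count service pods per zone, as the podCounts map above does per label value.
	podsPerZone := map[string]int{}
	total := 0
	for node, n := range podsOnNode {
		podsPerZone[nodeZone[node]] += n
		total += n
	}

	for node, zone := range nodeZone {
		score := int(10 * float32(total-podsPerZone[zone]) / float32(total))
		fmt.Printf("%s (%s) -> %d\n", node, zone, score) // zone-1 nodes -> 2, zone-2 node -> 7
	}
}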
Example #12
func TestPodUpdate(c *client.Client) bool {
	podClient := c.Pods(api.NamespaceDefault)

	pod := loadPodOrDie(assetPath("api", "examples", "pod.json"))
	value := strconv.Itoa(time.Now().Nanosecond())
	pod.Labels["time"] = value

	_, err := podClient.Create(pod)
	if err != nil {
		glog.Errorf("Failed to create pod: %v", err)
		return false
	}
	defer podClient.Delete(pod.Name)
	waitForPodRunning(c, pod.Name)
	pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
	if err != nil {
		glog.Errorf("Failed to list pods: %v", err)
		return false
	}
	if len(pods.Items) != 1 {
		glog.Errorf("Failed to find the correct pod")
		return false
	}

	podOut, err := podClient.Get(pod.Name)
	if err != nil {
		glog.Errorf("Failed to get pod: %v", err)
		return false
	}
	value = "time" + value
	pod.Labels["time"] = value
	pod.ResourceVersion = podOut.ResourceVersion
	pod.UID = podOut.UID
	pod, err = podClient.Update(pod)
	if err != nil {
		glog.Errorf("Failed to update pod: %v", err)
		return false
	}
	waitForPodRunning(c, pod.Name)
	pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
	if err != nil {
		glog.Errorf("Failed to list pods after update: %v", err)
		return false
	}
	if len(pods.Items) != 1 {
		glog.Errorf("Failed to find the correct pod after update.")
		return false
	}
	glog.Infof("pod update OK")
	return true
}
Example #13
func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	if len(pod.NodeSelector) == 0 {
		return true, nil
	}
	selector := labels.SelectorFromSet(pod.NodeSelector)
	minion, err := n.info.GetNodeInfo(node)
	if err != nil {
		return false, err
	}
	return selector.Matches(labels.Set(minion.Labels)), nil
}
Example #14
func TestEtcdWatchEndpointsBadSelector(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	_, err := registry.WatchEndpoints(
		labels.Everything(),
		labels.SelectorFromSet(labels.Set{"Field.Selector": "foo"}),
		0,
	)
	if err == nil {
		t.Errorf("unexpected non-error: %v", err)
	}

	_, err = registry.WatchEndpoints(
		labels.SelectorFromSet(labels.Set{"Label.Selector": "foo"}),
		labels.Everything(),
		0,
	)
	if err == nil {
		t.Errorf("unexpected non-error: %v", err)
	}
}
Example #15
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
// on the same machine.
func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (api.HostPriorityList, error) {
	var maxCount int
	var nsServicePods []*api.Pod

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	counts := map[string]int{}
	if len(nsServicePods) > 0 {
		for _, pod := range nsServicePods {
			counts[pod.Spec.NodeName]++
			// Compute the maximum number of pods hosted on any minion
			if counts[pod.Spec.NodeName] > maxCount {
				maxCount = counts[pod.Spec.NodeName]
			}
		}
	}

	result := []api.HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 being the highest
	for _, minion := range minions.Items {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
		}
		result = append(result, api.HostPriority{Host: minion.Name, Score: int(fScore)})
		glog.V(10).Infof(
			"%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
		)
	}
	return result, nil
}
Example #16
// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling is always based on the original size, not the current size.
func scaleRC(wg *sync.WaitGroup, config *RCConfig) {
	defer GinkgoRecover()
	defer wg.Done()
	resizingTime := 3 * time.Minute

	sleepUpTo(resizingTime)
	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
	expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize),
		fmt.Sprintf("scaling rc %s for the first time", config.Name))
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
	_, err := config.Client.Pods(config.Namespace).List(selector, fields.Everything())
	expectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
}
Example #17
// AddExposedPodEdges ensures that a directed edge exists between a service and all the pods
// in the graph that match the service selector
func AddExposedPodEdges(g osgraph.MutableUniqueGraph, node *kubegraph.ServiceNode) {
	if node.Service.Spec.Selector == nil {
		return
	}
	query := labels.SelectorFromSet(node.Service.Spec.Selector)
	for _, n := range g.(graph.Graph).Nodes() {
		switch target := n.(type) {
		case *kubegraph.PodNode:
			if query.Matches(labels.Set(target.Labels)) {
				g.AddEdge(target, node, ExposedThroughServiceEdgeKind)
			}
		}
	}
}
Example #18
// AddManagedByRCPodEdges ensures that a directed edge exists between an RC and all the pods
// in the graph that match the label selector
func AddManagedByRCPodEdges(g osgraph.MutableUniqueGraph, rcNode *kubegraph.ReplicationControllerNode) {
	if rcNode.Spec.Selector == nil {
		return
	}
	query := labels.SelectorFromSet(rcNode.Spec.Selector)
	for _, n := range g.(graph.Graph).Nodes() {
		switch target := n.(type) {
		case *kubegraph.PodNode:
			if query.Matches(labels.Set(target.Labels)) {
				g.AddEdge(target, rcNode, ManagedByRCEdgeKind)
			}
		}
	}
}
Example #19
func (c *Controller) updatePod(pod *api.Pod) {
	glog.Infof("Pod %s", pod.Name)

	c.ensureNamespace(pod.Namespace)
	instance := c.instanceMgr.LocateInstance(pod.Namespace, pod.Name, string(pod.ObjectMeta.UID))

	network := c.getPodNetwork(pod)
	if network == nil {
		return
	}
	nic := c.instanceMgr.LocateInterface(network, instance)
	if nic == nil {
		return
	}
	address := c.instanceMgr.LocateInstanceIp(network, string(pod.ObjectMeta.UID), nic)
	if address == nil {
		return
	}
	gateway, err := c.networkMgr.GetGatewayAddress(network)
	if err != nil {
		return
	}
	c.updateInstanceMetadata(pod, nic, address.GetInstanceIpAddress(), gateway)

	policyTag, ok := pod.Labels[c.config.NetworkAccessTag]
	if ok {
		serviceList := decodeAccessTag(policyTag)
		for _, srv := range serviceList {
			c.serviceMgr.Connect(pod.Namespace, srv, network)
		}
	}
	// TODO(prm): Disconnect from any policy that the network is associated with other than the
	// policies above.

	for _, item := range c.serviceStore.List() {
		service := item.(*api.Service)
		if service.Namespace != pod.Namespace {
			continue
		}
		if len(service.Spec.Selector) == 0 {
			continue
		}
		selector := labels.SelectorFromSet(service.Spec.Selector)
		if selector.Matches(labels.Set(pod.Labels)) {
			glog.Infof("Pod %s is a member of service %s", pod.Name, service.Name)
			c.updatePodServiceIp(service, pod)
			c.updatePodPublicIp(service, pod)
		}
	}
}
Example #20
func TestEtcdWatchServicesBadSelector(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	_, err := registry.WatchServices(
		ctx,
		labels.Everything(),
		labels.SelectorFromSet(labels.Set{"Field.Selector": "foo"}),
		"",
	)
	if err == nil {
		t.Errorf("unexpected non-error: %v", err)
	}

	_, err = registry.WatchServices(
		ctx,
		labels.SelectorFromSet(labels.Set{"Label.Selector": "foo"}),
		labels.Everything(),
		"",
	)
	if err == nil {
		t.Errorf("unexpected non-error: %v", err)
	}
}
Example #21
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		Logf("Pod name %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting for %d pods to come up", name, replicas)
}
Example #22
func TestEtcdWatchImageRepositories(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	filterFields := labels.SelectorFromSet(labels.Set{"ID": "foo"})

	watching, err := registry.WatchImageRepositories(1, func(repo *api.ImageRepository) bool {
		fields := labels.Set{
			"ID": repo.ID,
		}
		return filterFields.Matches(fields)
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	repo := &api.ImageRepository{JSONBase: kubeapi.JSONBase{ID: "foo"}}
	repoBytes, _ := runtime.Codec.Encode(repo)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "set",
		Node: &etcd.Node{
			Value: string(repoBytes),
		},
	}

	event := <-watching.ResultChan()
	if e, a := watch.Added, event.Type; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
	if e, a := repo, event.Object; !reflect.DeepEqual(e, a) {
		t.Errorf("Expected %v, got %v", e, a)
	}

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}

	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example #23
// ClusterLevelLoggingWithKibana is an end to end test that checks to see if Kibana is alive.
func ClusterLevelLoggingWithKibana(f *Framework) {
	// graceTime is how long to keep retrying requests for status information.
	const graceTime = 2 * time.Minute

	// Check for the existence of the Kibana service.
	By("Checking the Kibana service exists.")
	s := f.Client.Services(api.NamespaceSystem)
	// Make a few attempts to connect. This makes the test robust against
	// being run as the first e2e test just after the e2e cluster has been created.
	var err error
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		if _, err = s.Get("kibana-logging"); err == nil {
			break
		}
		Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start))
	}
	Expect(err).NotTo(HaveOccurred())

	// Wait for the Kibana pod(s) to enter the running state.
	By("Checking to make sure the Kibana pods are running")
	label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
	pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Checking to make sure we get a response from the Kibana UI.")
	err = nil
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		// Query against the root URL for Kibana.
		_, err = f.Client.Get().
			Namespace(api.NamespaceSystem).
			Prefix("proxy").
			Resource("services").
			Name("kibana-logging").
			DoRaw()
		if err != nil {
			Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
			continue
		}
		break
	}
	Expect(err).NotTo(HaveOccurred())
}
Example #24
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
// on the same machine.
func (s *ServiceSpread) CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
	var maxCount int
	var pods []api.Pod
	var err error

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err = podLister.List(selector)
		if err != nil {
			return nil, err
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	counts := map[string]int{}
	if len(pods) > 0 {
		for _, pod := range pods {
			counts[pod.Status.Host]++
			// Compute the maximum number of pods hosted on any minion
			if counts[pod.Status.Host] > maxCount {
				maxCount = counts[pod.Status.Host]
			}
		}
	}

	result := []HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 being the highest
	for _, minion := range minions.Items {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
		}
		result = append(result, HostPriority{host: minion.Name, score: int(fScore)})
	}
	return result, nil
}
Example #25
// Get all replication controllers whose selectors would match a given set of
// labels.
// TODO Move this to pkg/client and ideally implement it server-side (instead
// of getting all RC's and searching through them manually).
func getReplicationControllersForLabels(c client.ReplicationControllerInterface, labelsToMatch labels.Labels) ([]api.ReplicationController, error) {
	// Get all replication controllers.
	// TODO this needs a namespace scope as argument
	rcs, err := c.List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}

	// Find the ones that match labelsToMatch.
	var matchingRCs []api.ReplicationController
	for _, controller := range rcs.Items {
		selector := labels.SelectorFromSet(controller.Spec.Selector)
		if selector.Matches(labelsToMatch) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
Example #26
func TestEtcdListNodesMatch(t *testing.T) {
	ctx := api.NewContext()
	storage, fakeClient := newStorage(t)
	key := storage.KeyRootFunc(ctx)
	key = etcdtest.AddPrefix(key)
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.Node{
							ObjectMeta: api.ObjectMeta{Name: "foo",
								Labels: map[string]string{
									"name": "foo",
								},
							},
						}),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.Node{
							ObjectMeta: api.ObjectMeta{Name: "bar",
								Labels: map[string]string{
									"name": "bar",
								},
							},
						}),
					},
				},
			},
		},
		E: nil,
	}
	label := labels.SelectorFromSet(labels.Set{"name": "bar"})
	nodesObj, err := storage.List(ctx, label, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	nodes := nodesObj.(*api.NodeList)
	if len(nodes.Items) != 1 || nodes.Items[0].Name != "bar" {
		t.Errorf("Unexpected nodes list: %#v", nodes)
	}
}
Example #27
func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) {
	var pods *api.PodList
	var err error
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		pods, err = c.Pods(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) > 0 {
			break
		}
	}
	if pods == nil || len(pods.Items) == 0 {
		Failf("No pods found")
	}
	for _, pod := range pods.Items {
		err = waitForPodRunningInNamespace(c, pod.Name, ns)
		Expect(err).NotTo(HaveOccurred())
		fn(pod)
	}
}
Example #28
func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) {
	rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything())
	if err != nil {
		return
	}
	for _, pod := range rcPods.Items {
		switch pod.Status.Phase {
		case api.PodRunning:
			running++
		case api.PodPending:
			waiting++
		case api.PodSucceeded:
			succeeded++
		case api.PodFailed:
			failed++
		}
	}
	return
}
Example #29
func getPodStatusForDeployment(deployment *kapi.ReplicationController, client deploymentDescriberClient) (running, waiting, succeeded, failed int, err error) {
	rcPods, err := client.listPods(deployment.Namespace, labels.SelectorFromSet(deployment.Spec.Selector))
	if err != nil {
		return
	}
	for _, pod := range rcPods.Items {
		switch pod.Status.Phase {
		case kapi.PodRunning:
			running++
		case kapi.PodPending:
			waiting++
		case kapi.PodSucceeded:
			succeeded++
		case kapi.PodFailed:
			failed++
		}
	}
	return
}
Example #30
func TestEtcdWatchControllersMatch(t *testing.T) {
	ctx := api.WithNamespace(api.NewDefaultContext(), validController.Namespace)
	storage, fakeClient := newStorage(t)
	fakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, "/registry/pods"))

	watching, err := storage.Watch(ctx,
		labels.SelectorFromSet(validController.Spec.Selector),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	// The watcher above is waiting for these Labels, on receiving them it should
	// apply the ControllerStatus decorator, which lists pods, causing a query against
	// the /registry/pods endpoint of the etcd client.
	controller := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Labels:    validController.Spec.Selector,
			Namespace: "default",
		},
	}
	controllerBytes, _ := latest.Codec.Encode(controller)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(controllerBytes),
		},
	}
	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	case <-time.After(time.Millisecond * 100):
		t.Error("unexpected timeout from result channel")
	}
	watching.Stop()
}