Example #1
func (d *DeploymentController) getTargetedRCs(deployment *experimental.Deployment) ([]api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := d.client.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	targetedRCs := map[string]api.ReplicationController{}
	rcList, err := d.client.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				targetedRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []api.ReplicationController{}
	for _, value := range targetedRCs {
		requiredRCs = append(requiredRCs, value)
	}
	return requiredRCs, nil
}
Example #2
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything(), unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	newRCTemplate := GetNewRCTemplate(deployment)
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, &newRCTemplate) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for _, value := range oldRCs {
		value := value // copy the loop variable; taking its address directly would leave every entry pointing at the same RC
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
Example #3
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
	rcs, err := rcClient.List(api.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}
	var matchingRCs []api.ReplicationController
	rcLabels := labels.Set(rc.Spec.Selector)
	for _, controller := range rcs.Items {
		newRCLabels := labels.Set(controller.Spec.Selector)
		if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
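A side note on the check above, offered as a hedged illustration rather than part of the original set: SelectorFromSet performs a one-way subset test, so an RC with a broader selector can match another RC's label set without the reverse holding, which is why getOverlappingControllers tests both directions. The sketch below assumes the same labels package the surrounding examples import; the selector values are made up.

func selectorMatchIsOneWay() {
	broad := labels.Set{"app": "web"}
	narrow := labels.Set{"app": "web", "tier": "frontend"}

	// narrow carries every key/value pair of broad, so the broad selector matches it...
	fmt.Println(labels.SelectorFromSet(broad).Matches(narrow)) // true
	// ...but broad lacks "tier", so the narrow selector does not match back.
	fmt.Println(labels.SelectorFromSet(narrow).Matches(broad)) // false
}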
Example #4
// Gets events associated with pods in the replication controller.
func GetReplicationControllerPodsEvents(client *client.Client, namespace, replicationControllerName string) ([]api.Event,
	error) {
	replicationController, err := client.ReplicationControllers(namespace).Get(replicationControllerName)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(api.ListOptions{
		LabelSelector: labels.SelectorFromSet(replicationController.Spec.Selector),
		FieldSelector: fields.Everything(),
	})

	if err != nil {
		return nil, err
	}

	events, err := GetPodsEvents(client, pods)

	if err != nil {
		return nil, err
	}

	return events, nil
}
Example #5
// NewNodeProcurement returns a Procurement that checks whether the given pod task and offer
// have valid node information available and whether the pod's node selector matches
// the node labels.
// If the check is successful, the slave ID and assigned slave are set in the given spec.
func NewNodeProcurement() Procurement {
	return ProcurementFunc(func(t *T, n *api.Node, ps *ProcureState) error {
		// if the user has specified a target host, make sure this offer is for that host
		if t.Pod.Spec.NodeName != "" && ps.offer.GetHostname() != t.Pod.Spec.NodeName {
			return fmt.Errorf(
				"NodeName %q does not match offer hostname %q",
				t.Pod.Spec.NodeName, ps.offer.GetHostname(),
			)
		}

		// check the NodeSelector
		if len(t.Pod.Spec.NodeSelector) > 0 {
			// *api.Node is optional for procurement
			if n == nil || n.Labels == nil {
				return fmt.Errorf(
					"NodeSelector %v does not match empty labels of pod %s/%s",
					t.Pod.Spec.NodeSelector, t.Pod.Namespace, t.Pod.Name,
				)
			}
			selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
			if !selector.Matches(labels.Set(n.Labels)) {
				return fmt.Errorf(
					"NodeSelector %v does not match labels %v of pod %s/%s",
					t.Pod.Spec.NodeSelector, t.Pod.Labels, t.Pod.Namespace, t.Pod.Name,
				)
			}
		}

		ps.spec.SlaveID = ps.offer.GetSlaveId().GetValue()
		ps.spec.AssignedSlave = ps.offer.GetHostname()

		return nil
	})
}
Example #6
func TestFiltering(t *testing.T) {
	server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix())
	defer server.Terminate(t)
	cacher := newTestCacher(etcdStorage)
	defer cacher.Stop()

	// Ensure that the cacher is initialized, before creating any pods,
	// so that we are sure that all events will be present in cacher.
	syncWatcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", "0", storage.Everything)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	syncWatcher.Stop()

	podFoo := makeTestPod("foo")
	podFoo.Labels = map[string]string{"filter": "foo"}
	podFooFiltered := makeTestPod("foo")
	podFooPrime := makeTestPod("foo")
	podFooPrime.Labels = map[string]string{"filter": "foo"}
	podFooPrime.Spec.NodeName = "fakeNode"

	podFooNS2 := makeTestPod("foo")
	podFooNS2.Namespace += "2"
	podFooNS2.Labels = map[string]string{"filter": "foo"}

	// Create in another namespace first to make sure events from other namespaces don't get delivered
	updatePod(t, etcdStorage, podFooNS2, nil)

	fooCreated := updatePod(t, etcdStorage, podFoo, nil)
	fooFiltered := updatePod(t, etcdStorage, podFooFiltered, fooCreated)
	fooUnfiltered := updatePod(t, etcdStorage, podFoo, fooFiltered)
	_ = updatePod(t, etcdStorage, podFooPrime, fooUnfiltered)

	deleted := api.Pod{}
	if err := etcdStorage.Delete(context.TODO(), etcdtest.AddPrefix("pods/ns/foo"), &deleted, nil); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// Set up Watch for object "podFoo" with label filter set.
	selector := labels.SelectorFromSet(labels.Set{"filter": "foo"})
	filterFunc := func(obj runtime.Object) bool {
		metadata, err := meta.Accessor(obj)
		if err != nil {
			t.Errorf("Unexpected error: %v", err)
			return false
		}
		return selector.Matches(labels.Set(metadata.GetLabels()))
	}
	filter := storage.NewSimpleFilter(filterFunc, storage.NoTriggerFunc)
	watcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", fooCreated.ResourceVersion, filter)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer watcher.Stop()

	verifyWatchEvent(t, watcher, watch.Deleted, podFooFiltered)
	verifyWatchEvent(t, watcher, watch.Added, podFoo)
	verifyWatchEvent(t, watcher, watch.Modified, podFooPrime)
	verifyWatchEvent(t, watcher, watch.Deleted, podFooPrime)
}
Example #7
func reportLogsFromFluentdPod(f *framework.Framework) error {
	synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Failed to get synth logger pod due to %v", err)
	}

	synthLoggerNodeName := synthLoggerPod.Spec.NodeName
	if synthLoggerNodeName == "" {
		return errors.New("Synthlogger pod is not assigned to the node")
	}

	label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
	options := v1.ListOptions{LabelSelector: label.String()}
	fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		return fmt.Errorf("Failed to list fluentd pods due to %v", err)
	}

	for _, fluentdPod := range fluentdPods.Items {
		if fluentdPod.Spec.NodeName == synthLoggerNodeName {
			containerName := fluentdPod.Spec.Containers[0].Name
			logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
			if err != nil {
				return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
			}
			framework.Logf("Logs from fluentd pod %s:\n%s", fluentdPod.Name, logs)
			return nil
		}
	}

	return fmt.Errorf("Failed to find fluentd pod running on node %s", synthLoggerNodeName)
}
Example #8
// GetPodsEvents gets the events associated with the pods targeted by the given resource selector in the given namespace.
func GetPodsEvents(client client.Interface, namespace string, resourceSelector map[string]string) (
	[]api.Event, error) {

	channels := &common.ResourceChannels{
		PodList: common.GetPodListChannelWithOptions(
			client,
			common.NewSameNamespaceQuery(namespace),
			api.ListOptions{
				LabelSelector: labels.SelectorFromSet(resourceSelector),
				FieldSelector: fields.Everything(),
			},
			1),
		EventList: common.GetEventListChannel(client, common.NewSameNamespaceQuery(namespace), 1),
	}

	podList := <-channels.PodList.List
	if err := <-channels.PodList.Error; err != nil {
		return nil, err
	}

	eventList := <-channels.EventList.List
	if err := <-channels.EventList.Error; err != nil {
		return nil, err
	}

	events := filterEventsByPodsUID(eventList.Items, podList.Items)

	return events, nil
}
Example #9
// GetServicePods gets the list of pods targeted by the selector of the given service in the given namespace.
func GetServicePods(client k8sClient.Interface, heapsterClient client.HeapsterClient, namespace,
	name string, dsQuery *dataselect.DataSelectQuery) (*pod.PodList, error) {

	service, err := client.Core().Services(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	labelSelector := labels.SelectorFromSet(service.Spec.Selector)
	channels := &common.ResourceChannels{
		PodList: common.GetPodListChannelWithOptions(client,
			common.NewSameNamespaceQuery(namespace),
			api.ListOptions{
				LabelSelector: labelSelector,
				FieldSelector: fields.Everything(),
			},
			1),
	}

	apiPodList := <-channels.PodList.List
	if err := <-channels.PodList.Error; err != nil {
		return nil, err
	}

	podList := pod.CreatePodList(apiPodList.Items, dsQuery, heapsterClient)
	return &podList, nil
}
Example #10
func (c *kregistry) GetService(name string) (*registry.Service, error) {
	c.mtx.RLock()
	svc, ok := c.services[name]
	c.mtx.RUnlock()

	if ok {
		return svc, nil
	}

	selector := labels.SelectorFromSet(labels.Set{"name": name})

	services, err := c.client.Services(c.namespace).List(selector, fields.Everything())
	if err != nil {
		return nil, err
	}

	if len(services.Items) == 0 {
		return nil, fmt.Errorf("Service not found")
	}

	ks := &registry.Service{
		Name: name,
	}

	for _, item := range services.Items {
		ks.Nodes = append(ks.Nodes, &registry.Node{
			Address: item.Spec.ClusterIP,
			Port:    item.Spec.Ports[0].Port,
		})
	}

	return ks, nil
}
Example #11
// GetCustomMetric returns the average value of the given custom metric from the
// pods picked using the namespace and selector passed as arguments.
func (h *HeapsterMetricsClient) GetCustomMetric(customMetricName string, namespace string, selector map[string]string) (*float64, time.Time, error) {
	metricSpec := getHeapsterCustomMetricDefinition(customMetricName)

	labelSelector := labels.SelectorFromSet(labels.Set(selector))
	podList, err := h.client.Core().Pods(namespace).List(api.ListOptions{LabelSelector: labelSelector})

	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	for _, pod := range podList.Items {
		if pod.Status.Phase == api.PodPending {
			// Skip pending pods.
			continue
		}
		podNames = append(podNames, pod.Name)
	}
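	// All matched pods are still pending in this case, so there is nothing to sample yet.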
	if len(podNames) == 0 && len(podList.Items) > 0 {
		return nil, time.Time{}, fmt.Errorf("no running pods")
	}

	value, timestamp, err := h.getForPods(metricSpec, namespace, podNames)
	if err != nil {
		return nil, time.Time{}, err
	}
	return &value.floatValue, timestamp, nil
}
Example #12
// get all pod services
func (cluster *CouchdbCluster) GetAllPodServices() (*[]api.Service, error) {
	// service special label
	serviceLabels := make(map[string]string)
	for k, v := range cluster.Labels {
		serviceLabels[k] = v
	}
	// add special pod-service label
	serviceLabels[LABEL_POD_SERVICE] = "true"

	// list options
	listOptions := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(serviceLabels))}

	// get a new kube client
	c, err := KubeClient(KUBE_API)
	// check for errors
	if err != nil {
		ErrorLog("spawner_rc: get all pod Service: Cannot connect to Kubernetes api ")
		ErrorLog(err)
		return nil, err
	} else {
		//  get list service
		svcList, err := c.Services(cluster.Namespace).List(listOptions)
		if err != nil {
			ErrorLog("spawner_rc: get all pod Service: get svc list fail ")
			ErrorLog(err)
			return nil, err
		}
		// fine, no errors
		return &svcList.Items, nil
	}
}
Example #13
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
Example #14
func TestToLabelSelector(t *testing.T) {
	selector, _ := unversioned.LabelSelectorAsSelector(
		&unversioned.LabelSelector{MatchLabels: map[string]string{"app": "test"}})

	cases := []struct {
		selector map[string]string
		expected labels.Selector
	}{
		{
			map[string]string{},
			labels.SelectorFromSet(nil),
		},
		{
			map[string]string{"app": "test"},
			selector,
		},
	}

	for _, c := range cases {
		actual, _ := toLabelSelector(c.selector)
		if !reflect.DeepEqual(actual, c.expected) {
			t.Errorf("toLabelSelector(%#v) == \n%#v\nexpected \n%#v\n",
				c.selector, actual, c.expected)
		}
	}
}
Example #15
// Simplified version of RunRC, that does not create RC, but creates plain Pods.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
	pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// no pod to start
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Core().Pods(namespace).Create(&pod)
		if err != nil {
			return err
		}
	}
	logFunc("Waiting for running...")
	if waitForRunning {
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
}
Example #16
func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) {
	pods := []*api.Pod{}
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		selector := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
		options := api.ListOptions{LabelSelector: selector}
		podList, err := c.Pods(ns).List(options)
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range podList.Items {
			if pod.Status.Phase == api.PodPending || pod.Status.Phase == api.PodRunning {
				pod := pod // copy before taking the address; the range variable is reused on every iteration
				pods = append(pods, &pod)
			}
		}
		if len(pods) > 0 {
			break
		}
	}
	if len(pods) == 0 {
		Failf("No pods found")
	}
	for _, pod := range pods {
		err := waitForPodRunningInNamespace(c, pod.Name, ns)
		Expect(err).NotTo(HaveOccurred())
		fn(*pod)
	}
}
Example #17
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo3 := copyOrDie(obj)
	t.setObjectMeta(foo3, "foo3")
	foo4 := copyOrDie(obj)
	foo4Meta := t.getObjectMetaOrFail(foo4)
	foo4Meta.Name = "foo4"
	foo4Meta.Namespace = api.NamespaceValue(ctx)
	foo4Meta.Labels = testLabels

	objs := ([]runtime.Object{foo3, foo4})

	assignFn(objs)
	filtered := []runtime.Object{objs[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	options := &api.ListOptions{LabelSelector: selector}
	listObj, err := t.storage.(rest.Lister).List(ctx, options)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
Example #18
func (d *ClusterRegistry) getRegistryPods(service *kapi.Service, r types.DiagnosticResult) []*kapi.Pod {
	runningPods := []*kapi.Pod{}
	pods, err := d.KubeClient.Pods(kapi.NamespaceDefault).List(labels.SelectorFromSet(service.Spec.Selector), fields.Everything())
	if err != nil {
		r.Error("DClu1005", err, fmt.Sprintf("Finding pods for '%s' service failed. This should never happen. Error: (%T) %[2]v", registryName, err))
		return runningPods
	} else if len(pods.Items) < 1 {
		r.Error("DClu1006", nil, fmt.Sprintf(clRegNoPods, registryName))
		return runningPods
	} else if len(pods.Items) > 1 {
		// multiple registry pods using EmptyDir will be inconsistent
		for _, volume := range pods.Items[0].Spec.Volumes {
			if volume.Name == registryVolume && volume.EmptyDir != nil {
				r.Error("DClu1007", nil, fmt.Sprintf(clRegMultiPods, registryName))
				break
			}
		}
	}
	for _, pod := range pods.Items {
		r.Debug("DClu1008", fmt.Sprintf("Found %s pod with name %s", registryName, pod.ObjectMeta.Name))
		if pod.Status.Phase != kapi.PodRunning {
			r.Warn("DClu1009", nil, fmt.Sprintf(clRegPodDown, pod.ObjectMeta.Name, registryName))
		} else {
			pod := pod // copy; appending the address of the range variable would leave every entry pointing at the last pod
			runningPods = append(runningPods, &pod)
			// Check the logs for that pod for common issues (credentials, DNS resolution failure)
			d.checkRegistryLogs(&pod, r)
		}
	}
	return runningPods
}
Example #19
func TestScaleGet(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)

	ctx := api.WithNamespace(api.NewContext(), namespace)
	rc, err := createController(storage.Controller, *validController, t)
	if err != nil {
		t.Fatalf("error setting new replication controller %v: %v", *validController, err)
	}

	want := &autoscaling.Scale{
		ObjectMeta: api.ObjectMeta{
			Name:              name,
			Namespace:         namespace,
			UID:               rc.UID,
			ResourceVersion:   rc.ResourceVersion,
			CreationTimestamp: rc.CreationTimestamp,
		},
		Spec: autoscaling.ScaleSpec{
			Replicas: validController.Spec.Replicas,
		},
		Status: autoscaling.ScaleStatus{
			Replicas: validController.Status.Replicas,
			Selector: labels.SelectorFromSet(validController.Spec.Template.Labels).String(),
		},
	}
	obj, err := storage.Scale.Get(ctx, name)
	if err != nil {
		t.Fatalf("error fetching scale for %s: %v", name, err)
	}
	got := obj.(*autoscaling.Scale)
	if !api.Semantic.DeepEqual(want, got) {
		t.Errorf("unexpected scale: %s", util.ObjectDiff(want, got))
	}
}
Example #20
func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
	if len(pod.Spec.NodeSelector) == 0 {
		return true
	}
	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
	return selector.Matches(labels.Set(node.Labels))
}
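A hedged usage sketch of the helper above, using made-up pod and node objects and assuming the same api package as the surrounding examples:

func nodeSelectorExample() {
	pod := &api.Pod{
		Spec: api.PodSpec{
			NodeSelector: map[string]string{"disktype": "ssd"},
		},
	}
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{
			Labels: map[string]string{"disktype": "ssd", "zone": "us-east-1a"},
		},
	}
	// The node carries every label the pod asks for, so the subset match succeeds.
	fmt.Println(PodMatchesNodeLabels(pod, node)) // true
}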
Example #21
func TestEtcdWatchNodesMatch(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, fakeClient := newStorage(t)
	node := validNewNode()

	watching, err := storage.Watch(ctx,
		labels.SelectorFromSet(labels.Set{"name": node.Name}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	nodeBytes, _ := testapi.Codec().Encode(node)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(nodeBytes),
		},
	}
	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	case <-time.After(time.Millisecond * 100):
		t.Error("unexpected timeout from result channel")
	}
	watching.Stop()
}
Example #22
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)
	foo2Meta.Labels = testLabels

	existing := assignFn([]runtime.Object{foo1, foo2})
	filtered := []runtime.Object{existing[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	listObj, err := t.storage.(rest.Lister).List(ctx, selector, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
Example #23
// tries to get ReplicationControllers from kubernetes with the specified cluster tag
// @return *[]api.ReplicationController - found rc array, nil if no rc was found
// @return error - any error that occurs while fetching rcs
func (cluster *CouchdbCluster) GetReplicationControllers() (*[]api.ReplicationController, error) {
	// get kube extensions api
	c, err := KubeClient(KUBE_API)
	// check for errors
	if err != nil {
		ErrorLog("spawner_rc: getReplicationControllers: Cannot connect to Kubernetes api ")
		ErrorLog(err)

		return nil, err
	} else {
		// list options
		listOptions := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(cluster.Labels))}
		// get all deployments for this user
		rcList, err := c.ReplicationControllers(cluster.Namespace).List(listOptions)
		if err != nil {
			ErrorLog("spawner_rc: getReplicationControllers:  getReplicationCOntrollers list error ")
			ErrorLog(err)

			return nil, err
		}
		return &rcList.Items, nil
	}
	// nothing matches, return fail
	return nil, errors.New("spawner_rc: deployment not found")
}
Example #24
// getHTTPProbe returns the HTTP readiness probe from the first container
// that matches targetPort, from the set of pods matching the given labels.
func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntOrString) (*api.Probe, error) {
	// Lookup any container with a matching targetPort from the set of pods
	// with a matching label selector.
	pl, err := t.podLister.List(labels.SelectorFromSet(labels.Set(l)))
	if err != nil {
		return nil, err
	}

	// If multiple endpoints have different health checks, take the first
	sort.Sort(PodsByCreationTimestamp(pl))

	for _, pod := range pl {
		logStr := fmt.Sprintf("Pod %v matching service selectors %v (targetport %+v)", pod.Name, l, targetPort)
		for _, c := range pod.Spec.Containers {
			if !isSimpleHTTPProbe(c.ReadinessProbe) {
				continue
			}
			for _, p := range c.Ports {
				cPort := intstr.IntOrString{IntVal: p.ContainerPort, StrVal: p.Name}
				if isPortEqual(cPort, targetPort) {
					if isPortEqual(c.ReadinessProbe.Handler.HTTPGet.Port, targetPort) {
						return c.ReadinessProbe, nil
					}
					glog.Infof("%v: found matching targetPort on container %v, but not on readinessProbe (%+v)",
						logStr, c.Name, c.ReadinessProbe.Handler.HTTPGet.Port)
				}
			}
		}
		glog.V(4).Infof("%v: lacks a matching HTTP probe for use in health checks.", logStr)
	}
	return nil, nil
}
Example #25
func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, err error) {
	podList, err := h.client.Pods(namespace).
		List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())

	if err != nil {
		return nil, nil, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	sum := resource.MustParse("0")
	missing := false
	for _, pod := range podList.Items {
		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[resourceName]
			if containerRequest.Amount != nil {
				sum.Add(containerRequest)
			} else {
				missing = true
			}
		}
	}
	if missing || sum.Cmp(resource.MustParse("0")) == 0 {
		return nil, nil, fmt.Errorf("some pods do not have request for %s", resourceName)
	}
	glog.Infof("Sum of %s requested: %v", resourceName, sum)
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items))))
	request = &avg
	consumption, err = h.getForPods(resourceName, namespace, podNames)
	if err != nil {
		return nil, nil, err
	}
	return consumption, request, nil
}
Example #26
func TestEtcdWatchNodesNotMatch(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, fakeClient := newStorage(t)
	node := validNewNode()

	watching, err := storage.Watch(ctx,
		labels.SelectorFromSet(labels.Set{"name": "bar"}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	nodeBytes, _ := latest.Codec.Encode(node)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(nodeBytes),
		},
	}

	select {
	case <-watching.ResultChan():
		t.Error("unexpected result from result channel")
	case <-time.After(time.Millisecond * 100):
		// expected case
	}
}
Example #27
func (f *factory) LogsForObject(object, options runtime.Object) (*restclient.Request, error) {
	clientset, err := f.clients.ClientSetForVersion(nil)
	if err != nil {
		return nil, err
	}

	switch t := object.(type) {
	case *api.Pod:
		opts, ok := options.(*api.PodLogOptions)
		if !ok {
			return nil, errors.New("provided options object is not a PodLogOptions")
		}
		return clientset.Core().Pods(t.Namespace).GetLogs(t.Name, opts), nil

	case *api.ReplicationController:
		opts, ok := options.(*api.PodLogOptions)
		if !ok {
			return nil, errors.New("provided options object is not a PodLogOptions")
		}
		selector := labels.SelectorFromSet(t.Spec.Selector)
		sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
		pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy)
		if err != nil {
			return nil, err
		}
		if numPods > 1 {
			fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
		}

		return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

	case *extensions.ReplicaSet:
		opts, ok := options.(*api.PodLogOptions)
		if !ok {
			return nil, errors.New("provided options object is not a PodLogOptions")
		}
		selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
		pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy)
		if err != nil {
			return nil, err
		}
		if numPods > 1 {
			fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
		}

		return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

	default:
		gvks, _, err := api.Scheme.ObjectKinds(object)
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("cannot get the logs from %v", gvks[0])
	}
}
Example #28
func (t *T) AcceptOffer(offer *mesos.Offer) bool {
	if offer == nil {
		return false
	}

	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}

	// check ports
	if _, err := t.mapper.Generate(t, offer); err != nil {
		log.V(3).Info(err)
		return false
	}

	// find offered cpu and mem
	var (
		offeredCpus mresource.CPUShares
		offeredMem  mresource.MegaBytes
	)
	for _, resource := range offer.Resources {
		if resource.GetName() == "cpus" {
			offeredCpus = mresource.CPUShares(*resource.GetScalar().Value)
		}

		if resource.GetName() == "mem" {
			offeredMem = mresource.MegaBytes(*resource.GetScalar().Value)
		}
	}

	// calculate cpu and mem sum over all containers of the pod
	// TODO (@sttts): also support pod.spec.resources.limit.request
	// TODO (@sttts): take into account the executor resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
	if (cpu > offeredCpus) || (mem > offeredMem) {
		log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
		return false
	}

	return true
}
Example #29
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
	var nsServicePods []*api.Pod
	if services, err := s.serviceLister.GetPodServices(pod); err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := s.podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}

	// separate out the nodes that have the label from the ones that don't
	otherNodes := []string{}
	labeledNodes := map[string]string{}
	for _, node := range nodes {
		if labels.Set(node.Labels).Has(s.label) {
			label := labels.Set(node.Labels).Get(s.label)
			labeledNodes[node.Name] = label
		} else {
			otherNodes = append(otherNodes, node.Name)
		}
	}

	podCounts := map[string]int{}
	for _, pod := range nsServicePods {
		label, exists := labeledNodes[pod.Spec.NodeName]
		if !exists {
			continue
		}
		podCounts[label]++
	}

	numServicePods := len(nsServicePods)
	result := []schedulerapi.HostPriority{}
	// score is on a scale of 0..maxPriority,
	// 0 being the lowest priority and maxPriority the highest
	for node := range labeledNodes {
		// initializing to the default/max node score of maxPriority
		fScore := float32(maxPriority)
		if numServicePods > 0 {
			fScore = maxPriority * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods))
		}
		result = append(result, schedulerapi.HostPriority{Host: node, Score: int(fScore)})
	}
	// add the open nodes with a score of 0
	for _, node := range otherNodes {
		result = append(result, schedulerapi.HostPriority{Host: node, Score: 0})
	}

	return result, nil
}
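To make the scoring formula above concrete: assuming maxPriority is 10, four pods of the service exist in the namespace, three of them sit on nodes whose value for the configured label is "zone-a" and one on nodes labeled "zone-b", then "zone-a" nodes score int(10 * (4-3)/4) = 2, "zone-b" nodes score int(10 * (4-1)/4) = 7, and unlabeled nodes get 0, so the scheduler prefers nodes under the less crowded label value.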
Example #30
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n api.Node) api.PodSpec, count int, block bool) (error, *api.Service) {
	var err error
	theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
	f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
	if block {
		err = WaitForPodsWithLabelRunning(f.Client, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
	}
	return err, theService
}