Example #1
func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
	if len(pod.Spec.NodeSelector) == 0 {
		return true
	}
	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
	return selector.Matches(labels.Set(node.Labels))
}
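The predicate above succeeds either because the pod has no node selector at all, or because every key/value pair in pod.Spec.NodeSelector is also present on the node. A minimal standalone sketch of that subset-matching semantics, using plain maps instead of the real labels package (all names below are illustrative):

package main

import "fmt"

// matchesNodeLabels mimics the subset semantics of SelectorFromSet(...).Matches(...):
// every key/value pair in the selector must be present on the node.
func matchesNodeLabels(nodeSelector, nodeLabels map[string]string) bool {
	if len(nodeSelector) == 0 {
		return true // no selector means any node is acceptable
	}
	for k, v := range nodeSelector {
		if nodeLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	selector := map[string]string{"disktype": "ssd"}
	node := map[string]string{"disktype": "ssd", "zone": "us-east1-a"}
	fmt.Println(matchesNodeLabels(selector, node))                                    // true
	fmt.Println(matchesNodeLabels(selector, map[string]string{"zone": "us-east1-a"})) // false
}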
Example #2
// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling is always based on the original size, not the current size.
func scaleRC(wg *sync.WaitGroup, config *RCConfig) {
	defer GinkgoRecover()
	defer wg.Done()
	resizingTime := 3 * time.Minute

	sleepUpTo(resizingTime)
	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
	expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize),
		fmt.Sprintf("scaling rc %s for the first time", config.Name))
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
	_, err := config.Client.Pods(config.Namespace).List(selector, fields.Everything())
	expectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
}
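The random target size works out as follows: rand.Intn(config.Replicas) returns a value in [0, size), and adding config.Replicas/2 shifts the range to roughly [0.5*size, 1.5*size). A small sketch of just that arithmetic (the constant and variable names are illustrative):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const replicas = 10
	for i := 0; i < 5; i++ {
		// rand.Intn(replicas) is in [0, replicas), so newSize falls in
		// [replicas/2, replicas/2+replicas), i.e. roughly [0.5*size, 1.5*size).
		newSize := uint(rand.Intn(replicas) + replicas/2)
		fmt.Println(newSize)
	}
}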
Example #3
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
// on the same machine.
func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	var maxCount int
	var nsServicePods []*api.Pod

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	counts := map[string]int{}
	if len(nsServicePods) > 0 {
		for _, pod := range nsServicePods {
			counts[pod.Spec.NodeName]++
			// Compute the maximum number of pods hosted on any minion
			if counts[pod.Spec.NodeName] > maxCount {
				maxCount = counts[pod.Spec.NodeName]
			}
		}
	}

	result := []algorithm.HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 the highest
	for _, minion := range minions.Items {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
		}
		result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
	}
	return result, nil
}
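The score assigned to each minion is linear in how many matching service pods it already hosts: the busiest minion scores 0 and an empty one scores 10. A hedged sketch of just that scoring step, detached from the lister plumbing (function and variable names are made up for illustration):

package main

import "fmt"

// spreadScore returns the 0-10 priority used above: the more service pods a
// node already hosts relative to the busiest node, the lower its score.
func spreadScore(countOnNode, maxCount int) int {
	if maxCount == 0 {
		return 10 // no service pods anywhere yet: every node gets the max score
	}
	return int(10 * (float32(maxCount-countOnNode) / float32(maxCount)))
}

func main() {
	counts := map[string]int{"minion-1": 3, "minion-2": 1, "minion-3": 0}
	for name, c := range counts {
		fmt.Printf("%s -> %d\n", name, spreadScore(c, 3))
	}
}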
Example #4
// Get all replication controllers whose selectors would match a given set of
// labels.
// TODO Move this to pkg/client and ideally implement it server-side (instead
// of getting all RC's and searching through them manually).
func getReplicationControllersForLabels(c client.ReplicationControllerInterface, labelsToMatch labels.Labels) ([]api.ReplicationController, error) {
	// Get all replication controllers.
	// TODO this needs a namespace scope as argument
	rcs, err := c.List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}

	// Find the ones that match labelsToMatch.
	var matchingRCs []api.ReplicationController
	for _, controller := range rcs.Items {
		selector := labels.SelectorFromSet(controller.Spec.Selector)
		if selector.Matches(labelsToMatch) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
Example #5
func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) {
	rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything())
	if err != nil {
		return
	}
	for _, pod := range rcPods.Items {
		switch pod.Status.Phase {
		case api.PodRunning:
			running++
		case api.PodPending:
			waiting++
		case api.PodSucceeded:
			succeeded++
		case api.PodFailed:
			failed++
		}
	}
	return
}
Example #6
func TestEtcdWatchControllersMatch(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistryWithPods(fakeClient)
	path := etcdgeneric.NamespaceKeyRootFunc(ctx, "/pods")
	path = etcdtest.AddPrefix(path)
	fakeClient.ExpectNotFoundGet(path)
	watching, err := registry.WatchControllers(ctx,
		labels.SelectorFromSet(labels.Set{"name": "foo"}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	controller := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: "foo",
			Labels: map[string]string{
				"name": "foo",
			},
		},
	}
	controllerBytes, _ := latest.Codec.Encode(controller)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(controllerBytes),
		},
	}
	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	case <-time.After(time.Millisecond * 100):
		t.Error("unexpected timeout from result channel")
	}
	watching.Stop()
}
Example #7
func TestEtcdWatchControllersMatch(t *testing.T) {
	ctx := api.WithNamespace(api.NewDefaultContext(), validController.Namespace)
	storage, fakeClient := newStorage(t)
	fakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, "/registry/pods"))

	watching, err := storage.Watch(ctx,
		labels.SelectorFromSet(validController.Spec.Selector),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	// The watcher above is waiting for these labels; on receiving them it should
	// apply the ControllerStatus decorator, which lists pods, causing a query against
	// the /registry/pods endpoint of the etcd client.
	controller := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Labels:    validController.Spec.Selector,
			Namespace: "default",
		},
	}
	controllerBytes, _ := latest.Codec.Encode(controller)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(controllerBytes),
		},
	}
	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	case <-time.After(time.Millisecond * 100):
		t.Error("unexpected timeout from result channel")
	}
	watching.Stop()
}
Example #8
func TestEtcdWatchPodsMatch(t *testing.T) {
	registry, _, _, fakeClient, _ := newStorage(t)
	ctx := api.NewDefaultContext()
	watching, err := registry.Watch(ctx,
		labels.SelectorFromSet(labels.Set{"name": "foo"}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "foo",
			Labels: map[string]string{
				"name": "foo",
			},
		},
	}
	podBytes, _ := latest.Codec.Encode(pod)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(podBytes),
		},
	}
	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	case <-time.After(time.Millisecond * 100):
		t.Error("unexpected timeout from result channel")
	}
	watching.Stop()
}
Example #9
func TestEtcdWatchControllersNotMatch(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, fakeClient := newStorage(t)
	fakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, "/registry/pods"))

	watching, err := storage.Watch(ctx,
		labels.SelectorFromSet(labels.Set{"name": "foo"}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	controller := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: "bar",
			Labels: map[string]string{
				"name": "bar",
			},
		},
	}
	controllerBytes, _ := latest.Codec.Encode(controller)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(controllerBytes),
		},
	}

	select {
	case <-watching.ResultChan():
		t.Error("unexpected result from result channel")
	case <-time.After(time.Millisecond * 100):
		// expected case
	}
}
Example #10
func TestEtcdListControllersLabelsMatch(t *testing.T) {
	storage, fakeClient := newStorage(t)
	ctx := api.NewDefaultContext()
	key := makeControllerListKey(ctx)
	key = etcdtest.AddPrefix(key)

	controller := validController
	controller.Labels = map[string]string{"k": "v"}
	controller.Name = "bar"

	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, &validController),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, &controller),
					},
				},
			},
		},
		E: nil,
	}
	testLabels := labels.SelectorFromSet(labels.Set(controller.Labels))
	objList, err := storage.List(ctx, testLabels, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	controllers, _ := objList.(*api.ReplicationControllerList)
	if len(controllers.Items) != 1 || controllers.Items[0].Name != controller.Name ||
		!testLabels.Matches(labels.Set(controllers.Items[0].Labels)) {
		t.Errorf("Unexpected controller list: %#v for query with labels %#v",
			controllers, testLabels)
	}
}
Example #11
func TestEtcdWatchResourceQuotasNotMatch(t *testing.T) {
	registry, _, fakeClient, _ := newStorage(t)
	ctx := api.NewDefaultContext()
	watching, err := registry.Watch(ctx,
		labels.SelectorFromSet(labels.Set{"name": "foo"}),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	resourcequota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name: "bar",
			Labels: map[string]string{
				"name": "bar",
			},
		},
	}
	resourcequotaBytes, _ := latest.Codec.Encode(resourcequota)
	fakeClient.WatchResponse <- &etcd.Response{
		Action: "create",
		Node: &etcd.Node{
			Value: string(resourcequotaBytes),
		},
	}

	select {
	case <-watching.ResultChan():
		t.Error("unexpected result from result channel")
	case <-time.After(time.Millisecond * 100):
		// expected case
	}
}
Example #12
func TestEtcdWatchServicesBadSelector(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	_, err := registry.WatchServices(
		ctx,
		labels.Everything(),
		fields.SelectorFromSet(fields.Set{"Field.Selector": "foo"}),
		"",
	)
	if err == nil {
		t.Errorf("unexpected non-error: %v", err)
	}

	_, err = registry.WatchServices(
		ctx,
		labels.SelectorFromSet(labels.Set{"Label.Selector": "foo"}),
		fields.Everything(),
		"",
	)
	if err == nil {
		t.Errorf("unexpected non-error: %v", err)
	}
}
Example #13
								}
							}
							if startTime != util.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := framework.NewInformer(
					&cache.ListWatch{
						ListFunc: func() (runtime.Object, error) {
							return c.Pods(ns).List(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything())
						},
						WatchFunc: func(rv string) (watch.Interface, error) {
							return c.Pods(ns).Watch(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything(), rv)
						},
					},
					&api.Pod{},
					0,
					framework.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*api.Pod)
Example #14
// CheckServiceAffinity ensures that only the minions that match the specified labels are considered for scheduling.
// The set of labels to be considered are provided to the struct (ServiceAffinity).
// The pod is checked for the labels and any missing labels are then checked in the minion
// that hosts the service pods (peers) for the given pod.
//
// We add an implicit selector requiring some particular value V for label L to a pod, if:
// - L is listed in the ServiceAffinity object that is passed into the function
// - the pod does not have any NodeSelector for L
// - some other pod from the same service is already scheduled onto a minion that has value V for label L
func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
	var affinitySelector labels.Selector

	// check if the pod being scheduled has the affinity labels specified in its NodeSelector
	affinityLabels := map[string]string{}
	nodeSelector := labels.Set(pod.Spec.NodeSelector)
	labelsExist := true
	for _, l := range s.labels {
		if nodeSelector.Has(l) {
			affinityLabels[l] = nodeSelector.Get(l)
		} else {
			// the current pod does not specify all the labels, look in the existing service pods
			labelsExist = false
		}
	}

	// skip looking at other pods in the service if the current pod defines all the required affinity labels
	if !labelsExist {
		services, err := s.serviceLister.GetPodServices(pod)
		if err == nil {
			// just use the first service and get the other pods within the service
			// TODO: a separate predicate can be created that tries to handle all services for the pod
			selector := labels.SelectorFromSet(services[0].Spec.Selector)
			servicePods, err := s.podLister.List(selector)
			if err != nil {
				return false, err
			}
			// consider only the pods that belong to the same namespace
			nsServicePods := []*api.Pod{}
			for _, nsPod := range servicePods {
				if nsPod.Namespace == pod.Namespace {
					nsServicePods = append(nsServicePods, nsPod)
				}
			}
			if len(nsServicePods) > 0 {
				// consider any service pod and fetch the minion its hosted on
				otherMinion, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName)
				if err != nil {
					return false, err
				}
				for _, l := range s.labels {
					// If the pod being scheduled has the label value specified, do not override it
					if _, exists := affinityLabels[l]; exists {
						continue
					}
					if labels.Set(otherMinion.Labels).Has(l) {
						affinityLabels[l] = labels.Set(otherMinion.Labels).Get(l)
					}
				}
			}
		}
	}

	// if there are no existing pods in the service, consider all minions
	if len(affinityLabels) == 0 {
		affinitySelector = labels.Everything()
	} else {
		affinitySelector = labels.Set(affinityLabels).AsSelector()
	}

	minion, err := s.nodeInfo.GetNodeInfo(node)
	if err != nil {
		return false, err
	}

	// check if the minion matches the selector
	return affinitySelector.Matches(labels.Set(minion.Labels)), nil
}
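The core of the predicate is how the implicit affinity selector is assembled: label values the pod already pins in its NodeSelector are kept, and any remaining affinity labels are copied from the minion hosting a peer pod of the same service. A standalone sketch of that merge under those assumptions (plain maps, illustrative names):

package main

import "fmt"

// buildAffinityLabels merges the pod's own node-selector values with values
// observed on the node hosting a peer pod, for the given affinity label keys.
// Values the pod specifies itself are never overridden.
func buildAffinityLabels(affinityKeys []string, podNodeSelector, peerNodeLabels map[string]string) map[string]string {
	out := map[string]string{}
	for _, k := range affinityKeys {
		if v, ok := podNodeSelector[k]; ok {
			out[k] = v // the pod pins this label itself
		} else if v, ok := peerNodeLabels[k]; ok {
			out[k] = v // borrow the value from the peer pod's node
		}
	}
	return out
}

func main() {
	keys := []string{"region", "zone"}
	pod := map[string]string{"region": "us-east"}
	peerNode := map[string]string{"region": "eu-west", "zone": "eu-west-1b"}
	// "region" comes from the pod itself, "zone" from the peer's node.
	fmt.Println(buildAffinityLabels(keys, pod, peerNode))
}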
Example #15
						LivenessProbe: &api.Probe{
							Handler: api.Handler{
								HTTPGet: &api.HTTPGetAction{
									Path: "/index.html",
									Port: util.NewIntOrStringFromInt(8080),
								},
							},
							InitialDelaySeconds: 30,
						},
					},
				},
			},
		}

		By("setting up watch")
		pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
		if err != nil {
			Fail(fmt.Sprintf("Failed to query for pods: %v", err))
		}
		Expect(len(pods.Items)).To(Equal(0))
		w, err := podClient.Watch(
			labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything(), pods.ListMeta.ResourceVersion)
		if err != nil {
			Fail(fmt.Sprintf("Failed to set up watch: %v", err))
		}

		By("submitting the pod to qingyuan")
		// We call defer here in case there is a problem with
		// the test so we can ensure that we clean up after
		// ourselves
		defer podClient.Delete(pod.Name, nil)
Example #16
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	var nsServicePods []*api.Pod

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	// separate out the minions that have the label from the ones that don't
	otherMinions := []string{}
	labeledMinions := map[string]string{}
	for _, minion := range minions.Items {
		if labels.Set(minion.Labels).Has(s.label) {
			label := labels.Set(minion.Labels).Get(s.label)
			labeledMinions[minion.Name] = label
		} else {
			otherMinions = append(otherMinions, minion.Name)
		}
	}

	podCounts := map[string]int{}
	for _, pod := range nsServicePods {
		label, exists := labeledMinions[pod.Spec.NodeName]
		if !exists {
			continue
		}
		podCounts[label]++
	}

	numServicePods := len(nsServicePods)
	result := []algorithm.HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 the highest
	for minion := range labeledMinions {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if numServicePods > 0 {
			fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
		}
		result = append(result, algorithm.HostPriority{Host: minion, Score: int(fScore)})
	}
	// add the open minions with a score of 0
	for _, minion := range otherMinions {
		result = append(result, algorithm.HostPriority{Host: minion, Score: 0})
	}

	return result, nil
}
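Unlike the per-node spreading above, the counting here is keyed by label value, so every minion sharing a value (for example the same zone) shares one pod count, and minions missing the label are appended with a score of 0. A minimal sketch of that per-value scoring (illustrative names and data):

package main

import "fmt"

// antiAffinityScores scores each labeled node by how few of the service's pods
// already run under that node's label value, on the same 0-10 scale as above.
func antiAffinityScores(nodeLabelValue map[string]string, podsPerValue map[string]int, totalPods int) map[string]int {
	scores := map[string]int{}
	for node, value := range nodeLabelValue {
		score := 10
		if totalPods > 0 {
			score = int(10 * float32(totalPods-podsPerValue[value]) / float32(totalPods))
		}
		scores[node] = score
	}
	return scores
}

func main() {
	nodes := map[string]string{"n1": "zone-a", "n2": "zone-a", "n3": "zone-b"}
	podsPerValue := map[string]int{"zone-a": 3, "zone-b": 1}
	fmt.Println(antiAffinityScores(nodes, podsPerValue, 4)) // map[n1:2 n2:2 n3:7]
}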
Example #17
			},
		}

		By("submitting the pod to qingyuan")
		defer func() {
			By("deleting the pod")
			podClient.Delete(pod.Name, nil)
		}()
		if _, err := podClient.Create(pod); err != nil {
			Failf("Failed to create pod: %v", err)
		}

		expectNoError(waitForPodRunning(c, pod.Name))

		By("verifying the pod is in qingyuan")
		pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
		if err != nil {
			Failf("Failed to query for pods: %v", err)
		}
		Expect(len(pods.Items)).To(Equal(1))

		By("retrieving the pod")
		podWithUid, err := podClient.Get(pod.Name)
		if err != nil {
			Failf("Failed to get pod: %v", err)
		}
		fmt.Printf("%+v\n", podWithUid)
		var events *api.EventList
		// Check for scheduler event about the pod.
		By("checking for scheduler event about the pod")
		expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
			events, err := c.Events(api.NamespaceDefault).List(
				labels.Everything(),
				fields.Set{
Example #18
func TestHelperList(t *testing.T) {
	tests := []struct {
		Err     bool
		Req     func(*http.Request) bool
		Resp    *http.Response
		HttpErr error
	}{
		{
			HttpErr: errors.New("failure"),
			Err:     true,
		},
		{
			Resp: &http.Response{
				StatusCode: http.StatusNotFound,
				Body:       objBody(&api.Status{Status: api.StatusFailure}),
			},
			Err: true,
		},
		{
			Resp: &http.Response{
				StatusCode: http.StatusOK,
				Body: objBody(&api.PodList{
					Items: []api.Pod{{
						ObjectMeta: api.ObjectMeta{Name: "foo"},
					},
					},
				}),
			},
			Req: func(req *http.Request) bool {
				if req.Method != "GET" {
					t.Errorf("unexpected method: %#v", req)
					return false
				}
				if req.URL.Path != "/namespaces/bar" {
					t.Errorf("url doesn't contain name: %#v", req.URL)
					return false
				}
				if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Version())) != labels.SelectorFromSet(labels.Set{"foo": "baz"}).String() {
					t.Errorf("url doesn't contain query parameters: %#v", req.URL)
					return false
				}
				return true
			},
		},
	}
	for _, test := range tests {
		client := &client.FakeRESTClient{
			Codec: testapi.Codec(),
			Resp:  test.Resp,
			Err:   test.HttpErr,
		}
		modifier := &Helper{
			RESTClient:      client,
			NamespaceScoped: true,
		}
		obj, err := modifier.List("bar", testapi.Version(), labels.SelectorFromSet(labels.Set{"foo": "baz"}))
		if (err != nil) != test.Err {
			t.Errorf("unexpected error: %t %v", test.Err, err)
		}
		if err != nil {
			continue
		}
		if obj.(*api.PodList).Items[0].Name != "foo" {
			t.Errorf("unexpected object: %#v", obj)
		}
		if test.Req != nil && !test.Req(client.Req) {
			t.Errorf("unexpected request: %#v", client.Req)
		}
	}
}
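The assertion on the query string relies on how a set-based selector serializes: sorted key=value pairs joined by commas (so {"foo": "baz"} becomes "foo=baz"). A hedged, standalone sketch of that encoding; the parameter name "labelSelector" and the helper below are illustrative, not the real client code:

package main

import (
	"fmt"
	"net/url"
	"sort"
	"strings"
)

// encodeSetSelector renders a map selector as sorted "key=value" pairs joined
// by commas, mirroring the string form the test compares against.
func encodeSetSelector(set map[string]string) string {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, k+"="+set[k])
	}
	return strings.Join(pairs, ",")
}

func main() {
	v := url.Values{}
	v.Set("labelSelector", encodeSetSelector(map[string]string{"foo": "baz"}))
	fmt.Println(v.Encode()) // labelSelector=foo%3Dbaz
}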
Example #19
func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
	var err error
	// First, update the template label.  This ensures that any newly created pods will have the new label
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		if rc.Spec.Template.Labels == nil {
			rc.Spec.Template.Labels = map[string]string{}
		}
		rc.Spec.Template.Labels[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}

	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
	// TODO: extract the code from the label command and re-use it here.
	podList, err := client.Pods(namespace).List(labels.SelectorFromSet(oldRc.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if pod.Labels == nil {
			pod.Labels = map[string]string{
				deploymentKey: deploymentValue,
			}
		} else {
			pod.Labels[deploymentKey] = deploymentValue
		}
		err = nil
		delay := 3
		for i := 0; i < MaxRetries; i++ {
			_, err = client.Pods(namespace).Update(pod)
			if err != nil {
				fmt.Fprintf(out, "Error updating pod (%v), retrying after %d seconds", err, delay)
				time.Sleep(time.Second * time.Duration(delay))
				delay *= delay
			} else {
				break
			}
		}
		if err != nil {
			return nil, err
		}
	}

	if oldRc.Spec.Selector == nil {
		oldRc.Spec.Selector = map[string]string{}
	}
	// Copy the old selector, so that we can scrub out any orphaned pods
	selectorCopy := map[string]string{}
	for k, v := range oldRc.Spec.Selector {
		selectorCopy[k] = v
	}
	oldRc.Spec.Selector[deploymentKey] = deploymentValue

	// Update the selector of the rc so it manages all the pods we updated above
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		rc.Spec.Selector[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}

	// Clean up any orphaned pods that don't have the new label; this can happen if the rc manager
	// doesn't see the update to its pod template and creates a new pod with the old labels after
	// we've finished re-adopting existing pods to the rc.
	podList, err = client.Pods(namespace).List(labels.SelectorFromSet(selectorCopy), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
			if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil {
				return nil, err
			}
		}
	}

	return oldRc, nil
}
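Note that the original selector is copied before the deployment key is added, so the final scrub can list pods matching the old selector and delete the ones that never picked up the new key/value. A minimal sketch of just that orphan check (the helper name is made up):

package main

import "fmt"

// isOrphan reports whether a pod selected by the old selector failed to pick
// up the new deployment key/value and should therefore be cleaned up.
func isOrphan(podLabels map[string]string, deploymentKey, deploymentValue string) bool {
	value, found := podLabels[deploymentKey]
	return !found || value != deploymentValue
}

func main() {
	adopted := map[string]string{"app": "web", "deployment": "abc123"}
	stale := map[string]string{"app": "web"}
	fmt.Println(isOrphan(adopted, "deployment", "abc123")) // false
	fmt.Println(isOrphan(stale, "deployment", "abc123"))   // true
}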
Example #20
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging.
func ClusterLevelLoggingWithElasticsearch(f *Framework) {
	// TODO: For now assume we are only testing cluster logging with Elasticsearch
	// on GCE. Once we are sure that Elasticsearch cluster level logging
	// works for other providers we should widen the scope of this test.
	if !providerIs("gce") {
		Logf("Skipping cluster level logging test for provider %s", testContext.Provider)
		return
	}

	// graceTime is how long to keep retrying requests for status information.
	const graceTime = 2 * time.Minute
	// ingestionTimeout is how long to keep retrying to wait for all the
	// logs to be ingested.
	const ingestionTimeout = 3 * time.Minute

	// Check for the existence of the Elasticsearch service.
	By("Checking the Elasticsearch service exists.")
	s := f.Client.Services(api.NamespaceDefault)
	// Make a few attempts to connect. This makes the test robust against
	// being run as the first e2e test just after the e2e cluster has been created.
	var err error
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		if _, err = s.Get("elasticsearch-logging"); err == nil {
			break
		}
		Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
	}
	Expect(err).NotTo(HaveOccurred())

	// Wait for the Elasticsearch pods to enter the running state.
	By("Checking to make sure the Elasticsearch pods are running")
	label := labels.SelectorFromSet(labels.Set(map[string]string{esKey: esValue}))
	pods, err := f.Client.Pods(api.NamespaceDefault).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		err = waitForPodRunning(f.Client, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Checking to make sure we are talking to an Elasticsearch service.")
	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
	var statusCode float64
	var esResponse map[string]interface{}
	err = nil
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		// Query against the root URL for Elasticsearch.
		body, err := f.Client.Get().
			Namespace(api.NamespaceDefault).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			DoRaw()
		if err != nil {
			Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
			continue
		}
		esResponse, err = bodyToJSON(body)
		if err != nil {
			Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
			continue
		}
		statusIntf, ok := esResponse["status"]
		if !ok {
			Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
			continue
		}
		statusCode, ok = statusIntf.(float64)
		if !ok {
			// Assume the status is a string describing a failure. Retry.
			Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
			continue
		}
		break
	}
	Expect(err).NotTo(HaveOccurred())
	if int(statusCode) != 200 {
		Failf("Elasticsearch cluster has a bad status: %v", statusCode)
	}
	// Check to see if have a cluster_name field.
	clusterName, ok := esResponse["cluster_name"]
	if !ok {
		Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
	}
	if clusterName != "qingyuan-logging" {
		Failf("Connected to wrong cluster %q (expecting qingyuan_logging)", clusterName)
	}

	// Now assume we really are talking to an Elasticsearch instance.
	// Check the cluster health.
	By("Checking health of Elasticsearch service.")
	var body []byte
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		body, err = f.Client.Get().
			Namespace(api.NamespaceDefault).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			Suffix("_cluster/health").
			Param("health", "pretty").
			DoRaw()
		if err == nil {
			break
		}
	}
	Expect(err).NotTo(HaveOccurred())

	health, err := bodyToJSON(body)
	Expect(err).NotTo(HaveOccurred())
	statusIntf, ok := health["status"]
	if !ok {
		Failf("No status field found in cluster health response: %v", health)
	}
	status := statusIntf.(string)
	if status != "green" && status != "yellow" {
		Failf("Cluster health has bad status: %s", status)
	}

	// Obtain a list of nodes so we can place one synthetic logger on each node.
	nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		Failf("Failed to list nodes: %v", err)
	}
	nodeCount := len(nodes.Items)
	if nodeCount == 0 {
		Failf("Failed to find any nodes")
	}
	Logf("Found %d nodes.", len(nodes.Items))

	// Filter out unhealthy nodes.
	// Previous tests may have caused failures on some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	filterNodes(nodes, func(node api.Node) bool {
		return isNodeReadySetAsExpected(&node, true)
	})
	if len(nodes.Items) < 2 {
		Failf("Less than two nodes were found Ready.")
	}
	Logf("Found %d healthy nodes.", len(nodes.Items))

	// Create a unique root name for the resources in this test to permit
	// parallel executions of this test.
	// Use a unique namespace for the resources created in this test.
	ns := f.Namespace.Name
	name := "synthlogger"
	// Form a unique name to taint log lines to be collected.
	// Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
	taintName := strings.Replace(ns+name, "-", "_", -1)

	// podNames records the names of the synthetic logging pods that are created in the
	// loop below.
	var podNames []string
	// countTo is the number of log lines emitted (and checked) for each synthetic logging pod.
	const countTo = 100
	// Instantiate a synthetic logger pod on each node.
	for i, node := range nodes.Items {
		podName := fmt.Sprintf("%s-%d", name, i)
		_, err := f.Client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "synth-logger",
						Image: "qingyuan/ubuntu:14.04",
						// notice: the subshell syntax is escaped with `$$`
						Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$$(($i+1)); done", countTo, i, taintName, podName)},
					},
				},
				NodeName:      node.Name,
				RestartPolicy: api.RestartPolicyNever,
			},
		})
		Expect(err).NotTo(HaveOccurred())
		podNames = append(podNames, podName)
	}

	// Cleanup the pods when we are done.
	defer func() {
		for _, pod := range podNames {
			if err = f.Client.Pods(ns).Delete(pod, nil); err != nil {
				Logf("Failed to delete pod %s: %v", pod, err)
			}
		}
	}()

	// Wait for the synthetic logging pods to finish.
	By("Waiting for the pods to succeed.")
	for _, pod := range podNames {
		err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
		Expect(err).NotTo(HaveOccurred())
	}

	// Wait a bit for the log information to make it into Elasticsearch.
	time.Sleep(30 * time.Second)

	// Make several attempts to observe the logs ingested into Elasticsearch.
	By("Checking all the log lines were ingested into Elasticsearch")
	missing := 0
	expected := nodeCount * countTo
	for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(10 * time.Second) {

		// Debugging code to report the status of the elasticsearch logging endpoints.
		esPods, err := f.Client.Pods(api.NamespaceDefault).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
		if err != nil {
			Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
			continue
		} else {
			for i, pod := range esPods.Items {
				Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase,
					pod.Status.Conditions)
			}
		}

		// Ask Elasticsearch to return all the log lines that were tagged with the underscore
		// version of the name. Ask for twice as many log lines as we expect to check for
		// duplication bugs.
		body, err = f.Client.Get().
			Namespace(api.NamespaceDefault).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			Suffix("_search").
			Param("q", fmt.Sprintf("log:%s", taintName)).
			Param("size", strconv.Itoa(2*expected)).
			DoRaw()
		if err != nil {
			Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err)
			continue
		}

		response, err := bodyToJSON(body)
		if err != nil {
			Logf("After %v failed to unmarshal response: %v", time.Since(start), err)
			Logf("Body: %s", string(body))
			continue
		}
		hits, ok := response["hits"].(map[string]interface{})
		if !ok {
			Failf("response[hits] not of the expected type: %T", response["hits"])
		}
		totalF, ok := hits["total"].(float64)
		if !ok {
			Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"])
			continue
		}
		total := int(totalF)
		if total < expected {
			Logf("After %v expecting to find %d log lines but saw only %d", time.Since(start), expected, total)
			continue
		}
		h, ok := hits["hits"].([]interface{})
		if !ok {
			Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"])
			continue
		}
		// Initialize data-structure for observing counts.
		observed := make([][]int, nodeCount)
		for i := range observed {
			observed[i] = make([]int, countTo)
		}
		// Iterate over the hits and populate the observed array.
		for _, e := range h {
			l, ok := e.(map[string]interface{})
			if !ok {
				Failf("element of hit not of expected type: %T", e)
			}
			source, ok := l["_source"].(map[string]interface{})
			if !ok {
				Failf("_source not of the expected type: %T", l["_source"])
			}
			msg, ok := source["log"].(string)
			if !ok {
				Failf("log not of the expected type: %T", source["log"])
			}
			words := strings.Split(msg, " ")
			if len(words) < 4 {
				Failf("Malformed log line: %s", msg)
			}
			n, err := strconv.ParseUint(words[0], 10, 0)
			if err != nil {
				Failf("Expecting numer of node as first field of %s", msg)
			}
			if int(n) >= nodeCount {
				Failf("Node index out of range: %d", n)
			}
			index, err := strconv.ParseUint(words[2], 10, 0)
			if err != nil {
				Failf("Expecting number as third field of %s", msg)
			}
			if index >= countTo {
				Failf("Index value out of range: %d", index)
			}
			// Record the observation of a log line from node n at the given index.
			observed[n][index]++
		}
		// Make sure we correctly observed the expected log lines from each node.
		missing = 0
		for n := range observed {
			for i, c := range observed[n] {
				if c == 0 {
					missing++
				}
				if c < 0 || c > 1 {
					Failf("Got incorrect count for node %d index %d: %d", n, i, c)
				}
			}
		}
		if missing != 0 {
			Logf("After %v still missing %d log lines", time.Since(start), missing)
			continue
		}
		Logf("After %s found all %d log lines", time.Since(start), expected)
		return
	}
	Failf("Failed to find all %d log lines", expected)
}
Example #21
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
	ns := api.NamespaceDefault
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container qingyuan/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := qingctl.ReaperFor("ReplicationController", c)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(ns, controller.Name, 0, nil); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		err = waitForPodRunning(c, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}