Example #1
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, GetNewRCTemplate(deployment)) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for _, value := range oldRCs {
		value := value // copy the range variable; otherwise every appended pointer aliases the same loop variable
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
Example #2
File: stop.go Project: qinguoan/vulcan
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
	rcs, err := c.List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error getting replication controllers: %v", err)
	}
	var matchingRCs []api.ReplicationController
	rcLabels := labels.Set(rc.Spec.Selector)
	for _, controller := range rcs.Items {
		newRCLabels := labels.Set(controller.Spec.Selector)
		if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
			matchingRCs = append(matchingRCs, controller)
		}
	}
	return matchingRCs, nil
}
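The overlap check above is deliberately symmetric: a broader selector can match a narrower controller's label set even when the reverse match fails. Below is a minimal, self-contained sketch of that semantics; it assumes the pre-vendored k8s.io/kubernetes/pkg/labels package of this era, and the helper name and label values are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

// overlaps mirrors the symmetric check in getOverlappingControllers: two selectors
// overlap if either one, converted to a selector, matches the other as a label set.
func overlaps(a, b map[string]string) bool {
	setA, setB := labels.Set(a), labels.Set(b)
	return labels.SelectorFromSet(setA).Matches(setB) || labels.SelectorFromSet(setB).Matches(setA)
}

func main() {
	broad := map[string]string{"app": "web"}
	narrow := map[string]string{"app": "web", "tier": "frontend"}
	fmt.Println(overlaps(broad, narrow)) // true: the broad selector matches the narrow set
	fmt.Println(overlaps(narrow, broad)) // also true: only one direction needs to match
}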
Example #3
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)
	foo2Meta.Labels = testLabels

	existing := assignFn([]runtime.Object{foo1, foo2})
	filtered := []runtime.Object{existing[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	listObj, err := t.storage.(rest.Lister).List(ctx, selector, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
Example #4
func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
	if len(pod.Spec.NodeSelector) == 0 {
		return true
	}
	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
	return selector.Matches(labels.Set(node.Labels))
}
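PodMatchesNodeLabels is the simplest illustration of the pattern used throughout these examples: SelectorFromSet builds a selector requiring every key/value pair in the set, and extra labels on the target are ignored. A minimal sketch of those matching semantics follows; the import path assumes the pre-vendored k8s.io/kubernetes/pkg/labels package and the label values are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// The selector requires disktype=ssd.
	selector := labels.SelectorFromSet(labels.Set{"disktype": "ssd"})

	node := labels.Set{"disktype": "ssd", "zone": "us-east-1"}
	fmt.Println(selector.Matches(node)) // true: all required pairs are present

	other := labels.Set{"zone": "us-east-1"}
	fmt.Println(selector.Matches(other)) // false: "disktype" is missing
}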
Example #5
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
Example #6
func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, timestamp time.Time, err error) {
	podList, err := h.client.Pods(namespace).
		List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())

	if err != nil {
		return nil, nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	sum := resource.MustParse("0")
	missing := false
	for _, pod := range podList.Items {
		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[resourceName]
			if containerRequest.Amount != nil {
				sum.Add(containerRequest)
			} else {
				missing = true
			}
		}
	}
	if missing || sum.Cmp(resource.MustParse("0")) == 0 {
		return nil, nil, time.Time{}, fmt.Errorf("some pods do not have request for %s", resourceName)
	}
	glog.Infof("Sum of %s requested: %v", resourceName, sum)
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items))))
	request = &avg
	consumption, timestamp, err = h.getForPods(resourceName, namespace, podNames)
	if err != nil {
		return nil, nil, time.Time{}, err
	}
	return consumption, request, timestamp, nil
}
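The average request above is computed in integer milli-units via MilliValue, avoiding lossy floating-point division on resource.Quantity values. A small sketch of that arithmetic, with illustrative values, assuming the k8s.io/kubernetes/pkg/api/resource package of this era:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	sum := resource.MustParse("500m") // e.g. two containers requesting 250m CPU each
	pods := int64(2)
	// Divide in milli-units, then re-parse, as GetResourceConsumptionAndRequest does.
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/pods))
	fmt.Println(avg.String()) // 250m
}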
Example #7
func getPodsForRCs(c client.Interface, replicationControllers []*api.ReplicationController) ([]api.Pod, error) {
	allPods := []api.Pod{}
	for _, rc := range replicationControllers {
		podList, err := c.Pods(rc.ObjectMeta.Namespace).List(labels.SelectorFromSet(rc.Spec.Selector), fields.Everything())
		if err != nil {
			return allPods, fmt.Errorf("error listing pods: %v", err)
		}
		allPods = append(allPods, podList.Items...)
	}
	return allPods, nil
}
Example #8
File: load.go Project: qinguoan/vulcan
// Scales an RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling always happens based on the original size, not the current size.
func scaleRC(wg *sync.WaitGroup, config *RCConfig) {
	defer GinkgoRecover()
	defer wg.Done()
	resizingTime := 3 * time.Minute

	sleepUpTo(resizingTime)
	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
	expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
		fmt.Sprintf("scaling rc %s for the first time", config.Name))
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
	_, err := config.Client.Pods(config.Namespace).List(selector, fields.Everything())
	expectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
}
Example #9
func (h *haproxyControllerTester) start(namespace string) (err error) {

	// Create a replication controller with the given configuration.
	rc := rcFromManifest(h.cfg)
	rc.Namespace = namespace
	rc.Spec.Template.Labels["rcName"] = rc.Name

	// Add the --namespace arg.
	// TODO: Remove this when we have proper namespace support.
	for i, c := range rc.Spec.Template.Spec.Containers {
		rc.Spec.Template.Spec.Containers[i].Args = append(
			c.Args, fmt.Sprintf("--namespace=%v", namespace))
		Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
	}

	rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return
	}
	// Record the RC's name and namespace before waiting on its pods.
	h.rcName = rc.Name
	h.rcNamespace = rc.Namespace
	if err = waitForRCPodsRunning(h.client, namespace, h.rcName); err != nil {
		return
	}

	// Find the pods of the rc we just created.
	labelSelector := labels.SelectorFromSet(
		labels.Set(map[string]string{"rcName": h.rcName}))
	pods, err := h.client.Pods(h.rcNamespace).List(
		labelSelector, fields.Everything())
	if err != nil {
		return err
	}

	// Find the external addresses of the nodes the pods are running on.
	for _, p := range pods.Items {
		wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) {
			address, err := getHostExternalAddress(h.client, &p)
			if err != nil {
				Logf("%v", err)
				return false, nil
			}
			h.address = append(h.address, address)
			return true, nil
		})
	}
	if len(h.address) == 0 {
		return fmt.Errorf("No external ips found for loadbalancer %v", h.getName())
	}
	return nil
}
Example #10
File: run.go Project: qinguoan/vulcan
func handleAttachReplicationController(c *client.Client, controller *api.ReplicationController, opts *AttachOptions) error {
	var pods *api.PodList
	for pods == nil || len(pods.Items) == 0 {
		var err error
		if pods, err = c.Pods(controller.Namespace).List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything()); err != nil {
			return err
		}
		if len(pods.Items) == 0 {
			fmt.Fprint(opts.Out, "Waiting for pod to be scheduled\n")
			time.Sleep(2 * time.Second)
		}
	}
	pod := &pods.Items[0]
	return handleAttachPod(c, pod, opts)
}
Example #11
File: job.go Project: qinguoan/vulcan
// Wait for all pods to become Running.  Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
	return wait.Poll(poll, jobTimeout, func() (bool, error) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return false, err
		}
		count := 0
		for _, p := range pods.Items {
			if p.Status.Phase == api.PodRunning {
				count++
			}
		}
		return count == parallelism, nil
	})
}
Example #12
// kubectlLogLBController logs kubectl debug output for the L7 controller pod.
func kubectlLogLBController(c *client.Client, ns string) {
	selector := labels.SelectorFromSet(labels.Set(controllerLabels))
	podList, err := c.Pods(api.NamespaceAll).List(selector, fields.Everything())
	if err != nil {
		Logf("Cannot log L7 controller output, error listing pods %v", err)
		return
	}
	if len(podList.Items) == 0 {
		Logf("Loadbalancer controller pod not found")
		return
	}
	for _, p := range podList.Items {
		Logf("\nLast 100 log lines of %v\n", p.Name)
		l, _ := runKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "-c", lbContainerName, "--tail=100")
		Logf(l)
	}
}
Example #13
// ClusterLevelLoggingWithKibana is an end to end test that checks to see if Kibana is alive.
func ClusterLevelLoggingWithKibana(f *Framework) {
	// graceTime is how long to keep retrying requests for status information.
	const graceTime = 2 * time.Minute

	// Check for the existence of the Kibana service.
	By("Checking the Kibana service exists.")
	s := f.Client.Services(api.NamespaceSystem)
	// Make a few attempts to connect. This makes the test robust against
	// being run as the first e2e test just after the e2e cluster has been created.
	var err error
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		if _, err = s.Get("kibana-logging"); err == nil {
			break
		}
		Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start))
	}
	Expect(err).NotTo(HaveOccurred())

	// Wait for the Kibana pod(s) to enter the running state.
	By("Checking to make sure the Kibana pods are running")
	label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
	pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Checking to make sure we get a response from the Kibana UI.")
	err = nil
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		// Query against the root URL for Kibana.
		_, err = f.Client.Get().
			Namespace(api.NamespaceSystem).
			Prefix("proxy").
			Resource("services").
			Name("kibana-logging").
			DoRaw()
		if err != nil {
			Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
			continue
		}
		break
	}
	Expect(err).NotTo(HaveOccurred())
}
Example #14
func forEachReplicationController(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) {
	var rcs *api.ReplicationControllerList
	var err error
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		rcs, err = c.ReplicationControllers(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})))
		Expect(err).NotTo(HaveOccurred())
		if len(rcs.Items) > 0 {
			break
		}
	}

	if rcs == nil || len(rcs.Items) == 0 {
		Failf("No replication controllers found")
	}

	for _, rc := range rcs.Items {
		fn(rc)
	}
}
Example #15
func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) {
	var pods *api.PodList
	var err error
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		pods, err = c.Pods(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) > 0 {
			break
		}
	}
	if pods == nil || len(pods.Items) == 0 {
		Failf("No pods found")
	}
	for _, pod := range pods.Items {
		err = waitForPodRunningInNamespace(c, pod.Name, ns)
		Expect(err).NotTo(HaveOccurred())
		fn(pod)
	}
}
Example #16
func NodeSelectorPredicate(t *T, offer *mesos.Offer) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}

	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}
	return true
}
Example #17
								}
							}
							if startTime != unversioned.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := controllerFramework.NewInformer(
					&cache.ListWatch{
						ListFunc: func() (runtime.Object, error) {
							return c.Pods(ns).List(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything())
						},
						WatchFunc: func(rv string) (watch.Interface, error) {
							return c.Pods(ns).Watch(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything(), rv)
						},
					},
					&api.Pod{},
					0,
					controllerFramework.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*api.Pod)
Example #18
		uiServiceName = "kube-ui"
		uiAppName     = uiServiceName
		uiNamespace   = api.NamespaceSystem

		serverStartTimeout = 1 * time.Minute
	)

	f := NewFramework("kube-ui")

	It("should check that the kube-ui instance is alive [Conformance]", func() {
		By("Checking the kube-ui service exists.")
		err := waitForService(f.Client, uiNamespace, uiServiceName, true, poll, serviceStartTimeout)
		Expect(err).NotTo(HaveOccurred())

		By("Checking to make sure the kube-ui pods are running")
		selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
		err = waitForPodsWithLabelRunning(f.Client, uiNamespace, selector)
		Expect(err).NotTo(HaveOccurred())

		By("Checking to make sure we get a response from the kube-ui.")
		err = wait.Poll(poll, serverStartTimeout, func() (bool, error) {
			var status int
			// Query against the proxy URL for the kube-ui service.
			err := f.Client.Get().
				Namespace(uiNamespace).
				Prefix("proxy").
				Resource("services").
				Name(uiServiceName).
				Timeout(singleCallTimeout).
				Do().
				StatusCode(&status).
Example #19
func TestFiltering(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	prefixedKey := etcdtest.AddPrefix("pods")
	fakeClient.ExpectNotFoundGet(prefixedKey)
	cacher := newTestCacher(fakeClient)
	fakeClient.WaitForWatchCompletion()

	podFoo := makeTestPod("foo")
	podFoo.ObjectMeta.Labels = map[string]string{"filter": "foo"}
	podFooFiltered := makeTestPod("foo")

	testCases := []struct {
		object       *api.Pod
		etcdResponse *etcd.Response
		filtered     bool
		event        watch.EventType
	}{
		{
			object: podFoo,
			etcdResponse: &etcd.Response{
				Action: "create",
				Node: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
					CreatedIndex:  1,
					ModifiedIndex: 1,
				},
			},
			filtered: true,
			event:    watch.Added,
		},
		{
			object: podFooFiltered,
			etcdResponse: &etcd.Response{
				Action: "set",
				Node: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFooFiltered)),
					CreatedIndex:  1,
					ModifiedIndex: 2,
				},
				PrevNode: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
					CreatedIndex:  1,
					ModifiedIndex: 1,
				},
			},
			filtered: true,
			// Deleted, because the new object doesn't match filter.
			event: watch.Deleted,
		},
		{
			object: podFoo,
			etcdResponse: &etcd.Response{
				Action: "set",
				Node: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
					CreatedIndex:  1,
					ModifiedIndex: 3,
				},
				PrevNode: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFooFiltered)),
					CreatedIndex:  1,
					ModifiedIndex: 2,
				},
			},
			filtered: true,
			// Added, because the previous object didn't match filter.
			event: watch.Added,
		},
		{
			object: podFoo,
			etcdResponse: &etcd.Response{
				Action: "set",
				Node: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
					CreatedIndex:  1,
					ModifiedIndex: 4,
				},
				PrevNode: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
					CreatedIndex:  1,
					ModifiedIndex: 3,
				},
			},
			filtered: true,
			event:    watch.Modified,
		},
		{
			object: podFoo,
			etcdResponse: &etcd.Response{
				Action: "delete",
				Node: &etcd.Node{
					CreatedIndex:  1,
					ModifiedIndex: 5,
				},
				PrevNode: &etcd.Node{
					Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
					CreatedIndex:  1,
					ModifiedIndex: 4,
				},
			},
			filtered: true,
			event:    watch.Deleted,
		},
	}

	// Set up Watch for object "podFoo" with label filter set.
	selector := labels.SelectorFromSet(labels.Set{"filter": "foo"})
	filter := func(obj runtime.Object) bool {
		metadata, err := meta.Accessor(obj)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false
		}
		return selector.Matches(labels.Set(metadata.Labels()))
	}
	watcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", 1, filter)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	for _, test := range testCases {
		fakeClient.WatchResponse <- test.etcdResponse
		if test.filtered {
			event := <-watcher.ResultChan()
			if e, a := test.event, event.Type; e != a {
				t.Errorf("%v %v", e, a)
			}
			// unset fields that are set by the infrastructure
			obj := event.Object.(*api.Pod)
			obj.ObjectMeta.ResourceVersion = ""
			obj.ObjectMeta.CreationTimestamp = unversioned.Time{}
			if e, a := test.object, obj; !reflect.DeepEqual(e, a) {
				t.Errorf("expected: %#v, got: %#v", e, a)
			}
		}
	}

	close(fakeClient.WatchResponse)
}
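The expected event types in the table above follow one rule: an update is reported as Added, Modified, or Deleted depending on whether the previous and the current object pass the label filter. The sketch below is assumed logic mirroring the test's comments, not the cacher's actual implementation; the function name is illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/watch"
)

// eventTypeFor rewrites an update's event type for a filtered watch. The bool
// reports whether the event is visible to the watcher at all.
func eventTypeFor(oldMatches, newMatches bool, t watch.EventType) (watch.EventType, bool) {
	switch {
	case oldMatches && newMatches:
		return t, true // a matching object was modified: pass through unchanged
	case oldMatches && !newMatches:
		return watch.Deleted, true // the new object left the filtered view
	case !oldMatches && newMatches:
		return watch.Added, true // the previous object was outside the filtered view
	default:
		return t, false // invisible before and after: drop the event
	}
}

func main() {
	fmt.Println(eventTypeFor(true, false, watch.Modified)) // DELETED true
	fmt.Println(eventTypeFor(false, true, watch.Modified)) // ADDED true
}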
Example #20
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":                          kubectl.BasicReplicationController{},
		"run-pod/v1":                      kubectl.BasicPod{},
		"service/v1":                      kubectl.ServiceGeneratorV1{},
		"service/v2":                      kubectl.ServiceGeneratorV2{},
		"horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			switch kind {
			case "ReplicationController", "Service", "Pod":
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind string) error {
			switch kind {
			// TODO: support autoscale for deployments
			case "ReplicationController":
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %s", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				var pods *api.PodList
				for pods == nil || len(pods.Items) == 0 {
					var err error
					if pods, err = client.Pods(t.Namespace).List(labels.SelectorFromSet(t.Spec.Selector), fields.Everything()); err != nil {
						return nil, err
					}
					if len(pods.Items) == 0 {
						time.Sleep(2 * time.Second)
					}
				}
				pod := &pods.Items[0]
				return pod, nil
			case *api.Pod:
				return t, nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %s: not implemented", kind)
			}
		},
	}
}
Example #21
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service or replication controller. It counts the number
// of pods that run under the same Services or RCs as the pod being scheduled and tries to minimize the number of conflicts, i.e. it pushes the
// scheduler toward a node hosting the smallest number of pods that match the same Service and RC selectors as the current pod.
func (s *NetworkSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
	var maxCount int
	var nsPods []*api.Pod

	selectors := make([]labels.Selector, 0)
	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		for _, service := range services {
			selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector))
		}
	}
	controllers, err := s.controllerLister.GetPodControllers(pod)
	if err == nil {
		for _, controller := range controllers {
			selectors = append(selectors, labels.SelectorFromSet(controller.Spec.Selector))
		}
	}

	if len(selectors) > 0 {
		pods, err := podLister.List(labels.Everything())
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsPods = append(nsPods, nsPod)
			}
		}
	}

	nodes, err := nodeLister.List()
	if err != nil {
		return nil, err
	}

	subnetMap := map[string]string{}

	for _, node := range nodes.Items {
		for _, addrs := range node.Status.Addresses {
			if addrs.Type == api.NodeLegacyHostIP {
				// For now wandoujia uses a /24 mask; this hard-coded assumption is a known gotcha.
				_, subnet, _ := net.ParseCIDR(fmt.Sprintf("%s/24", addrs.Address))
				subnetMap[node.Name] = subnet.String()
			}
		}
	}

	counts := map[string]int{}
	if len(nsPods) > 0 {
		for _, pod := range nsPods {
			matches := false
			for _, selector := range selectors {
				if selector.Matches(labels.Set(pod.ObjectMeta.Labels)) {
					matches = true
					break
				}
			}
			if matches {
				subnet := subnetMap[pod.Spec.NodeName]
				counts[subnet]++
				// Compute the maximum number of pods hosted on any node
				if counts[subnet] > maxCount {
					maxCount = counts[subnet]
				}
			}
		}
	}

	result := []algorithm.HostPriority{}
	// score is on a scale of 0-10, with 0 being the lowest priority and 10 the highest
	for _, node := range nodes.Items {
		// initializing to the default/max node score of 10
		fScore := float32(10)
		subnet := subnetMap[node.Name]
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[subnet]) / float32(maxCount))
		}
		glog.Infof("maxcount: %v, subNetCounts: %v, fscore: %v", maxCount, counts[subnet], fScore)
		result = append(result, algorithm.HostPriority{Host: node.Name, Score: int(fScore)})
		glog.V(10).Infof(
			"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, node.Name, int(fScore),
		)
	}
	return result, nil
}
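The scoring loop above reduces to one formula: the subnet already hosting the most matching pods scores 0, and an empty subnet scores 10. A self-contained sketch of that formula; the function name and values are illustrative.

package main

import "fmt"

// spreadScore mirrors the fScore computation in CalculateSpreadPriority.
func spreadScore(maxCount, count int) int {
	if maxCount == 0 {
		return 10 // no matching pods anywhere: every subnet keeps the default max score
	}
	return int(10 * float32(maxCount-count) / float32(maxCount))
}

func main() {
	fmt.Println(spreadScore(4, 4)) // 0: the most crowded subnet
	fmt.Println(spreadScore(4, 1)) // 7: a lightly loaded subnet
	fmt.Println(spreadScore(4, 0)) // 10: an empty subnet
}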
Example #22
func TestHelperList(t *testing.T) {
	tests := []struct {
		Err     bool
		Req     func(*http.Request) bool
		Resp    *http.Response
		HttpErr error
	}{
		{
			HttpErr: errors.New("failure"),
			Err:     true,
		},
		{
			Resp: &http.Response{
				StatusCode: http.StatusNotFound,
				Body:       objBody(&unversioned.Status{Status: unversioned.StatusFailure}),
			},
			Err: true,
		},
		{
			Resp: &http.Response{
				StatusCode: http.StatusOK,
				Body: objBody(&api.PodList{
					Items: []api.Pod{{
						ObjectMeta: api.ObjectMeta{Name: "foo"},
					},
					},
				}),
			},
			Req: func(req *http.Request) bool {
				if req.Method != "GET" {
					t.Errorf("unexpected method: %#v", req)
					return false
				}
				if req.URL.Path != "/namespaces/bar" {
					t.Errorf("url doesn't contain name: %#v", req.URL)
					return false
				}
				if req.URL.Query().Get(unversioned.LabelSelectorQueryParam(testapi.Default.Version())) != labels.SelectorFromSet(labels.Set{"foo": "baz"}).String() {
					t.Errorf("url doesn't contain query parameters: %#v", req.URL)
					return false
				}
				return true
			},
		},
	}
	for _, test := range tests {
		client := &fake.RESTClient{
			Codec: testapi.Default.Codec(),
			Resp:  test.Resp,
			Err:   test.HttpErr,
		}
		modifier := &Helper{
			RESTClient:      client,
			NamespaceScoped: true,
		}
		obj, err := modifier.List("bar", testapi.Default.Version(), labels.SelectorFromSet(labels.Set{"foo": "baz"}))
		if (err != nil) != test.Err {
			t.Errorf("unexpected error: %t %v", test.Err, err)
		}
		if err != nil {
			continue
		}
		if obj.(*api.PodList).Items[0].Name != "foo" {
			t.Errorf("unexpected object: %#v", obj)
		}
		if test.Req != nil && !test.Req(client.Req) {
			t.Errorf("unexpected request: %#v", client.Req)
		}
	}
}
Example #23
			// 2. when a node joins the cluster, it can host new pods.
			// Factor out the cases into two separate tests.
			It("[replication controller] recreates pods scheduled on the unreachable minion node "+
				"AND allows scheduling of pods on a minion after it rejoins the cluster", func() {

				// Create a replication controller for a service that serves its hostname.
				// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
				name := "my-hostname-net"
				newSVCByName(framework.Client, framework.Namespace.Name, name)
				replicas := testContext.CloudConfig.NumNodes
				newRCByName(framework.Client, framework.Namespace.Name, name, replicas)
				err := verifyPods(framework.Client, framework.Namespace.Name, name, true, replicas)
				Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")

				By("choose a node with at least one pod - we will block some network traffic on this node")
				label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
				pods, err := framework.Client.Pods(framework.Namespace.Name).List(label, fields.Everything()) // list pods after all have been scheduled
				Expect(err).NotTo(HaveOccurred())
				nodeName := pods.Items[0].Spec.NodeName

				node, err := framework.Client.Nodes().Get(nodeName)
				Expect(err).NotTo(HaveOccurred())

				By(fmt.Sprintf("block network traffic from node %s", node.Name))
				performTemporaryNetworkFailure(framework.Client, framework.Namespace.Name, name, replicas, pods.Items[0].Name, node)
				Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
				if !waitForNodeToBeReady(framework.Client, node.Name, resizeNodeReadyTimeout) {
					Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
				}

				// sleep a bit, to allow Watch in NodeController to catch up.
Example #24
File: rc.go Project: qinguoan/vulcan
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

	pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas)
	Expect(err).NotTo(HaveOccurred())

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
Example #25
// CheckServiceAffinity ensures that only the nodes that match the specified labels are considered for scheduling.
// The set of labels to be considered are provided to the struct (ServiceAffinity).
// The pod is checked for the labels and any missing labels are then checked in the node
// that hosts the service pods (peers) for the given pod.
//
// We add an implicit selector requiring some particular value V for label L to a pod, if:
// - L is listed in the ServiceAffinity object that is passed into the function
// - the pod does not have any NodeSelector for L
// - some other pod from the same service is already scheduled onto a node that has value V for label L
func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
	var affinitySelector labels.Selector

	// check if the pod being scheduled has the affinity labels specified in its NodeSelector
	affinityLabels := map[string]string{}
	nodeSelector := labels.Set(pod.Spec.NodeSelector)
	labelsExist := true
	for _, l := range s.labels {
		if nodeSelector.Has(l) {
			affinityLabels[l] = nodeSelector.Get(l)
		} else {
			// the current pod does not specify all the labels, look in the existing service pods
			labelsExist = false
		}
	}

	// skip looking at other pods in the service if the current pod defines all the required affinity labels
	if !labelsExist {
		services, err := s.serviceLister.GetPodServices(pod)
		if err == nil {
			// just use the first service and get the other pods within the service
			// TODO: a separate predicate can be created that tries to handle all services for the pod
			selector := labels.SelectorFromSet(services[0].Spec.Selector)
			servicePods, err := s.podLister.List(selector)
			if err != nil {
				return false, err
			}
			// consider only the pods that belong to the same namespace
			nsServicePods := []*api.Pod{}
			for _, nsPod := range servicePods {
				if nsPod.Namespace == pod.Namespace {
					nsServicePods = append(nsServicePods, nsPod)
				}
			}
			if len(nsServicePods) > 0 {
				// consider any service pod and fetch the node its hosted on
				otherNode, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName)
				if err != nil {
					return false, err
				}
				for _, l := range s.labels {
					// If the pod being scheduled has the label value specified, do not override it
					if _, exists := affinityLabels[l]; exists {
						continue
					}
					if labels.Set(otherNode.Labels).Has(l) {
						affinityLabels[l] = labels.Set(otherNode.Labels).Get(l)
					}
				}
			}
		}
	}

	// if there are no existing pods in the service, consider all nodes
	if len(affinityLabels) == 0 {
		affinitySelector = labels.Everything()
	} else {
		affinitySelector = labels.Set(affinityLabels).AsSelector()
	}

	node, err := s.nodeInfo.GetNodeInfo(nodeID)
	if err != nil {
		return false, err
	}

	// check if the node matches the selector
	return affinitySelector.Matches(labels.Set(node.Labels)), nil
}
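Concretely, suppose ServiceAffinity is configured with the single label "region", the pod's NodeSelector omits it, and a peer service pod already runs on a node labeled region=us-east: the pod then effectively gains the selector region=us-east. A sketch of the resulting match, with illustrative label values, assuming the k8s.io/kubernetes/pkg/labels package of this era:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// Affinity labels collected by CheckServiceAffinity: "region" was missing from
	// the pod's NodeSelector, so its value is borrowed from a peer pod's node.
	affinitySelector := labels.Set{"region": "us-east"}.AsSelector()

	candidate := labels.Set{"region": "us-east", "zone": "a"}
	fmt.Println(affinitySelector.Matches(candidate)) // true: same region

	other := labels.Set{"region": "eu-west"}
	fmt.Println(affinitySelector.Matches(other)) // false: excluded by the implicit selector
}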
Example #26
			image := "nginx"

			By("running the image " + image)
			runKubectlOrDie("run", rcName, "--image="+image, nsFlag)
			By("verifying the rc " + rcName + " was created")
			rc, err := c.ReplicationControllers(ns).Get(rcName)
			if err != nil {
				Failf("Failed getting rc %s: %v", rcName, err)
			}
			containers := rc.Spec.Template.Spec.Containers
			if containers == nil || len(containers) != 1 || containers[0].Image != image {
				Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, image)
			}

			By("verifying the pod controlled by rc " + rcName + " was created")
			label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
			podlist, err := waitForPodsWithLabel(c, ns, label)
			if err != nil {
				Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
			}
			pods := podlist.Items
			if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != image {
				runKubectlOrDie("get", "pods", "-L", "run", nsFlag)
				Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", image, len(pods))
			}
		})

	})

	Describe("Kubectl run pod", func() {
		var nsFlag string
Example #27
File: events.go Project: qinguoan/vulcan
			},
		}

		By("submitting the pod to kubernetes")
		defer func() {
			By("deleting the pod")
			podClient.Delete(pod.Name, nil)
		}()
		if _, err := podClient.Create(pod); err != nil {
			Failf("Failed to create pod: %v", err)
		}

		expectNoError(framework.WaitForPodRunning(pod.Name))

		By("verifying the pod is in kubernetes")
		pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		Expect(len(pods.Items)).To(Equal(1))

		By("retrieving the pod")
		podWithUid, err := podClient.Get(pod.Name)
		if err != nil {
			Failf("Failed to get pod: %v", err)
		}
		fmt.Printf("%+v\n", podWithUid)
		var events *api.EventList
		// Check for scheduler event about the pod.
		By("checking for scheduler event about the pod")
		expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
			events, err := framework.Client.Events(framework.Namespace.Name).List(
				labels.Everything(),
				fields.Set{
Example #28
				runKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
			}

			for _, ns := range namespaces {
				runKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
			}

			// wait for objects
			for _, ns := range namespaces {
				waitForRCPodsRunning(c, ns.Name, backendRcName)
				waitForService(c, ns.Name, backendSvcName, true, poll, serviceStartTimeout)
			}
			// It is not enough that the pods are marked running; the application
			// itself may not have been initialized yet. Just query the application.
			for _, ns := range namespaces {
				label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
				pods, err := c.Pods(ns.Name).List(label, fields.Everything())
				Expect(err).NotTo(HaveOccurred())
				err = podsResponding(c, ns.Name, backendPodName, false, pods)
				Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
				Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)

				err = serviceResponding(c, ns.Name, backendSvcName)
				Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
			}

			// Now another tricky part:
			// It may happen that the service name is not yet in DNS.
			// So if we start our pod, it will fail. We must make sure
			// the name is already resolvable. So let's try to query DNS from
			// the pod we have, until we find our service name.
Example #29
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging.
func ClusterLevelLoggingWithElasticsearch(f *Framework) {
	// graceTime is how long to keep retrying requests for status information.
	const graceTime = 2 * time.Minute
	// ingestionTimeout is how long to keep retrying to wait for all the
	// logs to be ingested.
	const ingestionTimeout = 3 * time.Minute

	// Check for the existence of the Elasticsearch service.
	By("Checking the Elasticsearch service exists.")
	s := f.Client.Services(api.NamespaceSystem)
	// Make a few attempts to connect. This makes the test robust against
	// being run as the first e2e test just after the e2e cluster has been created.
	var err error
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		if _, err = s.Get("elasticsearch-logging"); err == nil {
			break
		}
		Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
	}
	Expect(err).NotTo(HaveOccurred())

	// Wait for the Elasticsearch pods to enter the running state.
	By("Checking to make sure the Elasticsearch pods are running")
	label := labels.SelectorFromSet(labels.Set(map[string]string{esKey: esValue}))
	pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Checking to make sure we are talking to an Elasticsearch service.")
	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
	var statusCode float64
	var esResponse map[string]interface{}
	err = nil
	var body []byte
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		// Query against the root URL for Elasticsearch.
		body, err = f.Client.Get().
			Namespace(api.NamespaceSystem).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			DoRaw()
		if err != nil {
			Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
			continue
		}
		esResponse, err = bodyToJSON(body)
		if err != nil {
			Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
			continue
		}
		statusIntf, ok := esResponse["status"]
		if !ok {
			Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
			continue
		}
		statusCode, ok = statusIntf.(float64)
		if !ok {
			// Assume this is a string returning Failure. Retry.
			Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
			continue
		}
		break
	}
	Expect(err).NotTo(HaveOccurred())
	if int(statusCode) != 200 {
		Failf("Elasticsearch cluster has a bad status: %v", statusCode)
	}
	// Check to see if have a cluster_name field.
	clusterName, ok := esResponse["cluster_name"]
	if !ok {
		Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
	}
	if clusterName != "kubernetes-logging" {
		Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
	}

	// Now assume we really are talking to an Elasticsearch instance.
	// Check the cluster health.
	By("Checking health of Elasticsearch service.")
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		body, err = f.Client.Get().
			Namespace(api.NamespaceSystem).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			Suffix("_cluster/health").
			Param("health", "pretty").
			DoRaw()
		if err == nil {
			break
		}
	}
	Expect(err).NotTo(HaveOccurred())

	health, err := bodyToJSON(body)
	Expect(err).NotTo(HaveOccurred())
	statusIntf, ok := health["status"]
	if !ok {
		Failf("No status field found in cluster health response: %v", health)
	}
	status := statusIntf.(string)
	if status != "green" && status != "yellow" {
		Failf("Cluster health has bad status: %s", status)
	}

	// Obtain a list of nodes so we can place one synthetic logger on each node.
	nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		Failf("Failed to list nodes: %v", err)
	}
	nodeCount := len(nodes.Items)
	if nodeCount == 0 {
		Failf("Failed to find any nodes")
	}
	Logf("Found %d nodes.", len(nodes.Items))

	// Filter out unhealthy nodes.
	// Previous tests may have caused failures of some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	filterNodes(nodes, func(node api.Node) bool {
		return isNodeReadySetAsExpected(&node, true)
	})
	if len(nodes.Items) < 2 {
		Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
	}
	Logf("Found %d healthy nodes.", len(nodes.Items))

	// Create a unique root name for the resources in this test to permit
	// parallel executions of this test.
	// Use a unique namespace for the resources created in this test.
	ns := f.Namespace.Name
	name := "synthlogger"
	// Form a unique name to taint the log lines to be collected.
	// Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
	taintName := strings.Replace(ns+name, "-", "_", -1)

	// podNames records the names of the synthetic logging pods that are created in the
	// loop below.
	var podNames []string
	// countTo is the number of log lines emitted (and checked) for each synthetic logging pod.
	const countTo = 100
	// Instantiate a synthetic logger pod on each node.
	for i, node := range nodes.Items {
		podName := fmt.Sprintf("%s-%d", name, i)
		_, err := f.Client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "synth-logger",
						Image: "gcr.io/google_containers/ubuntu:14.04",
						// notice: the subshell syntax is escaped with `$$`
						Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$$(($i+1)); done", countTo, i, taintName, podName)},
					},
				},
				NodeName:      node.Name,
				RestartPolicy: api.RestartPolicyNever,
			},
		})
		Expect(err).NotTo(HaveOccurred())
		podNames = append(podNames, podName)
	}

	// Cleanup the pods when we are done.
	defer func() {
		for _, pod := range podNames {
			if err = f.Client.Pods(ns).Delete(pod, nil); err != nil {
				Logf("Failed to delete pod %s: %v", pod, err)
			}
		}
	}()

	// Wait for the synthetic logging pods to finish.
	By("Waiting for the pods to succeed.")
	for _, pod := range podNames {
		err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
		Expect(err).NotTo(HaveOccurred())
	}

	// Wait a bit for the log information to make it into Elasticsearch.
	time.Sleep(30 * time.Second)

	// Make several attempts to observe the logs ingested into Elasticsearch.
	By("Checking all the log lines were ingested into Elasticsearch")
	missing := 0
	expected := nodeCount * countTo
	for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(10 * time.Second) {

		// Debugging code to report the status of the elasticsearch logging endpoints.
		esPods, err := f.Client.Pods(api.NamespaceSystem).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
		if err != nil {
			Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
			continue
		} else {
			for i, pod := range esPods.Items {
				Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase,
					pod.Status.Conditions)
			}
		}

		// Ask Elasticsearch to return all the log lines that were tagged with the underscore
		// version of the name. Ask for twice as many log lines as we expect to check for
		// duplication bugs.
		body, err = f.Client.Get().
			Namespace(api.NamespaceSystem).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			Suffix("_search").
			Param("q", fmt.Sprintf("log:%s", taintName)).
			Param("size", strconv.Itoa(2*expected)).
			DoRaw()
		if err != nil {
			Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err)
			continue
		}

		response, err := bodyToJSON(body)
		if err != nil {
			Logf("After %v failed to unmarshal response: %v", time.Since(start), err)
			Logf("Body: %s", string(body))
			continue
		}
		hits, ok := response["hits"].(map[string]interface{})
		if !ok {
			Failf("response[hits] not of the expected type: %T", response["hits"])
		}
		totalF, ok := hits["total"].(float64)
		if !ok {
			Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"])
			continue
		}
		total := int(totalF)
		if total < expected {
			Logf("After %v expecting to find %d log lines but saw only %d", time.Since(start), expected, total)
			continue
		}
		h, ok := hits["hits"].([]interface{})
		if !ok {
			Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"])
			continue
		}
		// Initialize data-structure for observing counts.
		observed := make([][]int, nodeCount)
		for i := range observed {
			observed[i] = make([]int, countTo)
		}
		// Iterate over the hits and populate the observed array.
		for _, e := range h {
			l, ok := e.(map[string]interface{})
			if !ok {
				Failf("element of hit not of expected type: %T", e)
			}
			source, ok := l["_source"].(map[string]interface{})
			if !ok {
				Failf("_source not of the expected type: %T", l["_source"])
			}
			msg, ok := source["log"].(string)
			if !ok {
				Failf("log not of the expected type: %T", source["log"])
			}
			words := strings.Split(msg, " ")
			if len(words) < 4 {
				Failf("Malformed log line: %s", msg)
			}
			n, err := strconv.ParseUint(words[0], 10, 0)
			if err != nil {
				Failf("Expecting numer of node as first field of %s", msg)
			}
			if n < 0 || int(n) >= nodeCount {
				Failf("Node count index out of range: %d", nodeCount)
			}
			index, err := strconv.ParseUint(words[2], 10, 0)
			if err != nil {
				Failf("Expecting number as third field of %s", msg)
			}
			if index < 0 || index >= countTo {
				Failf("Index value out of range: %d", index)
			}
			// Record the observation of a log line from node n at the given index.
			observed[n][index]++
		}
		// Make sure we correctly observed the expected log lines from each node.
		missing = 0
		for n := range observed {
			for i, c := range observed[n] {
				if c == 0 {
					missing++
				}
				if c < 0 || c > 1 {
					Failf("Got incorrect count for node %d index %d: %d", n, i, c)
				}
			}
		}
		if missing != 0 {
			Logf("After %v still missing %d log lines", time.Since(start), missing)
			continue
		}
		Logf("After %s found all %d log lines", time.Since(start), expected)
		return
	}
	Failf("Failed to find all %d log lines", expected)
}
Example #30
func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
	var err error
	// First, update the template label.  This ensures that any newly created pods will have the new label
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		if rc.Spec.Template.Labels == nil {
			rc.Spec.Template.Labels = map[string]string{}
		}
		rc.Spec.Template.Labels[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}

	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
	// TODO: extract the code from the label command and re-use it here.
	podList, err := client.Pods(namespace).List(labels.SelectorFromSet(oldRc.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if pod.Labels == nil {
			pod.Labels = map[string]string{
				deploymentKey: deploymentValue,
			}
		} else {
			pod.Labels[deploymentKey] = deploymentValue
		}
		err = nil
		delay := 3
		for i := 0; i < MaxRetries; i++ {
			_, err = client.Pods(namespace).Update(pod)
			if err != nil {
				fmt.Fprintf(out, "Error updating pod (%v), retrying after %d seconds", err, delay)
				time.Sleep(time.Second * time.Duration(delay))
				delay *= delay
			} else {
				break
			}
		}
		if err != nil {
			return nil, err
		}
	}

	if oldRc.Spec.Selector == nil {
		oldRc.Spec.Selector = map[string]string{}
	}
	// Copy the old selector, so that we can scrub out any orphaned pods
	selectorCopy := map[string]string{}
	for k, v := range oldRc.Spec.Selector {
		selectorCopy[k] = v
	}
	oldRc.Spec.Selector[deploymentKey] = deploymentValue

	// Update the selector of the rc so it manages all the pods we updated above
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		rc.Spec.Selector[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}

	// Clean up any orphaned pods that don't have the new label, this can happen if the rc manager
	// doesn't see the update to its pod template and creates a new pod with the old labels after
	// we've finished re-adopting existing pods to the rc.
	podList, err = client.Pods(namespace).List(labels.SelectorFromSet(selectorCopy), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
			if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil {
				return nil, err
			}
		}
	}

	return oldRc, nil
}