// WaitForClusterSizeFunc waits until the cluster size satisfies the given size function.
func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node api.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
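A minimal usage sketch for the helper above (the caller name, threshold, and timeout are hypothetical, not part of the original):

// Hypothetical caller: wait up to 10 minutes for at least 3 schedulable, ready nodes.
func waitForAtLeastThreeNodes(c *client.Client) error {
	return WaitForClusterSizeFunc(c, func(size int) bool { return size >= 3 }, 10*time.Minute)
}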
Example No. 2
func GetNodes(kubeClient *kclient.Client) ([]kapi.Node, error) {
	nodeList, err := kubeClient.Nodes().List(kapi.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("Listing nodes in the cluster failed. Error: %s", err)
	}
	return nodeList.Items, nil
}
Example No. 3
func (this *NodeHandler) List(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
	nodes := c.Nodes()
	return nodes.List(api.ListOptions{
		LabelSelector: label,
		FieldSelector: field,
	})
}
Example No. 4
// Update updates an existing node API object, looked up by the given hostname.
// The given slave attribute labels and annotations are merged into the labels
// and annotations of the node found in the API.
func Update(
	client *client.Client,
	hostname string,
	slaveAttrLabels,
	annotations map[string]string,
) (n *api.Node, err error) {
	for i := 0; i < clientRetryCount; i++ {
		n, err = client.Nodes().Get(hostname)
		if err != nil {
			return nil, fmt.Errorf("error getting node %q: %v", hostname, err)
		}
		if n == nil {
			return nil, fmt.Errorf("no node instance returned for %q", hostname)
		}

		// update labels derived from Mesos slave attributes, keep all other labels
		n.Labels = mergeMaps(
			filterMap(n.Labels, IsNotSlaveAttributeLabel),
			slaveAttrLabels,
		)
		n.Annotations = mergeMaps(n.Annotations, annotations)

		n, err = client.Nodes().Update(n)
		if err == nil && !errors.IsConflict(err) {
			return n, nil
		}

		log.Infof("retry %d/%d: error updating node %v err %v", i, clientRetryCount, n, err)
		time.Sleep(time.Duration(i) * clientRetryInterval)
	}

	return nil, err
}
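mergeMaps and filterMap are not shown above; a rough sketch of what such helpers could look like, with signatures inferred from the call sites rather than taken from the original source:

// Assumed helpers; the real implementations in the Mesos integration may differ.
func mergeMaps(ms ...map[string]string) map[string]string {
	merged := map[string]string{}
	for _, m := range ms {
		for k, v := range m {
			merged[k] = v
		}
	}
	return merged
}

func filterMap(m map[string]string, predicate func(k, v string) bool) map[string]string {
	filtered := map[string]string{}
	for k, v := range m {
		if predicate(k, v) {
			filtered[k] = v
		}
	}
	return filtered
}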
Example No. 5
func updateNodes(kubeClient *kube.Client, cPort int) (hostIPtoNodeMap map[string]kubeAPI.Node, nodeIPs []string) {

	hostIPtoNodeMap = make(map[string]kubeAPI.Node, 2)
	nodeIPs = make([]string, 0, 2)
	nodeList, apiErr := kubeClient.Nodes().List(kubeLabels.Everything(), kubeFields.Everything())
	if apiErr != nil {
		glog.Errorf("Failed to list kubernetes nodes. Error: %v\n", apiErr)
	} else {
		for _, node := range nodeList.Items {
			var hostIP string
			for _, nodeAddress := range node.Status.Addresses {
				switch nodeAddress.Type {
				case kubeAPI.NodeInternalIP:
					hostIP = nodeAddress.Address
					// Note: this break only exits the switch, not the loop over
					// addresses, so a later NodeLegacyHostIP entry can still
					// overwrite hostIP.
					break
				case kubeAPI.NodeLegacyHostIP:
					hostIP = nodeAddress.Address
				}
			}
			if hostIP != "" {
				hostIP = fmt.Sprintf("http://%v:%v", hostIP, cPort)
				nodeIPs = append(nodeIPs, hostIP)
				hostIPtoNodeMap[hostIP] = node
			}
		}
	}

	return hostIPtoNodeMap, nodeIPs
}
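A hypothetical call site (the port value follows the cAdvisor default mentioned in a later example; it is not part of this snippet):

// Hypothetical usage: refresh the node map and log the endpoint list.
hostIPtoNodeMap, nodeEndpoints := updateNodes(kubeClient, 4194)
for _, endpoint := range nodeEndpoints {
	glog.Infof("will query %s (node %s)", endpoint, hostIPtoNodeMap[endpoint].Name)
}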
Example No. 6
func addIngressInfraLabel(c *k8sclient.Client, ns string) string {
	nodeClient := c.Nodes()
	nodes, err := nodeClient.List(api.ListOptions{})
	if err != nil {
		util.Errorf("\nUnable to find any nodes: %s\n", err)
	}
	changed := false
	hasExistingExposeIPLabel, externalNodeName := hasExistingLabel(nodes, externalIPLabel)
	if externalNodeName != "" {
		return externalNodeName
	}
	if !hasExistingExposeIPLabel && len(nodes.Items) > 0 {
		for _, node := range nodes.Items {
			if !node.Spec.Unschedulable {
				changed = addLabelIfNotExist(&node.ObjectMeta, externalIPLabel, "true")
				if changed {
					_, err = nodeClient.Update(&node)
					if err != nil {
						printError("Failed to label node with ", err)
					}
					return node.Name
				}
			}
		}
	}
	if !changed && !hasExistingExposeIPLabel {
		util.Warnf("Unable to add label for ingress controller to run on a specific node, please add manually: kubectl label node [your node name] %s=true", externalIPLabel)
	}
	return ""
}
Example No. 7
// updateNodeLabels updates the labels of the nodes given by nodeNames.
// If a label to add already exists, it is overwritten; if a label to remove
// does not exist, it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove map[string]string) {
	const maxRetries = 5
	for nodeName := range nodeNames {
		var node *api.Node
		var err error
		for i := 0; i < maxRetries; i++ {
			node, err = c.Nodes().Get(nodeName)
			if err != nil {
				framework.Logf("Error getting node %s: %v", nodeName, err)
				continue
			}
			if toAdd != nil {
				for k, v := range toAdd {
					node.ObjectMeta.Labels[k] = v
				}
			}
			if toRemove != nil {
				for k := range toRemove {
					delete(node.ObjectMeta.Labels, k)
				}
			}
			_, err = c.Nodes().Update(node)
			if err != nil {
				framework.Logf("Error updating node %s: %v", nodeName, err)
			} else {
				break
			}
		}
		Expect(err).NotTo(HaveOccurred())
	}
}
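A hedged usage sketch (the node names and label key are made up for illustration):

// Hypothetical e2e usage: label two nodes for the test, then clean the label up afterwards.
nodeNames := sets.NewString("node-1", "node-2")
updateNodeLabels(c, nodeNames, map[string]string{"example.com/dedicated": "e2e"}, nil)
defer updateNodeLabels(c, nodeNames, nil, map[string]string{"example.com/dedicated": ""})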
Example No. 8
func addTaint(client *kube_client.Client, node *kube_api.Node, value string) error {
	taints, err := kube_api.GetTaintsFromNodeAnnotations(node.Annotations)
	if err != nil {
		return err
	}

	taint := kube_api.Taint{
		Key:    criticalAddonsOnlyTaintKey,
		Value:  value,
		Effect: kube_api.TaintEffectNoSchedule,
	}
	taints = append(taints, taint)

	taintsJson, err := json.Marshal(taints)
	if err != nil {
		return err
	}

	if node.Annotations == nil {
		node.Annotations = make(map[string]string)
	}
	node.Annotations[kube_api.TaintsAnnotationKey] = string(taintsJson)
	_, err = client.Nodes().Update(node)
	if err != nil {
		return err
	}
	return nil
}
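A hypothetical call site, assuming the node has just been fetched with the same client:

// Hypothetical usage: reserve a freshly fetched node for critical add-ons only.
node, err := client.Nodes().Get("node-1")
if err != nil {
	return err
}
if err := addTaint(client, node, "true"); err != nil {
	return err
}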
Example No. 9
// getClusterNodesIP returns the IP address of each node in the kubernetes cluster
func getClusterNodesIP(kubeClient *unversioned.Client, nodeSelector string) (clusterNodes []string) {
	listOpts := api.ListOptions{}

	if nodeSelector != "" {
		label, err := labels.Parse(nodeSelector)
		if err != nil {
			glog.Fatalf("'%v' is not a valid selector: %v", nodeSelector, err)
		}
		listOpts.LabelSelector = label
	}

	nodes, err := kubeClient.Nodes().List(listOpts)
	if err != nil {
		glog.Fatalf("Error getting running nodes: %v", err)
	}

	for _, nodo := range nodes.Items {
		nodeIP, err := node.GetNodeHostIP(&nodo)
		if err == nil {
			clusterNodes = append(clusterNodes, nodeIP.String())
		}
	}
	sort.Strings(clusterNodes)

	return
}
Example No. 10
func clearNodeLabels(c *client.Client) error {
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for _, node := range nodeList.Items {
		if len(node.Labels) != 0 {
			node.Labels = map[string]string{}
			var newNode *api.Node
			err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
				newNode, err = nodeClient.Update(&node)
				if err == nil {
					return true, err
				}
				if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
					Logf("failed to update node due to resource version conflict")
					return false, nil
				}
				return false, err
			})
			if err != nil {
				return err
			} else if len(newNode.Labels) != 0 {
				return fmt.Errorf("Could not make node labels nil.")
			}
		}
	}
	return nil
}
Example No. 11
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	// It should be OK to list unschedulable Nodes here.
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(api.ListOptions{})
	framework.ExpectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here we access the '/stats/' REST endpoint on the kubelet, which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
Example No. 12
func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
	registeredMaster := false
	masterName := ""
	nodeList, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) < 1 {
		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
	}
	for _, node := range nodeList.Items {
		if system.IsMasterNode(&node) {
			registeredMaster = true
			masterName = node.Name
			break
		}
	}
	if !registeredMaster {
		scheduler = false
		controllers = false
		glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler and ControllerManager is disabled.")
	}

	return &MetricsGrabber{
		client:                    c,
		grabFromApiServer:         apiServer,
		grabFromControllerManager: controllers,
		grabFromKubelets:          kubelets,
		grabFromScheduler:         scheduler,
		masterName:                masterName,
		registeredMaster:          registeredMaster,
	}, nil
}
Example No. 13
func externalMetrics(kubeClient *kube.Client, metrics *Metrics) error {
	nodeList, err := kubeClient.Nodes().List(kube_api.ListOptions{})
	if err != nil {
		return fmt.Errorf("externalMetrics: unable to retrieve node list from k8s")
	}
	metrics.Nodes = int64(len(nodeList.Items))
	return nil
}
Example No. 14
func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions) (*containerResourceGatherer, error) {
	g := containerResourceGatherer{
		client:               c,
		stopCh:               make(chan struct{}),
		containerIDToNameMap: make(map[string]string),
		containerIDs:         make([]string, 0),
		options:              options,
	}

	if options.inKubemark {
		g.workerWg.Add(1)
		g.workers = append(g.workers, resourceGatherWorker{
			inKubemark: true,
			stopCh:     g.stopCh,
			wg:         &g.workerWg,
			finished:   false,
		})
	} else {
		pods, err := c.Pods("kube-system").List(api.ListOptions{})
		if err != nil {
			Logf("Error while listing Pods: %v", err)
			return nil, err
		}
		for _, pod := range pods.Items {
			for _, container := range pod.Status.ContainerStatuses {
				containerID := strings.TrimPrefix(container.ContainerID, "docker:/")
				g.containerIDToNameMap[containerID] = pod.Name + "/" + container.Name
				g.containerIDs = append(g.containerIDs, containerID)
			}
		}
		nodeList, err := c.Nodes().List(api.ListOptions{})
		if err != nil {
			Logf("Error while listing Nodes: %v", err)
			return nil, err
		}

		for _, node := range nodeList.Items {
			if !options.masterOnly || system.IsMasterNode(&node) {
				g.workerWg.Add(1)
				g.workers = append(g.workers, resourceGatherWorker{
					c:                    c,
					nodeName:             node.Name,
					wg:                   &g.workerWg,
					containerIDToNameMap: g.containerIDToNameMap,
					containerIDs:         g.containerIDs,
					stopCh:               g.stopCh,
					finished:             false,
					inKubemark:           false,
				})
				if options.masterOnly {
					break
				}
			}
		}
	}
	return &g, nil
}
Example No. 15
func pickNode(c *client.Client) (string, error) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no nodes exist, can't test node proxy")
	}
	return nodes.Items[0].Name, nil
}
Example No. 16
func pickNode(c *client.Client) (string, error) {
	nodes, err := c.Nodes().List(unversioned.ListOptions{})
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no nodes exist, can't test node proxy")
	}
	return nodes.Items[0].Name, nil
}
Example No. 17
// CheckNodesReady waits up to nt for the expected number of nodes (expect),
// as seen by client c, to be ready. It returns an error if that doesn't happen
// in time, along with the names of the nodes it did find.
func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) {
	// First, keep getting all of the nodes until we get the number we expect.
	var nodeList *api.NodeList
	var errLast error
	start := time.Now()
	found := wait.Poll(Poll, nt, func() (bool, error) {
		// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
		// knows about all of the nodes. Thus, we retry the list nodes call
		// until we get the expected number of nodes.
		nodeList, errLast = c.Nodes().List(api.ListOptions{
			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
		if errLast != nil {
			return false, nil
		}
		if len(nodeList.Items) != expect {
			errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
				expect, len(nodeList.Items), time.Since(start))
			Logf("%v", errLast)
			return false, nil
		}
		return true, nil
	}) == nil
	nodeNames := make([]string, len(nodeList.Items))
	for i, n := range nodeList.Items {
		nodeNames[i] = n.ObjectMeta.Name
	}
	if !found {
		return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
			expect, nt, errLast)
	}
	Logf("Successfully found %d nodes", expect)

	// Next, ensure in parallel that all the nodes are ready. We subtract the
	// time we spent waiting above.
	timeout := nt - time.Since(start)
	result := make(chan bool, len(nodeList.Items))
	for _, n := range nodeNames {
		n := n
		go func() { result <- WaitForNodeToBeReady(c, n, timeout) }()
	}
	failed := false
	for range nodeList.Items {
		if !<-result {
			failed = true
		}
	}
	if failed {
		return nodeNames, fmt.Errorf("at least one node failed to be ready")
	}
	return nodeNames, nil
}
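A sketch of how the helper above might be called after a restart (the surrounding variables are hypothetical):

// Hypothetical caller: after restarting nodes, expect the original count to come back ready.
nodeNames, err := CheckNodesReady(c, 10*time.Minute, originalNodeCount)
if err != nil {
	Logf("nodes found while waiting: %v", nodeNames)
	return err
}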
Example No. 18
func getAllNodesInCluster(c *client.Client) ([]string, error) {
	nodeList, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	result := []string{}
	for _, node := range nodeList.Items {
		result = append(result, node.Name)
	}
	return result, nil
}
Example No. 19
func getAllNodesInCluster(c *client.Client) ([]string, error) {
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}
	result := []string{}
	for _, node := range nodeList.Items {
		result = append(result, node.Name)
	}
	return result, nil
}
Example No. 20
func isMini(c *k8sclient.Client, ns string) bool {
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		util.Errorf("\nUnable to find any nodes: %s\n", err)
	}
	if len(nodes.Items) == 1 {
		node := nodes.Items[0]
		return node.Name == minikubeNodeName || node.Name == minishiftNodeName || node.Name == boot2docker
	}
	return false
}
Example No. 21
func getAllNodesInCluster(c *client.Client) ([]string, error) {
	// It should be OK to list unschedulable Nodes here.
	nodeList, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	result := []string{}
	for _, node := range nodeList.Items {
		result = append(result, node.Name)
	}
	return result, nil
}
Example No. 22
func getNodePublicIps(c *client.Client) ([]string, error) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}

	ips := collectAddresses(nodes, api.NodeExternalIP)
	if len(ips) == 0 {
		ips = collectAddresses(nodes, api.NodeLegacyHostIP)
	}
	return ips, nil
}
Example No. 23
func getNodePublicIps(c *client.Client) ([]string, error) {
	nodes, err := c.Nodes().List(unversioned.ListOptions{})
	if err != nil {
		return nil, err
	}

	ips := collectAddresses(nodes, api.NodeExternalIP)
	if len(ips) == 0 {
		ips = collectAddresses(nodes, api.NodeLegacyHostIP)
	}
	return ips, nil
}
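collectAddresses is not shown in either variant; a minimal sketch, assuming it simply gathers every address of the requested type from the nodes' status:

// Assumed helper for the two variants above; not necessarily the original implementation.
func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
	ips := []string{}
	for _, node := range nodes.Items {
		for _, address := range node.Status.Addresses {
			if address.Type == addressType {
				ips = append(ips, address.Address)
			}
		}
	}
	return ips
}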
Example No. 24
func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Client) {
	// NOTE: This test cannot run in parallel, because it is creating and deleting
	// non-namespaced objects (Nodes).
	defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})

	goodCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: unversioned.Time{Time: time.Now()},
	}
	node := &api.Node{
		Spec: api.NodeSpec{Unschedulable: false},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []api.NodeCondition{goodCondition},
		},
	}

	for ii := 0; ii < 5; ii++ {
		node.Name = fmt.Sprintf("machine%d", ii+1)
		if _, err := restClient.Nodes().Create(node); err != nil {
			t.Fatalf("Failed to create nodes: %v", err)
		}
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
		},
	}

	myPod, err := restClient.Pods(ns.Name).Create(pod)
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name))
	if err != nil {
		t.Fatalf("Failed to schedule pod: %v", err)
	}

	if myPod, err := restClient.Pods(ns.Name).Get(myPod.Name); err != nil {
		t.Fatalf("Failed to get pod: %v", err)
	} else if myPod.Spec.NodeName != "machine3" {
		t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)
	}
	t.Logf("Scheduled pod using extenders")
}
Example No. 25
// getZoneNames finds the names of all zones in which this cluster has nodes.
func getZoneNames(c *client.Client) ([]string, error) {
	zoneNames := sets.NewString()
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	for _, node := range nodes.Items {
		zoneName, err := getZoneNameForNode(node)
		Expect(err).NotTo(HaveOccurred())
		zoneNames.Insert(zoneName)
	}
	return zoneNames.List(), nil
}
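getZoneNameForNode is not included above; a hedged sketch that reads the well-known failure-domain zone label (an assumption about how the helper works, not the original code):

// Assumed helper: derive the zone from the standard failure-domain label.
func getZoneNameForNode(node api.Node) (string, error) {
	const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
	if zone, found := node.Labels[zoneLabel]; found {
		return zone, nil
	}
	return "", fmt.Errorf("node %s is missing zone label %q", node.Name, zoneLabel)
}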
Example No. 26
func clearDaemonSetNodeLabels(c *client.Client) error {
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(unversioned.ListOptions{})
	if err != nil {
		return err
	}
	for _, node := range nodeList.Items {
		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
		if err != nil {
			return err
		}
	}
	return nil
}
Example No. 27
func clearDaemonSetNodeLabels(c *client.Client) error {
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for _, node := range nodeList.Items {
		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
		if err != nil {
			return err
		}
	}
	return nil
}
Example No. 28
func getNodeIP(client *kubeclient.Client, hostname string) net.IP {
	var nodeIP net.IP
	node, err := client.Nodes().Get(hostname)
	if err != nil {
		glog.Warningf("Failed to retrieve node info: %v", err)
		return nil
	}
	nodeIP, err = nodeutil.GetNodeHostIP(node)
	if err != nil {
		glog.Warningf("Failed to retrieve node IP: %v", err)
		return nil
	}
	return nodeIP
}
Example No. 29
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	// It should be OK to list unschedulable Nodes here.
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(api.ListOptions{})
	framework.ExpectNoError(err)
	var errors []error

	// returns maxRetries, sleepDuration
	readConfig := func() (int, time.Duration) {
		// Read in configuration settings, reasonable defaults.
		retry := framework.TestContext.Cadvisor.MaxRetries
		if framework.TestContext.Cadvisor.MaxRetries == 0 {
			retry = 6
			framework.Logf("Overriding default retry value of zero to %d", retry)
		}

		sleepDurationMS := framework.TestContext.Cadvisor.SleepDurationMS
		if sleepDurationMS == 0 {
			sleepDurationMS = 10000
			framework.Logf("Overriding default milliseconds value of zero to %d", sleepDurationMS)
		}

		return retry, time.Duration(sleepDurationMS) * time.Millisecond
	}

	maxRetries, sleepDuration := readConfig()
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here we access the '/stats/' REST endpoint on the kubelet, which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if maxRetries--; maxRetries <= 0 {
			break
		}
		framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
Example No. 30
func DoTestPodScheduling(t *testing.T, restClient *client.Client) {
	goodCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: unversioned.Time{Time: time.Now()},
	}
	node := &api.Node{
		Spec: api.NodeSpec{Unschedulable: false},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []api.NodeCondition{goodCondition},
		},
	}

	for ii := 0; ii < 5; ii++ {
		node.Name = fmt.Sprintf("machine%d", ii+1)
		if _, err := restClient.Nodes().Create(node); err != nil {
			t.Fatalf("Failed to create nodes: %v", err)
		}
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "container", Image: "kubernetes/pause:go"}},
		},
	}

	myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name))
	if err != nil {
		t.Fatalf("Failed to schedule pod: %v", err)
	}

	if myPod, err := restClient.Pods(api.NamespaceDefault).Get(myPod.Name); err != nil {
		t.Fatalf("Failed to get pod: %v", err)
	} else if myPod.Spec.NodeName != "machine3" {
		t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)
	}
	t.Logf("Scheduled pod using extenders")
}