Code Example #1
File: taint_test.go Project: ncdc/kubernetes
func generateNodeAndTaintedNode(oldTaints []api.Taint, newTaints []api.Taint) (*api.Node, *api.Node) {
	var taintedNode *api.Node

	oldTaintsData, _ := json.Marshal(oldTaints)
	// Create a node.
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{
			Name:              "node-name",
			CreationTimestamp: unversioned.Time{Time: time.Now()},
			Annotations: map[string]string{
				api.TaintsAnnotationKey: string(oldTaintsData),
			},
		},
		Spec: api.NodeSpec{
			ExternalID: "node-name",
		},
		Status: api.NodeStatus{},
	}
	clone, _ := conversion.NewCloner().DeepCopy(node)

	newTaintsData, _ := json.Marshal(newTaints)
	// A copy of the same node, but tainted.
	taintedNode = clone.(*api.Node)
	taintedNode.Annotations = map[string]string{
		api.TaintsAnnotationKey: string(newTaintsData),
	}

	return node, taintedNode
}
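A minimal sketch of how this helper might be exercised in a table-driven taint test. The test name and the "dedicated" taint key below are illustrative only and not taken from the file above; the snippet assumes the same package and imports as the helper.

func TestGenerateNodeAndTaintedNode(t *testing.T) {
	newTaints := []api.Taint{{Key: "dedicated", Value: "infra", Effect: api.TaintEffectNoSchedule}}

	node, taintedNode := generateNodeAndTaintedNode(nil, newTaints)

	// The clone keeps the node's identity but carries the new taints annotation.
	if node.Name != taintedNode.Name {
		t.Errorf("expected clone to keep the original node name, got %q", taintedNode.Name)
	}
	if node.Annotations[api.TaintsAnnotationKey] == taintedNode.Annotations[api.TaintsAnnotationKey] {
		t.Errorf("expected tainted copy to carry a different taints annotation")
	}
}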
Code Example #2
File: node.go Project: fwalker/dashboard
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations
func Create(
	client *client.Client,
	hostName string,
	slaveAttrLabels,
	annotations map[string]string,
) (*api.Node, error) {
	n := api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: hostName,
		},
		Spec: api.NodeSpec{
			ExternalID: hostName,
		},
		Status: api.NodeStatus{
			Phase: api.NodePending,
		},
	}

	n.Labels = mergeMaps(
		map[string]string{"kubernetes.io/hostname": hostName},
		slaveAttrLabels,
	)

	n.Annotations = annotations

	// try to create
	return client.Nodes().Create(&n)
}
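A hedged usage sketch, assuming the same package as Create above. registerSlaveNode is a hypothetical wrapper, and the label and annotation values are illustrative, not taken from the project.

func registerSlaveNode(kubeClient *client.Client, hostName string) (*api.Node, error) {
	// The node comes back in the Pending phase with the
	// kubernetes.io/hostname label merged in by Create.
	return Create(
		kubeClient,
		hostName,
		map[string]string{"rack": "r1"},              // slave attribute labels (illustrative)
		map[string]string{"example.io/managed": "1"}, // annotations (illustrative)
	)
}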
Code Example #3
File: node_probe_test.go Project: vmturbo/kubernetes
func newFakeNodeBuilder(uid types.UID, name string) *FakeNodeBuilder {
	node := new(api.Node)
	node.UID = uid
	node.Name = name

	return &FakeNodeBuilder{
		node: node,
	}
}
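A brief usage sketch. Since FakeNodeBuilder's other methods are not shown, only the node field set above is relied on, and the UID value is illustrative; the snippet assumes the same test package.

	fb := newFakeNodeBuilder(types.UID("uid-1234"), "node-1")
	// The builder starts from a node carrying only the identity fields.
	if fb.node.UID != "uid-1234" || fb.node.Name != "node-1" {
		t.Fatalf("unexpected fake node: %+v", fb.node)
	}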
Code Example #4
// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *api.Node) bool {
	var (
		existingCMAAnnotation    = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
		newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
	)

	if newCMAAnnotation == existingCMAAnnotation {
		return false
	}

	// If the just-constructed node and the existing node do
	// not have the same value, update the existing node with
	// the correct value of the annotation.
	if !newSet {
		glog.Info("Controller attach-detach setting changed to false; updating existing Node")
		delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation)
	} else {
		glog.Info("Controller attach-detach setting changed to true; updating existing Node")
		if existingNode.Annotations == nil {
			existingNode.Annotations = make(map[string]string)
		}
		existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation
	}

	return true
}
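A sketch of the calling pattern implied by the comment above: when the function reports true, the caller pushes the reconciled annotations back to the API server. The kubeClient field and the Core().Nodes().Update call are assumptions about the surrounding kubelet code, not shown in the snippet.

	if kl.reconcileCMADAnnotationWithExistingNode(node, existingNode) {
		if _, err := kl.kubeClient.Core().Nodes().Update(existingNode); err != nil {
			glog.Errorf("error updating node %q: %v", existingNode.Name, err)
		}
	}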
Code Example #5
File: rescheduler.go Project: danielibrahim/contrib
func addTaint(client *kube_client.Client, node *kube_api.Node, value string) error {
	taints, err := kube_api.GetTaintsFromNodeAnnotations(node.Annotations)
	if err != nil {
		return err
	}

	taint := kube_api.Taint{
		Key:    criticalAddonsOnlyTaintKey,
		Value:  value,
		Effect: kube_api.TaintEffectNoSchedule,
	}
	taints = append(taints, taint)

	taintsJson, err := json.Marshal(taints)
	if err != nil {
		return err
	}

	if node.Annotations == nil {
		node.Annotations = make(map[string]string)
	}
	node.Annotations[kube_api.TaintsAnnotationKey] = string(taintsJson)
	_, err = client.Nodes().Update(node)
	if err != nil {
		return err
	}
	return nil
}
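A hedged usage sketch: tainting a node so that only critical add-ons are scheduled onto it. The client and the *kube_api.Node are assumed to have been obtained elsewhere, and the "true" value is illustrative.

	if err := addTaint(kubeClient, node, "true"); err != nil {
		glog.Errorf("failed to taint node %s: %v", node.Name, err)
	}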
Code Example #6
File: node.go Project: Clarifai/kubernetes
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations
func Create(
	client unversionedcore.NodesGetter,
	hostName string,
	slaveAttrLabels,
	annotations map[string]string,
) (*api.Node, error) {
	n := api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: hostName,
		},
		Spec: api.NodeSpec{
			ExternalID: hostName,
		},
		Status: api.NodeStatus{
			Phase: api.NodePending,
			// WORKAROUND(sttts): make sure that the Ready condition is the
			// first one. The kube-ui v3 depends on this assumption.
			// TODO(sttts): remove this workaround when kube-ui v4 is used or we
			//              merge this with the statusupdate in the controller manager.
			Conditions: []api.NodeCondition{
				{
					Type:              api.NodeReady,
					Status:            api.ConditionTrue,
					Reason:            slaveReadyReason,
					Message:           slaveReadyMessage,
					LastHeartbeatTime: unversioned.Now(),
				},
			},
		},
	}

	n.Labels = mergeMaps(
		map[string]string{"kubernetes.io/hostname": hostName},
		slaveAttrLabels,
	)

	n.Annotations = annotations

	// try to create
	return client.Nodes().Create(&n)
}
Code Example #7
File: main.go Project: aclisp/kubecon
func updateNode(c *gin.Context) {
	nodename := c.Param("no")
	nodejson := c.PostForm("json")

	var node api.Node
	err := json.Unmarshal([]byte(nodejson), &node)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}

	r, err := kubeclient.Get().Nodes().Get(node.Name)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}
	// Carry the latest resource version into the update to avoid a stale-object conflict.
	node.ResourceVersion = r.ResourceVersion
	_, err = kubeclient.Get().Nodes().Update(&node)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}

	c.Redirect(http.StatusMovedPermanently, fmt.Sprintf("/nodes/%s/edit", nodename))
}
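A hedged sketch of how the handler could be wired into a gin router. The route path is inferred from c.Param("no") and the redirect target above; the project's actual routing may differ.

	r := gin.Default()
	r.POST("/nodes/:no", updateNode)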
Code Example #8
File: summary_test.go Project: XbinZh/kubernetes
func TestBuildSummary(t *testing.T) {
	node := api.Node{}
	node.Name = "FooNode"
	nodeConfig := cm.NodeConfig{
		RuntimeCgroupsName: "/docker-daemon",
		SystemCgroupsName:  "/system",
		KubeletCgroupsName: "/kubelet",
	}
	const (
		namespace0 = "test0"
		namespace2 = "test2"
	)
	const (
		seedRoot           = 0
		seedRuntime        = 100
		seedKubelet        = 200
		seedMisc           = 300
		seedPod0Infra      = 1000
		seedPod0Container0 = 2000
		seedPod0Container1 = 2001
		seedPod1Infra      = 3000
		seedPod1Container  = 4000
		seedPod2Infra      = 5000
		seedPod2Container  = 6000
	)
	const (
		pName0 = "pod0"
		pName1 = "pod1"
		pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
	)
	const (
		cName00 = "c0"
		cName01 = "c1"
		cName10 = "c0" // ensure cName10 conflicts with cName02, but is in a different pod
		cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
	)

	prf0 := kubestats.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
	prf1 := kubestats.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
	prf2 := kubestats.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
	infos := map[string]v2.ContainerInfo{
		"/":              summaryTestContainerInfo(seedRoot, "", "", ""),
		"/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
		"/kubelet":       summaryTestContainerInfo(seedKubelet, "", "", ""),
		"/system":        summaryTestContainerInfo(seedMisc, "", "", ""),
		// Pod0 - Namespace0
		"/pod0-i":  summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
		"/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
		"/pod0-c1": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
		// Pod1 - Namespace0
		"/pod1-i":  summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
		"/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
		// Pod2 - Namespace2
		"/pod2-i":  summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
		"/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
	}

	rootfs := v2.FsInfo{}
	imagefs := v2.FsInfo{}

	// memory limit overrides for each container (used to test available bytes if a memory limit is known)
	memoryLimitOverrides := map[string]uint64{
		"/":        uint64(1 << 30),
		"/pod2-c0": uint64(1 << 15),
	}
	for name, memoryLimitOverride := range memoryLimitOverrides {
		info, found := infos[name]
		if !found {
			t.Errorf("No container defined with name %v", name)
		}
		info.Spec.Memory.Limit = memoryLimitOverride
		infos[name] = info
	}

	sb := &summaryBuilder{
		newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, container.ImageStats{}, infos}
	summary, err := sb.build()

	assert.NoError(t, err)
	nodeStats := summary.Node
	assert.Equal(t, "FooNode", nodeStats.NodeName)
	assert.EqualValues(t, testTime(creationTime, seedRoot).Unix(), nodeStats.StartTime.Time.Unix())
	checkCPUStats(t, "Node", seedRoot, nodeStats.CPU)
	checkMemoryStats(t, "Node", seedRoot, infos["/"], nodeStats.Memory)
	checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)

	systemSeeds := map[string]int{
		kubestats.SystemContainerRuntime: seedRuntime,
		kubestats.SystemContainerKubelet: seedKubelet,
		kubestats.SystemContainerMisc:    seedMisc,
	}
	systemContainerToNodeCgroup := map[string]string{
		kubestats.SystemContainerRuntime: nodeConfig.RuntimeCgroupsName,
		kubestats.SystemContainerKubelet: nodeConfig.KubeletCgroupsName,
		kubestats.SystemContainerMisc:    nodeConfig.SystemCgroupsName,
	}
	for _, sys := range nodeStats.SystemContainers {
		name := sys.Name
		info := infos[systemContainerToNodeCgroup[name]]
		seed, found := systemSeeds[name]
		if !found {
			t.Errorf("Unknown SystemContainer: %q", name)
		}
		assert.EqualValues(t, testTime(creationTime, seed).Unix(), sys.StartTime.Time.Unix(), name+".StartTime")
		checkCPUStats(t, name, seed, sys.CPU)
		checkMemoryStats(t, name, seed, info, sys.Memory)
	}

	assert.Equal(t, 3, len(summary.Pods))
	indexPods := make(map[kubestats.PodReference]kubestats.PodStats, len(summary.Pods))
	for _, pod := range summary.Pods {
		indexPods[pod.PodRef] = pod
	}

	// Validate Pod0 Results
	ps, found := indexPods[prf0]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 2)
	indexCon := make(map[string]kubestats.ContainerStats, len(ps.Containers))
	for _, con := range ps.Containers {
		indexCon[con.Name] = con
	}
	con := indexCon[cName00]
	assert.EqualValues(t, testTime(creationTime, seedPod0Container0).Unix(), con.StartTime.Time.Unix())
	checkCPUStats(t, "Pod0Container0", seedPod0Container0, con.CPU)
	checkMemoryStats(t, "Pod0Conainer0", seedPod0Container0, infos["/pod0-c0"], con.Memory)

	con = indexCon[cName01]
	assert.EqualValues(t, testTime(creationTime, seedPod0Container1).Unix(), con.StartTime.Time.Unix())
	checkCPUStats(t, "Pod0Container1", seedPod0Container1, con.CPU)
	checkMemoryStats(t, "Pod0Container1", seedPod0Container1, infos["/pod0-c1"], con.Memory)

	assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix())
	checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network)

	// Validate Pod1 Results
	ps, found = indexPods[prf1]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 1)
	con = ps.Containers[0]
	assert.Equal(t, cName10, con.Name)
	checkCPUStats(t, "Pod1Container0", seedPod1Container, con.CPU)
	checkMemoryStats(t, "Pod1Container0", seedPod1Container, infos["/pod1-c0"], con.Memory)
	checkNetworkStats(t, "Pod1", seedPod1Infra, ps.Network)

	// Validate Pod2 Results
	ps, found = indexPods[prf2]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 1)
	con = ps.Containers[0]
	assert.Equal(t, cName20, con.Name)
	checkCPUStats(t, "Pod2Container0", seedPod2Container, con.CPU)
	checkMemoryStats(t, "Pod2Container0", seedPod2Container, infos["/pod2-c0"], con.Memory)
	checkNetworkStats(t, "Pod2", seedPod2Infra, ps.Network)
}
Code Example #9
File: summary_test.go Project: pault84/kubernetes
func TestBuildSummary(t *testing.T) {
	node := api.Node{}
	node.Name = "FooNode"
	nodeConfig := cm.NodeConfig{
		RuntimeCgroupsName: "/docker-daemon",
		SystemCgroupsName:  "/system",
		KubeletCgroupsName: "/kubelet",
	}
	const (
		namespace0 = "test0"
		namespace2 = "test2"
	)
	const (
		seedRoot           = 0
		seedRuntime        = 100
		seedKubelet        = 200
		seedMisc           = 300
		seedPod0Infra      = 1000
		seedPod0Container0 = 2000
		seedPod0Container1 = 2001
		seedPod1Infra      = 3000
		seedPod1Container  = 4000
		seedPod2Infra      = 5000
		seedPod2Container  = 6000
	)
	const (
		pName0 = "pod0"
		pName1 = "pod1"
		pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
	)
	const (
		cName00 = "c0"
		cName01 = "c1"
		cName10 = "c0" // ensure cName10 conflicts with cName02, but is in a different pod
		cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
	)

	prf0 := PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
	prf1 := PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
	prf2 := PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
	infos := map[string]v2.ContainerInfo{
		"/":              summaryTestContainerInfo(seedRoot, "", "", ""),
		"/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
		"/kubelet":       summaryTestContainerInfo(seedKubelet, "", "", ""),
		"/system":        summaryTestContainerInfo(seedMisc, "", "", ""),
		// Pod0 - Namespace0
		"/pod0-i":  summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
		"/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
		"/pod0-c2": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
		// Pod1 - Namespace0
		"/pod1-i":  summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
		"/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
		// Pod2 - Namespace2
		"/pod2-i":  summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
		"/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
	}

	rootfs := v2.FsInfo{}
	imagefs := v2.FsInfo{}

	sb := &summaryBuilder{
		newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, infos}
	summary, err := sb.build()

	assert.NoError(t, err)
	nodeStats := summary.Node
	assert.Equal(t, "FooNode", nodeStats.NodeName)
	checkCPUStats(t, "Node", seedRoot, nodeStats.CPU)
	checkMemoryStats(t, "Node", seedRoot, nodeStats.Memory)
	checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)

	systemSeeds := map[string]int{
		SystemContainerRuntime: seedRuntime,
		SystemContainerKubelet: seedKubelet,
		SystemContainerMisc:    seedMisc,
	}
	for _, sys := range nodeStats.SystemContainers {
		name := sys.Name
		seed, found := systemSeeds[name]
		if !found {
			t.Errorf("Unknown SystemContainer: %q", name)
		}
		checkCPUStats(t, name, seed, sys.CPU)
		checkMemoryStats(t, name, seed, sys.Memory)
	}

	assert.Equal(t, 3, len(summary.Pods))
	indexPods := make(map[PodReference]PodStats, len(summary.Pods))
	for _, pod := range summary.Pods {
		indexPods[pod.PodRef] = pod
	}

	// Validate Pod0 Results
	ps, found := indexPods[prf0]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 2)
	indexCon := make(map[string]ContainerStats, len(ps.Containers))
	for _, con := range ps.Containers {
		indexCon[con.Name] = con
	}
	con := indexCon[cName00]
	checkCPUStats(t, "container", seedPod0Container0, con.CPU)
	checkMemoryStats(t, "container", seedPod0Container0, con.Memory)

	con = indexCon[cName01]
	checkCPUStats(t, "container", seedPod0Container1, con.CPU)
	checkMemoryStats(t, "container", seedPod0Container1, con.Memory)

	checkNetworkStats(t, "Pod", seedPod0Infra, ps.Network)

	// Validate Pod1 Results
	ps, found = indexPods[prf1]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 1)
	con = ps.Containers[0]
	assert.Equal(t, cName10, con.Name)
	checkCPUStats(t, "container", seedPod1Container, con.CPU)
	checkMemoryStats(t, "container", seedPod1Container, con.Memory)
	checkNetworkStats(t, "Pod", seedPod1Infra, ps.Network)

	// Validate Pod2 Results
	ps, found = indexPods[prf2]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 1)
	con = ps.Containers[0]
	assert.Equal(t, cName20, con.Name)
	checkCPUStats(t, "container", seedPod2Container, con.CPU)
	checkMemoryStats(t, "container", seedPod2Container, con.Memory)
	checkNetworkStats(t, "Pod", seedPod2Infra, ps.Network)
}
Code Example #10
func testDaemonSets(f *Framework) {
	ns := f.Namespace.Name
	c := f.Client
	simpleDSName := "simple-daemon-set"
	image := "gcr.io/google_containers/serve_hostname:1.1"
	label := map[string]string{"name": simpleDSName}
	retryTimeout := 1 * time.Minute
	retryInterval := 5 * time.Second

	Logf("Creating simple daemon set %s", simpleDSName)
	_, err := c.DaemonSets(ns).Create(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: simpleDSName,
		},
		Spec: experimental.DaemonSetSpec{
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: label,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  simpleDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
		dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
		Expect(err).NotTo(HaveOccurred())
		_, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
	}()

	By("Check that daemon pods launch on every node of the cluster.")
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

	By("Stop a daemon pod, check that the daemon pod is revived.")
	podClient := c.Pods(ns)

	podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(podList.Items)).To(BeNumerically(">", 0))
	pod := podList.Items[0]
	err = podClient.Delete(pod.Name, nil)
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")

	complexDSName := "complex-daemon-set"
	complexLabel := map[string]string{"name": complexDSName}
	nodeSelector := map[string]string{"color": "blue"}
	Logf("Creating daemon with a node selector %s", complexDSName)
	_, err = c.DaemonSets(ns).Create(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: complexDSName,
		},
		Spec: experimental.DaemonSetSpec{
			Selector: complexLabel,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: complexLabel,
				},
				Spec: api.PodSpec{
					NodeSelector: nodeSelector,
					Containers: []api.Container{
						{
							Name:  complexDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	By("Initially, daemon pods should not be running on any nodes.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

	By("Change label of node, check that daemon pod is launched.")
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	nodeList.Items[0].Labels = nodeSelector
	var newNode *api.Node
	err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		newNode, err = nodeClient.Update(&nodeList.Items[0])
		if err == nil {
			return true, err
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(len(newNode.Labels)).To(Equal(1))
	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

	By("remove the node selector and wait for daemons to be unscheduled")
	newNode, err = nodeClient.Get(newNode.Name)
	Expect(err).NotTo(HaveOccurred(), "error getting node")
	newNode.Labels = map[string]string{}
	err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		newNode, err = nodeClient.Update(newNode)
		if err == nil {
			return true, err
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

	By("We should now be able to delete the daemon set.")
	Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred())
}