Code example #1
File: node.go  Project: fwalker/dashboard
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations
func Create(
	client *client.Client,
	hostName string,
	slaveAttrLabels,
	annotations map[string]string,
) (*api.Node, error) {
	n := api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: hostName,
		},
		Spec: api.NodeSpec{
			ExternalID: hostName,
		},
		Status: api.NodeStatus{
			Phase: api.NodePending,
		},
	}

	n.Labels = mergeMaps(
		map[string]string{"kubernetes.io/hostname": hostName},
		slaveAttrLabels,
	)

	n.Annotations = annotations

	// try to create
	return client.Nodes().Create(&n)
}
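
Both Create variants call a mergeMaps helper that is not shown in these snippets. A minimal sketch of such a helper, assuming a plain left-to-right merge of string maps where later maps override earlier keys (the actual implementation in the source projects may differ):

func mergeMaps(maps ...map[string]string) map[string]string {
	// Merge any number of string maps into a fresh map; keys from later
	// maps win on conflict. (Sketch only; behavior inferred from the call
	// sites above.)
	merged := map[string]string{}
	for _, m := range maps {
		for k, v := range m {
			merged[k] = v
		}
	}
	return merged
}

With this merge order, a slave attribute label that happens to use the key kubernetes.io/hostname would override the default hostname label set by Create.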
Code example #2
File: node.go  Project: Clarifai/kubernetes
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations
func Create(
	client unversionedcore.NodesGetter,
	hostName string,
	slaveAttrLabels,
	annotations map[string]string,
) (*api.Node, error) {
	n := api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: hostName,
		},
		Spec: api.NodeSpec{
			ExternalID: hostName,
		},
		Status: api.NodeStatus{
			Phase: api.NodePending,
			// WORKAROUND(sttts): make sure that the Ready condition is the
			// first one. The kube-ui v3 depends on this assumption.
			// TODO(sttts): remove this workaround when kube-ui v4 is used or we
			//              merge this with the statusupdate in the controller manager.
			Conditions: []api.NodeCondition{
				{
					Type:              api.NodeReady,
					Status:            api.ConditionTrue,
					Reason:            slaveReadyReason,
					Message:           slaveReadyMessage,
					LastHeartbeatTime: unversioned.Now(),
				},
			},
		},
	}

	n.Labels = mergeMaps(
		map[string]string{"kubernetes.io/hostname": hostName},
		slaveAttrLabels,
	)

	n.Annotations = annotations

	// try to create
	return client.Nodes().Create(&n)
}
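
For orientation, here is a hypothetical caller of the second Create variant. The wrapper name, label values, and annotations below are illustrative assumptions, not part of the original project:

// registerSlaveNode shows one way Create might be invoked. `nodes` is any
// unversionedcore.NodesGetter, e.g. the core group of a configured clientset.
func registerSlaveNode(nodes unversionedcore.NodesGetter, hostName string) (*api.Node, error) {
	// Example values; in the real project the labels come from the Mesos
	// slave's attributes.
	attrLabels := map[string]string{"mesos/rack": "rack-1"}
	annotations := map[string]string{"example.com/managed-by": "mesos"}
	return Create(nodes, hostName, attrLabels, annotations)
}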
Code example #3
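// testDaemonSets verifies end-to-end that a DaemonSet launches a pod on every
// node, revives a deleted daemon pod, and honors node selectors when node
// labels are added and removed.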
func testDaemonSets(f *Framework) {
	ns := f.Namespace.Name
	c := f.Client
	simpleDSName := "simple-daemon-set"
	image := "gcr.io/google_containers/serve_hostname:1.1"
	label := map[string]string{"name": simpleDSName}
	retryTimeout := 1 * time.Minute
	retryInterval := 5 * time.Second

	Logf("Creating simple daemon set %s", simpleDSName)
	_, err := c.DaemonSets(ns).Create(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: simpleDSName,
		},
		Spec: experimental.DaemonSetSpec{
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: label,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  simpleDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
		dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
		Expect(err).NotTo(HaveOccurred())
		_, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
	}()

	By("Check that daemon pods launch on every node of the cluster.")
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

	By("Stop a daemon pod, check that the daemon pod is revived.")
	podClient := c.Pods(ns)

	podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(podList.Items)).To(BeNumerically(">", 0))
	pod := podList.Items[0]
	err = podClient.Delete(pod.Name, nil)
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")

	complexDSName := "complex-daemon-set"
	complexLabel := map[string]string{"name": complexDSName}
	nodeSelector := map[string]string{"color": "blue"}
	Logf("Creating daemon with a node selector %s", complexDSName)
	_, err = c.DaemonSets(ns).Create(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: complexDSName,
		},
		Spec: experimental.DaemonSetSpec{
			Selector: complexLabel,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: complexLabel,
				},
				Spec: api.PodSpec{
					NodeSelector: nodeSelector,
					Containers: []api.Container{
						{
							Name:  complexDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	By("Initially, daemon pods should not be running on any nodes.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

	By("Change label of node, check that daemon pod is launched.")
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	nodeList.Items[0].Labels = nodeSelector
	var newNode *api.Node
	err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		newNode, err = nodeClient.Update(&nodeList.Items[0])
		if err == nil {
			return true, err
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(len(newNode.Labels)).To(Equal(1))
	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

	By("remove the node selector and wait for daemons to be unscheduled")
	newNode, err = nodeClient.Get(newNode.Name)
	Expect(err).NotTo(HaveOccurred(), "error getting node")
	newNode.Labels = map[string]string{}
	err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		newNode, err = nodeClient.Update(newNode)
		if err == nil {
			return true, err
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

	By("We should now be able to delete the daemon set.")
	Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred())
}
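
The test polls helpers such as checkRunningOnAllNodes, checkRunningOnNoNodes, and checkDaemonPodOnNodes that are defined elsewhere in the e2e suite. A minimal sketch of the shape they presumably take, as wait.ConditionFunc closures comparing the nodes that host daemon pods against an expected node set (the real helpers may differ):

// checkDaemonPodOnNodes returns a poll condition that succeeds once daemon
// pods matching `selector` run on exactly the nodes in `nodeNames`.
// (Sketch; behavior inferred from how the test above uses it.)
func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames []string) wait.ConditionFunc {
	return func() (bool, error) {
		podList, err := f.Client.Pods(f.Namespace.Name).List(labels.Set(selector).AsSelector(), fields.Everything())
		if err != nil {
			return false, nil // tolerate transient API errors and keep polling
		}
		nodesWithPod := map[string]int{}
		for _, pod := range podList.Items {
			nodesWithPod[pod.Spec.NodeName]++
		}
		// Every expected node must host exactly one daemon pod, and no
		// daemon pods may run anywhere else.
		for _, name := range nodeNames {
			if nodesWithPod[name] != 1 {
				return false, nil
			}
		}
		return len(nodesWithPod) == len(nodeNames), nil
	}
}

// checkRunningOnNoNodes is the empty-node-set special case.
func checkRunningOnNoNodes(f *Framework, selector map[string]string) wait.ConditionFunc {
	return checkDaemonPodOnNodes(f, selector, []string{})
}

checkRunningOnAllNodes would presumably list every node name first (as the test itself does with nodeClient.List) and then delegate to checkDaemonPodOnNodes in the same way.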