Example #1
func waitForNodeReady() {
	const (
		// nodeReadyTimeout is the time to wait for the node to become ready.
		nodeReadyTimeout = 2 * time.Minute
		// nodeReadyPollInterval is the interval at which to check node readiness.
		nodeReadyPollInterval = 1 * time.Second
	)
	config, err := framework.LoadConfig()
	Expect(err).NotTo(HaveOccurred())
	client, err := clientset.NewForConfig(config)
	Expect(err).NotTo(HaveOccurred())
	Eventually(func() error {
		nodes, err := client.Nodes().List(api.ListOptions{})
		if err != nil {
			// Return the error so that Eventually retries instead of aborting the test on a transient API failure.
			return fmt.Errorf("failed to list nodes: %v", err)
		}
		if nodes == nil {
			return fmt.Errorf("the node list is nil")
		}
		// The test environment is expected to contain at most one node.
		Expect(len(nodes.Items) > 1).NotTo(BeTrue())
		if len(nodes.Items) == 0 {
			return fmt.Errorf("empty node list: %+v", nodes)
		}
		node := nodes.Items[0]
		if !api.IsNodeReady(&node) {
			return fmt.Errorf("node is not ready: %+v", node)
		}
		return nil
	}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
}
Example #2
func nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
	// Check if the node satisfies the daemon set's node selector.
	nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
	shouldRun := nodeSelector.Matches(labels.Set(node.Labels))
	// If the daemon set specifies a node name, check that it matches with node.Name.
	shouldRun = shouldRun && (ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name)
	// If the node is not ready, don't run on it.
	// TODO(mikedanese): remove this once daemonpods forgive nodes
	shouldRun = shouldRun && api.IsNodeReady(node)

	// If the node is unschedulable, don't run on it.
	// TODO(mikedanese): remove this once we have the right node admittance levels.
	// See https://github.com/kubernetes/kubernetes/issues/17297#issuecomment-156857375.
	shouldRun = shouldRun && !node.Spec.Unschedulable
	return shouldRun
}
Example #3
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
	// Check if the node satisfies the daemon set's node selector.
	nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
	if !nodeSelector.Matches(labels.Set(node.Labels)) {
		return false
	}
	// If the daemon set specifies a node name, check that it matches with node.Name.
	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
		return false
	}
	// If the node is not ready, don't run on it.
	// TODO(mikedanese): remove this once daemonpods forgive nodes
	if !api.IsNodeReady(node) {
		return false
	}

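	// If the node is reporting an OutOfDisk condition, don't run the daemon pod on it.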
	for _, c := range node.Status.Conditions {
		if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
			return false
		}
	}

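	// Simulate the daemon pod as if it were already bound to this node.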
	newPod := &api.Pod{Spec: ds.Spec.Template.Spec}
	newPod.Spec.NodeName = node.Name
	pods := []*api.Pod{newPod}

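	// Gather the pods from the store that are already assigned to this node.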
	for _, m := range dsc.podStore.Store.List() {
		pod := m.(*api.Pod)
		if pod.Spec.NodeName != node.Name {
			continue
		}
		pods = append(pods, pod)
	}
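	// Don't run the daemon pod if the pods on the node would exceed its allocatable CPU or memory.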
	_, notFittingCPU, notFittingMemory := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
	if len(notFittingCPU)+len(notFittingMemory) != 0 {
		return false
	}
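	// Don't run the daemon pod if any host ports would collide among the pods on the node.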
	ports := sets.String{}
	for _, pod := range pods {
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
			return false
		}
	}
	return true
}
Example #4
func waitForNodeReady() {
	const (
		// nodeReadyTimeout is the time to wait for the node to become ready.
		nodeReadyTimeout = 2 * time.Minute
		// nodeReadyPollInterval is the interval at which to check node readiness.
		nodeReadyPollInterval = 1 * time.Second
	)
	client, err := getAPIServerClient()
	Expect(err).NotTo(HaveOccurred(), "should be able to get apiserver client.")
	Eventually(func() error {
		node, err := getNode(client)
		if err != nil {
			return fmt.Errorf("failed to get node: %v", err)
		}
		if !api.IsNodeReady(node) {
			return fmt.Errorf("node is not ready: %+v", node)
		}
		return nil
	}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
}
Example #5
func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Clientset, error) {
	adminClientConfig, err := clientcmd.NewDefaultClientConfig(
		*adminConfig,
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("<master/apiclient> failed to create API client configuration [%v]", err)
	}

	fmt.Println("<master/apiclient> created API client configuration")

	client, err := clientset.NewForConfig(adminClientConfig)
	if err != nil {
		return nil, fmt.Errorf("<master/apiclient> failed to create API client [%v]", err)
	}

	fmt.Println("<master/apiclient> created API client, waiting for the control plane to become ready")

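	// Poll until the API server returns at least three component statuses and none of them report an unhealthy condition.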
	start := time.Now()
	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		cs, err := client.ComponentStatuses().List(api.ListOptions{})
		if err != nil {
			return false, nil
		}
		// TODO(phase2) must revisit this when we implement HA
		if len(cs.Items) < 3 {
			fmt.Println("<master/apiclient> not all control plane components are ready yet")
			return false, nil
		}
		for _, item := range cs.Items {
			for _, condition := range item.Conditions {
				// A component counts as healthy only when its Healthy condition reports status True.
				if condition.Type == api.ComponentHealthy && condition.Status != api.ConditionTrue {
					fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
					return false, nil
				}
			}
		}

		fmt.Printf("<master/apiclient> all control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
		return true, nil
	})

	fmt.Println("<master/apiclient> waiting for at least one node to register and become ready")
	start = time.Now()
	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		nodeList, err := client.Nodes().List(api.ListOptions{})
		if err != nil {
			fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)")
			return false, nil
		}
		if len(nodeList.Items) < 1 {
			return false, nil
		}
		n := &nodeList.Items[0]
		if !api.IsNodeReady(n) {
			fmt.Println("<master/apiclient> first node has registered, but is not ready yet")
			return false, nil
		}

		fmt.Printf("<master/apiclient> first node is ready after %f seconds\n", time.Since(start).Seconds())
		return true, nil
	})

	return client, nil
}
Example #6
func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
	// Find out which nodes are running the daemon pods selected by ds.
	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
	if err != nil {
		glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err)
		// Without an accurate mapping we could create duplicate daemon pods, so give up and retry on the next sync.
		return
	}

	// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
	nodeList, err := dsc.nodeStore.List()
	if err != nil {
		glog.Errorf("Couldn't get list of nodes when syncing daemon set %+v: %v", ds, err)
	}
	var nodesNeedingDaemonPods, podsToDelete []string
	for _, node := range nodeList.Items {
		// Check if the node satisfies the daemon set's node selector.
		nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
		shouldRun := nodeSelector.Matches(labels.Set(node.Labels))
		// If the daemon set specifies a node name, check that it matches with nodeName.
		nodeName := node.Name
		shouldRun = shouldRun && (ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == nodeName)

		// If the node is not ready, don't run on it.
		// TODO(mikedanese): remove this once daemonpods forgive nodes
		shouldRun = shouldRun && api.IsNodeReady(&node)

		daemonPods, isRunning := nodeToDaemonPods[nodeName]
		if shouldRun && !isRunning {
			// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
			nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodeName)
		} else if shouldRun && len(daemonPods) > 1 {
			// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
			// Sort the daemon pods by creation time, so the oldest is preserved.
			sort.Sort(podByCreationTimestamp(daemonPods))
			for i := 1; i < len(daemonPods); i++ {
				podsToDelete = append(podsToDelete, daemonPods[i].Name)
			}
		} else if !shouldRun && isRunning {
			// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
			for i := range daemonPods {
				podsToDelete = append(podsToDelete, daemonPods[i].Name)
			}
		}
	}

	// We need to set expectations before creating/deleting pods to avoid race conditions.
	dsKey, err := controller.KeyFunc(ds)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
		return
	}
	dsc.expectations.SetExpectations(dsKey, len(nodesNeedingDaemonPods), len(podsToDelete))

	glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v", ds.Name, nodesNeedingDaemonPods)
	for i := range nodesNeedingDaemonPods {
		if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[i], ds.Namespace, ds.Spec.Template, ds); err != nil {
			glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
			dsc.expectations.CreationObserved(dsKey)
			util.HandleError(err)
		}
	}

	glog.V(4).Infof("Pods to delete for daemon set %s: %+v", ds.Name, podsToDelete)
	for i := range podsToDelete {
		if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[i]); err != nil {
			glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
			dsc.expectations.DeletionObserved(dsKey)
			util.HandleError(err)
		}
	}
}