Example #1
// WaitForReplicaSetUpdated polls the replica set until it is updated.
func WaitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error {
	return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		rs, err := c.Extensions().ReplicaSets(namespace).Get(name)
		if err != nil {
			return false, err
		}
		return rs.Status.ObservedGeneration >= desiredGeneration, nil
	})
}
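A minimal usage sketch (hypothetical caller; assumes a test context t, a clientset c, a namespace ns, and a replica set rs whose spec was just mutated, so its Generation has been bumped):

// Push the spec change, then block until the replica set controller
// reports that it has observed the new generation.
updated, err := c.Extensions().ReplicaSets(ns).Update(rs)
if err != nil {
	t.Fatalf("failed to update replica set %s: %v", rs.Name, err)
}
if err := WaitForReplicaSetUpdated(c, updated.Generation, ns, updated.Name); err != nil {
	t.Fatalf("replica set %s never observed generation %d: %v", updated.Name, updated.Generation, err)
}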
Example #2
// WaitForPodsHashPopulated polls the replica set until updated and fully labeled.
func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, namespace, name string) error {
	return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
		rs, err := c.Extensions().ReplicaSets(namespace).Get(name)
		if err != nil {
			return false, err
		}
		return rs.Status.ObservedGeneration >= desiredGeneration &&
			rs.Status.FullyLabeledReplicas == *(rs.Spec.Replicas), nil
	})
}
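This variant additionally requires Status.FullyLabeledReplicas to match the spec, i.e. every pod carries the labels the controller expects (such as the pod-template hash a deployment injects). A hedged sketch of when it might be called, assuming the same c, ns, rs, and t as above:

// After a deployment relabels the pods of an adopted replica set,
// wait until every replica is both observed and fully labeled.
if err := WaitForPodsHashPopulated(c, rs.Generation, ns, rs.Name); err != nil {
	t.Fatalf("pods of %s were never fully labeled: %v", rs.Name, err)
}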
Example #3
func getDNSReplicas(c clientset.Interface) (int, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: KubeDNSLabelName}))
	listOpts := v1.ListOptions{LabelSelector: label.String()}
	deployments, err := c.Extensions().Deployments(DNSNamespace).List(listOpts)
	if err != nil {
		return 0, err
	}
	Expect(len(deployments.Items)).Should(Equal(1))

	deployment := deployments.Items[0]
	return int(*(deployment.Spec.Replicas)), nil
}
Example #4
func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.ReplicaSet, pods []*v1.Pod, ns string) {
	rsClient := clientSet.Extensions().ReplicaSets(ns)
	podClient := clientSet.Core().Pods(ns)
	for _, rs := range rss {
		if _, err := rsClient.Create(rs); err != nil {
			t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
		}
	}
	for _, pod := range pods {
		if _, err := podClient.Create(pod); err != nil {
			t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
		}
	}
}
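A usage sketch (hypothetical names; assumes the test has fixture helpers, here called newRS and newMatchingPod, that build a *v1beta1.ReplicaSet and matching *v1.Pods):

// Create one replica set and two pods it should adopt, failing the
// test immediately if any create call errors.
rs := newRS("rs", ns, 2)           // hypothetical helper
pod1 := newMatchingPod("pod1", ns) // hypothetical helper
pod2 := newMatchingPod("pod2", ns)
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns)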
Example #5
func getDNSReplicas(c clientset.Interface) (int, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
	listOpts := v1.ListOptions{LabelSelector: label.String()}
	deployments, err := c.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
	if err != nil {
		return 0, err
	}
	if len(deployments.Items) != 1 {
		return 0, fmt.Errorf("expected 1 DNS deployment, got %v", len(deployments.Items))
	}

	deployment := deployments.Items[0]
	return int(*(deployment.Spec.Replicas)), nil
}
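Example #5 is the more reusable shape of Example #3: instead of a Gomega Expect, which only works under a Ginkgo test, the count mismatch is returned as an ordinary error. A hedged usage sketch, assuming a clientset c inside a Ginkgo/Gomega e2e test:

// Record the autoscaled DNS deployment size before perturbing the cluster.
originalReplicas, err := getDNSReplicas(c)
Expect(err).NotTo(HaveOccurred())
framework.Logf("kube-dns is currently running %d replicas", originalReplicas)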
Example #6
// listReplicaSets lists all RSes the given deployment targets with the given client interface.
func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) {
	return ListReplicaSets(deployment,
		func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
			rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
			if err != nil {
				return nil, err
			}
			ret := []*extensions.ReplicaSet{}
			for i := range rsList.Items {
				ret = append(ret, &rsList.Items[i])
			}
			return ret, nil
		})
}
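A hedged sketch of calling the wrapper, a fragment assuming a *extensions.Deployment named d and a clientset c are in scope:

// Fetch every replica set the deployment's selector targets; old and
// new replica sets alike come back as pointers in one slice.
rsList, err := listReplicaSets(d, c)
if err != nil {
	return fmt.Errorf("failed to list replica sets for deployment %s: %v", d.Name, err)
}
for _, rs := range rsList {
	fmt.Printf("deployment %s targets replica set %s\n", d.Name, rs.Name)
}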
Example #7
func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet, ns string) {
	rsClient := clientSet.Extensions().ReplicaSets(ns)
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		updatedRS, err := rsClient.Get(rs.Name)
		if err != nil {
			return false, err
		}
		return updatedRS.Status.Replicas == *rs.Spec.Replicas, nil
	}); err != nil {
		t.Fatal(err)
	}
}
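Combined with Example #4, a typical integration-test flow might look like this (a sketch; clientSet, t, ns, and the rs fixture are assumed):

// Create the fixtures, then block until the controller has brought
// the replica set's status up to its spec before asserting anything.
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{}, ns)
waitRSStable(t, clientSet, rs, ns)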
Example #8
// installThirdParty installs a third party resource and returns a cleanup func to be deferred.
func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
	var err error
	_, err = client.Extensions().ThirdPartyResources().Create(tpr)
	if err != nil {
		t.Fatal(err)
	}

	fooClientConfig := *clientConfig
	fooClientConfig.APIPath = "apis"
	fooClientConfig.GroupVersion = &schema.GroupVersion{Group: group, Version: version}
	fooClient, err := restclient.RESTClientFor(&fooClientConfig)
	if err != nil {
		t.Fatal(err)
	}

	err = wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
		_, err := fooClient.Get().Namespace("default").Resource(resource).DoRaw()
		if err == nil {
			return true, nil
		}
		if apierrors.IsNotFound(err) {
			return false, nil
		}

		return false, err
	})
	if err != nil {
		t.Fatal(err)
	}

	return func() {
		client.Extensions().ThirdPartyResources().Delete(tpr.Name, nil)
		err = wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
			_, err := fooClient.Get().Namespace("default").Resource(resource).DoRaw()
			if apierrors.IsNotFound(err) {
				return true, nil
			}

			return false, err
		})
		if err != nil {
			t.Fatal(err)
		}
	}
}
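A usage sketch (hypothetical fooTPR value and group/version/resource names; assumes a test t, a clientset client, and a rest config config):

// Install the TPR, and tear it down (waiting for the teardown to be
// visible through the API) when the test finishes.
destroy := installThirdParty(t, client, config, fooTPR,
	"company.com", "v1", "foos")
defer destroy()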
Example #9
// verifyRemainingObjects verifies whether the numbers of the remaining
// replica sets and pods are rsNum and podNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
	rsClient := clientSet.Extensions().ReplicaSets(namespace)
	podClient := clientSet.Core().Pods(namespace)
	pods, err := podClient.List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("failed to list pods: %v", err)
	}
	var ret = true
	if len(pods.Items) != podNum {
		ret = false
		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
	}
	rss, err := rsClient.List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("failed to list replica sets: %v", err)
	}
	if len(rss.Items) != rsNum {
		ret = false
		t.Logf("expect %d RSs, got %d RSs", rsNum, len(rss.Items))
	}
	return ret, nil
}
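Because verifyRemainingObjects returns (bool, error) with the same contract as a wait.ConditionFunc, it slots directly into wait.Poll. A sketch, assuming a garbage-collection test that expects no replica sets and no pods to remain in ns:

// Poll until the remaining object counts match; mismatches are logged
// via t.Logf inside the helper on every attempt.
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
	return verifyRemainingObjects(t, clientSet, ns, 0, 0)
}); err != nil {
	t.Fatalf("remaining objects never converged: %v", err)
}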
Example #10
	It("should run and stop simple daemon", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		framework.Logf("Creating simple daemon set %s", dsName)
		_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
			ObjectMeta: v1.ObjectMeta{
				Name: dsName,
			},
			Spec: extensions.DaemonSetSpec{
				Template: v1.PodTemplateSpec{
					ObjectMeta: v1.ObjectMeta{
						Labels: label,
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []v1.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
			dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
			Expect(err).NotTo(HaveOccurred())
Example #11
// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain as well as some extra information
// about possibly problematic pods (unreplicated pods and daemon sets).
func GetPodsForDeletionOnNodeDrain(
	podList []*apiv1.Pod,
	decoder runtime.Decoder,
	deleteAll bool,
	skipNodesWithSystemPods bool,
	skipNodesWithLocalStorage bool,
	checkReferences bool, // Setting this to true requires client to be non-nil.
	client client.Interface,
	minReplica int32) ([]*apiv1.Pod, error) {

	pods := []*apiv1.Pod{}

	for _, pod := range podList {
		if IsMirrorPod(pod) {
			continue
		}

		daemonsetPod := false
		replicated := false

		sr, err := CreatorRef(pod)
		if err != nil {
			return []*apiv1.Pod{}, fmt.Errorf("failed to obtain refkind: %v", err)
		}
		refKind := ""
		if sr != nil {
			refKind = sr.Reference.Kind
		}

		if refKind == "ReplicationController" {
			if checkReferences {
				rc, err := client.Core().ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for a Get error is that the RC is
				// gone/missing; an RC configured with too few replicas is
				// rejected separately below.
				// TODO: replace the minReplica check with pod disruption budget.
				if err == nil && rc != nil {
					if rc.Spec.Replicas != nil && *rc.Spec.Replicas < minReplica {
						return []*apiv1.Pod{}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, *rc.Spec.Replicas, minReplica)
					}
					replicated = true

				} else {
					return []*apiv1.Pod{}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		} else if refKind == "DaemonSet" {
			if checkReferences {
				ds, err := client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the DaemonSet is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && ds != nil {
					// Otherwise, treat daemonset-managed pods as unmanaged since
					// DaemonSet Controller currently ignores the unschedulable bit.
					// FIXME(mml): Add link to the issue concerning a proper way to drain
					// daemonset pods, probably using taints.
					daemonsetPod = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				daemonsetPod = true
			}
		} else if refKind == "Job" {
			if checkReferences {
				job, err := client.Batch().Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the Job is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && job != nil {
					replicated = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		} else if refKind == "ReplicaSet" {
			if checkReferences {
				rs, err := client.Extensions().ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the RS is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && rs != nil {
					if rs.Spec.Replicas != nil && *rs.Spec.Replicas < minReplica {
						return []*apiv1.Pod{}, fmt.Errorf("replica set for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, *rs.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("replica set for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		}
		if daemonsetPod {
			continue
		}
		if !deleteAll {
			if !replicated {
				return []*apiv1.Pod{}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name)
			}
			if pod.Namespace == "kube-system" && skipNodesWithSystemPods {
				return []*apiv1.Pod{}, fmt.Errorf("non-daemonset, non-mirrored, kube-system pod present: %s", pod.Name)
			}
			if HasLocalStorage(pod) && skipNodesWithLocalStorage {
				return []*apiv1.Pod{}, fmt.Errorf("pod with local storage present: %s", pod.Name)
			}
		}
		pods = append(pods, pod)
	}
	return pods, nil
}
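A hedged calling sketch, assuming nodePods holds the pods scheduled to the node being drained, decoder is a runtime.Decoder already in scope, and client is a live clientset (required because checkReferences is true):

// Ask for the deletable pods, refusing to drain if any pod is
// unreplicated, a kube-system pod, or uses local storage.
podsToDelete, err := GetPodsForDeletionOnNodeDrain(
	nodePods,
	decoder, // assumed to be available in the surrounding code
	false,   // deleteAll
	true,    // skipNodesWithSystemPods
	true,    // skipNodesWithLocalStorage
	true,    // checkReferences: client must be non-nil
	client,
	1) // minReplica
if err != nil {
	return fmt.Errorf("node not drainable: %v", err)
}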
Example #12
func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key
	// that identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
	// is running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := v1.ListOptions{LabelSelector: selector.String()}
		deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find exactly one RC, deployment, or stateful set with label %s but got %d",
				rcLabel, len(rcList.Items)+len(deploymentList.Items)+len(psList.Items))
		}
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, d := range deploymentList.Items {
			selector := labels.Set(d.Spec.Selector.MatchLabels).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// And for stateful sets.
		for _, ps := range psList.Items {
			selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
			options := v1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
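A sketch of how the collected UIDs might be consumed (hypothetical; for example, a restart test comparing the addon pod set before and after a master restart, assuming a clientset c in a Ginkgo/Gomega context):

// Snapshot the UIDs of all addon pods we expect to survive, then
// compare against a later snapshot taken after the disruption.
expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c)
Expect(err).NotTo(HaveOccurred())
framework.Logf("expecting %d addon pods: %v", len(expectedPods), expectedPods)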