Code example #1
File: daemon_set.go Project: vjsamuel/kubernetes
	It("should run and stop simple daemon", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		Logf("Creating simple daemon set %s", dsName)
		_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
			ObjectMeta: api.ObjectMeta{
				Name: dsName,
			},
			Spec: extensions.DaemonSetSpec{
				Template: api.PodTemplateSpec{
					ObjectMeta: api.ObjectMeta{
						Labels: label,
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			Logf("Check that reaper kills all daemon pods for %s", dsName)
			dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
			Expect(err).NotTo(HaveOccurred())
			// (the excerpt is truncated here in the source listing)
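For context, a deferred cleanup like this typically stops the DaemonSet through the reaper and then waits for the daemon pods to disappear. A minimal sketch of how the truncated defer might continue, assuming helpers such as dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes and f that do not appear in the excerpt:

			// Hedged sketch; dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes
			// and f are assumed to come from the surrounding test file.
			err = dsReaper.Stop(ns, dsName, 0, nil)
			Expect(err).NotTo(HaveOccurred())
			err = wait.Poll(dsRetryPeriod, dsRetryTimeout,
				checkDaemonPodOnNodes(f, label, make([]string, 0)))
			Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be reaped")
		}()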
Code example #2
File: clusterinfo_dump.go Project: apcera/kubernetes
func dumpClusterInfo(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
	c, err := f.Client()
	if err != nil {
		return err
	}
	printer, _, err := kubectl.GetPrinter("json", "")
	if err != nil {
		return err
	}

	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return err
	}

	if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
		return err
	}

	var namespaces []string
	if cmdutil.GetFlagBool(cmd, "all-namespaces") {
		namespaceList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			return err
		}
		for ix := range namespaceList.Items {
			namespaces = append(namespaces, namespaceList.Items[ix].Name)
		}
	} else {
		namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
		if len(namespaces) == 0 {
			cmdNamespace, _, err := f.DefaultNamespace()
			if err != nil {
				return err
			}
			namespaces = []string{
				api.NamespaceSystem,
				cmdNamespace,
			}
		}
	}
	for _, namespace := range namespaces {
		// TODO: this is repetitive in the extreme.  Use reflection or
		// something to make this a for loop.
		events, err := c.Events(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil {
			return err
		}

		rcs, err := c.ReplicationControllers(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil {
			return err
		}

		svcs, err := c.Services(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil {
			return err
		}

		sets, err := c.DaemonSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil {
			return err
		}

		deps, err := c.Deployments(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil {
			return err
		}

		rps, err := c.ReplicaSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil {
			return err
		}

		pods, err := c.Pods(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}

		if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil {
			return err
		}

		for ix := range pods.Items {
			pod := &pods.Items[ix]
			writer := setupOutputWriter(cmd, out, path.Join(namespace, pod.Name, "logs.txt"))
			writer.Write([]byte(fmt.Sprintf("==== START logs for %s/%s ====\n", pod.Namespace, pod.Name)))
			request, err := f.LogsForObject(pod, &api.PodLogOptions{})
			if err != nil {
				return err
			}

			data, err := request.DoRaw()
			if err != nil {
				return err
			}
			writer.Write(data)
			writer.Write([]byte(fmt.Sprintf("==== END logs for %s/%s ====\n", pod.Namespace, pod.Name)))
		}
	}
	dir := cmdutil.GetFlagString(cmd, "output-directory")
	if len(dir) == 0 {
		dir = "."
	}
	if dir != "-" {
		fmt.Fprintf(out, "Cluster info dumped to %s\n", dir)
	}
	return nil
}
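The helper setupOutputWriter is not part of this listing, but its role is clear from the call sites: it either hands back the default writer (when output goes to stdout) or opens a file under the output-directory flag, one file per object kind and namespace. A minimal sketch consistent with that usage, assuming the os import; the real implementation may differ:

// Hedged sketch of setupOutputWriter, inferred from its call sites above.
func setupOutputWriter(cmd *cobra.Command, defaultWriter io.Writer, filename string) io.Writer {
	dir := cmdutil.GetFlagString(cmd, "output-directory")
	if len(dir) == 0 || dir == "-" {
		// No directory (or "-") means everything goes to the default stream.
		return defaultWriter
	}
	fullFile := path.Join(dir, filename)
	// Create any parent directories (e.g. the per-namespace subdirectory).
	cmdutil.CheckErr(os.MkdirAll(path.Dir(fullFile), 0755))
	file, err := os.Create(fullFile)
	cmdutil.CheckErr(err)
	return file
}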
Code example #3
File: drain.go Project: spxtr/contrib
// GetPodsForDeletionOnNodeDrain returns the pods that should be deleted on node drain. Daemon-set pods are skipped,
// and an error is returned when a possibly problematic pod is found (an unreplicated pod, or, depending on the
// flags, a kube-system pod or a pod using local storage).
func GetPodsForDeletionOnNodeDrain(
	podList []*api.Pod,
	decoder runtime.Decoder,
	skipNodesWithSystemPods bool,
	skipNodesWithLocalStorage bool,
	checkReferences bool, // Setting this to true requires client to be non-nil.
	client *client.Client,
	minReplica int32) ([]*api.Pod, error) {

	pods := []*api.Pod{}

	for _, pod := range podList {
		if IsMirrorPod(pod) {
			continue
		}

		daemonsetPod := false
		replicated := false

		sr, err := CreatorRef(pod)
		if err != nil {
			return []*api.Pod{}, fmt.Errorf("failed to obtain refkind: %v", err)
		}
		refKind := ""
		if sr != nil {
			refKind = sr.Reference.Kind
		}

		if refKind == "ReplicationController" {
			if checkReferences {
				rc, err := client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume an error means the RC is either gone/missing or
				// has too few replicas configured.
				// TODO: replace the minReplica check with pod disruption budget.
				if err == nil && rc != nil {
					if rc.Spec.Replicas < minReplica {
						return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, rc.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		} else if refKind == "DaemonSet" {
			if checkReferences {
				ds, err := client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the DaemonSet is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && ds != nil {
					// Otherwise, treat daemonset-managed pods as unmanaged since
					// DaemonSet Controller currently ignores the unschedulable bit.
					// FIXME(mml): Add link to the issue concerning a proper way to drain
					// daemonset pods, probably using taints.
					daemonsetPod = true
				} else {
					return []*api.Pod{}, fmt.Errorf("deamonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				daemonsetPod = true
			}
		} else if refKind == "Job" {
			if checkReferences {
				job, err := client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the Job is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && job != nil {
					replicated = true
				} else {
					return []*api.Pod{}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		} else if refKind == "ReplicaSet" {
			if checkReferences {
				rs, err := client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the RS is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && rs != nil {
					if rs.Spec.Replicas < minReplica {
						return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, rs.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		}
		if daemonsetPod {
			continue
		}
		if !replicated {
			return []*api.Pod{}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name)
		}
		if pod.Namespace == "kube-system" && skipNodesWithSystemPods {
			return []*api.Pod{}, fmt.Errorf("non-deamons set, non-mirrored, kube-system pod present: %s", pod.Name)
		}
		if HasLocalStorage(pod) && skipNodesWithLocalStorage {
			return []*api.Pod{}, fmt.Errorf("pod with local storage present: %s", pod.Name)
		}
		pods = append(pods, pod)
	}
	return pods, nil
}
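A caller typically feeds this function the pods scheduled on the node being drained; anything it returns is considered safe to evict. A hedged usage sketch, reusing the field-selector pattern from code example #4 below (nodeName, decoder and client are assumed to be in scope, inside a function that returns an error):

	// Hedged usage sketch: list the pods on one node and filter them for drain.
	podList, err := client.Pods(api.NamespaceAll).List(api.ListOptions{
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}),
	})
	if err != nil {
		return err
	}
	pods := make([]*api.Pod, 0, len(podList.Items))
	for i := range podList.Items {
		pods = append(pods, &podList.Items[i])
	}
	podsToDelete, err := GetPodsForDeletionOnNodeDrain(
		pods, decoder,
		true,  // skipNodesWithSystemPods: refuse to drain kube-system pods
		true,  // skipNodesWithLocalStorage: refuse pods using local storage
		true,  // checkReferences: requires a non-nil client
		client,
		1)     // minReplica
	if err != nil {
		return err
	}
	for i := range podsToDelete {
		// evict or delete podsToDelete[i] here
	}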
Code example #4
File: drain.go Project: timstclair/kube-contrib
// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain, as well as some extra information
// about possibly problematic pods (unreplicated pods and daemon-set pods).
func GetPodsForDeletionOnNodeDrain(client *client.Client, nodename string, decoder runtime.Decoder, removeUnreplicated bool,
	ignoreDaemonSet bool) (pods []api.Pod, unreplicatedPodNames []string, daemonSetPodNames []string, finalError error) {

	pods = []api.Pod{}
	unreplicatedPodNames = []string{}
	daemonSetPodNames = []string{}
	podList, err := client.Pods(api.NamespaceAll).List(api.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodename})})
	if err != nil {
		return []api.Pod{}, []string{}, []string{}, err
	}

	for _, pod := range podList.Items {
		_, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
		if found {
			// Skip mirror pod
			continue
		}
		replicated := false
		daemonsetPod := false

		creatorRef, found := pod.ObjectMeta.Annotations[controller.CreatedByAnnotation]
		if found {
			// Now verify that the specified creator actually exists.
			var sr api.SerializedReference
			if err := runtime.DecodeInto(decoder, []byte(creatorRef), &sr); err != nil {
				return []api.Pod{}, []string{}, []string{}, err
			}
			if sr.Reference.Kind == "ReplicationController" {
				rc, err := client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the RC is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && rc != nil {
					replicated = true
				}
			} else if sr.Reference.Kind == "DaemonSet" {
				ds, err := client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the DaemonSet is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && ds != nil {
					// Otherwise, treat daemonset-managed pods as unmanaged since
					// DaemonSet Controller currently ignores the unschedulable bit.
					// FIXME(mml): Add link to the issue concerning a proper way to drain
					// daemonset pods, probably using taints.
					daemonsetPod = true
				}
			} else if sr.Reference.Kind == "Job" {
				job, err := client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the Job is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && job != nil {
					replicated = true
				}
			} else if sr.Reference.Kind == "ReplicaSet" {
				rs, err := client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)

				// Assume the only reason for an error is because the RS is
				// gone/missing, not for any other cause.  TODO(mml): something more
				// sophisticated than this
				if err == nil && rs != nil {
					replicated = true
				}
			}
		}

		switch {
		case daemonsetPod:
			daemonSetPodNames = append(daemonSetPodNames, pod.Name)
		case !replicated:
			unreplicatedPodNames = append(unreplicatedPodNames, pod.Name)
			if removeUnreplicated {
				pods = append(pods, pod)
			}
		default:
			pods = append(pods, pod)
		}
	}
	return pods, unreplicatedPodNames, daemonSetPodNames, nil
}
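This older variant does its own pod listing and reports problematic pods by name instead of failing outright, leaving the drain decision to the caller. A hedged usage sketch (the node name and the choice to refuse draining when unreplicated pods are present are illustrative):

	// Hedged usage sketch for this variant; client and decoder are assumed
	// to be in scope, inside a function that returns an error.
	pods, unreplicated, daemonSetPods, err := GetPodsForDeletionOnNodeDrain(
		client, "node-1", decoder,
		false, // removeUnreplicated: do not delete unreplicated pods
		true)  // ignoreDaemonSet
	if err != nil {
		return err
	}
	if len(unreplicated) > 0 {
		return fmt.Errorf("refusing to drain, unreplicated pods present: %v", unreplicated)
	}
	_ = daemonSetPods // daemon-set pods are left running by design
	for i := range pods {
		// evict or delete pods[i] here
	}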