Code Example #1
File: statefulset.go Project: nak3/kubernetes
func newStatefulSetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				var internalOptions api.ListOptions
				if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
					return nil, err
				}
				return client.Apps().StatefulSets(api.NamespaceAll).List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				var internalOptions api.ListOptions
				if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
					return nil, err
				}
				return client.Apps().StatefulSets(api.NamespaceAll).Watch(internalOptions)
			},
		},
		&apps.StatefulSet{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	return sharedIndexInformer
}
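A minimal usage sketch for the informer above: register an event handler, start the informer with a stop channel, and wait for the initial cache sync. The handler bodies, the 30-second resync value, and the surrounding wiring are illustrative assumptions, not part of the original file.

// Hypothetical wiring for newStatefulSetInformer (assumed, for illustration):
informer := newStatefulSetInformer(client, 30*time.Second)
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc:    func(obj interface{}) { /* enqueue obj */ },
	UpdateFunc: func(oldObj, newObj interface{}) { /* enqueue newObj */ },
	DeleteFunc: func(obj interface{}) { /* handle the deletion */ },
})
stopCh := make(chan struct{})
defer close(stopCh)
go informer.Run(stopCh)
// Block until the first List has populated the informer's store.
cache.WaitForCacheSync(stopCh, informer.HasSynced)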
Code Example #2
File: stop.go Project: alex-mohr/kubernetes
func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c.Core()}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c.Core()}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil

	case apps.Kind("StatefulSet"):
		return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
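A hedged usage sketch for ReaperFor. The Reaper interface itself is not shown in this snippet; the Stop signature used below follows the historical kubectl interface and should be treated as an assumption.

// Assumed: Stop(namespace, name string, timeout time.Duration,
//                gracePeriod *api.DeleteOptions) error
reaper, err := ReaperFor(api.Kind("ReplicationController"), c)
if err != nil {
	return err // a *NoSuchReaperError for kinds with no registered reaper
}
// Passing nil delete options lets the server apply its default grace period.
if err := reaper.Stop("default", "my-rc", 5*time.Minute, nil); err != nil {
	return err
}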
Code Example #3
File: scale.go Project: alex-mohr/kubernetes
func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c.Core()}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
	case apps.Kind("StatefulSet"):
		return &StatefulSetScaler{c.Apps()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
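The same pattern applies to ScalerFor. The Scale signature and the NewRetryParams helper below follow the historical kubectl scaling code and are assumptions here, since neither appears in the snippet.

// Assumed: Scale(namespace, name string, newSize uint,
//                 preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error
scaler, err := ScalerFor(apps.Kind("StatefulSet"), c)
if err != nil {
	return err
}
retry := NewRetryParams(time.Second, 5*time.Minute)
waitForReplicas := NewRetryParams(time.Second, 5*time.Minute)
// nil preconditions means scale unconditionally; a non-nil waitForReplicas
// makes the call block until the new size is observed or the timeout expires.
if err := scaler.Scale("default", "web", 3, nil, retry, waitForReplicas); err != nil {
	return err
}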
Code Example #4
File: poddetail.go Project: floreks/dashboard
func toStatefulSetPodController(client k8sClient.Interface, reference api.ObjectReference, pods []api.Pod, events []api.Event, heapsterClient client.HeapsterClient) (*Controller, error) {
	statefulset, err := client.Apps().StatefulSets(reference.Namespace).Get(reference.Name)
	if err != nil {
		return nil, err
	}
	statefulsets := []apps.StatefulSet{*statefulset}

	statefulSetList := statefulsetlist.CreateStatefulSetList(statefulsets, pods, events, dataselect.StdMetricsDataSelect, &heapsterClient)
	return &Controller{
		Kind:            "StatefulSet",
		StatefulSetList: statefulSetList,
	}, nil
}
Code Example #5
// GetStatefulSetListChannel returns a pair of channels to a StatefulSet list and errors,
// both of which must be read numReads times.
func GetStatefulSetListChannel(client client.Interface,
	nsQuery *NamespaceQuery, numReads int) StatefulSetListChannel {
	channel := StatefulSetListChannel{
		List:  make(chan *apps.StatefulSetList, numReads),
		Error: make(chan error, numReads),
	}

	go func() {
		statefulSets, err := client.Apps().StatefulSets(nsQuery.ToRequestParam()).List(listEverything)
		// Guard the filter: statefulSets is nil when the List call failed,
		// and dereferencing it would panic before the error is ever sent.
		if err == nil {
			var filteredItems []apps.StatefulSet
			for _, item := range statefulSets.Items {
				if nsQuery.Matches(item.ObjectMeta.Namespace) {
					filteredItems = append(filteredItems, item)
				}
			}
			statefulSets.Items = filteredItems
		}
		for i := 0; i < numReads; i++ {
			channel.List <- statefulSets
			channel.Error <- err
		}
	}()

	return channel
}
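A sketch of the consuming side. Both channels are buffered to numReads and the goroutine writes each of them numReads times, so every consumer must read exactly once from each channel; the call site below is illustrative.

// Illustrative consumer of the channel pair (numReads == 1 here):
channels := GetStatefulSetListChannel(client, nsQuery, 1)
statefulSets := <-channels.List // may be nil if the List call failed
if err := <-channels.Error; err != nil {
	return nil, err
}
// statefulSets.Items now holds only the items whose namespace matches nsQuery.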
Code Example #6
func deleteAllStatefulSets(c clientset.Interface, ns string) {
	pst := &statefulSetTester{c: c}
	psList, err := c.Apps().StatefulSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	ExpectNoError(err)

	// Scale down each statefulset, then delete it completely.
	// Deleting a pvc without doing this will leak volumes, #25101.
	errList := []string{}
	for _, ps := range psList.Items {
		framework.Logf("Scaling statefulset %v to 0", ps.Name)
		if err := pst.scale(&ps, 0); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
		pst.waitForStatus(&ps, 0)
		framework.Logf("Deleting statefulset %v", ps.Name)
		if err := c.Apps().StatefulSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
	}

	// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
	pvNames := sets.NewString()
	// TODO: Don't assume all pvcs in the ns belong to a statefulset
	pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
		pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
			return false, nil
		}
		for _, pvc := range pvcList.Items {
			pvNames.Insert(pvc.Spec.VolumeName)
			// TODO: Double check that there are no pods referencing the pvc
			framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
			if err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
				return false, nil
			}
		}
		return true, nil
	})
	if pvcPollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
	}

	pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
		pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
			return false, nil
		}
		waitingFor := []string{}
		for _, pv := range pvList.Items {
			if pvNames.Has(pv.Name) {
				waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
			}
		}
		if len(waitingFor) == 0 {
			return true, nil
		}
		framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
		return false, nil
	})
	if pollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
	}
	if len(errList) != 0 {
		ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
	}
}
Code Example #7
		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed {
				dumpDebugInfo(c, ns)
			}
			framework.Logf("Deleting all statefulset in ns %v", ns)
			deleteAllStatefulSets(c, ns)
		})

		It("should provide basic identity [Feature:StatefulSet]", func() {
			By("creating statefulset " + psName + " in namespace " + ns)
			petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
			podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
			ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
			setInitializedAnnotation(ps, "false")

			_, err := c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := statefulSetTester{c: c}

			By("Saturating pet set " + ps.Name)
			pst.saturate(ps)

			By("Verifying statefulset mounted data directory is usable")
			ExpectNoError(pst.checkMount(ps, "/data"))

			By("Verifying statefulset provides a stable hostname for each pod")
			ExpectNoError(pst.checkHostname(ps))

			cmd := "echo $(hostname) > /data/hostname; sync;"
			By("Running " + cmd + " in all pets")
Code Example #8
func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify those controllers. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
	// is running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC or deployment with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, deployment := range deploymentList.Items {
			selector := labels.Set(deployment.Spec.Selector.MatchLabels).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// And for stateful sets.
		for _, ps := range psList.Items {
			selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
Code Example #9
File: petset.go Project: neujie/kubernetes
		})

		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed {
				dumpDebugInfo(c, ns)
			}
			framework.Logf("Deleting all petset in ns %v", ns)
			deleteAllPetSets(c, ns)
		})

		It("should provide basic identity [Feature:PetSet]", func() {
			By("creating petset " + psName + " in namespace " + ns)
			petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
			podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
			ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
			_, err := c.Apps().PetSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := petSetTester{c: c}

			By("Saturating pet set " + ps.Name)
			pst.saturate(ps)

			By("Verifying petset mounted data directory is usable")
			ExpectNoError(pst.checkMount(ps, "/data"))

			cmd := "echo $(hostname) > /data/hostname; sync;"
			By("Running " + cmd + " in all pets")
			ExpectNoError(pst.execInPets(ps, cmd))

			By("Restarting pet set " + ps.Name)