Code example #1
File: controller_utils.go Project: Xmagicer/origin
func (s ByLogging) Less(i, j int) bool {
	// 1. assigned < unassigned
	if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
		return len(s[i].Spec.NodeName) > 0
	}
	// 2. PodRunning < PodUnknown < PodPending
	m := map[api.PodPhase]int{api.PodRunning: 0, api.PodUnknown: 1, api.PodPending: 2}
	if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
		return m[s[i].Status.Phase] < m[s[j].Status.Phase]
	}
	// 3. ready < not ready
	if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) {
		return api.IsPodReady(s[i])
	}
	// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
	//       see https://github.com/kubernetes/kubernetes/issues/22065
	// 4. Been ready for more time < less time < empty time
	if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
		return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
	}
	// 5. Pods with containers with higher restart counts < lower restart counts
	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
	}
	// 6. older pods < newer pods < empty timestamp pods
	if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
		return afterOrZero(s[j].CreationTimestamp, s[i].CreationTimestamp)
	}
	return false
}
Code example #2
File: controller_utils.go Project: Xmagicer/origin
func (s ActivePods) Less(i, j int) bool {
	// 1. Unassigned < assigned
	// If only one of the pods is unassigned, the unassigned one is smaller
	if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
		return len(s[i].Spec.NodeName) == 0
	}
	// 2. PodPending < PodUnknown < PodRunning
	m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2}
	if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
		return m[s[i].Status.Phase] < m[s[j].Status.Phase]
	}
	// 3. Not ready < ready
	// If only one of the pods is not ready, the not ready one is smaller
	if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) {
		return !api.IsPodReady(s[i])
	}
	// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
	//       see https://github.com/kubernetes/kubernetes/issues/22065
	// 4. Been ready for empty time < less time < more time
	// If both pods are ready, the latest ready one is smaller
	if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
		return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
	}
	// 5. Pods with containers with higher restart counts < lower restart counts
	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
	}
	// 6. Empty creation time pods < newer pods < older pods
	if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
		return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp)
	}
	return false
}
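
Both comparators above implement sort.Interface over a []*api.Pod. As a point of usage, here is a minimal sketch of how they are typically plugged into sort.Sort; the helper names and the pods slice are illustrative assumptions, not code from the project above:

// sortForScaleDown is a hypothetical helper: after sorting with ActivePods,
// pods[0] is the strongest candidate to delete first (unassigned, pending,
// not ready, most recently ready, most restarts, newest).
func sortForScaleDown(pods []*api.Pod) {
	sort.Sort(ActivePods(pods))
}

// sortForLogs is a hypothetical helper: after sorting with ByLogging,
// pods[0] is the pod most likely to have useful logs (assigned, running,
// ready the longest, most restarts, oldest).
func sortForLogs(pods []*api.Pod) {
	sort.Sort(ByLogging(pods))
}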
Code example #3
// readyPods returns the old and new ready counts for their pods.
// If a pod is observed as being ready, it's considered ready even
// if it later becomes notReady.
func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := int32(0)
	newReady := int32(0)

	for i := range controllers {
		controller := controllers[i]
		selector := labels.Set(controller.Spec.Selector).AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		pods, err := r.c.Pods(controller.Namespace).List(options)
		if err != nil {
			return 0, 0, err
		}
		for _, pod := range pods.Items {
			if api.IsPodReady(&pod) {
				switch controller.Name {
				case oldRc.Name:
					oldReady++
				case newRc.Name:
					newReady++
				}
			}
		}
	}
	return oldReady, newReady, nil
}
Code example #4
File: container.go Project: CodeJuan/kubernetes
func (cc *ConformanceContainer) IsReady() (bool, error) {
	pod, err := cc.PodClient.Get(cc.podName)
	if err != nil {
		return false, err
	}
	return api.IsPodReady(pod), nil
}
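
Since IsReady already has the wait.ConditionFunc shape (func() (bool, error)), it can be handed straight to a poll loop. A minimal sketch, assuming the same wait package used elsewhere on this page; the helper name and the interval/timeout values are illustrative:

// waitForConformanceReady is a hypothetical helper that polls the container's
// pod until api.IsPodReady reports true, reusing IsReady as the condition.
func waitForConformanceReady(cc *ConformanceContainer) error {
	return wait.PollImmediate(time.Second, 2*time.Minute, cc.IsReady)
}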
Code example #5
func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet) {
	pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
		func() (bool, error) {
			podList := p.getPodList(ps)
			if int32(len(podList.Items)) < numPets {
				framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
				return false, nil
			}
			if int32(len(podList.Items)) > numPets {
				return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
			}
			for _, p := range podList.Items {
				isReady := api.IsPodReady(&p)
				if p.Status.Phase != api.PodRunning || !isReady {
					framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady)
					return false, nil
				}
			}
			return true, nil
		})
	if pollErr != nil {
		framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
	}

	p.waitForStatus(ps, numPets)
}
Code example #6
// pollForReadyPods polls oldRc and newRc each interval and returns the old
// and new ready counts for their pods. If a pod is observed as being ready,
// it's considered ready even if it later becomes notReady.
func (r *RollingUpdater) pollForReadyPods(interval, timeout time.Duration, oldRc, newRc *api.ReplicationController) (int, int, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := 0
	newReady := 0
	err := wait.Poll(interval, timeout, func() (done bool, err error) {
		anyReady := false
		for _, controller := range controllers {
			selector := labels.Set(controller.Spec.Selector).AsSelector()
			pods, err := r.c.Pods(controller.Namespace).List(selector, fields.Everything())
			if err != nil {
				return false, err
			}
			for _, pod := range pods.Items {
				if api.IsPodReady(&pod) {
					switch controller.Name {
					case oldRc.Name:
						oldReady++
					case newRc.Name:
						newReady++
					}
					anyReady = true
				}
			}
		}
		if anyReady {
			return true, nil
		}
		return false, nil
	})
	return oldReady, newReady, err
}
Code example #7
func checkExistingRCRecovers(f *Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.Client.Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		if err != nil {
			Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		Logf("apiserver has recovered")
		return true, nil
	}))

	By("waiting for replication controller to recover")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Code example #8
func (s ActivePods) Less(i, j int) bool {
	// Unassigned < assigned
	if s[i].Spec.NodeName == "" && s[j].Spec.NodeName != "" {
		return true
	}
	// PodPending < PodUnknown < PodRunning
	m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2}
	if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
		return m[s[i].Status.Phase] < m[s[j].Status.Phase]
	}
	// Not ready < ready
	if !api.IsPodReady(s[i]) && api.IsPodReady(s[j]) {
		return true
	}
	return false
}
Code example #9
func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
	name := ps.Name
	ns := ps.Namespace
	p.update(ns, name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = count })

	var petList *api.PodList
	pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
		petList = p.getPodList(ps)
		if int32(len(petList.Items)) == count {
			return true, nil
		}
		return false, nil
	})
	if pollErr != nil {
		unhealthy := []string{}
		for _, pet := range petList.Items {
			delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, api.IsPodReady(&pet)
			if delTs != nil || phase != api.PodRunning || !readiness {
				unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
			}
		}
		return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, statefulsetTimeout, unhealthy)
	}
	return nil
}
Code example #10
File: controller_utils.go Project: Xmagicer/origin
func podReadyTime(pod *api.Pod) unversioned.Time {
	if api.IsPodReady(pod) {
		for _, c := range pod.Status.Conditions {
			// we only care about pod ready conditions
			if c.Type == api.PodReady && c.Status == api.ConditionTrue {
				return c.LastTransitionTime
			}
		}
	}
	return unversioned.Time{}
}
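
Every call site on this page goes through api.IsPodReady. Based on the same Status.Conditions walk used by podReadyTime above, a check of that shape can be sketched as follows; this is a simplified illustration, not the verbatim upstream helper:

// isPodReadySketch reports whether the pod carries a PodReady condition with
// status ConditionTrue. It mirrors the condition loop in podReadyTime above.
func isPodReadySketch(pod *api.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == api.PodReady {
			return c.Status == api.ConditionTrue
		}
	}
	return false
}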
Code example #11
File: deployment.go Project: yghannam/kubernetes
// Returns the number of available pods corresponding to the given RCs.
func GetAvailablePodsForRCs(c client.Interface, rcs []*api.ReplicationController) (int, error) {
	// TODO: Use MinReadySeconds once https://github.com/kubernetes/kubernetes/pull/12894 is merged.
	allPods, err := getPodsForRCs(c, rcs)
	if err != nil {
		return 0, err
	}
	readyPodCount := 0
	for _, pod := range allPods {
		if api.IsPodReady(&pod) {
			readyPodCount++
		}
	}
	return readyPodCount, nil
}
Code example #12
func (d *DeploymentController) scaleDown(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
	oldPodsCount := d.getReplicaCountForRCs(oldRCs)
	if oldPodsCount == 0 {
		// Can't scale down further
		return false, nil
	}
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable)
	if err != nil {
		return false, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, deployment.Spec.Replicas)
	}
	// Check if we can scale down.
	minAvailable := deployment.Spec.Replicas - maxUnavailable
	// Find the number of ready pods.
	// TODO: Use MinReadySeconds once https://github.com/kubernetes/kubernetes/pull/12894 is merged.
	readyPodCount := 0
	allPods, err := d.getPodsForRCs(allRCs)
	if err != nil {
		return false, err
	}
	for _, pod := range allPods {
		if api.IsPodReady(&pod) {
			readyPodCount++
		}
	}

	if readyPodCount <= minAvailable {
		// Cannot scale down.
		return false, nil
	}
	totalScaleDownCount := readyPodCount - minAvailable
	for _, targetRC := range oldRCs {
		if totalScaleDownCount == 0 {
			// No further scaling required.
			break
		}
		if targetRC.Spec.Replicas == 0 {
			// cannot scale down this RC.
			continue
		}
		// Scale down.
		scaleDownCount := int(math.Min(float64(targetRC.Spec.Replicas), float64(totalScaleDownCount)))
		_, err = d.scaleRC(targetRC, targetRC.Spec.Replicas-scaleDownCount)
		if err != nil {
			return false, err
		}
		totalScaleDownCount -= scaleDownCount
	}
	return true, err
}
Code example #13
File: pet.go Project: ncdc/kubernetes
// isHealthy returns true if the pod is running and has the
// "pod.alpha.kubernetes.io/initialized" set to "true".
func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
	if pod == nil || pod.Status.Phase != api.PodRunning {
		return false
	}
	initialized, ok := pod.Annotations[PetSetInitAnnotation]
	if !ok {
		glog.Infof("PetSet pod %v in %v, waiting on annotation %v", api.PodRunning, pod.Name, PetSetInitAnnotation)
		return false
	}
	b, err := strconv.ParseBool(initialized)
	if err != nil {
		return false
	}
	return b && api.IsPodReady(pod)
}
Code example #14
File: conditions.go Project: jojimt/contrib
// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not
// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or
// an error in any other case.
func PodRunningAndReady(event watch.Event) (bool, error) {
	switch event.Type {
	case watch.Deleted:
		return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "")
	}
	switch t := event.Object.(type) {
	case *api.Pod:
		switch t.Status.Phase {
		case api.PodFailed, api.PodSucceeded:
			return false, ErrPodCompleted
		case api.PodRunning:
			return api.IsPodReady(t), nil
		}
	}
	return false, nil
}
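
A condition function with this signature is meant to be passed to watch.Until, the same pattern that appears in code example #29 below. A minimal sketch of that wiring, assuming a watcher already scoped to the pod of interest; the wrapper name is an illustrative assumption:

// waitForPodRunningAndReady is a hypothetical wrapper: it blocks until the
// watched pod is Running and Ready, or returns ErrPodCompleted / a timeout
// error surfaced through the condition above.
func waitForPodRunningAndReady(timeout time.Duration, watcher watch.Interface) (*api.Pod, error) {
	event, err := watch.Until(timeout, watcher, PodRunningAndReady)
	if err != nil {
		return nil, err
	}
	return event.Object.(*api.Pod), nil
}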
Code example #15
File: tunnel.go Project: kubernetes/helm
func getFirstRunningPod(client internalversion.PodsGetter, namespace string, selector labels.Selector) (*api.Pod, error) {
	options := api.ListOptions{LabelSelector: selector}
	pods, err := client.Pods(namespace).List(options)
	if err != nil {
		return nil, err
	}
	if len(pods.Items) < 1 {
		return nil, fmt.Errorf("could not find tiller")
	}
	for _, p := range pods.Items {
		if api.IsPodReady(&p) {
			return &p, nil
		}
	}
	return nil, fmt.Errorf("could not find a ready tiller pod")
}
Code example #16
File: lifecycle.go Project: RomainVabre/origin
// Accept implements UpdateAcceptor.
func (c *AcceptNewlyObservedReadyPods) Accept(deployment *kapi.ReplicationController) error {
	// Make a pod store to poll and ensure it gets cleaned up.
	podStore, stopStore := c.getDeploymentPodStore(deployment)
	defer close(stopStore)

	// Start checking for pod updates.
	if c.acceptedPods.Len() > 0 {
		fmt.Fprintf(c.out, "--> Waiting up to %s for pods in deployment %s to become ready (%d pods previously accepted)\n", c.timeout, deployment.Name, c.acceptedPods.Len())
	} else {
		fmt.Fprintf(c.out, "--> Waiting up to %s for pods in deployment %s to become ready\n", c.timeout, deployment.Name)
	}
	err := wait.Poll(c.interval, c.timeout, func() (done bool, err error) {
		// Check for pod readiness.
		unready := sets.NewString()
		for _, obj := range podStore.List() {
			pod := obj.(*kapi.Pod)
			// Skip previously accepted pods; we only want to verify newly observed
			// and unaccepted pods.
			if c.acceptedPods.Has(pod.Name) {
				continue
			}
			if kapi.IsPodReady(pod) {
				// If the pod is ready, track it as accepted.
				c.acceptedPods.Insert(pod.Name)
			} else {
				// Otherwise, track it as unready.
				unready.Insert(pod.Name)
			}
		}
		// Check to see if we're done.
		if unready.Len() == 0 {
			return true, nil
		}
		// Otherwise, try again later.
		glog.V(4).Infof("Still waiting for %d pods to become ready for deployment %s", unready.Len(), deployment.Name)
		return false, nil
	})

	// Handle acceptance failure.
	if err != nil {
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("pods for deployment %q took longer than %.f seconds to become ready", deployment.Name, c.timeout.Seconds())
		}
		return fmt.Errorf("pod readiness check failed for deployment %q: %v", deployment.Name, err)
	}
	return nil
}
Code example #17
func desiredPodsAreReady(c unversioned.Interface, rc *api.ReplicationController, settleDuration time.Duration) wait.ConditionFunc {
	return func() (done bool, err error) {
		selector := labels.Set(rc.Spec.Selector).AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		pods, err := c.Pods(rc.Namespace).List(options)
		if err != nil {
			return false, err
		}

		var ready = 0
		var nonReady = 0

		for _, pod := range pods.Items {
			if !api.IsPodReady(&pod) {
				nonReady++
				continue
			}
			if len(pod.Status.ContainerStatuses) == 0 {
				continue
			}

			now := time.Now()
			var readyContainers = 0
			for _, c := range pod.Status.ContainerStatuses {
				if !c.Ready {
					continue
				}
				if c.State.Running == nil {
					continue
				}
				if c.State.Running.StartedAt.After(now.Add(-settleDuration)) {
					continue
				}
				readyContainers++
			}
			if readyContainers != len(pod.Spec.Containers) {
				continue
			}

			ready++
		}

		return ready == rc.Spec.Replicas && nonReady == 0, nil
	}
}
Code example #18
// isHealthy returns true if the pod is ready & running. If the pod has the
// "pod.alpha.kubernetes.io/initialized" annotation set to "false", pod state is ignored.
func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
	if pod == nil || pod.Status.Phase != api.PodRunning {
		return false
	}
	podReady := api.IsPodReady(pod)

	// User may have specified a pod readiness override through a debug annotation.
	initialized, ok := pod.Annotations[StatefulSetInitAnnotation]
	if ok {
		if initAnnotation, err := strconv.ParseBool(initialized); err != nil {
			glog.V(4).Infof("Failed to parse %v annotation on pod %v: %v", StatefulSetInitAnnotation, pod.Name, err)
		} else if !initAnnotation {
			glog.V(4).Infof("StatefulSet pod %v waiting on annotation %v", pod.Name, StatefulSetInitAnnotation)
			podReady = initAnnotation
		}
	}
	return podReady
}
Code example #19
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) error {
	glog.V(4).Infof("Updating daemon set status")
	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
	if err != nil {
		return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
	}

	nodeList, err := dsc.nodeStore.List()
	if err != nil {
		return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
	}

	var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
	for _, node := range nodeList.Items {
		shouldRun := dsc.nodeShouldRunDaemonPod(&node, ds)

		scheduled := len(nodeToDaemonPods[node.Name]) > 0

		if shouldRun {
			desiredNumberScheduled++
			if scheduled {
				currentNumberScheduled++
				// Sort the daemon pods by creation time, so the oldest is first.
				daemonPods, _ := nodeToDaemonPods[node.Name]
				sort.Sort(podByCreationTimestamp(daemonPods))
				if api.IsPodReady(daemonPods[0]) {
					numberReady++
				}
			}
		} else {
			if scheduled {
				numberMisscheduled++
			}
		}
	}

	err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady)
	if err != nil {
		return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
	}

	return nil
}
Code example #20
func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, manageReplicasErr error) api.ReplicationControllerStatus {
	newStatus := rc.Status
	// Count the number of pods that have labels matching the labels of the pod
	// template of the replication controller; the matching pods may have more
	// labels than are in the template. Because the labels of podTemplateSpec are
	// a superset of the selector of the replication controller, the matching
	// pods must be a subset of filteredPods.
	fullyLabeledReplicasCount := 0
	readyReplicasCount := 0
	availableReplicasCount := 0
	templateLabel := labels.Set(rc.Spec.Template.Labels).AsSelectorPreValidated()
	for _, pod := range filteredPods {
		if templateLabel.Matches(labels.Set(pod.Labels)) {
			fullyLabeledReplicasCount++
		}
		if api.IsPodReady(pod) {
			readyReplicasCount++
			if api.IsPodAvailable(pod, rc.Spec.MinReadySeconds, unversioned.Now()) {
				availableReplicasCount++
			}
		}
	}

	failureCond := GetCondition(rc.Status, api.ReplicationControllerReplicaFailure)
	if manageReplicasErr != nil && failureCond == nil {
		var reason string
		if diff := len(filteredPods) - int(rc.Spec.Replicas); diff < 0 {
			reason = "FailedCreate"
		} else if diff > 0 {
			reason = "FailedDelete"
		}
		cond := NewReplicationControllerCondition(api.ReplicationControllerReplicaFailure, api.ConditionTrue, reason, manageReplicasErr.Error())
		SetCondition(&newStatus, cond)
	} else if manageReplicasErr == nil && failureCond != nil {
		RemoveCondition(&newStatus, api.ReplicationControllerReplicaFailure)
	}

	newStatus.Replicas = int32(len(filteredPods))
	newStatus.FullyLabeledReplicas = int32(fullyLabeledReplicasCount)
	newStatus.ReadyReplicas = int32(readyReplicasCount)
	newStatus.AvailableReplicas = int32(availableReplicasCount)
	return newStatus
}
Code example #21
func countHealthyPods(pods []*api.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
Pod:
	for _, pod := range pods {
		// Pod is being deleted.
		if pod.DeletionTimestamp != nil {
			continue
		}
		// Pod is expected to be deleted soon.
		if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
			continue
		}
		if api.IsPodReady(pod) {
			currentHealthy++
			continue Pod
		}
	}

	return
}
Code example #22
File: deployment.go Project: tacy/dashboard
func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int {
	readyPodCount := 0
	for _, pod := range pods {
		if api.IsPodReady(&pod) {
			// Check if we've passed minReadySeconds since LastTransitionTime
			// If so, this pod is ready
			for _, c := range pod.Status.Conditions {
				// we only care about pod ready conditions
				if c.Type == api.PodReady {
					// Two cases in which this ready condition counts (i.e. minReadySeconds has passed and the pod is ready):
					// 1. minReadySeconds <= 0
					// 2. LastTransitionTime (is set) + minReadySeconds (>0) < current time
					minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
					if minReadySeconds <= 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) {
						readyPodCount++
						break
					}
				}
			}
		}
	}
	return readyPodCount
}
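
For context, here is a hedged sketch of how a caller might turn this count into an availability check; the wrapper name and threshold semantics are assumptions for illustration, not code from the dashboard project:

// enoughSettledReplicas is a hypothetical wrapper: a pod only counts once it
// has been Ready for at least minReadySeconds, so a rollout is treated as
// settled when that count reaches the desired replica count.
func enoughSettledReplicas(pods []api.Pod, desired int32, minReadySeconds int) bool {
	return getReadyPodsCount(pods, minReadySeconds) >= int(desired)
}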
Code example #23
File: container_probe.go Project: Q-Lee/kubernetes
		if readyTime.Sub(startedTime) < initialDelay {
			framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
		}

		restartCount := getRestartCount(p)
		Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
	})

	It("with readiness probe that fails should never be ready and never restart [Conformance]", func() {
		p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
		Consistently(func() (bool, error) {
			p, err := podClient.Get(p.Name)
			if err != nil {
				return false, err
			}
			return api.IsPodReady(p), nil
		}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")

		p, err := podClient.Get(p.Name)
		framework.ExpectNoError(err)

		isReady, err := testutils.PodRunningReady(p)
		Expect(isReady).NotTo(BeTrue(), "pod should be not ready")

		restartCount := getRestartCount(p)
		Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
	})

	It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
		runLivenessTest(f, &api.Pod{
			ObjectMeta: api.ObjectMeta{
Code example #24
// syncReplicationController will sync the rc with the given key if it has had its expectations fulfilled, meaning
// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
// concurrently with the same key.
func (rm *ReplicationManager) syncReplicationController(key string) error {
	trace := util.NewTrace("syncReplicationController: " + key)
	defer trace.LogIfLong(250 * time.Millisecond)

	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Now().Sub(startTime))
	}()

	if !rm.podStoreSynced() {
		// Sleep so we give the pod reflector goroutine a chance to run.
		time.Sleep(PodStoreSyncedPollPeriod)
		glog.Infof("Waiting for pods controller to sync, requeuing rc %v", key)
		rm.queue.Add(key)
		return nil
	}

	obj, exists, err := rm.rcStore.Indexer.GetByKey(key)
	if !exists {
		glog.Infof("Replication Controller has been deleted %v", key)
		rm.expectations.DeleteExpectations(key)
		return nil
	}
	if err != nil {
		return err
	}
	rc := *obj.(*api.ReplicationController)

	// Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in
	// and update the expectations after we've retrieved active pods from the store. If a new pod enters
	// the store after we've checked the expectation, the rc sync is just deferred till the next relist.
	rcKey, err := controller.KeyFunc(&rc)
	if err != nil {
		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
		return err
	}
	trace.Step("ReplicationController restored")
	rcNeedsSync := rm.expectations.SatisfiedExpectations(rcKey)
	trace.Step("Expectations restored")

	// NOTE: filteredPods are pointing to objects from cache - if you need to
	// modify them, you need to copy it first.
	// TODO: Do the List and Filter in a single pass, or use an index.
	var filteredPods []*api.Pod
	if rm.garbageCollectorEnabled {
		// list all pods to include the pods that don't match the rc's selector
		// anymore but have the stale controller ref.
		pods, err := rm.podStore.Pods(rc.Namespace).List(labels.Everything())
		if err != nil {
			glog.Errorf("Error getting pods for rc %q: %v", key, err)
			rm.queue.Add(key)
			return err
		}
		cm := controller.NewPodControllerRefManager(rm.podControl, rc.ObjectMeta, labels.Set(rc.Spec.Selector).AsSelectorPreValidated(), getRCKind())
		matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(pods)
		for _, pod := range matchesNeedsController {
			err := cm.AdoptPod(pod)
			// continue to next pod if adoption fails.
			if err != nil {
				// If the pod no longer exists, don't even log the error.
				if !errors.IsNotFound(err) {
					utilruntime.HandleError(err)
				}
			} else {
				matchesAndControlled = append(matchesAndControlled, pod)
			}
		}
		filteredPods = matchesAndControlled
		// remove the controllerRef for the pods that no longer have matching labels
		var errlist []error
		for _, pod := range controlledDoesNotMatch {
			err := cm.ReleasePod(pod)
			if err != nil {
				errlist = append(errlist, err)
			}
		}
		if len(errlist) != 0 {
			aggregate := utilerrors.NewAggregate(errlist)
			// push the RC into work queue again. We need to try to free the
			// pods again, otherwise they will be stuck with the stale
			// controllerRef.
			rm.queue.Add(key)
			return aggregate
		}
	} else {
		pods, err := rm.podStore.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelectorPreValidated())
		if err != nil {
			glog.Errorf("Error getting pods for rc %q: %v", key, err)
			rm.queue.Add(key)
			return err
		}
		filteredPods = controller.FilterActivePods(pods)
	}

	var manageReplicasErr error
	if rcNeedsSync && rc.DeletionTimestamp == nil {
		manageReplicasErr = rm.manageReplicas(filteredPods, &rc)
	}
	trace.Step("manageReplicas done")

	// Count the number of pods that have labels matching the labels of the pod
	// template of the replication controller; the matching pods may have more
	// labels than are in the template. Because the labels of podTemplateSpec are
	// a superset of the selector of the replication controller, the matching
	// pods must be a subset of filteredPods.
	fullyLabeledReplicasCount := 0
	readyReplicasCount := 0
	availableReplicasCount := 0
	templateLabel := labels.Set(rc.Spec.Template.Labels).AsSelectorPreValidated()
	for _, pod := range filteredPods {
		if templateLabel.Matches(labels.Set(pod.Labels)) {
			fullyLabeledReplicasCount++
		}
		if api.IsPodReady(pod) {
			readyReplicasCount++
			if api.IsPodAvailable(pod, rc.Spec.MinReadySeconds, unversioned.Now()) {
				availableReplicasCount++
			}
		}
	}

	// Always updates status as pods come up or die.
	if err := updateReplicaCount(
		rm.kubeClient.Core().ReplicationControllers(rc.Namespace),
		rc,
		len(filteredPods),
		fullyLabeledReplicasCount,
		readyReplicasCount,
		availableReplicasCount,
	); err != nil {
		// Multiple things could lead to this update failing.  Returning an error causes a requeue without forcing a hotloop
		return err
	}

	return manageReplicasErr
}
Code example #25
func (e *EndpointController) syncService(key string) {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
	}()

	if !e.podStoreSynced() {
		// Sleep so we give the pod reflector goroutine a chance to run.
		time.Sleep(PodStoreSyncedPollPeriod)
		glog.Infof("Waiting for pods controller to sync, requeuing service %v", key)
		e.queue.Add(key)
		return
	}

	obj, exists, err := e.serviceStore.Store.GetByKey(key)
	if err != nil || !exists {
		// Delete the corresponding endpoint, as the service has been deleted.
		// TODO: Please note that this will delete an endpoint when a
		// service is deleted. However, if we're down at the time when
		// the service is deleted, we will miss that deletion, so this
		// doesn't completely solve the problem. See #6877.
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err)
			// Don't retry, as the key isn't going to magically become understandable.
			return
		}
		err = e.client.Endpoints(namespace).Delete(name, nil)
		if err != nil && !errors.IsNotFound(err) {
			glog.Errorf("Error deleting endpoint %q: %v", key, err)
			e.queue.Add(key) // Retry
		}
		return
	}

	service := obj.(*api.Service)
	if service.Spec.Selector == nil {
		// services without a selector receive no endpoints from this controller;
		// these services will receive the endpoints that are created out-of-band via the REST API.
		return
	}

	glog.V(5).Infof("About to update endpoints for service %q", key)
	pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
	if err != nil {
		// Since we're getting stuff from a local cache, it is
		// basically impossible to get this error.
		glog.Errorf("Error syncing service %q: %v", key, err)
		e.queue.Add(key) // Retry
		return
	}

	subsets := []api.EndpointSubset{}
	podHostNames := map[string]endpoints.HostRecord{}

	var tolerateUnreadyEndpoints bool
	if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
		b, err := strconv.ParseBool(v)
		if err == nil {
			tolerateUnreadyEndpoints = b
		} else {
			glog.Errorf("Failed to parse annotation %v: %v", TolerateUnreadyEndpointsAnnotation, err)
		}
	}

	for i := range pods.Items {
		pod := &pods.Items[i]

		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]

			portName := servicePort.Name
			portProto := servicePort.Protocol
			portNum, err := podutil.FindPort(pod, servicePort)
			if err != nil {
				glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
				continue
			}
			if len(pod.Status.PodIP) == 0 {
				glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
				continue
			}
			if pod.DeletionTimestamp != nil {
				glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
				continue
			}

			epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
			epa := api.EndpointAddress{
				IP: pod.Status.PodIP,
				TargetRef: &api.ObjectReference{
					Kind:            "Pod",
					Namespace:       pod.ObjectMeta.Namespace,
					Name:            pod.ObjectMeta.Name,
					UID:             pod.ObjectMeta.UID,
					ResourceVersion: pod.ObjectMeta.ResourceVersion,
				}}

			hostname := getHostname(pod)
			if len(hostname) > 0 &&
				getSubdomain(pod) == service.Name &&
				service.Namespace == pod.Namespace {
				hostRecord := endpoints.HostRecord{
					HostName: hostname,
				}
				// TODO: stop populating podHostNames annotation in 1.4
				podHostNames[string(pod.Status.PodIP)] = hostRecord
				epa.Hostname = hostname
			}

			if tolerateUnreadyEndpoints || api.IsPodReady(pod) {
				subsets = append(subsets, api.EndpointSubset{
					Addresses: []api.EndpointAddress{epa},
					Ports:     []api.EndpointPort{epp},
				})
			} else {
				glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
				subsets = append(subsets, api.EndpointSubset{
					NotReadyAddresses: []api.EndpointAddress{epa},
					Ports:             []api.EndpointPort{epp},
				})
			}
		}
	}
	subsets = endpoints.RepackSubsets(subsets)

	// See if there's actually an update here.
	currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			currentEndpoints = &api.Endpoints{
				ObjectMeta: api.ObjectMeta{
					Name:   service.Name,
					Labels: service.Labels,
				},
			}
		} else {
			glog.Errorf("Error getting endpoints: %v", err)
			e.queue.Add(key) // Retry
			return
		}
	}

	serializedPodHostNames := ""
	if len(podHostNames) > 0 {
		b, err := json.Marshal(podHostNames)
		if err != nil {
			glog.Errorf("Error updating endpoints. Marshalling of hostnames failed.: %v", err)
			e.queue.Add(key) // Retry
			return
		}
		serializedPodHostNames = string(b)
	}

	newAnnotations := make(map[string]string)
	newAnnotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames
	if reflect.DeepEqual(currentEndpoints.Subsets, subsets) &&
		reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
		glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
		return
	}
	newEndpoints := currentEndpoints
	newEndpoints.Subsets = subsets
	newEndpoints.Labels = service.Labels
	if newEndpoints.Annotations == nil {
		newEndpoints.Annotations = make(map[string]string)
	}
	if len(serializedPodHostNames) == 0 {
		delete(newEndpoints.Annotations, endpoints.PodHostnamesAnnotation)
	} else {
		newEndpoints.Annotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames
	}
	if len(currentEndpoints.ResourceVersion) == 0 {
		// No previous endpoints, create them
		_, err = e.client.Endpoints(service.Namespace).Create(newEndpoints)
	} else {
		// Pre-existing
		_, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)
	}
	if err != nil {
		glog.Errorf("Error updating endpoints: %v", err)
		e.queue.Add(key) // Retry
	}
}
Code example #26
func (e *EndpointController) syncService(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
	}()

	obj, exists, err := e.serviceStore.Indexer.GetByKey(key)
	if err != nil || !exists {
		// Delete the corresponding endpoint, as the service has been deleted.
		// TODO: Please note that this will delete an endpoint when a
		// service is deleted. However, if we're down at the time when
		// the service is deleted, we will miss that deletion, so this
		// doesn't completely solve the problem. See #6877.
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err))
			// Don't retry, as the key isn't going to magically become understandable.
			return nil
		}
		err = e.client.Core().Endpoints(namespace).Delete(name, nil)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		return nil
	}

	service := obj.(*api.Service)
	if service.Spec.Selector == nil {
		// services without a selector receive no endpoints from this controller;
		// these services will receive the endpoints that are created out-of-band via the REST API.
		return nil
	}

	glog.V(5).Infof("About to update endpoints for service %q", key)
	pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
	if err != nil {
		// Since we're getting stuff from a local cache, it is
		// basically impossible to get this error.
		return err
	}

	subsets := []api.EndpointSubset{}
	podHostNames := map[string]endpoints.HostRecord{}

	var tolerateUnreadyEndpoints bool
	if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
		b, err := strconv.ParseBool(v)
		if err == nil {
			tolerateUnreadyEndpoints = b
		} else {
			utilruntime.HandleError(fmt.Errorf("Failed to parse annotation %v: %v", TolerateUnreadyEndpointsAnnotation, err))
		}
	}

	readyEps := 0
	notReadyEps := 0
	for i := range pods {
		// TODO: Do we need to copy here?
		pod := &(*pods[i])

		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]

			portName := servicePort.Name
			portProto := servicePort.Protocol
			portNum, err := podutil.FindPort(pod, servicePort)
			if err != nil {
				glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
				continue
			}
			if len(pod.Status.PodIP) == 0 {
				glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
				continue
			}
			if pod.DeletionTimestamp != nil {
				glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
				continue
			}

			epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
			epa := api.EndpointAddress{
				IP:       pod.Status.PodIP,
				NodeName: &pod.Spec.NodeName,
				TargetRef: &api.ObjectReference{
					Kind:            "Pod",
					Namespace:       pod.ObjectMeta.Namespace,
					Name:            pod.ObjectMeta.Name,
					UID:             pod.ObjectMeta.UID,
					ResourceVersion: pod.ObjectMeta.ResourceVersion,
				}}

			hostname := getHostname(pod)
			if len(hostname) > 0 &&
				getSubdomain(pod) == service.Name &&
				service.Namespace == pod.Namespace {
				hostRecord := endpoints.HostRecord{
					HostName: hostname,
				}
				// TODO: stop populating podHostNames annotation in 1.4
				podHostNames[string(pod.Status.PodIP)] = hostRecord
				epa.Hostname = hostname
			}

			if tolerateUnreadyEndpoints || api.IsPodReady(pod) {
				subsets = append(subsets, api.EndpointSubset{
					Addresses: []api.EndpointAddress{epa},
					Ports:     []api.EndpointPort{epp},
				})
				readyEps++
			} else {
				glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
				subsets = append(subsets, api.EndpointSubset{
					NotReadyAddresses: []api.EndpointAddress{epa},
					Ports:             []api.EndpointPort{epp},
				})
				notReadyEps++
			}
		}
	}
	subsets = endpoints.RepackSubsets(subsets)

	// See if there's actually an update here.
	currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			currentEndpoints = &api.Endpoints{
				ObjectMeta: api.ObjectMeta{
					Name:   service.Name,
					Labels: service.Labels,
				},
			}
		} else {
			return err
		}
	}

	serializedPodHostNames := ""
	if len(podHostNames) > 0 {
		b, err := json.Marshal(podHostNames)
		if err != nil {
			return err
		}
		serializedPodHostNames = string(b)
	}

	newAnnotations := make(map[string]string)
	newAnnotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames
	if reflect.DeepEqual(currentEndpoints.Subsets, subsets) &&
		reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
		glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
		return nil
	}
	newEndpoints := currentEndpoints
	newEndpoints.Subsets = subsets
	newEndpoints.Labels = service.Labels
	if newEndpoints.Annotations == nil {
		newEndpoints.Annotations = make(map[string]string)
	}
	if len(serializedPodHostNames) == 0 {
		delete(newEndpoints.Annotations, endpoints.PodHostnamesAnnotation)
	} else {
		newEndpoints.Annotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames
	}

	glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, readyEps, notReadyEps)
	createEndpoints := len(currentEndpoints.ResourceVersion) == 0
	if createEndpoints {
		// No previous endpoints, create them
		_, err = e.client.Core().Endpoints(service.Namespace).Create(newEndpoints)
	} else {
		// Pre-existing
		_, err = e.client.Core().Endpoints(service.Namespace).Update(newEndpoints)
	}
	if err != nil {
		if createEndpoints && errors.IsForbidden(err) {
			// A request is forbidden primarily for two reasons:
			// 1. namespace is terminating, endpoint creation is not allowed by default.
			// 2. policy is misconfigured, in which case no service would function anywhere.
			// Given the frequency of 1, we log at a lower level.
			glog.V(5).Infof("Forbidden from creating endpoints: %v", err)
		}
		return err
	}
	return nil
}
Code example #27
func (e *EndpointController) syncService(key string) {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
	}()
	obj, exists, err := e.serviceStore.Store.GetByKey(key)
	if err != nil || !exists {
		// Delete the corresponding endpoint, as the service has been deleted.
		// TODO: Please note that this will delete an endpoint when a
		// service is deleted. However, if we're down at the time when
		// the service is deleted, we will miss that deletion, so this
		// doesn't completely solve the problem. See #6877.
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err)
			// Don't retry, as the key isn't going to magically become understandable.
			return
		}
		err = e.client.Endpoints(namespace).Delete(name)
		if err != nil && !errors.IsNotFound(err) {
			glog.Errorf("Error deleting endpoint %q: %v", key, err)
			e.queue.Add(key) // Retry
		}
		return
	}

	service := obj.(*api.Service)
	if service.Spec.Selector == nil {
		// services without a selector receive no endpoints from this controller;
		// these services will receive the endpoints that are created out-of-band via the REST API.
		return
	}

	glog.V(5).Infof("About to update endpoints for service %q", key)
	pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
	if err != nil {
		// Since we're getting stuff from a local cache, it is
		// basically impossible to get this error.
		glog.Errorf("Error syncing service %q: %v", key, err)
		e.queue.Add(key) // Retry
		return
	}

	subsets := []api.EndpointSubset{}
	for i := range pods.Items {
		pod := &pods.Items[i]

		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]

			portName := servicePort.Name
			portProto := servicePort.Protocol
			portNum, err := findPort(pod, servicePort)
			if err != nil {
				glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
				continue
			}
			if len(pod.Status.PodIP) == 0 {
				glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
				continue
			}
			if pod.DeletionTimestamp != nil {
				glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
				continue
			}

			epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto}
			epa := api.EndpointAddress{IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{
				Kind:            "Pod",
				Namespace:       pod.ObjectMeta.Namespace,
				Name:            pod.ObjectMeta.Name,
				UID:             pod.ObjectMeta.UID,
				ResourceVersion: pod.ObjectMeta.ResourceVersion,
			}}
			if api.IsPodReady(pod) {
				subsets = append(subsets, api.EndpointSubset{
					Addresses: []api.EndpointAddress{epa},
					Ports:     []api.EndpointPort{epp},
				})
			} else {
				glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
				subsets = append(subsets, api.EndpointSubset{
					NotReadyAddresses: []api.EndpointAddress{epa},
					Ports:             []api.EndpointPort{epp},
				})
			}
		}
	}
	subsets = endpoints.RepackSubsets(subsets)

	// See if there's actually an update here.
	currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			currentEndpoints = &api.Endpoints{
				ObjectMeta: api.ObjectMeta{
					Name:   service.Name,
					Labels: service.Labels,
				},
			}
		} else {
			glog.Errorf("Error getting endpoints: %v", err)
			e.queue.Add(key) // Retry
			return
		}
	}
	if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
		glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
		return
	}
	newEndpoints := currentEndpoints
	newEndpoints.Subsets = subsets
	newEndpoints.Labels = service.Labels

	if len(currentEndpoints.ResourceVersion) == 0 {
		// No previous endpoints, create them
		_, err = e.client.Endpoints(service.Namespace).Create(newEndpoints)
	} else {
		// Pre-existing
		_, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)
	}
	if err != nil {
		glog.Errorf("Error updating endpoints: %v", err)
		e.queue.Add(key) // Retry
	}
}
Code example #28
		podClient = framework.Client.Pods(framework.Namespace.Name)
	})

	AfterEach(framework.afterEach)

	It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
		p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
		expectNoError(err)
		startTime := time.Now()

		Expect(wait.Poll(poll, 90*time.Second, func() (bool, error) {
			p, err := podClient.Get(p.Name)
			if err != nil {
				return false, err
			}
			ready := api.IsPodReady(p)
			if !ready {
				Logf("pod is not yet ready; pod has phase %q.", p.Status.Phase)
				return false, nil
			}
			return true, nil
		})).NotTo(HaveOccurred(), "pod never became ready")

		if time.Since(startTime) < 30*time.Second {
			Failf("Pod became ready before it's initial delay")
		}

		p, err = podClient.Get(p.Name)
		expectNoError(err)

		isReady, err := podRunningReady(p)
Code example #29
File: petset.go Project: Q-Lee/kubernetes
				api.ObjectMeta{
					Name:            expectedPod.Name,
					ResourceVersion: expectedPod.ResourceVersion,
				},
			))
			Expect(err).NotTo(HaveOccurred())

			By("Verifying the 2nd pod is removed only when it becomes running and ready")
			pst.restoreProbe(ps, testProbe)
			_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
				pod := event.Object.(*api.Pod)
				if event.Type == watch.Deleted && pod.Name == expectedPodName {
					return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name)
				}
				framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
					event.Type, pod.Name, pod.Status.Phase, api.IsPodReady(pod))
				if pod.Name != expectedPodName {
					return false, nil
				}
				if pod.Status.Phase == api.PodRunning && api.IsPodReady(pod) {
					return true, nil
				}
				return false, nil
			})
			Expect(err).NotTo(HaveOccurred())
		})

		It("Scaling should happen in predictable order and halt if any pet is unhealthy", func() {
			psLabels := klabels.Set(labels)
			By("Initializing watcher for selector " + psLabels.String())
			watcher, err := f.ClientSet.Core().Pods(ns).Watch(api.ListOptions{
Code example #30
File: deployments.go Project: ncdc/origin
			config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
			o.Expect(err).NotTo(o.HaveOccurred())

			selector := labels.Set(config.Spec.Selector).AsSelector()
			opts := kapi.ListOptions{LabelSelector: selector}
			ready := 0
			if err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
				pods, err := oc.KubeREST().Pods(oc.Namespace()).List(opts)
				if err != nil {
					return false, nil
				}

				ready = 0
				for i := range pods.Items {
					pod := pods.Items[i]
					if kapi.IsPodReady(&pod) {
						ready++
					}
				}

				return len(pods.Items) == ready, nil
			}); err != nil {
				o.Expect(fmt.Errorf("deployment config %q never became ready (ready: %d, desired: %d)",
					config.Name, ready, config.Spec.Replicas)).NotTo(o.HaveOccurred())
			}

			g.By("verifying that the deployment is still running")
			latestName := deployutil.DeploymentNameForConfigVersion(name, config.Status.LatestVersion)
			latest, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get(latestName)
			o.Expect(err).NotTo(o.HaveOccurred())
			if deployutil.IsTerminatedDeployment(latest) {