Example #1
// WaitForRunningDeployerPod waits up to the given timeout until the deployer pod
// for the given replication controller is running, has succeeded, or has failed,
// i.e. until its logs can be retrieved.
func WaitForRunningDeployerPod(podClient kcoreclient.PodsGetter, rc *api.ReplicationController, timeout time.Duration) error {
	podName := DeployerPodNameForDeployment(rc.Name)
	canGetLogs := func(p *api.Pod) bool {
		return api.PodSucceeded == p.Status.Phase || api.PodFailed == p.Status.Phase || api.PodRunning == p.Status.Phase
	}
	pod, err := podClient.Pods(rc.Namespace).Get(podName)
	if err == nil && canGetLogs(pod) {
		return nil
	}
	watcher, err := podClient.Pods(rc.Namespace).Watch(
		api.ListOptions{
			FieldSelector: fields.Set{"metadata.name": podName}.AsSelector(),
		},
	)
	if err != nil {
		return err
	}

	defer watcher.Stop()
	if _, err := watch.Until(timeout, watcher, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for pod: %v", e.Object)
		}
		obj, isPod := e.Object.(*api.Pod)
		if !isPod {
			return false, errors.New("received unknown object while watching for pods")
		}
		return canGetLogs(obj), nil
	}); err != nil {
		return err
	}
	return nil
}
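
A minimal call-site sketch (hypothetical names throughout, assuming a client whose Core() method satisfies kcoreclient.PodsGetter and an already fetched ReplicationController):

// Hypothetical caller: give the deployer pod 30 seconds to reach a phase
// from which its logs can be streamed before giving up.
if err := WaitForRunningDeployerPod(kubeClient.Core(), rc, 30*time.Second); err != nil {
	return fmt.Errorf("deployer pod for %q never became loggable: %v", rc.Name, err)
}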
Example #2
// NewAcceptNewlyObservedReadyPods makes a new AcceptNewlyObservedReadyPods
// from a real client.
func NewAcceptNewlyObservedReadyPods(
	out io.Writer,
	kclient kcoreclient.PodsGetter,
	timeout time.Duration,
	interval time.Duration,
	minReadySeconds int32,
) *AcceptNewlyObservedReadyPods {

	return &AcceptNewlyObservedReadyPods{
		out:             out,
		timeout:         timeout,
		interval:        interval,
		minReadySeconds: minReadySeconds,
		acceptedPods:    sets.NewString(),
		getDeploymentPodStore: func(deployment *kapi.ReplicationController) (cache.Store, chan struct{}) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &cache.ListWatch{
				ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
					options.LabelSelector = selector
					return kclient.Pods(deployment.Namespace).List(options)
				},
				WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
					options.LabelSelector = selector
					return kclient.Pods(deployment.Namespace).Watch(options)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
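
A wiring sketch for the constructor; the timeout, polling interval, and minReadySeconds values are illustrative, and kubeClient.Core() is assumed to satisfy kcoreclient.PodsGetter:

// Hypothetical wiring: accept pods once they have been ready for at least
// 5 seconds, polling every second and giving up after 2 minutes.
acceptor := NewAcceptNewlyObservedReadyPods(os.Stdout, kubeClient.Core(), 2*time.Minute, time.Second, 5)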
Example #3
// waitForPod watches the given pod until exitCondition returns true. Every two
// seconds the tick function is called, e.g. for progress output.
func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition watch.ConditionFunc, tick func(*api.Pod)) (*api.Pod, error) {
	w, err := podClient.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
	if err != nil {
		return nil, err
	}

	pods := make(chan *api.Pod) // observed pods passed to the exitCondition
	defer close(pods)

	// wait for the first event, then start the 2 sec ticker and loop
	go func() {
		pod := <-pods
		if pod == nil {
			return
		}
		tick(pod)

		t := time.NewTicker(2 * time.Second)
		defer t.Stop()

		for {
			select {
			case pod = <-pods:
				if pod == nil {
					return
				}
			case <-t.C:
				tick(pod)
			}
		}
	}()

	intr := interrupt.New(nil, w.Stop)
	var result *api.Pod
	err = intr.Run(func() error {
		ev, err := watch.Until(0, w, func(ev watch.Event) (bool, error) {
			c, err := exitCondition(ev)
			if !c && err == nil {
				pods <- ev.Object.(*api.Pod) // send to ticker
			}
			return c, err
		})
		// watch.Until can return a nil event alongside an error; guard the
		// type assertion so that case does not panic.
		if ev != nil {
			result = ev.Object.(*api.Pod)
		}
		return err
	})
	return result, err
}
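
A usage sketch against the same legacy API; the exit condition and progress callback are illustrative:

// Hypothetical condition: stop waiting once the pod has left the Pending phase.
exitPending := func(ev watch.Event) (bool, error) {
	p, ok := ev.Object.(*api.Pod)
	if !ok {
		return false, fmt.Errorf("unexpected object %T while watching pod", ev.Object)
	}
	return p.Status.Phase != api.PodPending, nil
}
pod, err := waitForPod(client.Core(), "default", "my-pod", exitPending,
	func(p *api.Pod) { fmt.Fprintf(os.Stderr, "waiting for %s, phase %s\n", p.Name, p.Status.Phase) })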
Example #4
File: run.go  Project: bryk/kubernetes
// waitForPod watches the given pod until exitCondition returns true. Every two
// seconds the tick function is called, e.g. for progress output.
func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition func(*api.Pod) bool, tick func(*api.Pod)) (*api.Pod, error) {
	pod, err := podClient.Pods(ns).Get(name)
	if err != nil {
		return nil, err
	}
	if exitCondition(pod) {
		return pod, nil
	}

	tick(pod)

	w, err := podClient.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name, ResourceVersion: pod.ResourceVersion}))
	if err != nil {
		return nil, err
	}

	// Tick with the most recently observed pod. The value is passed through an
	// atomic.Value so this goroutine does not race with the watch loop below.
	var latest atomic.Value
	latest.Store(pod)
	t := time.NewTicker(2 * time.Second)
	defer t.Stop()
	go func() {
		for range t.C {
			tick(latest.Load().(*api.Pod))
		}
	}()

	result := pod
	kubectl.WatchLoop(w, func(ev watch.Event) error {
		switch ev.Type {
		case watch.Added, watch.Modified:
			pod = ev.Object.(*api.Pod)
			latest.Store(pod)
			if exitCondition(pod) {
				result = pod
				w.Stop()
			}
		case watch.Deleted:
			w.Stop()
		case watch.Error:
			result = nil
			err = fmt.Errorf("failed to watch pod %s/%s", ns, name)
			w.Stop()
		}
		return nil
	})

	return result, err
}
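
A usage sketch for this variant, whose exit condition takes the pod directly instead of a watch event (ns, name, and out are assumed to be in scope):

// Hypothetical call: block until the pod reports Running, echoing the phase
// as progress output on every tick.
pod, err := waitForPod(client.Core(), ns, name,
	func(p *api.Pod) bool { return p.Status.Phase == api.PodRunning },
	func(p *api.Pod) { fmt.Fprintf(out, "pod %s is %s...\n", p.Name, p.Status.Phase) })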
Example #5
// GetFirstPod returns a pod matching the namespace and label selector
// and the number of all pods that match the label selector. If no pods
// match yet, it watches until one appears or the timeout expires.
func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) {
	options := api.ListOptions{LabelSelector: selector}

	podList, err := client.Pods(namespace).List(options)
	if err != nil {
		return nil, 0, err
	}
	pods := []*api.Pod{}
	for i := range podList.Items {
		pod := podList.Items[i]
		pods = append(pods, &pod)
	}
	if len(pods) > 0 {
		sort.Sort(sortBy(pods))
		return pods[0], len(podList.Items), nil
	}

	// Watch until we observe a pod
	options.ResourceVersion = podList.ResourceVersion
	w, err := client.Pods(namespace).Watch(options)
	if err != nil {
		return nil, 0, err
	}
	defer w.Stop()

	condition := func(event watch.Event) (bool, error) {
		return event.Type == watch.Added || event.Type == watch.Modified, nil
	}
	event, err := watch.Until(timeout, w, condition)
	if err != nil {
		return nil, 0, err
	}
	pod, ok := event.Object.(*api.Pod)
	if !ok {
		return nil, 0, fmt.Errorf("%#v is not a pod event", event)
	}
	return pod, 1, nil
}
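
A usage sketch; the sortBy shown assumes the sort.Reverse(controller.ActivePods(...)) ordering that callers elsewhere in this codebase pass to prefer the most active pod:

// Hypothetical call: find one pod behind the "app=frontend" selector,
// waiting up to a minute if no such pod exists yet.
selector := labels.SelectorFromSet(map[string]string{"app": "frontend"})
sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
pod, count, err := GetFirstPod(client.Core(), "default", selector, time.Minute, sortBy)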
Example #6
// updatePodWithRetries retries updating the given pod on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) {
	// Deep copy the pod so we have something to fall back on if a Get fails
	// during the retry loop.
	obj, err := api.Scheme.Copy(pod)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err)
	}
	oldPod := obj.(*api.Pod)
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(pod)
		if pod, e = podClient.Pods(namespace).Update(pod); e == nil {
			return
		}
		updateErr := e
		if pod, e = podClient.Pods(namespace).Get(oldPod.Name); e != nil {
			pod = oldPod
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil the returned pod cannot be trusted; if it is nil, the
	// returned pod contains the applied update.
	return pod, err
}
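
A usage sketch; updatePodFunc is assumed to be func(*api.Pod) (consistent with the literals passed in the next example), and the annotation key is illustrative:

// Hypothetical update: add an annotation, retrying on write conflicts.
updated, err := updatePodWithRetries(client.Core(), pod.Namespace, pod, func(p *api.Pod) {
	if p.Annotations == nil {
		p.Annotations = map[string]string{}
	}
	p.Annotations["example.com/touched"] = "true"
})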
Example #7
func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
	var err error
	// First, update the template label. This ensures that any newly created pods
	// will have the new label.
	applyUpdate := func(rc *api.ReplicationController) {
		if rc.Spec.Template.Labels == nil {
			rc.Spec.Template.Labels = map[string]string{}
		}
		rc.Spec.Template.Labels[deploymentKey] = deploymentValue
	}
	if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil {
		return nil, err
	}

	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
	// TODO: extract the code from the label command and re-use it here.
	selector := labels.SelectorFromSet(oldRc.Spec.Selector)
	options := api.ListOptions{LabelSelector: selector}
	podList, err := podClient.Pods(namespace).List(options)
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		applyUpdate := func(p *api.Pod) {
			if p.Labels == nil {
				p.Labels = map[string]string{
					deploymentKey: deploymentValue,
				}
			} else {
				p.Labels[deploymentKey] = deploymentValue
			}
		}
		if pod, err = updatePodWithRetries(podClient, namespace, pod, applyUpdate); err != nil {
			return nil, err
		}
	}

	if oldRc.Spec.Selector == nil {
		oldRc.Spec.Selector = map[string]string{}
	}
	// Copy the old selector, so that we can scrub out any orphaned pods
	selectorCopy := map[string]string{}
	for k, v := range oldRc.Spec.Selector {
		selectorCopy[k] = v
	}
	applyUpdate = func(rc *api.ReplicationController) {
		rc.Spec.Selector[deploymentKey] = deploymentValue
	}
	// Update the selector of the rc so it manages all the pods we updated above
	if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil {
		return nil, err
	}

	// Clean up any orphaned pods that don't have the new label, this can happen if the rc manager
	// doesn't see the update to its pod template and creates a new pod with the old labels after
	// we've finished re-adopting existing pods to the rc.
	selector = labels.SelectorFromSet(selectorCopy)
	options = api.ListOptions{LabelSelector: selector}
	podList, err = podClient.Pods(namespace).List(options)
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
			if err := podClient.Pods(namespace).Delete(pod.Name, nil); err != nil {
				return nil, err
			}
		}
	}

	return oldRc, nil
}
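
A call-site sketch; the deployment key and hash value are illustrative, and client.Core() is assumed to satisfy both getter interfaces:

// Hypothetical call: relabel an RC and its pods with a deployment hash so a
// rolling update can tell old pods from new ones.
updatedRc, err := AddDeploymentKeyToReplicationController(oldRc,
	client.Core(), client.Core(), "deployment", hash, oldRc.Namespace, os.Stdout)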