// managePodLoop drains update requests for a single pod from podUpdates,
// invoking syncPodFn once per request, until the channel is closed. It is
// intended to run as a dedicated goroutine per pod so updates for one pod
// are serialized.
func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
	// Zero time initially, so the first iteration forces a cache refresh.
	var minRuntimeCacheTime time.Time
	for newWork := range podUpdates {
		// Each iteration runs in an anonymous function so the deferred
		// checkForUpdates fires at the end of every iteration, not only
		// when the loop exits.
		func() {
			// NOTE(review): checkForUpdates receives updateCompleteFn as
			// well; presumably it only invokes it for a queued follow-up
			// update, while the explicit call below handles the success
			// path — confirm against checkForUpdates' implementation.
			defer p.checkForUpdates(newWork.pod.UID, newWork.updateCompleteFn)
			// We would like to have the state of Docker from at least the moment
			// when we finished the previous processing of that pod.
			if err := p.runtimeCache.ForceUpdateIfOlder(minRuntimeCacheTime); err != nil {
				glog.Errorf("Error updating docker cache: %v", err)
				return
			}
			pods, err := p.runtimeCache.GetPods()
			if err != nil {
				glog.Errorf("Error getting pods while syncing pod: %v", err)
				return
			}
			err = p.syncPodFn(newWork.pod, newWork.mirrorPod,
				kubecontainer.Pods(pods).FindPodByID(newWork.pod.UID))
			if err != nil {
				glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
				p.recorder.Eventf(newWork.pod, "failedSync", "Error syncing pod, skipping: %v", err)
				return
			}
			// Record when this sync finished so the next iteration can
			// demand a runtime cache at least this fresh.
			minRuntimeCacheTime = time.Now()
			// Sync succeeded: signal completion to the caller.
			newWork.updateCompleteFn()
		}()
	}
}
// runPod runs a single pod and wait until all containers are running. func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error { delay := retryDelay retry := 0 for { pods, err := kl.containerRuntime.GetPods(false) if err != nil { return fmt.Errorf("failed to get kubelet pods: %v", err) } p := container.Pods(pods).FindPodByID(pod.UID) running, err := kl.isPodRunning(pod, p) if err != nil { return fmt.Errorf("failed to check pod status: %v", err) } if running { glog.Infof("pod %q containers running", pod.Name) return nil } glog.Infof("pod %q containers not running: syncing", pod.Name) // We don't create mirror pods in this mode; pass a dummy boolean value // to sycnPod. if err = kl.syncPod(pod, nil, p, SyncPodUpdate); err != nil { return fmt.Errorf("error syncing pod: %v", err) } if retry >= RunOnceMaxRetries { return fmt.Errorf("timeout error: pod %q containers not running after %d retries", pod.Name, RunOnceMaxRetries) } // TODO(proppy): health checking would be better than waiting + checking the state at the next iteration. glog.Infof("pod %q containers synced, waiting for %v", pod.Name, delay) time.Sleep(delay) retry++ delay *= RunOnceRetryDelayBackoff } }
func (f *fakePodWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateComplete func()) { pods, err := f.runtimeCache.GetPods() if err != nil { f.t.Errorf("Unexpected error: %v", err) } if err := f.syncPodFn(pod, mirrorPod, kubecontainer.Pods(pods).FindPodByID(pod.UID)); err != nil { f.t.Errorf("Unexpected error: %v", err) } }
// GetPodStatus currently invokes GetPods() to return the status. // TODO(yifan): Split the get status logic from GetPods(). func (r *runtime) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { pods, err := r.GetPods(true) if err != nil { return nil, err } p := kubecontainer.Pods(pods).FindPodByID(pod.UID) if len(p.Containers) == 0 { return nil, fmt.Errorf("cannot find status for pod: %q", kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)) } return &p.Status, nil }
// runSyncPod is a helper function to retrieve the running pods from the fake // docker client and runs SyncPod for the given pod. func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod) { runningPods, err := dm.GetPods(false) if err != nil { t.Fatalf("unexpected error: %v", err) } runningPod := kubecontainer.Pods(runningPods).FindPodByID(pod.UID) podStatus, err := dm.GetPodStatus(pod) if err != nil { t.Errorf("unexpected error: %v", err) } fakeDocker.ClearCalls() err = dm.SyncPod(pod, runningPod, *podStatus, []api.Secret{}) if err != nil { t.Errorf("unexpected error: %v", err) } }