func TestStaleUpdates(t *testing.T) {
	pod := getTestPod()
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)

	status := v1.PodStatus{Message: "initial status"}
	m.SetPodStatus(pod, status)
	status.Message = "first version bump"
	m.SetPodStatus(pod, status)
	status.Message = "second version bump"
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 3)

	t.Logf("First sync pushes latest status.")
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	})
	client.ClearActions()

	for i := 0; i < 2; i++ {
		t.Logf("Next 2 syncs should be ignored (%d).", i)
		m.testSyncBatch()
		verifyActions(t, m.kubeClient, []core.Action{})
	}

	t.Log("Unchanged status should not send an update.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 0)

	t.Log("... unless it's stale.")
	m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1

	m.SetPodStatus(pod, status)
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
	})

	// Nothing stuck in the pipe.
	verifyUpdates(t, m, 0)
}
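// The test above relies on helpers defined elsewhere in the status manager test file
// (getTestPod, newTestManager, verifyUpdates, verifyActions, testSyncBatch). As a rough
// illustration of what verifyUpdates is expected to do — drain the status channel without
// blocking and count the queued sync requests — a minimal sketch could look like the
// following; the field access and loop structure are assumptions, not the canonical helper.
func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) {
	// Consume every pending sync request currently sitting in the channel.
	numUpdates := 0
	for drained := false; !drained; {
		select {
		case <-manager.podStatusChannel:
			numUpdates++
		default:
			drained = true
		}
	}
	if numUpdates != expectedUpdates {
		t.Errorf("unexpected number of updates %d, expected %d", numUpdates, expectedUpdates)
	}
}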
// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary. Returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUpdate bool) bool {
	var oldStatus v1.PodStatus
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if _, readyCondition := v1.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := metav1.Now()
		_, oldReadyCondition := v1.GetPodCondition(&oldStatus, v1.PodReady)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// Set InitializedCondition.LastTransitionTime.
	if _, initCondition := v1.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := metav1.Now()
		_, oldInitCondition := v1.GetPodCondition(&oldStatus, v1.PodInitialized)
		if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
			lastTransitionTime = oldInitCondition.LastTransitionTime
		}
		initCondition.LastTransitionTime = lastTransitionTime
	}

	// Ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// If the status has no start time, we need to set an initial time.
		now := metav1.Now()
		status.StartTime = &now
	}

	normalizeStatus(pod, &status)
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
		return false // No new status.
	}

	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus

	select {
	case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
		return true
	default:
		// Let the periodic syncBatch handle the update if the channel is full.
		// We can't block, since we hold the mutex lock.
		glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v",
			format.Pod(pod), status)
		return false
	}
}
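// Because updateStatusInternal is not thread safe, every exported entry point must take the
// manager's status lock before calling it. A minimal sketch of such a caller — assuming the
// manager guards its cache with a podStatusesLock mutex and that the generated DeepCopy method
// is available on v1.PodStatus — might look like this:
func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()
	// Cache a deep copy so later mutations by the caller cannot leak into the cache.
	status = *status.DeepCopy()
	// Force an update when the pod is being deleted so the final status is not
	// skipped as "unchanged".
	m.updateStatusInternal(pod, status, pod.DeletionTimestamp != nil)
}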