// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
	glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)

	sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
	changes := podContainerSpecChanges{
		CreateSandbox:        sandboxChanged,
		SandboxID:            sandboxID,
		Attempt:              attempt,
		ContainersToStart:    make(map[int]string),
		ContainersToKeep:     make(map[kubecontainer.ContainerID]int),
		InitContainersToKeep: make(map[kubecontainer.ContainerID]int),
		ContainersToKill:     make(map[kubecontainer.ContainerID]containerToKillInfo),
	}

	// check the status of init containers.
	initFailed := false
	// always reset the init containers if the sandbox is changed.
	if !sandboxChanged {
		// Keep all successfully completed containers. If there are failing containers,
		// only keep the first failing one.
		initFailed = checkAndKeepInitContainers(pod, podStatus, changes.InitContainersToKeep)
	}
	changes.InitFailed = initFailed

	// check the status of containers.
	for index, container := range pod.Spec.Containers {
		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
				message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		if sandboxChanged {
			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
				message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		if initFailed {
			// Initialization failed and Container exists.
			// If we have an initialization failure everything will be killed anyway.
			// If RestartPolicy is Always or OnFailure we restart containers that were running before.
			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
				message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
				glog.V(1).Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		expectedHash := kubecontainer.HashContainer(&container)
		containerChanged := containerStatus.Hash != expectedHash
		if containerChanged {
			message := fmt.Sprintf("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				pod.Name, container.Name, containerStatus.Hash, expectedHash)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		liveness, found := m.livenessManager.Get(containerStatus.ID)
		if !found || liveness == proberesults.Success {
			changes.ContainersToKeep[containerStatus.ID] = index
			continue
		}
		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			glog.Info(message)
			changes.ContainersToStart[index] = message
		}
	}

	// Don't keep init containers if they are the only containers to keep.
	if !sandboxChanged && len(changes.ContainersToStart) == 0 && len(changes.ContainersToKeep) == 0 {
		changes.InitContainersToKeep = make(map[kubecontainer.ContainerID]int)
	}

	// compute containers to be killed
	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
	for _, containerStatus := range runningContainerStatuses {
		_, keep := changes.ContainersToKeep[containerStatus.ID]
		_, keepInit := changes.InitContainersToKeep[containerStatus.ID]
		if !keep && !keepInit {
			var podContainer *api.Container
			var killMessage string
			for i, c := range pod.Spec.Containers {
				if c.Name == containerStatus.Name {
					podContainer = &pod.Spec.Containers[i]
					killMessage = changes.ContainersToStart[i]
					break
				}
			}

			changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
				name:      containerStatus.Name,
				container: podContainer,
				message:   killMessage,
			}
		}
	}

	return changes
}
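// For reference, a minimal sketch of the result types the function above populates, reconstructed
// from the fields and map keys it uses rather than copied from the actual kuberuntime sources;
// the exact field types (notably Attempt) and any omitted fields are assumptions.
type podContainerSpecChanges struct {
	// CreateSandbox is true when the pod sandbox must be (re)created.
	CreateSandbox bool
	// SandboxID identifies the sandbox to reuse; Attempt is the attempt number for a new one.
	SandboxID string
	Attempt   uint32

	// InitFailed is true when an init container has failed and the pod must be reinitialized.
	InitFailed bool

	// ContainersToStart maps an index in pod.Spec.Containers to the reason the container is (re)started.
	ContainersToStart map[int]string
	// ContainersToKeep and InitContainersToKeep map a running container's ID back to its spec index.
	ContainersToKeep     map[kubecontainer.ContainerID]int
	InitContainersToKeep map[kubecontainer.ContainerID]int
	// ContainersToKill maps a running container's ID to the information needed to kill it.
	ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo
}

// containerToKillInfo carries what the kill path needs: the container's name, its spec
// (nil if it is no longer in the pod spec), and a human-readable reason.
type containerToKillInfo struct {
	name      string
	container *api.Container
	message   string
}

// checkAndKeepInitContainers is called above but not shown here. Based on the call-site comment
// ("keep all successfully completed containers; if there are failing containers, only keep the
// first failing one"), a plausible sketch of its contract looks like the following. This is an
// illustration, not the actual implementation; the ExitCode-based success check is an assumption.
func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool {
	initFailed := false

	for i, container := range pod.Spec.InitContainers {
		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil {
			continue
		}

		// Keep init containers that are still running or exited successfully.
		if containerStatus.State == kubecontainer.ContainerStateRunning ||
			(containerStatus.State == kubecontainer.ContainerStateExited && containerStatus.ExitCode == 0) {
			initContainersToKeep[containerStatus.ID] = i
			continue
		}

		// Keep only the first failing init container and report the failure.
		initContainersToKeep[containerStatus.ID] = i
		initFailed = true
		break
	}

	return initFailed
}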
// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
	glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)

	sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
	changes := podContainerSpecChanges{
		CreateSandbox:     sandboxChanged,
		SandboxID:         sandboxID,
		Attempt:           attempt,
		ContainersToStart: make(map[int]string),
		ContainersToKeep:  make(map[kubecontainer.ContainerID]int),
		ContainersToKill:  make(map[kubecontainer.ContainerID]containerToKillInfo),
	}

	for index, container := range pod.Spec.Containers {
		if sandboxChanged {
			message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
				message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		expectedHash := kubecontainer.HashContainer(&container)
		containerChanged := containerStatus.Hash != expectedHash
		if containerChanged {
			message := fmt.Sprintf("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				pod.Name, container.Name, containerStatus.Hash, expectedHash)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		liveness, found := m.livenessManager.Get(containerStatus.ID)
		if !found || liveness == proberesults.Success {
			changes.ContainersToKeep[containerStatus.ID] = index
			continue
		}
		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			glog.Info(message)
			changes.ContainersToStart[index] = message
		}
	}

	// compute containers to be killed
	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
	for _, containerStatus := range runningContainerStatuses {
		if _, keep := changes.ContainersToKeep[containerStatus.ID]; !keep {
			var podContainer *api.Container
			var killMessage string
			for i, c := range pod.Spec.Containers {
				if c.Name == containerStatus.Name {
					podContainer = &pod.Spec.Containers[i]
					killMessage = changes.ContainersToStart[i]
					break
				}
			}

			changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
				name:      containerStatus.Name,
				container: podContainer,
				message:   killMessage,
			}
		}
	}

	return changes
}
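// A minimal sketch of how a caller might act on the computed changes, in the order the struct
// implies: kill what must go, recreate the sandbox if needed, then start the requested containers.
// This is not the real SyncPod implementation; applyPodContainerChanges is a hypothetical name,
// and killContainer, createPodSandbox, and startContainer are placeholders with assumed signatures
// standing in for whatever the runtime manager actually calls.
func (m *kubeGenericRuntimeManager) applyPodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) error {
	changes := m.computePodContainerChanges(pod, podStatus)

	// Kill every running container that was not marked to keep.
	for containerID, killInfo := range changes.ContainersToKill {
		// Placeholder call; signature assumed for illustration.
		if err := m.killContainer(pod, containerID, killInfo.name, killInfo.message); err != nil {
			return err
		}
	}

	// Recreate the sandbox before starting anything if the existing one is unusable.
	sandboxID := changes.SandboxID
	if changes.CreateSandbox {
		var err error
		// Placeholder call; signature assumed for illustration.
		if sandboxID, err = m.createPodSandbox(pod, changes.Attempt); err != nil {
			return err
		}
	}

	// Start the containers that computePodContainerChanges flagged, keyed by their spec index.
	for index, reason := range changes.ContainersToStart {
		container := &pod.Spec.Containers[index]
		glog.V(4).Infof("Starting container %q in pod %q: %s", container.Name, format.Pod(pod), reason)
		// Placeholder call; signature assumed for illustration.
		if err := m.startContainer(sandboxID, pod, container); err != nil {
			return err
		}
	}

	return nil
}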