// TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon
func ConvertPodStatusToRunningPod(runtimeName string, podStatus *kubecontainer.PodStatus) kubecontainer.Pod {
	runningPod := kubecontainer.Pod{
		ID:        podStatus.ID,
		Name:      podStatus.Name,
		Namespace: podStatus.Namespace,
	}
	for _, containerStatus := range podStatus.ContainerStatuses {
		if containerStatus.State != kubecontainer.ContainerStateRunning {
			continue
		}
		container := &kubecontainer.Container{
			ID:      containerStatus.ID,
			Name:    containerStatus.Name,
			Image:   containerStatus.Image,
			ImageID: containerStatus.ImageID,
			Hash:    containerStatus.Hash,
			State:   containerStatus.State,
		}
		runningPod.Containers = append(runningPod.Containers, container)
	}

	// Need to place a sandbox in the Pod as well.
	for _, sandbox := range podStatus.SandboxStatuses {
		runningPod.Sandboxes = append(runningPod.Sandboxes, &kubecontainer.Container{
			ID:    kubecontainer.ContainerID{Type: runtimeName, ID: *sandbox.Id},
			State: sandboxToKubeContainerState(*sandbox.State),
		})
	}
	return runningPod
}
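// Hedged usage sketch (not part of the source above): how a caller might use
// ConvertPodStatusToRunningPod to turn a cached PodStatus into a Pod that is
// safe to hand to a kill path. The killPod callback and this wrapper are
// illustrative assumptions; only the conversion call itself comes from the
// function above.
func killFromStatus(runtimeName string, podStatus *kubecontainer.PodStatus, killPod func(kubecontainer.Pod) error) error {
	// Only containers reported as running (plus the sandboxes) survive the
	// conversion, so nothing stale is left in the resulting Pod.
	runningPod := ConvertPodStatusToRunningPod(runtimeName, podStatus)
	if len(runningPod.Containers) == 0 && len(runningPod.Sandboxes) == 0 {
		return nil // nothing to kill
	}
	return killPod(runningPod)
}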
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers, including those already
// exited and dead containers (used for garbage collection).
func (r *runtime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		return nil, err
	}

	var kubepods []*kubecontainer.Pod
	for _, podInfo := range podInfos {
		var pod kubecontainer.Pod
		var containers []*kubecontainer.Container

		podID, podName, podNamespace, err := r.parseHyperPodFullName(podInfo.PodName)
		if err != nil {
			glog.V(5).Infof("Hyper: pod %s is not managed by kubelet", podInfo.PodName)
			continue
		}

		pod.ID = types.UID(podID)
		pod.Name = podName
		pod.Namespace = podNamespace

		for _, cinfo := range podInfo.PodInfo.Spec.Containers {
			var container kubecontainer.Container
			container.ID = kubecontainer.ContainerID{Type: typeHyper, ID: cinfo.ContainerID}
			container.Image = cinfo.Image

			for _, cstatus := range podInfo.PodInfo.Status.Status {
				if cstatus.ContainerID == r.buildContainerID(cinfo.ContainerID) {
					createAt, err := parseTimeString(cstatus.Running.StartedAt)
					if err == nil {
						container.Created = createAt.Unix()
					}
				}
			}

			_, _, _, containerName, containerHash, err := r.parseHyperContainerFullName(cinfo.Name)
			if err != nil {
				glog.V(5).Infof("Hyper: container %s is not managed by kubelet", cinfo.Name)
				continue
			}
			container.Name = containerName

			hash, err := strconv.ParseUint(containerHash, 16, 64)
			if err == nil {
				container.Hash = hash
			}

			containers = append(containers, &container)
		}
		pod.Containers = containers

		kubepods = append(kubepods, &pod)
	}

	return kubepods, nil
}
func getContainerState(pod *kubecontainer.Pod, cid *kubecontainer.ContainerID) plegContainerState {
	// Default to the non-existent state.
	state := plegContainerNonExistent
	if pod == nil {
		return state
	}
	container := pod.FindContainerByID(*cid)
	if container == nil {
		return state
	}
	return convertState(container.State)
}
func getContainerState(pod *kubecontainer.Pod, cid *kubecontainer.ContainerID) plegContainerState {
	// Default to the non-existent state.
	state := plegContainerNonExistent
	if pod == nil {
		return state
	}
	c := pod.FindContainerByID(*cid)
	if c != nil {
		return convertState(c.State)
	}
	// Search through sandboxes too.
	c = pod.FindSandboxByID(*cid)
	if c != nil {
		return convertState(c.State)
	}
	return state
}
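// The plegContainerState values and convertState referenced by both variants
// above are not shown in this snippet. A minimal sketch of what they might
// look like, assuming plegContainerState is a simple string type; the real
// PLEG definitions may differ.
type plegContainerState string

const (
	plegContainerRunning     plegContainerState = "running"
	plegContainerExited      plegContainerState = "exited"
	plegContainerUnknown     plegContainerState = "unknown"
	plegContainerNonExistent plegContainerState = "non-existent"
)

func convertState(state kubecontainer.ContainerState) plegContainerState {
	switch state {
	case kubecontainer.ContainerStateRunning:
		return plegContainerRunning
	case kubecontainer.ContainerStateExited:
		return plegContainerExited
	default:
		// Anything else (e.g. created) is reported as unknown in this sketch.
		return plegContainerUnknown
	}
}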
// makeRuntimePod constructs the container runtime pod. It will:
// 1. Construct the pod by the information stored in the unit file.
// 2. Construct the pod status from pod info.
func (r *runtime) makeRuntimePod(unitName string, podInfos map[string]*podInfo) (*kubecontainer.Pod, error) {
	f, err := os.Open(path.Join(systemdServiceDir, unitName))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var pod kubecontainer.Pod
	opts, err := unit.Deserialize(f)
	if err != nil {
		return nil, err
	}

	var rktID string
	for _, opt := range opts {
		if opt.Section != unitKubernetesSection {
			continue
		}
		switch opt.Name {
		case unitPodName:
			err = json.Unmarshal([]byte(opt.Value), &pod)
			if err != nil {
				return nil, err
			}
		case unitRktID:
			rktID = opt.Value
		default:
			return nil, fmt.Errorf("rkt: Unexpected key: %q", opt.Name)
		}
	}

	if len(rktID) == 0 {
		return nil, fmt.Errorf("rkt: cannot find rkt ID of pod %v, unit file is broken", pod)
	}
	info, found := podInfos[rktID]
	if !found {
		return nil, fmt.Errorf("rkt: cannot find info for pod %q, rkt uuid: %q", pod.Name, rktID)
	}
	pod.Status = info.toPodStatus(&pod)
	return &pod, nil
}
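// A self-contained sketch of the unit-file round trip makeRuntimePod depends
// on, using github.com/coreos/go-systemd/unit. The section and option names
// ("X-Kubernetes", "POD", "RktID") and the values are illustrative assumptions
// standing in for the unitKubernetesSection, unitPodName and unitRktID
// constants used above.
package main

import (
	"fmt"
	"strings"

	"github.com/coreos/go-systemd/unit"
)

func main() {
	serialized := `[X-Kubernetes]
POD={"metadata":{"name":"nginx","namespace":"default"}}
RktID=example-rkt-uuid
`
	// Deserialize returns one UnitOption per key, tagged with its section,
	// which is what the switch in makeRuntimePod iterates over.
	opts, err := unit.Deserialize(strings.NewReader(serialized))
	if err != nil {
		panic(err)
	}
	for _, opt := range opts {
		fmt.Printf("section=%q name=%q value=%q\n", opt.Section, opt.Name, opt.Value)
	}
}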
func dockerContainersToPod(containers []*docker.APIContainers) kubecontainer.Pod {
	var pod kubecontainer.Pod
	for _, c := range containers {
		dockerName, hash, err := ParseDockerName(c.Names[0])
		if err != nil {
			continue
		}
		pod.Containers = append(pod.Containers, &kubecontainer.Container{
			ID:    kubecontainer.ContainerID{Type: "docker", ID: c.ID},
			Name:  dockerName.ContainerName,
			Hash:  hash,
			Image: c.Image,
		})

		// TODO(yifan): Only one evaluation is enough.
		pod.ID = dockerName.PodUID
		name, namespace, _ := kubecontainer.ParsePodFullName(dockerName.PodFullName)
		pod.Name = name
		pod.Namespace = namespace
	}
	return pod
}
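// Hedged sketch (not part of the source): dockerContainersToPod assumes every
// entry in its argument belongs to the same pod, so a caller working from a
// flat container listing would first group containers by pod UID. This wrapper
// is an illustrative assumption; ParseDockerName and the PodUID field are
// taken from the function above.
func dockerContainersToPods(containers []*docker.APIContainers) []*kubecontainer.Pod {
	grouped := map[types.UID][]*docker.APIContainers{}
	for _, c := range containers {
		dockerName, _, err := ParseDockerName(c.Names[0])
		if err != nil {
			// Not a kubelet-managed container; skip it.
			continue
		}
		grouped[dockerName.PodUID] = append(grouped[dockerName.PodUID], c)
	}

	var pods []*kubecontainer.Pod
	for _, cs := range grouped {
		pod := dockerContainersToPod(cs)
		pods = append(pods, &pod)
	}
	return pods
}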
// SyncPod syncs the running pod to match the specified desired pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	podFullName := kubeletUtil.FormatPodName(pod)
	if len(runningPod.Containers) == 0 {
		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
		return r.RunPod(pod, pullSecrets)
	}

	// Add references to all containers.
	unidentifiedContainers := make(map[types.UID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, &podStatus, r.readinessManager) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO: check for non-root image directives. See ../docker/manager.go#SyncPod
		// TODO(yifan): Take care of host network change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		result, err := r.prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
		// TODO(vmarmol): examine this logic.
		if err == nil && result != probe.Success {
			glog.Infof("Pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
			restartPod = true
			break
		}

		if err != nil {
			glog.V(2).Infof("Probe container %q failed: %v", container.Name, err)
		}
		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		if err := r.KillPod(pod, runningPod); err != nil {
			return err
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			return err
		}
	}

	return nil
}
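// kubecontainer.HashContainer is not shown here. To illustrate the technique
// the loop above relies on (recompute a deterministic hash of the desired
// container spec and compare it with the hash recorded on the running
// container), a minimal stand-in could look like the sketch below; the real
// kubelet helper uses a different hash function and encoding (imports assumed:
// fmt, hash/fnv).
func hashContainerSpec(container *api.Container) uint64 {
	h := fnv.New64a()
	// Hash a printable dump of the spec: any change to the desired spec
	// changes the hash, which is what triggers the kill-and-recreate path.
	fmt.Fprintf(h, "%#v", *container)
	return h.Sum64()
}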
// SyncPod syncs the running pod to match the specified desired pod.
func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, _ *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestartedOldVersion(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO: check for non-root image directives. See ../docker/manager.go#SyncPod
		// TODO(yifan): Take care of host network change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		// Kill the pod only if the pod is actually running.
		if len(runningPod.Containers) > 0 {
			if err := r.KillPod(pod, runningPod); err != nil {
				return err
			}
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			return err
		}
	}

	return nil
}
// SyncPod syncs the running pod to match the specified desired pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	podFullName := r.buildHyperPodFullName(string(pod.UID), string(pod.Name), string(pod.Namespace))
	if len(runningPod.Containers) == 0 {
		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
		return r.RunPod(pod, pullSecrets)
	}

	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				restartPod = true
				break
			}
			continue
		}

		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.V(4).Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		if err := r.KillPod(nil, runningPod); err != nil {
			glog.Errorf("Hyper: kill pod %s failed, error: %s", runningPod.Name, err)
			return err
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			glog.Errorf("Hyper: run pod %s failed, error: %s", pod.Name, err)
			return err
		}
	}

	return nil
}
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers, including those already
// exited and dead containers (used for garbage collection).
func (r *runtime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		return nil, err
	}

	var kubepods []*kubecontainer.Pod
	for _, podInfo := range podInfos {
		var pod kubecontainer.Pod
		var containers []*kubecontainer.Container

		if !all && podInfo.Status != StatusRunning {
			continue
		}

		podID := podInfo.PodInfo.Spec.Labels[KEY_API_POD_UID]
		podName, podNamespace, err := kubecontainer.ParsePodFullName(podInfo.PodName)
		if err != nil {
			glog.V(5).Infof("Hyper: pod %s is not managed by kubelet", podInfo.PodName)
			continue
		}

		pod.ID = types.UID(podID)
		pod.Name = podName
		pod.Namespace = podNamespace

		for _, cinfo := range podInfo.PodInfo.Spec.Containers {
			var container kubecontainer.Container
			container.ID = kubecontainer.ContainerID{Type: typeHyper, ID: cinfo.ContainerID}
			container.Image = cinfo.Image

			for _, cstatus := range podInfo.PodInfo.Status.ContainerStatus {
				if cstatus.ContainerID == cinfo.ContainerID {
					switch cstatus.Phase {
					case StatusRunning:
						container.State = kubecontainer.ContainerStateRunning
					default:
						container.State = kubecontainer.ContainerStateExited
					}
					// harryz: container.Created is moved to ContainerStatus
					// createAt, err := parseTimeString(cstatus.Running.StartedAt)
					// if err == nil {
					// 	container.Created = createAt.Unix()
					// }
				}
			}

			_, _, _, containerName, _, containerHash, err := r.parseHyperContainerFullName(cinfo.Name)
			if err != nil {
				glog.V(5).Infof("Hyper: container %s is not managed by kubelet", cinfo.Name)
				continue
			}
			container.Name = containerName

			hash, err := strconv.ParseUint(containerHash, 16, 64)
			if err == nil {
				container.Hash = hash
			}

			containers = append(containers, &container)
		}
		pod.Containers = containers

		kubepods = append(kubepods, &pod)
	}

	return kubepods, nil
}