// PortForward executes socat in the pod's network namespace and copies
// data between stream (representing the user's local connection on their
// computer) and the specified port in the container.
//
// TODO:
// - match cgroups of container
// - should we support nsenter + socat on the host? (current impl)
// - should we support nsenter + socat in a container, running with elevated privs and --pid=host?
func (dm *DockerManager) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
	podInfraContainer := pod.FindContainerByName(PodInfraContainerName)
	if podInfraContainer == nil {
		return fmt.Errorf("cannot find pod infra container in pod %q", kubecontainer.BuildPodFullName(pod.Name, pod.Namespace))
	}
	container, err := dm.client.InspectContainer(string(podInfraContainer.ID))
	if err != nil {
		return err
	}

	if !container.State.Running {
		return fmt.Errorf("container not running (%s)", container.ID)
	}

	containerPid := container.State.Pid
	// TODO: what should we do if the host doesn't have socat installed?
	_, lookupErr := exec.LookPath("socat")
	if lookupErr != nil {
		return fmt.Errorf("unable to do port forwarding: socat not found")
	}
	args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "socat", "-", fmt.Sprintf("TCP4:localhost:%d", port)}
	// TODO: use exec.LookPath for nsenter as well.
	command := exec.Command("nsenter", args...)
	command.Stdin = stream
	command.Stdout = stream
	return command.Run()
}
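// Below is a minimal standalone sketch of the nsenter+socat bridge used by
// PortForward: accept local TCP connections and splice each one into a target
// port inside the network namespace of a given pid. The pid (1234) and the
// ports are made-up values for illustration only, and the sketch assumes
// nsenter and socat are present on the host, just as the current
// implementation does.
package main

import (
	"fmt"
	"log"
	"net"
	"os/exec"
)

func forwardConn(pid int, port uint16, conn net.Conn) error {
	defer conn.Close()
	// Mirror PortForward's invocation: enter the target pid's network
	// namespace (-n) and let socat bridge our connection to
	// localhost:port inside that namespace.
	cmd := exec.Command("nsenter",
		"-t", fmt.Sprintf("%d", pid),
		"-n", "socat", "-", fmt.Sprintf("TCP4:localhost:%d", port))
	cmd.Stdin = conn
	cmd.Stdout = conn
	return cmd.Run()
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go forwardConn(1234, 80, conn) // hypothetical infra container pid
	}
}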
// isPodRunning returns true if all containers of the pod are running.
func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) {
	for _, container := range pod.Spec.Containers {
		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			glog.Infof("container %q not found", container.Name)
			return false, nil
		}
		inspectResult, err := kl.dockerClient.InspectContainer(string(c.ID))
		if err != nil {
			glog.Infof("failed to inspect container %q: %v", container.Name, err)
			return false, err
		}
		if !inspectResult.State.Running {
			glog.Infof("container %q not running: %#v", container.Name, inspectResult.State)
			return false, nil
		}
	}
	return true, nil
}
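// A self-contained sketch of the same predicate, with the Docker client
// replaced by a hypothetical inspect callback so the short-circuit behavior
// is easy to see in isolation. The container names and states are invented
// for the example.
package main

import "fmt"

type state struct{ Running bool }

func allRunning(names []string, inspect func(string) (state, error)) (bool, error) {
	for _, name := range names {
		s, err := inspect(name)
		if err != nil {
			return false, err // inspection failure is surfaced to the caller
		}
		if !s.Running {
			return false, nil // one stopped container fails the whole pod
		}
	}
	return true, nil
}

func main() {
	states := map[string]state{"web": {Running: true}, "sidecar": {Running: false}}
	ok, err := allRunning([]string{"web", "sidecar"}, func(n string) (state, error) {
		s, found := states[n]
		if !found {
			return state{}, fmt.Errorf("container %q not found", n)
		}
		return s, nil
	})
	fmt.Println(ok, err) // false <nil>
}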
// makeRuntimePod constructs the container runtime pod. It will:
// 1. Construct the pod from the information stored in the unit file.
// 2. Construct the pod status from the pod info.
func (r *runtime) makeRuntimePod(unitName string, podInfos map[string]*podInfo) (*kubecontainer.Pod, error) {
	f, err := os.Open(path.Join(systemdServiceDir, unitName))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var pod kubecontainer.Pod
	opts, err := unit.Deserialize(f)
	if err != nil {
		return nil, err
	}

	var rktID string
	for _, opt := range opts {
		if opt.Section != unitKubernetesSection {
			continue
		}
		switch opt.Name {
		case unitPodName:
			err = json.Unmarshal([]byte(opt.Value), &pod)
			if err != nil {
				return nil, err
			}
		case unitRktID:
			rktID = opt.Value
		default:
			return nil, fmt.Errorf("rkt: unexpected key: %q", opt.Name)
		}
	}

	if len(rktID) == 0 {
		return nil, fmt.Errorf("rkt: cannot find rkt ID of pod %v, unit file is broken", pod)
	}
	info, found := podInfos[rktID]
	if !found {
		return nil, fmt.Errorf("rkt: cannot find info for pod %q, rkt uuid: %q", pod.Name, rktID)
	}
	pod.Status = info.toPodStatus(&pod)
	return &pod, nil
}
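// A runnable sketch of the deserialization step above, using go-systemd's
// unit.Deserialize on an in-memory unit file. The section and key names
// ("X-Kubernetes", "POD", "RktID") are made-up stand-ins for
// unitKubernetesSection, unitPodName, and unitRktID, whose real values live
// elsewhere in this package.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/coreos/go-systemd/unit"
)

func main() {
	unitFile := `[X-Kubernetes]
POD={"name":"nginx","namespace":"default"}
RktID=deadbeef
`
	// Deserialize returns one UnitOption per key, each carrying the
	// section it appeared under.
	opts, err := unit.Deserialize(strings.NewReader(unitFile))
	if err != nil {
		log.Fatal(err)
	}
	for _, opt := range opts {
		fmt.Printf("section=%q name=%q value=%q\n", opt.Section, opt.Name, opt.Value)
	}
}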
// dockerContainersToPod converts a group of docker containers into a single
// kubecontainer.Pod, assuming all of them belong to the same pod.
func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {
	var pod kubecontainer.Pod
	for _, c := range containers {
		dockerName, hash, err := ParseDockerName(c.Names[0])
		if err != nil {
			continue
		}
		pod.Containers = append(pod.Containers, &kubecontainer.Container{
			ID:    types.UID(c.ID),
			Name:  dockerName.ContainerName,
			Hash:  hash,
			Image: c.Image,
		})
		// TODO(yifan): Only one evaluation is enough.
		pod.ID = dockerName.PodUID
		name, namespace, _ := kubecontainer.ParsePodFullName(dockerName.PodFullName)
		pod.Name = name
		pod.Namespace = namespace
	}
	return pod
}
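// One way the TODO(yifan) note could be addressed: fill the pod-level fields
// once, from the first container whose Docker name parses, instead of
// re-assigning them on every iteration. This is an illustrative sketch that
// assumes the package's own types and helpers (DockerContainers,
// ParseDockerName, kubecontainer, types), not a tested patch.
func dockerContainersToPodOnce(containers DockerContainers) kubecontainer.Pod {
	var pod kubecontainer.Pod
	for _, c := range containers {
		dockerName, hash, err := ParseDockerName(c.Names[0])
		if err != nil {
			continue
		}
		pod.Containers = append(pod.Containers, &kubecontainer.Container{
			ID:    types.UID(c.ID),
			Name:  dockerName.ContainerName,
			Hash:  hash,
			Image: c.Image,
		})
		if pod.ID == "" { // fill pod metadata from the first parsed name only
			pod.ID = dockerName.PodUID
			name, namespace, _ := kubecontainer.ParsePodFullName(dockerName.PodFullName)
			pod.Name = name
			pod.Namespace = namespace
		}
	}
	return pod
}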
// SyncPod syncs the running pod to match the specified desired pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.GetPodFullName(pod)
	if len(runningPod.Containers) == 0 {
		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
		return r.RunPod(pod)
	}

	// Add references to all containers.
	unidentifiedContainers := make(map[types.UID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, &podStatus, r.readinessManager) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO(yifan): Take care of host network change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		result, err := r.prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
		// TODO(vmarmol): examine this logic.
		if err == nil && result != probe.Success {
			glog.Infof("Pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
			restartPod = true
			break
		}

		if err != nil {
			glog.V(2).Infof("Probe container %q failed: %v", container.Name, err)
		}
		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		// TODO(yifan): Handle network plugin.
		if err := r.KillPod(runningPod); err != nil {
			return err
		}
		if err := r.RunPod(pod); err != nil {
			return err
		}
	}
	return nil
}
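// A standalone sketch of the hash-driven restart check used in SyncPod: hash
// the desired container spec, compare it with the hash recorded when the
// container was created, and restart on mismatch. hashSpec and fnv are
// stand-ins here; the real hashing lives in kubecontainer.HashContainer and
// may differ in algorithm and inputs.
package main

import (
	"fmt"
	"hash/fnv"
)

type containerSpec struct {
	Name  string
	Image string
}

func hashSpec(c *containerSpec) uint64 {
	h := fnv.New64a()
	// Hash the printed representation of the spec, so any field change
	// yields a different hash.
	fmt.Fprintf(h, "%#v", c)
	return h.Sum64()
}

func main() {
	desired := containerSpec{Name: "web", Image: "nginx:1.9"}
	recorded := hashSpec(&containerSpec{Name: "web", Image: "nginx:1.7"})
	// A zero recorded hash means "unknown" and is skipped, mirroring the
	// c.Hash != 0 guard in SyncPod.
	if expected := hashSpec(&desired); recorded != 0 && recorded != expected {
		fmt.Printf("hash changed (%d vs %d), pod will be killed and re-created\n", recorded, expected)
	}
}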