// PortForward executes socat in the pod's network namespace and copies // data between stream (representing the user's local connection on their // computer) and the specified port in the container. // // TODO: // - match cgroups of container // - should we support nsenter + socat on the host? (current impl) // - should we support nsenter + socat in a container, running with elevated privs and --pid=host? func (dm *DockerManager) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { podInfraContainer := pod.FindContainerByName(PodInfraContainerName) if podInfraContainer == nil { return fmt.Errorf("cannot find pod infra container in pod %q", kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)) } container, err := dm.client.InspectContainer(string(podInfraContainer.ID)) if err != nil { return err } if !container.State.Running { return fmt.Errorf("container not running (%s)", container) } containerPid := container.State.Pid // TODO what if the host doesn't have it??? _, lookupErr := exec.LookPath("socat") if lookupErr != nil { return fmt.Errorf("Unable to do port forwarding: socat not found.") } args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "socat", "-", fmt.Sprintf("TCP4:localhost:%d", port)} // TODO use exec.LookPath command := exec.Command("nsenter", args...) command.Stdin = stream command.Stdout = stream return command.Run() }
// isPodRunning returns true if all containers of a manifest are running. func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) { for _, container := range pod.Spec.Containers { c := runningPod.FindContainerByName(container.Name) if c == nil { glog.Infof("container %q not found", container.Name) return false, nil } inspectResult, err := kl.dockerClient.InspectContainer(string(c.ID)) if err != nil { glog.Infof("failed to inspect container %q: %v", container.Name, err) return false, err } if !inspectResult.State.Running { glog.Infof("container %q not running: %#v", container.Name, inspectResult.State) return false, nil } } return true, nil }
// SyncPod syncs the running pod to match the specified desired pod.
//
// If the pod is not running at all it is simply started. Otherwise each
// desired container is compared against the running pod; any mismatch
// (missing-but-restartable container, changed container hash, or failed
// liveness probe) or any running container not accounted for in the spec
// triggers a full kill-and-restart of the pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.GetPodFullName(pod)
	// Nothing running: just start the pod.
	if len(runningPod.Containers) == 0 {
		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
		return r.RunPod(pod)
	}

	// Add references to all containers. Containers still in this map after
	// the loop below were not matched to any spec entry.
	unidentifiedContainers := make(map[types.UID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)
		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			// Desired container is not running; restart the whole pod only
			// if the restart policy says this container should come back.
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, &podStatus, r.readinessManager) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO(yifan): Take care of host network change.
		// A hash of 0 is treated as "unknown" and never counts as a change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		// Probe liveness; an unhealthy result restarts the pod, while a
		// probe error is only logged and the container is kept.
		result, err := r.prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
		// TODO(vmarmol): examine this logic.
		if err == nil && result != probe.Success {
			glog.Infof("Pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
			restartPod = true
			break
		}
		if err != nil {
			glog.V(2).Infof("Probe container %q failed: %v", container.Name, err)
		}
		// Matched to the spec — it is no longer unidentified.
		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		// TODO(yifan): Handle network plugin.
		// Kill first, then re-run; either failure aborts the sync.
		if err := r.KillPod(runningPod); err != nil {
			return err
		}
		if err := r.RunPod(pod); err != nil {
			return err
		}
	}
	return nil
}