func TestSyncPodsDoesNothing(t *testing.T) {
	kubelet, _, fakeDocker := newTestKubelet(t)
	container := api.Container{Name: "bar"}
	fakeDocker.ContainerList = []docker.APIContainers{
		{
			// format is k8s_<container-id>_<pod-fullname>
			Names: []string{"/k8s_bar." + strconv.FormatUint(dockertools.HashContainer(&container), 16) + "_foo.new.test"},
			ID:    "1234",
		},
		{
			// network container
			Names: []string{"/k8s_net_foo.new.test_"},
			ID:    "9876",
		},
	}
	err := kubelet.SyncPods([]api.BoundPod{
		{
			ObjectMeta: api.ObjectMeta{
				Name:        "foo",
				Namespace:   "new",
				Annotations: map[string]string{ConfigSourceAnnotationKey: "test"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					container,
				},
			},
		},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	kubelet.drainWorkers()
	verifyCalls(t, fakeDocker, []string{"list", "list", "inspect_container", "inspect_container"})
}
func TestSyncPodsDoesNothing(t *testing.T) {
	kubelet, _, fakeDocker := newTestKubelet(t)
	container := api.Container{Name: "bar"}
	fakeDocker.ContainerList = []docker.APIContainers{
		{
			// format is k8s--<container-id>--<pod-fullname>
			Names: []string{"/k8s--bar." + strconv.FormatUint(dockertools.HashContainer(&container), 16) + "--foo.test"},
			ID:    "1234",
		},
		{
			// network container
			Names: []string{"/k8s--net--foo.test--"},
			ID:    "9876",
		},
	}
	err := kubelet.SyncPods([]Pod{
		{
			Name:      "foo",
			Namespace: "test",
			Manifest: api.ContainerManifest{
				ID: "foo",
				Containers: []api.Container{
					container,
				},
			},
		},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	verifyCalls(t, fakeDocker, []string{"list", "list"})
}
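Both versions of the test assert on the fake Docker client with verifyCalls, which checks the recorded sequence of Docker operations against an expected list. A minimal sketch of that kind of assertion, assuming a recorder with a `called` slice (the type and field names here are illustrative stand-ins, not the real test fixture; it needs only the standard `reflect` and `testing` imports):

// Hypothetical sketch; the real verifyCalls helper and fake Docker client live
// in the kubelet test fixtures, and the `called` field name is an assumption.
type fakeDockerRecorder struct {
	called []string // Docker operation names, in call order, e.g. "list", "inspect_container"
}

func verifyCallSequence(t *testing.T, fake *fakeDockerRecorder, expected []string) {
	if !reflect.DeepEqual(fake.called, expected) {
		t.Errorf("expected call sequence %v, got %v", expected, fake.called)
	}
}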
func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.DockerContainers) error {
	podFullName := GetPodFullName(pod)
	uuid := pod.UID
	containersToKeep := make(map[dockertools.DockerID]empty)
	killedContainers := make(map[dockertools.DockerID]empty)
	glog.V(4).Infof("Syncing Pod, podFullName: %q, uuid: %q", podFullName, uuid)

	// Make sure we have a network container
	var netID dockertools.DockerID
	if netDockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, networkContainerName); found {
		netID = dockertools.DockerID(netDockerContainer.ID)
	} else {
		glog.V(2).Infof("Network container doesn't exist for pod %q, killing and re-creating the pod", podFullName)
		count, err := kl.killContainersInPod(pod, dockerContainers)
		if err != nil {
			return err
		}
		netID, err = kl.createNetworkContainer(pod)
		if err != nil {
			glog.Errorf("Failed to introspect network container: %v; Skipping pod %q", err, podFullName)
			return err
		}
		if count > 0 {
			// Re-list everything, otherwise we'll think we're ok.
			dockerContainers, err = dockertools.GetKubeletDockerContainers(kl.dockerClient, false)
			if err != nil {
				glog.Errorf("Error listing containers %#v", dockerContainers)
				return err
			}
		}
	}
	containersToKeep[netID] = empty{}

	podVolumes, err := kl.mountExternalVolumes(pod)
	if err != nil {
		glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
		return err
	}

	podStatus := api.PodStatus{}
	info, err := kl.GetPodInfo(podFullName, uuid)
	if err != nil {
		glog.Errorf("Unable to get pod with name %q and uuid %q info, health checks may be invalid", podFullName, uuid)
	}
	netInfo, found := info[networkContainerName]
	if found {
		podStatus.PodIP = netInfo.PodIP
	}

	for _, container := range pod.Spec.Containers {
		expectedHash := dockertools.HashContainer(&container)
		if dockerContainer, found, hash := dockerContainers.FindPodContainer(podFullName, uuid, container.Name); found {
			containerID := dockertools.DockerID(dockerContainer.ID)
			glog.V(3).Infof("pod %q container %q exists as %v", podFullName, container.Name, containerID)

			// look for changes in the container.
			if hash == 0 || hash == expectedHash {
				// TODO: This should probably be separated out into a separate goroutine.
				healthy, err := kl.healthy(podFullName, uuid, podStatus, container, dockerContainer)
				if err != nil {
					glog.V(1).Infof("health check errored: %v", err)
					containersToKeep[containerID] = empty{}
					continue
				}
				if healthy == health.Healthy {
					containersToKeep[containerID] = empty{}
					continue
				}
				glog.V(1).Infof("pod %q container %q is unhealthy. Container will be killed and re-created.", podFullName, container.Name, healthy)
			} else {
				glog.V(1).Infof("pod %q container %q hash changed (%d vs %d). Container will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
			}
			if err := kl.killContainer(dockerContainer); err != nil {
				glog.V(1).Infof("Failed to kill container %q: %v", dockerContainer.ID, err)
				continue
			}
			killedContainers[containerID] = empty{}

			// Also kill associated network container
			if netContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, networkContainerName); found {
				if err := kl.killContainer(netContainer); err != nil {
					glog.V(1).Infof("Failed to kill network container %q: %v", netContainer.ID, err)
					continue
				}
			}
		}

		// Check RestartPolicy for container
		recentContainers, err := dockertools.GetRecentDockerContainersWithNameAndUUID(kl.dockerClient, podFullName, uuid, container.Name)
		if err != nil {
			glog.Errorf("Error listing recent containers with name and uuid:%s--%s--%s", podFullName, uuid, container.Name)
			// TODO(dawnchen): error handling here?
		}

		if len(recentContainers) > 0 && pod.Spec.RestartPolicy.Always == nil {
			if pod.Spec.RestartPolicy.Never != nil {
				glog.V(3).Infof("Already ran container with name %s--%s--%s, do nothing", podFullName, uuid, container.Name)
				continue
			}
			if pod.Spec.RestartPolicy.OnFailure != nil {
				// Check the exit code of last run
				if recentContainers[0].State.ExitCode == 0 {
					glog.V(3).Infof("Already successfully ran container with name %s--%s--%s, do nothing", podFullName, uuid, container.Name)
					continue
				}
			}
		}

		glog.V(3).Infof("Container with name %s--%s--%s doesn't exist, creating %#v", podFullName, uuid, container.Name, container)
		ref, err := containerRef(pod, &container)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
		}
		if !api.IsPullNever(container.ImagePullPolicy) {
			present, err := kl.dockerPuller.IsImagePresent(container.Image)
			latest := dockertools.RequireLatestImage(container.Image)
			if err != nil {
				if ref != nil {
					record.Eventf(ref, "failed", "failed", "Failed to inspect image %q", container.Image)
				}
				glog.Errorf("Failed to inspect image %q: %v; skipping pod %q container %q", container.Image, err, podFullName, container.Name)
				continue
			}
			if api.IsPullAlways(container.ImagePullPolicy) ||
				(api.IsPullIfNotPresent(container.ImagePullPolicy) && (!present || latest)) {
				if err := kl.pullImage(container.Image, ref); err != nil {
					continue
				}
			}
		}
		// TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container
		containerID, err := kl.runContainer(pod, &container, podVolumes, "container:"+string(netID))
		if err != nil {
			// TODO(bburns) : Perhaps blacklist a container after N failures?
			glog.Errorf("Error running pod %q container %q: %v", podFullName, container.Name, err)
			continue
		}
		containersToKeep[containerID] = empty{}
	}

	// Kill any containers in this pod which were not identified above (guards against duplicates).
	for id, container := range dockerContainers {
		curPodFullName, curUUID, _, _ := dockertools.ParseDockerName(container.Names[0])
		if curPodFullName == podFullName && curUUID == uuid {
			// Don't kill containers we want to keep or those we already killed.
			_, keep := containersToKeep[id]
			_, killed := killedContainers[id]
			if !keep && !killed {
				glog.V(1).Infof("Killing unwanted container in pod %q: %+v", curUUID, container)
				err = kl.killContainer(container)
				if err != nil {
					glog.Errorf("Error killing container: %v", err)
				}
			}
		}
	}

	return nil
}
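Both syncPod variants track which containers to preserve and which they have already killed with maps whose value type is the zero-size `empty` struct, i.e. sets keyed by Docker ID. A self-contained sketch of that idiom, using local stand-ins for `dockertools.DockerID` and `empty`:

package main

import "fmt"

type DockerID string // local stand-in for dockertools.DockerID
type empty struct{}  // zero-size value; the maps below are used purely as sets

func main() {
	containersToKeep := make(map[DockerID]empty)
	killedContainers := make(map[DockerID]empty)

	containersToKeep["1234"] = empty{}
	killedContainers["9876"] = empty{}

	// Mirror of the final sweep in syncPod: anything neither kept nor already
	// killed is a candidate for removal.
	for _, id := range []DockerID{"1234", "9876", "5555"} {
		_, keep := containersToKeep[id]
		_, killed := killedContainers[id]
		if !keep && !killed {
			fmt.Printf("would kill unwanted container %s\n", id)
		}
	}
}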
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)

	podManager, _ := newFakePodManager()

	kb := &Kubelet{
		rootDirectory:       "/tmp/kubelet",
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		statusManager:       newStatusManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager:    kubecontainer.NewReadinessManager(),
		podManager:          podManager,
		os:                  FakeOS{},
		volumeManager:       newVolumeManager(),
	}
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	podContainers := []docker.APIContainers{
		{
			Names:  []string{"/k8s_bar." + strconv.FormatUint(dockertools.HashContainer(&api.Container{Name: "bar"}), 16) + "_foo_new_12345678_42"},
			ID:     "1234",
			Status: "running",
		},
		{
			Names:  []string{"/k8s_net_foo.new.test_abcdefgh_42"},
			ID:     "9876",
			Status: "running",
		},
	}
	kb.dockerClient = &testDocker{
		listContainersResults: []listContainersResult{
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: []docker.APIContainers{}},
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: podContainers},
			{label: "list pod container", containers: podContainers},
		},
		inspectContainersResults: []inspectContainersResult{
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
		},
		t: t,
	}
	kb.containerManager = dockertools.NewDockerManager(
		kb.dockerClient,
		kb.recorder,
		kb.readinessManager,
		kb.containerRefManager,
		dockertools.PodInfraContainerImage,
		0,
		0)
	kb.containerManager.Puller = &dockertools.FakeDockerPuller{}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
func (kl *Kubelet) syncPod(pod *Pod, dockerContainers dockertools.DockerContainers) error {
	podFullName := GetPodFullName(pod)
	uuid := pod.Manifest.UUID
	containersToKeep := make(map[dockertools.DockerID]empty)
	killedContainers := make(map[dockertools.DockerID]empty)

	// Make sure we have a network container
	var netID dockertools.DockerID
	if networkDockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, networkContainerName); found {
		netID = dockertools.DockerID(networkDockerContainer.ID)
	} else {
		glog.Infof("Network container doesn't exist, creating")
		count, err := kl.deleteAllContainers(pod, podFullName, dockerContainers)
		if err != nil {
			return err
		}
		dockerNetworkID, err := kl.createNetworkContainer(pod)
		if err != nil {
			glog.Errorf("Failed to introspect network container. (%v) Skipping pod %s", err, podFullName)
			return err
		}
		netID = dockerNetworkID
		if count > 0 {
			// relist everything, otherwise we'll think we're ok
			dockerContainers, err = dockertools.GetKubeletDockerContainers(kl.dockerClient)
			if err != nil {
				glog.Errorf("Error listing containers %#v", dockerContainers)
				return err
			}
		}
	}
	containersToKeep[netID] = empty{}

	podVolumes, err := kl.mountExternalVolumes(&pod.Manifest)
	if err != nil {
		glog.Errorf("Unable to mount volumes for pod %s: (%v) Skipping pod.", podFullName, err)
		return err
	}

	podState := api.PodState{Manifest: api.ContainerManifest{UUID: uuid}}
	info, err := kl.GetPodInfo(podFullName, uuid)
	if err != nil {
		glog.Errorf("Unable to get pod with name %s and uuid %s info, health checks may be invalid.", podFullName, uuid)
	}
	netInfo, found := info[networkContainerName]
	if found && netInfo.NetworkSettings != nil {
		podState.PodIP = netInfo.NetworkSettings.IPAddress
	}

	for _, container := range pod.Manifest.Containers {
		expectedHash := dockertools.HashContainer(&container)
		if dockerContainer, found, hash := dockerContainers.FindPodContainer(podFullName, uuid, container.Name); found {
			containerID := dockertools.DockerID(dockerContainer.ID)
			glog.V(1).Infof("pod %s container %s exists as %v", podFullName, container.Name, containerID)

			// look for changes in the container.
			if hash == 0 || hash == expectedHash {
				// TODO: This should probably be separated out into a separate goroutine.
				healthy, err := kl.healthy(podFullName, podState, container, dockerContainer)
				if err != nil {
					glog.V(1).Infof("health check errored: %v", err)
					containersToKeep[containerID] = empty{}
					continue
				}
				if healthy == health.Healthy {
					containersToKeep[containerID] = empty{}
					continue
				}
				glog.V(1).Infof("pod %s container %s is unhealthy.", podFullName, container.Name, healthy)
			} else {
				glog.V(1).Infof("container hash changed %d vs %d.", hash, expectedHash)
			}
			if err := kl.killContainer(dockerContainer); err != nil {
				glog.V(1).Infof("Failed to kill container %s: %v", dockerContainer.ID, err)
				continue
			}
			killedContainers[containerID] = empty{}
		}

		// Check RestartPolicy for container
		recentContainers, err := dockertools.GetRecentDockerContainersWithNameAndUUID(kl.dockerClient, podFullName, uuid, container.Name)
		if err != nil {
			glog.Errorf("Error listing recent containers with name and uuid:%s--%s--%s", podFullName, uuid, container.Name)
			// TODO(dawnchen): error handling here?
		}

		if len(recentContainers) > 0 && pod.Manifest.RestartPolicy.Always == nil {
			if pod.Manifest.RestartPolicy.Never != nil {
				glog.Infof("Already ran container with name %s--%s--%s, do nothing", podFullName, uuid, container.Name)
				continue
			}
			if pod.Manifest.RestartPolicy.OnFailure != nil {
				// Check the exit code of last run
				if recentContainers[0].State.ExitCode == 0 {
					glog.Infof("Already successfully ran container with name %s--%s--%s, do nothing", podFullName, uuid, container.Name)
					continue
				}
			}
		}

		glog.Infof("Container with name %s--%s--%s doesn't exist, creating %#v", podFullName, uuid, container.Name, container)
		if err := kl.dockerPuller.Pull(container.Image); err != nil {
			glog.Errorf("Failed to pull image %s: %v skipping pod %s container %s.", container.Image, err, podFullName, container.Name)
			continue
		}
		// TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container
		containerID, err := kl.runContainer(pod, &container, podVolumes, "container:"+string(netID))
		if err != nil {
			// TODO(bburns) : Perhaps blacklist a container after N failures?
			glog.Errorf("Error running pod %s container %s: %v", podFullName, container.Name, err)
			continue
		}
		containersToKeep[containerID] = empty{}
	}

	// Kill any containers in this pod which were not identified above (guards against duplicates).
	for id, container := range dockerContainers {
		curPodFullName, curUUID, _, _ := dockertools.ParseDockerName(container.Names[0])
		if curPodFullName == podFullName && curUUID == uuid {
			// Don't kill containers we want to keep or those we already killed.
			_, keep := containersToKeep[id]
			_, killed := killedContainers[id]
			if !keep && !killed {
				err = kl.killContainer(container)
				if err != nil {
					glog.Errorf("Error killing container: %v", err)
				}
			}
		}
	}

	return nil
}
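In both versions the restart decision keys off which of the three RestartPolicy pointer fields is non-nil: Always restarts unconditionally, Never skips any container that has already run, and OnFailure restarts only when the most recent run exited non-zero. A condensed sketch of that decision, with local stand-in types rather than the real api package:

// Local stand-ins for the pointer-per-variant restart policy of this API era.
type RestartPolicyAlways struct{}
type RestartPolicyNever struct{}
type RestartPolicyOnFailure struct{}

type RestartPolicy struct {
	Always    *RestartPolicyAlways
	Never     *RestartPolicyNever
	OnFailure *RestartPolicyOnFailure
}

// shouldStart reports whether a container should be (re)started, given whether
// it has run before and the exit code of its most recent run, mirroring the
// checks in syncPod above.
func shouldStart(policy RestartPolicy, hasRun bool, lastExitCode int) bool {
	if !hasRun || policy.Always != nil {
		return true
	}
	if policy.Never != nil {
		return false
	}
	if policy.OnFailure != nil {
		// restart only on failure
		return lastExitCode != 0
	}
	return true
}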
func TestRunOnce(t *testing.T) {
	kb := &Kubelet{
		rootDirectory: "/tmp/kubelet",
	}
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	podContainers := []docker.APIContainers{
		{
			Names:  []string{"/k8s_bar." + strconv.FormatUint(dockertools.HashContainer(&api.Container{Name: "bar"}), 16) + "_foo.new.test_12345678_42"},
			ID:     "1234",
			Status: "running",
		},
		{
			Names:  []string{"/k8s_net_foo.new.test_abcdefgh_42"},
			ID:     "9876",
			Status: "running",
		},
	}
	kb.dockerClient = &testDocker{
		listContainersResults: []listContainersResult{
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: []docker.APIContainers{}},
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: podContainers},
			{label: "list pod container", containers: podContainers},
		},
		inspectContainersResults: []inspectContainersResult{
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true},
				},
			},
		},
		t: t,
	}
	kb.dockerPuller = &dockertools.FakeDockerPuller{}
	results, err := kb.runOnce([]api.BoundPod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:         "12345678",
				Name:        "foo",
				Namespace:   "new",
				Annotations: map[string]string{ConfigSourceAnnotationKey: "test"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
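The testDocker fake used by both TestRunOnce versions serves pre-recorded, labeled results in call order and fails the test if the kubelet issues more Docker calls than were scripted. A hypothetical sketch of that scripted-fake pattern against the go-dockerclient interface (the type and field names are illustrative, not the real fixture; it assumes the `github.com/fsouza/go-dockerclient` package imported as `docker` plus the standard `testing` import):

// Hypothetical scripted fake: each ListContainers call pops the next canned result.
type cannedList struct {
	label      string
	containers []docker.APIContainers
}

type scriptedDocker struct {
	t     *testing.T
	lists []cannedList
	next  int
}

func (d *scriptedDocker) ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) {
	if d.next >= len(d.lists) {
		d.t.Fatalf("unexpected ListContainers call #%d", d.next+1)
		return nil, nil
	}
	r := d.lists[d.next]
	d.next++
	d.t.Logf("%s: returning %d container(s)", r.label, len(r.containers))
	return r.containers, nil
}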