// restoreSpecsFromContainerLabels restores all information needed for killing a container. In some
// cases we may not have the pod and container spec when killing a container, e.g. the pod is deleted
// during kubelet restart.
// To solve this problem, we've already written the necessary information into container labels. Here
// we just need to retrieve it from the container labels and restore the specs.
// TODO(random-liu): Add a node e2e test to test this behaviour.
// TODO(random-liu): Change the lifecycle handler to accept just the information needed, so that we
// can pass only what is needed instead of creating fake objects.
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
	var pod *v1.Pod
	var container *v1.Container
	s, err := m.runtimeService.ContainerStatus(containerID.ID)
	if err != nil {
		return nil, nil, err
	}

	l := getContainerInfoFromLabels(s.Labels)
	a := getContainerInfoFromAnnotations(s.Annotations)
	// Note that the following are not full specs. The container killing code should not use
	// un-restored fields.
	pod = &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			UID:                        l.PodUID,
			Name:                       l.PodName,
			Namespace:                  l.PodNamespace,
			DeletionGracePeriodSeconds: a.PodDeletionGracePeriod,
		},
		Spec: v1.PodSpec{
			TerminationGracePeriodSeconds: a.PodTerminationGracePeriod,
		},
	}
	container = &v1.Container{
		Name:                   l.ContainerName,
		Ports:                  a.ContainerPorts,
		TerminationMessagePath: a.TerminationMessagePath,
	}
	if a.PreStopHandler != nil {
		container.Lifecycle = &v1.Lifecycle{
			PreStop: a.PreStopHandler,
		}
	}
	return pod, container, nil
}
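// Hedged usage sketch (not the exact kubelet call site): the restored pod is only good for the
// fields written into labels/annotations, which is exactly what the kill path needs to resolve a
// termination grace period. This helper and its fallback value are assumptions for illustration.
func gracePeriodFromRestoredPod(pod *v1.Pod) int64 {
	switch {
	case pod.DeletionGracePeriodSeconds != nil:
		return *pod.DeletionGracePeriodSeconds
	case pod.Spec.TerminationGracePeriodSeconds != nil:
		return *pod.Spec.TerminationGracePeriodSeconds
	}
	return 2 // hypothetical minimum fallback for this sketch, not a kubelet constant
}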
// podWithSecrets builds a test pod in the given namespace that references the supplied
// image pull secrets and, per container, env vars sourced from secret key refs.
func podWithSecrets(ns, name string, toAttach secretsToAttach) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
		Spec: v1.PodSpec{},
	}
	for _, name := range toAttach.imagePullSecretNames {
		pod.Spec.ImagePullSecrets = append(
			pod.Spec.ImagePullSecrets, v1.LocalObjectReference{Name: name})
	}
	for i, names := range toAttach.containerEnvSecretNames {
		container := v1.Container{
			Name: fmt.Sprintf("container-%d", i),
		}
		for _, name := range names {
			envSource := &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{
						Name: name,
					},
				},
			}
			container.Env = append(container.Env, v1.EnvVar{ValueFrom: envSource})
		}
		pod.Spec.Containers = append(pod.Spec.Containers, container)
	}
	return pod
}
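// Hedged usage sketch: secretsToAttach is the helper struct this test file defines elsewhere;
// the field names below match the ones referenced in podWithSecrets. exercisePodWithSecrets is
// an illustrative helper, not part of the real tests.
func exercisePodWithSecrets() *v1.Pod {
	// The result has one image pull secret and a single container, "container-0",
	// with two env vars drawn from secret key refs.
	return podWithSecrets("ns1", "pod1", secretsToAttach{
		imagePullSecretNames:    []string{"pull-secret"},
		containerEnvSecretNames: [][]string{{"env-secret-a", "env-secret-b"}},
	})
}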
// ToAPIPod converts Pod to v1.Pod. Note that if a field in v1.Pod has no
// corresponding field in Pod, the field would not be populated.
func (p *Pod) ToAPIPod() *v1.Pod {
	var pod v1.Pod
	pod.UID = p.ID
	pod.Name = p.Name
	pod.Namespace = p.Namespace

	for _, c := range p.Containers {
		var container v1.Container
		container.Name = c.Name
		container.Image = c.Image
		pod.Spec.Containers = append(pod.Spec.Containers, container)
	}
	return &pod
}
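// Hedged usage sketch: converting a minimal runtime Pod. Only the UID, names, and each
// container's name/image survive, per the doc comment above; everything else stays at its
// zero value. The literal assumes Pod's Containers field is []*Container in this package.
func exampleToAPIPod() *v1.Pod {
	p := &Pod{
		ID:        "1234-uid",
		Name:      "example",
		Namespace: "default",
		Containers: []*Container{
			{Name: "app", Image: "nginx:1.11"},
		},
	}
	return p.ToAPIPod()
}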
// deepCopy_v1_Container deep-copies the fields of in into out, duplicating slices and
// pointer-to-struct fields so that out shares no mutable state with in.
func deepCopy_v1_Container(in v1.Container, out *v1.Container, c *conversion.Cloner) error {
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	} else {
		out.Command = nil
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	} else {
		out.Args = nil
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]v1.ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := deepCopy_v1_ContainerPort(in.Ports[i], &out.Ports[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Ports = nil
	}
	if in.Env != nil {
		out.Env = make([]v1.EnvVar, len(in.Env))
		for i := range in.Env {
			if err := deepCopy_v1_EnvVar(in.Env[i], &out.Env[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Env = nil
	}
	if err := deepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]v1.VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := deepCopy_v1_VolumeMount(in.VolumeMounts[i], &out.VolumeMounts[i], c); err != nil {
				return err
			}
		}
	} else {
		out.VolumeMounts = nil
	}
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(v1.Probe)
		if err := deepCopy_v1_Probe(*in.LivenessProbe, out.LivenessProbe, c); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(v1.Probe)
		if err := deepCopy_v1_Probe(*in.ReadinessProbe, out.ReadinessProbe, c); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(v1.Lifecycle)
		if err := deepCopy_v1_Lifecycle(*in.Lifecycle, out.Lifecycle, c); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = in.ImagePullPolicy
	if in.SecurityContext != nil {
		out.SecurityContext = new(v1.SecurityContext)
		if err := deepCopy_v1_SecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	out.Stdin = in.Stdin
	out.TTY = in.TTY
	return nil
}
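// Hedged usage sketch: after a deep copy, mutating the copy's slices must not touch the
// original. conversion.NewCloner() is assumed to be the constructor for the cloner argument
// that the nested helpers may use; this is an illustration, not the generated code's caller.
func exampleDeepCopyContainer() error {
	src := v1.Container{
		Name:    "app",
		Command: []string{"sh", "-c", "echo hello"},
	}
	var dst v1.Container
	if err := deepCopy_v1_Container(src, &dst, conversion.NewCloner()); err != nil {
		return err
	}
	dst.Command[0] = "bash"
	// src.Command[0] is still "sh": the Command slice was copied, not aliased.
	return nil
}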
	pollInterval = time.Second * 1
)

var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
	f := framework.NewDefaultFramework("runtime-conformance")

	Describe("container runtime conformance blackbox test", func() {
		Context("when starting a container that exits", func() {
			It("it should run with the expected status [Conformance]", func() {
				restartCountVolumeName := "restart-count"
				restartCountVolumePath := "/restart-count"
				testContainer := v1.Container{
					Image: "gcr.io/google_containers/busybox:1.24",
					VolumeMounts: []v1.VolumeMount{
						{
							MountPath: restartCountVolumePath,
							Name:      restartCountVolumeName,
						},
					},
				}
				testVolumes := []v1.Volume{
					{
						Name: restartCountVolumeName,
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
						},
					},
				}
				testCases := []struct {
					Name          string
					RestartPolicy v1.RestartPolicy
func TestProbe(t *testing.T) {
	prober := &prober{
		refManager: kubecontainer.NewRefManager(),
		recorder:   &record.FakeRecorder{},
	}
	containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}

	execProbe := &v1.Probe{
		Handler: v1.Handler{
			Exec: &v1.ExecAction{},
		},
	}
	tests := []struct {
		probe          *v1.Probe
		execError      bool
		expectError    bool
		execResult     probe.Result
		expectedResult results.Result
	}{
		{ // No probe
			probe:          nil,
			expectedResult: results.Success,
		},
		{ // No handler
			probe:          &v1.Probe{},
			expectError:    true,
			expectedResult: results.Failure,
		},
		{ // Probe fails
			probe:          execProbe,
			execResult:     probe.Failure,
			expectedResult: results.Failure,
		},
		{ // Probe succeeds
			probe:          execProbe,
			execResult:     probe.Success,
			expectedResult: results.Success,
		},
		{ // Probe result is unknown
			probe:          execProbe,
			execResult:     probe.Unknown,
			expectedResult: results.Failure,
		},
		{ // Probe has an error
			probe:          execProbe,
			execError:      true,
			expectError:    true,
			execResult:     probe.Unknown,
			expectedResult: results.Failure,
		},
	}

	for i, test := range tests {
		for _, probeType := range [...]probeType{liveness, readiness} {
			testID := fmt.Sprintf("%d-%s", i, probeType)
			testContainer := v1.Container{}
			switch probeType {
			case liveness:
				testContainer.LivenessProbe = test.probe
			case readiness:
				testContainer.ReadinessProbe = test.probe
			}
			if test.execError {
				prober.exec = fakeExecProber{test.execResult, errors.New("exec error")}
			} else {
				prober.exec = fakeExecProber{test.execResult, nil}
			}

			result, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID)
			if test.expectError && err == nil {
				t.Errorf("[%s] Expected probe error but no error was returned.", testID)
			}
			if !test.expectError && err != nil {
				t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
			}
			if test.expectedResult != result {
				t.Errorf("[%s] Expected result to be %v but was %v", testID, test.expectedResult, result)
			}
		}
	}
}
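// Hedged sketch of the fakeExecProber helper referenced above. The real definition lives
// elsewhere in this test package; the field order matches the positional literals
// fakeExecProber{result, err} used in TestProbe, and the Probe signature is assumed to
// satisfy the exec prober interface (exec.Cmd from the kubelet's util/exec package).
type fakeExecProber struct {
	result probe.Result
	err    error
}

func (p fakeExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) {
	return p.result, "", p.err
}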