Example #1
func TestSyncPodsDoesNothing(t *testing.T) {
	dm, fakeDocker := newTestDockerManager()
	container := api.Container{Name: "bar"}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				container,
			},
		},
	}
	fakeDocker.SetFakeRunningContainers([]*docker.Container{
		{
			ID:   "1234",
			Name: "/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&container), 16) + "_foo_new_12345678_0",
		},
		{
			ID:   "9876",
			Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
		}})

	runSyncPod(t, dm, fakeDocker, pod, nil, false)

	verifyCalls(t, fakeDocker, []string{
		// Check the pod infra container.
		"inspect_container",
	})
}
Example #2
// newContainerAnnotations creates container annotations from v1.Container and v1.Pod.
func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount int) map[string]string {
	annotations := map[string]string{}
	annotations[containerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	annotations[containerRestartCountLabel] = strconv.Itoa(restartCount)
	annotations[containerTerminationMessagePathLabel] = container.TerminationMessagePath

	if pod.DeletionGracePeriodSeconds != nil {
		annotations[podDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
	}
	if pod.Spec.TerminationGracePeriodSeconds != nil {
		annotations[podTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
	}

	if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
		// Using json encoding so that the PreStop handler object is readable after writing as an annotation
		rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
		if err != nil {
			glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
		} else {
			annotations[containerPreStopHandlerLabel] = string(rawPreStop)
		}
	}

	if len(container.Ports) > 0 {
		rawContainerPorts, err := json.Marshal(container.Ports)
		if err != nil {
			glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
		} else {
			annotations[containerPortsLabel] = string(rawContainerPorts)
		}
	}

	return annotations
}
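The PreStop handler and container ports above are stored as JSON strings inside a string-valued annotation map. A minimal, self-contained sketch of that round-trip pattern, using stand-in types (execAction, handler) and a hypothetical annotation key rather than the real v1 types and label constants:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types, loosely shaped like a lifecycle handler; not the real v1.Handler.
type execAction struct {
	Command []string `json:"command"`
}

type handler struct {
	Exec *execAction `json:"exec,omitempty"`
}

func main() {
	annotations := map[string]string{}

	// Marshal the nested object so it survives storage as a plain string value.
	raw, err := json.Marshal(&handler{Exec: &execAction{Command: []string{"sh", "-c", "sleep 5"}}})
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	annotations["example/pre-stop-handler"] = string(raw) // hypothetical key

	// Read it back, as getContainerInfoFromAnnotations would for the real keys.
	var restored handler
	if err := json.Unmarshal([]byte(annotations["example/pre-stop-handler"]), &restored); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(restored.Exec.Command) // [sh -c sleep 5]
}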
Example #3
func TestLabels(t *testing.T) {
	restartCount := 5
	container := &api.Container{
		Name: "test_container",
		TerminationMessagePath: "/tmp",
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "test_pod",
			Namespace: "test_pod_namespace",
			UID:       "test_pod_uid",
		},
	}
	expected := &labelledContainerInfo{
		PodName:                pod.Name,
		PodNamespace:           pod.Namespace,
		PodUID:                 pod.UID,
		Name:                   container.Name,
		Hash:                   strconv.FormatUint(kubecontainer.HashContainer(container), 16),
		RestartCount:           restartCount,
		TerminationMessagePath: container.TerminationMessagePath,
	}

	labels := newLabels(container, pod, restartCount)
	containerInfo, err := getContainerInfoFromLabel(labels)
	if err != nil {
		t.Errorf("Unexpected error when getContainerInfoFromLabel: %v", err)
	}
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}
}
Example #4
func newLabels(container *api.Container, pod *api.Pod, restartCount int) map[string]string {
	labels := map[string]string{}
	labels[kubernetesPodNameLabel] = pod.Name
	labels[kubernetesPodNamespaceLabel] = pod.Namespace
	labels[kubernetesPodUIDLabel] = string(pod.UID)
	if pod.DeletionGracePeriodSeconds != nil {
		labels[kubernetesPodDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
	}
	if pod.Spec.TerminationGracePeriodSeconds != nil {
		labels[kubernetesPodTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
	}

	labels[kubernetesContainerNameLabel] = container.Name
	labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
	labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
	if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
		// Using json encoding so that the PreStop handler object is readable after writing as a label
		rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
		if err != nil {
			glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
		} else {
			labels[kubernetesContainerPreStopHandlerLabel] = string(rawPreStop)
		}
	}

	return labels
}
Example #5
// Creates a name which can be reversed to identify both the full pod name and the container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) string {
	containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	return fmt.Sprintf("%s_%s_%s_%s_%08x",
		containerNamePrefix,
		containerName,
		dockerName.PodFullName,
		dockerName.PodUID,
		rand.Uint32())
}
Example #6
func (r *runtime) buildHyperContainerFullName(uid, podName, namespace, containerName string, container api.Container) string {
	return fmt.Sprintf("%s_%s_%s_%s_%s_%s",
		hyperContainerNamePrefix,
		uid,
		podName,
		namespace,
		containerName,
		strconv.FormatUint(kubecontainer.HashContainer(&container), 16))
}
Example #7
File: docker.go Project: copejon/origin
// Creates a name which can be reversed to identify both the full pod name and the container name.
// This function returns a stable name, a unique name, and a unique id.
// Although rand.Uint32() is not truly unique, it's good enough for us because a problem only
// occurs when instances of the same container in the same pod end up with the same UID, and
// the chance of that is really slim.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string, string) {
	containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	stableName := fmt.Sprintf("%s_%s_%s_%s",
		containerNamePrefix,
		containerName,
		dockerName.PodFullName,
		dockerName.PodUID)
	UID := fmt.Sprintf("%08x", rand.Uint32())
	return stableName, fmt.Sprintf("%s_%s", stableName, UID), UID
}
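A small, self-contained sketch of how a name built this way can be split back into its parts. It is not the kubelet's own parser; the literal name and field layout are assumed from the fmt.Sprintf pattern above (prefix, container.hash, pod name, pod namespace, pod UID, random suffix):

package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "/k8s_bar.deadbeef_foo_new_12345678_0"
	parts := strings.Split(strings.TrimPrefix(name, "/"), "_")
	if len(parts) != 6 {
		fmt.Println("unexpected name format")
		return
	}
	// parts[1] is "<containerName>.<hash>"; the remaining fields follow the layout above.
	nameAndHash := strings.SplitN(parts[1], ".", 2)
	if len(nameAndHash) != 2 {
		fmt.Println("missing container hash")
		return
	}
	fmt.Printf("container=%s hash=%s pod=%s namespace=%s uid=%s\n",
		nameAndHash[0], nameAndHash[1], parts[2], parts[3], parts[4])
}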
Example #8
func (r *Runtime) newAppcRuntimeApp(pod *api.Pod, c api.Container, pullSecrets []api.Secret) (*appcschema.RuntimeApp, []kubecontainer.PortMapping, error) {
	if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
		return nil, nil, err
	}
	imgManifest, err := r.getImageManifest(c.Image)
	if err != nil {
		return nil, nil, err
	}

	if imgManifest.App == nil {
		imgManifest.App = new(appctypes.App)
	}

	imageID, err := r.getImageID(c.Image)
	if err != nil {
		return nil, nil, err
	}
	hash, err := appctypes.NewHash(imageID)
	if err != nil {
		return nil, nil, err
	}

	opts, err := r.generator.GenerateRunContainerOptions(pod, &c)
	if err != nil {
		return nil, nil, err
	}

	if err := setApp(imgManifest.App, &c, opts); err != nil {
		return nil, nil, err
	}

	name, err := appctypes.SanitizeACName(c.Name)
	if err != nil {
		return nil, nil, err
	}
	appName := appctypes.MustACName(name)

	kubehash := kubecontainer.HashContainer(&c)

	return &appcschema.RuntimeApp{
		Name:  *appName,
		Image: appcschema.RuntimeImage{ID: *hash},
		App:   imgManifest.App,
		Annotations: []appctypes.Annotation{
			{
				Name:  *appctypes.MustACIdentifier(k8sRktContainerHashAnno),
				Value: strconv.FormatUint(kubehash, 10),
			},
		},
	}, opts.PortMappings, nil
}
Example #9
func newLabels(container *api.Container, pod *api.Pod, restartCount int) map[string]string {
	// TODO(random-liu): Move more label initialization here
	labels := map[string]string{}
	labels[kubernetesPodNameLabel] = pod.Name
	labels[kubernetesPodNamespaceLabel] = pod.Namespace
	labels[kubernetesPodUIDLabel] = string(pod.UID)

	labels[kubernetesContainerNameLabel] = container.Name
	labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
	labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath

	return labels
}
Example #10
func generatePodInfraContainerHash(pod *api.Pod) uint64 {
	var ports []api.ContainerPort
	if pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork {
		for _, container := range pod.Spec.Containers {
			ports = append(ports, container.Ports...)
		}
	}

	container := &api.Container{
		Name:            PodInfraContainerName,
		Image:           kubetypes.PodInfraContainerImage,
		Ports:           ports,
		ImagePullPolicy: podInfraContainerImagePullPolicy,
	}
	return kubecontainer.HashContainer(container)
}
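Every example on this page feeds a container spec into kubecontainer.HashContainer so that any change to the spec is detectable later. An illustrative, self-contained analogue of that idea (not the real implementation, which hashes the api.Container itself); containerSpec and hashSpec are stand-in names:

package main

import (
	"fmt"
	"hash/fnv"
)

type containerSpec struct {
	Name  string
	Image string
}

// hashSpec hashes the printed form of the struct, so changing any field changes the result.
func hashSpec(c *containerSpec) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%#v", *c)
	return h.Sum64()
}

func main() {
	a := &containerSpec{Name: "bar", Image: "busybox:1.0"}
	b := &containerSpec{Name: "bar", Image: "busybox:1.1"}
	fmt.Printf("%x vs %x\n", hashSpec(a), hashSpec(b)) // differ because the image changed
}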
Example #11
// apiPodToRuntimePod converts an api.Pod to a kubelet/container.Pod.
// We save this for later reconstruction of the kubelet/container.Pod,
// such as in GetPods().
func apiPodToRuntimePod(uuid string, pod *api.Pod) *kubecontainer.Pod {
	p := &kubecontainer.Pod{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		p.Containers = append(p.Containers, &kubecontainer.Container{
			ID:      types.UID(buildContainerID(&containerID{uuid, c.Name})),
			Name:    c.Name,
			Image:   c.Image,
			Hash:    kubecontainer.HashContainer(c),
			Created: time.Now().Unix(),
		})
	}
	return p
}
Example #12
func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCustomMetrics bool) map[string]string {
	labels := map[string]string{}
	labels[types.KubernetesPodNameLabel] = pod.Name
	labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
	labels[types.KubernetesPodUIDLabel] = string(pod.UID)
	if pod.DeletionGracePeriodSeconds != nil {
		labels[kubernetesPodDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
	}
	if pod.Spec.TerminationGracePeriodSeconds != nil {
		labels[kubernetesPodTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
	}

	labels[types.KubernetesContainerNameLabel] = container.Name
	labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
	labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
	if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
		// Using json encoding so that the PreStop handler object is readable after writing as a label
		rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
		if err != nil {
			glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
		} else {
			labels[kubernetesContainerPreStopHandlerLabel] = string(rawPreStop)
		}
	}
	if len(container.Ports) > 0 {
		rawContainerPorts, err := json.Marshal(container.Ports)
		if err != nil {
			glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
		} else {
			labels[kubernetesContainerPortsLabel] = string(rawContainerPorts)
		}
	}
	if enableCustomMetrics {
		path, err := custommetrics.GetCAdvisorCustomMetricsDefinitionPath(container)
		if path != nil && err == nil {
			labels[cadvisorPrometheusMetricsLabel] = *path
		}
	}

	return labels
}
Example #13
File: rkt.go Project: Tlacenka/origin
// TODO(yifan): Remove the receiver once we can solve the appName->imageID problem.
func (r *runtime) apiPodToruntimePod(uuid string, pod *api.Pod) *kubecontainer.Pod {
	p := &kubecontainer.Pod{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		img, err := r.getImageByName(c.Image)
		if err != nil {
			glog.Warningf("rkt: Cannot get image for %q: %v", c.Image, err)
		}
		p.Containers = append(p.Containers, &kubecontainer.Container{
			ID:      types.UID(buildContainerID(&containerID{uuid, c.Name, img.id})),
			Name:    c.Name,
			Image:   c.Image,
			Hash:    kubecontainer.HashContainer(c),
			Created: time.Now().Unix(),
		})
	}
	return p
}
Example #14
func TestContainerAnnotations(t *testing.T) {
	restartCount := 5
	deletionGracePeriod := int64(10)
	terminationGracePeriod := int64(10)
	lifecycle := &api.Lifecycle{
		// Left PostStart as nil
		PreStop: &api.Handler{
			Exec: &api.ExecAction{
				Command: []string{"action1", "action2"},
			},
			HTTPGet: &api.HTTPGetAction{
				Path:   "path",
				Host:   "host",
				Port:   intstr.FromInt(8080),
				Scheme: "scheme",
			},
			TCPSocket: &api.TCPSocketAction{
				Port: intstr.FromString("80"),
			},
		},
	}
	containerPorts := []api.ContainerPort{
		{
			Name:          "http",
			HostPort:      80,
			ContainerPort: 8080,
			Protocol:      api.ProtocolTCP,
		},
		{
			Name:          "https",
			HostPort:      443,
			ContainerPort: 6443,
			Protocol:      api.ProtocolTCP,
		},
	}
	container := &api.Container{
		Name:  "test_container",
		Ports: containerPorts,
		TerminationMessagePath: "/somepath",
		Lifecycle:              lifecycle,
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "test_pod",
			Namespace: "test_pod_namespace",
			UID:       "test_pod_uid",
			DeletionGracePeriodSeconds: &deletionGracePeriod,
		},
		Spec: api.PodSpec{
			Containers:                    []api.Container{*container},
			TerminationGracePeriodSeconds: &terminationGracePeriod,
		},
	}
	expected := &annotatedContainerInfo{
		ContainerPorts:            containerPorts,
		PodDeletionGracePeriod:    pod.DeletionGracePeriodSeconds,
		PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
		Hash:                   kubecontainer.HashContainer(container),
		RestartCount:           restartCount,
		TerminationMessagePath: container.TerminationMessagePath,
		PreStopHandler:         container.Lifecycle.PreStop,
	}

	// Test whether we can get the right information from the annotations
	annotations := newContainerAnnotations(container, pod, restartCount)
	containerInfo := getContainerInfoFromAnnotations(annotations)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}

	// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil,
	// the information retrieved from the annotations is also nil
	container.Lifecycle = nil
	pod.DeletionGracePeriodSeconds = nil
	pod.Spec.TerminationGracePeriodSeconds = nil
	expected.PodDeletionGracePeriod = nil
	expected.PodTerminationGracePeriod = nil
	expected.PreStopHandler = nil
	// Because the container has changed, the Hash should be updated
	expected.Hash = kubecontainer.HashContainer(container)
	annotations = newContainerAnnotations(container, pod, restartCount)
	containerInfo = getContainerInfoFromAnnotations(annotations)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}
}
Example #15
File: rkt.go Project: knobunc/kubernetes
// SyncPod syncs the running pod to match the specified desired pod.
func (r *Runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	// TODO: (random-liu) Stop using running pod in SyncPod()
	// TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
	// we may stop using apiPodStatus someday.
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestartedOldVersion(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO: check for non-root image directives.  See ../docker/manager.go#SyncPod

		// TODO(yifan): Take care of host network change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		// Kill the pod only if the pod is actually running.
		if len(runningPod.Containers) > 0 {
			if err := r.KillPod(pod, runningPod); err != nil {
				return err
			}
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			return err
		}
	}
	return nil
}
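The SyncPod variants in this and the following examples all share the same core decision: recompute the hash of every desired container and restart when a running container is missing or its recorded hash no longer matches. A self-contained sketch of that decision with stand-in types (not the kubelet API):

package main

import (
	"fmt"
	"hash/fnv"
)

type desiredContainer struct {
	Name  string
	Image string
}

type runningContainer struct {
	Name string
	Hash uint64
}

func hashSpec(c *desiredContainer) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%#v", *c)
	return h.Sum64()
}

// needsRestart reports whether any desired container is missing or has a stale hash.
func needsRestart(desired []desiredContainer, live map[string]runningContainer) bool {
	for i := range desired {
		c, ok := live[desired[i].Name]
		if !ok {
			return true // the container is not running at all
		}
		if c.Hash != 0 && c.Hash != hashSpec(&desired[i]) {
			return true // the spec changed since the container was started
		}
	}
	return false
}

func main() {
	oldSpec := desiredContainer{Name: "bar", Image: "busybox:1.0"}
	live := map[string]runningContainer{"bar": {Name: "bar", Hash: hashSpec(&oldSpec)}}

	newSpec := []desiredContainer{{Name: "bar", Image: "busybox:1.1"}}
	fmt.Println(needsRestart(newSpec, live)) // true: the image changed
}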
Example #16
func TestLabels(t *testing.T) {
	restartCount := 5
	deletionGracePeriod := int64(10)
	terminationGracePeriod := int64(10)
	lifecycle := &v1.Lifecycle{
		// Left PostStart as nil
		PreStop: &v1.Handler{
			Exec: &v1.ExecAction{
				Command: []string{"action1", "action2"},
			},
			HTTPGet: &v1.HTTPGetAction{
				Path:   "path",
				Host:   "host",
				Port:   intstr.FromInt(8080),
				Scheme: "scheme",
			},
			TCPSocket: &v1.TCPSocketAction{
				Port: intstr.FromString("80"),
			},
		},
	}
	containerPorts := []v1.ContainerPort{
		{
			Name:          "http",
			HostPort:      80,
			ContainerPort: 8080,
			Protocol:      v1.ProtocolTCP,
		},
		{
			Name:          "https",
			HostPort:      443,
			ContainerPort: 6443,
			Protocol:      v1.ProtocolTCP,
		},
	}
	container := &v1.Container{
		Name:  "test_container",
		Ports: containerPorts,
		TerminationMessagePath: "/somepath",
		Lifecycle:              lifecycle,
	}
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:      "test_pod",
			Namespace: "test_pod_namespace",
			UID:       "test_pod_uid",
			DeletionGracePeriodSeconds: &deletionGracePeriod,
		},
		Spec: v1.PodSpec{
			Containers:                    []v1.Container{*container},
			TerminationGracePeriodSeconds: &terminationGracePeriod,
		},
	}
	expected := &labelledContainerInfo{
		PodName:                   pod.Name,
		PodNamespace:              pod.Namespace,
		PodUID:                    pod.UID,
		PodDeletionGracePeriod:    pod.DeletionGracePeriodSeconds,
		PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
		Name:                   container.Name,
		Hash:                   strconv.FormatUint(kubecontainer.HashContainer(container), 16),
		RestartCount:           restartCount,
		TerminationMessagePath: container.TerminationMessagePath,
		PreStopHandler:         container.Lifecycle.PreStop,
		Ports:                  containerPorts,
	}

	// Test whether we can get the right information from the labels
	labels := newLabels(container, pod, restartCount, false)
	containerInfo := getContainerInfoFromLabel(labels)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}

	// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil,
	// the information retrieved from the labels is also nil
	container.Lifecycle = nil
	pod.DeletionGracePeriodSeconds = nil
	pod.Spec.TerminationGracePeriodSeconds = nil
	expected.PodDeletionGracePeriod = nil
	expected.PodTerminationGracePeriod = nil
	expected.PreStopHandler = nil
	// Because the container has changed, the Hash should be updated
	expected.Hash = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	labels = newLabels(container, pod, restartCount, false)
	containerInfo = getContainerInfoFromLabel(labels)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}

	// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil
	// but the old label kubernetesPodLabel is set, the information retrieved from the labels is still set
	pod.DeletionGracePeriodSeconds = &deletionGracePeriod
	pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriod
	container.Lifecycle = lifecycle
	data, err := runtime.Encode(testapi.Default.Codec(), pod)
	if err != nil {
		t.Fatalf("Failed to encode pod %q into string: %v", format.Pod(pod), err)
	}
	labels[kubernetesPodLabel] = string(data)
	expected.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds
	expected.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
	expected.PreStopHandler = container.Lifecycle.PreStop
	// Do not update expected.Hash here: we reuse the labels from the last test directly and never
	// change kubernetesContainerHashLabel in this test, so expected.Hash shouldn't change.
	containerInfo = getContainerInfoFromLabel(labels)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}
}
Example #17
// buildContainerName creates a name which can be reversed to identify the container name.
// This function returns a stable name, a unique name, and a unique id.
func buildContainerName(pod *api.Pod, container *api.Container) (string, string, string) {
	// kubelet uses hash to determine whether an existing container matches the desired spec.
	containerName := container.Name + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	return buildKubeGenericName(pod, containerName)
}
Example #18
File: rkt.go Project: sjtud/kubernetes
func (r *Runtime) newAppcRuntimeApp(pod *api.Pod, c api.Container, pullSecrets []api.Secret, manifest *appcschema.PodManifest) error {
	if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
		return err
	}
	imgManifest, err := r.getImageManifest(c.Image)
	if err != nil {
		return err
	}

	if imgManifest.App == nil {
		imgManifest.App = new(appctypes.App)
	}

	imageID, err := r.getImageID(c.Image)
	if err != nil {
		return err
	}
	hash, err := appctypes.NewHash(imageID)
	if err != nil {
		return err
	}

	opts, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c)
	if err != nil {
		return err
	}

	// create the container log file and make a mount pair.
	mnt, err := makeContainerLogMount(opts, &c)
	if err != nil {
		return err
	}

	ctx := securitycontext.DetermineEffectiveSecurityContext(pod, &c)
	if err := setApp(imgManifest.App, &c, opts, ctx, pod.Spec.SecurityContext); err != nil {
		return err
	}

	ra := appcschema.RuntimeApp{
		Name:  convertToACName(c.Name),
		Image: appcschema.RuntimeImage{ID: *hash},
		App:   imgManifest.App,
		Annotations: []appctypes.Annotation{
			{
				Name:  *appctypes.MustACIdentifier(k8sRktContainerHashAnno),
				Value: strconv.FormatUint(kubecontainer.HashContainer(&c), 10),
			},
		},
	}

	if mnt != nil {
		ra.Annotations = append(ra.Annotations, appctypes.Annotation{
			Name:  *appctypes.MustACIdentifier(k8sRktTerminationMessagePathAnno),
			Value: mnt.HostPath,
		})

		manifest.Volumes = append(manifest.Volumes, appctypes.Volume{
			Name:   convertToACName(mnt.Name),
			Kind:   "host",
			Source: mnt.HostPath,
		})
	}

	manifest.Apps = append(manifest.Apps, ra)

	// Set global ports.
	for _, port := range opts.PortMappings {
		manifest.Ports = append(manifest.Ports, appctypes.ExposedPort{
			Name:     convertToACName(port.Name),
			HostPort: uint(port.HostPort),
		})
	}

	return nil
}
Example #19
// SyncPod syncs the running pod to match the desired pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	podFullName := r.buildHyperPodFullName(string(pod.UID), string(pod.Name), string(pod.Namespace))
	if len(runningPod.Containers) == 0 {
		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
		return r.RunPod(pod, pullSecrets)
	}

	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				restartPod = true
				break
			}
			continue
		}

		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.V(4).Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		if err := r.KillPod(nil, runningPod); err != nil {
			glog.Errorf("Hyper: kill pod %s failed, error: %s", runningPod.Name, err)
			return err
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			glog.Errorf("Hyper: run pod %s failed, error: %s", pod.Name, err)
			return err
		}
	}
	return nil
}
Example #20
func TestSyncPodWithRestartPolicy(t *testing.T) {
	dm, fakeDocker := newTestDockerManager()
	containers := []api.Container{
		{Name: "succeeded"},
		{Name: "failed"},
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: api.PodSpec{
			Containers: containers,
		},
	}
	dockerContainers := []*docker.Container{
		{
			ID:     "9876",
			Name:   "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
			Config: &docker.Config{},
			State: docker.State{
				StartedAt: time.Now(),
				Running:   true,
			},
		},
		{
			ID:     "1234",
			Name:   "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0",
			Config: &docker.Config{},
			State: docker.State{
				ExitCode:   0,
				StartedAt:  time.Now(),
				FinishedAt: time.Now(),
			},
		},
		{
			ID:     "5678",
			Name:   "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0",
			Config: &docker.Config{},
			State: docker.State{
				ExitCode:   42,
				StartedAt:  time.Now(),
				FinishedAt: time.Now(),
			},
		}}

	tests := []struct {
		policy  api.RestartPolicy
		calls   []string
		created []string
		stopped []string
	}{
		{
			api.RestartPolicyAlways,
			[]string{
				// Check the pod infra container.
				"inspect_container",
				// Restart both containers.
				"create", "start", "inspect_container", "create", "start", "inspect_container",
			},
			[]string{"succeeded", "failed"},
			[]string{},
		},
		{
			api.RestartPolicyOnFailure,
			[]string{
				// Check the pod infra container.
				"inspect_container",
				// Restart the failed container.
				"create", "start", "inspect_container",
			},
			[]string{"failed"},
			[]string{},
		},
		{
			api.RestartPolicyNever,
			[]string{
				// Check the pod infra container.
				"inspect_container", "inspect_container", "inspect_container",
				// Stop the last pod infra container.
				"stop",
			},
			[]string{},
			[]string{"9876"},
		},
	}

	for i, tt := range tests {
		fakeDocker.SetFakeContainers(dockerContainers)
		pod.Spec.RestartPolicy = tt.policy
		runSyncPod(t, dm, fakeDocker, pod, nil, false)
		// 'stop' is because the pod infra container is killed when no container is running.
		verifyCalls(t, fakeDocker, tt.calls)

		if err := fakeDocker.AssertCreated(tt.created); err != nil {
			t.Errorf("case [%d]: %v", i, err)
		}
		if err := fakeDocker.AssertStopped(tt.stopped); err != nil {
			t.Errorf("case [%d]: %v", i, err)
		}
	}
}
Example #21
// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
	glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)

	sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
	changes := podContainerSpecChanges{
		CreateSandbox:        sandboxChanged,
		SandboxID:            sandboxID,
		Attempt:              attempt,
		ContainersToStart:    make(map[int]string),
		ContainersToKeep:     make(map[kubecontainer.ContainerID]int),
		InitContainersToKeep: make(map[kubecontainer.ContainerID]int),
		ContainersToKill:     make(map[kubecontainer.ContainerID]containerToKillInfo),
	}

	// check the status of init containers.
	initFailed := false
	// always reset the init containers if the sandbox is changed.
	if !sandboxChanged {
		// Keep all successfully completed containers. If there are failing containers,
		// only keep the first failing one.
		initFailed = checkAndKeepInitContainers(pod, podStatus, changes.InitContainersToKeep)
	}
	changes.InitFailed = initFailed

	// check the status of containers.
	for index, container := range pod.Spec.Containers {
		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
				message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}
		if sandboxChanged {
			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
				message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		if initFailed {
			// Initialization failed and Container exists.
			// If we have an initialization failure everything will be killed anyway.
			// If RestartPolicy is Always or OnFailure we restart containers that were running before.
			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
				message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
				glog.V(1).Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		expectedHash := kubecontainer.HashContainer(&container)
		containerChanged := containerStatus.Hash != expectedHash
		if containerChanged {
			message := fmt.Sprintf("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				pod.Name, container.Name, containerStatus.Hash, expectedHash)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		liveness, found := m.livenessManager.Get(containerStatus.ID)
		if !found || liveness == proberesults.Success {
			changes.ContainersToKeep[containerStatus.ID] = index
			continue
		}
		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			glog.Info(message)
			changes.ContainersToStart[index] = message
		}
	}

	// Don't keep init containers if they are the only containers to keep.
	if !sandboxChanged && len(changes.ContainersToStart) == 0 && len(changes.ContainersToKeep) == 0 {
		changes.InitContainersToKeep = make(map[kubecontainer.ContainerID]int)
	}

	// compute containers to be killed
	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
	for _, containerStatus := range runningContainerStatuses {
		_, keep := changes.ContainersToKeep[containerStatus.ID]
		_, keepInit := changes.InitContainersToKeep[containerStatus.ID]
		if !keep && !keepInit {
			var podContainer *api.Container
			var killMessage string
			for i, c := range pod.Spec.Containers {
				if c.Name == containerStatus.Name {
					podContainer = &pod.Spec.Containers[i]
					killMessage = changes.ContainersToStart[i]
					break
				}
			}

			changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
				name:      containerStatus.Name,
				container: podContainer,
				message:   killMessage,
			}
		}
	}

	return changes
}
Example #22
func TestSyncPodBackoff(t *testing.T) {
	var fakeClock = util.NewFakeClock(time.Now())
	startTime := fakeClock.Now()

	dm, fakeDocker := newTestDockerManager()
	containers := []api.Container{
		{Name: "good"},
		{Name: "bad"},
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "podfoo",
			Namespace: "nsnew",
		},
		Spec: api.PodSpec{
			Containers: containers,
		},
	}

	stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678"
	dockerContainers := []*docker.Container{
		{
			ID:   "9876",
			Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				StartedAt: startTime,
				Running:   true,
			},
		},
		{
			ID:   "1234",
			Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				StartedAt: startTime,
				Running:   true,
			},
		},
		{
			ID:   "5678",
			Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				ExitCode:   42,
				StartedAt:  startTime,
				FinishedAt: fakeClock.Now(),
			},
		},
	}

	startCalls := []string{"inspect_container", "create", "start", "inspect_container"}
	backOffCalls := []string{"inspect_container"}
	startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
	backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
	tests := []struct {
		tick      int
		backoff   int
		killDelay int
		result    []string
		expectErr bool
	}{
		{1, 1, 1, startCalls, false},
		{2, 2, 2, startCalls, false},
		{3, 2, 3, backOffCalls, true},
		{4, 4, 4, startCalls, false},
		{5, 4, 5, backOffCalls, true},
		{6, 4, 6, backOffCalls, true},
		{7, 4, 7, backOffCalls, true},
		{8, 8, 129, startCalls, false},
		{130, 1, 0, startCalls, false},
	}

	backOff := util.NewBackOff(time.Second, time.Minute)
	backOff.Clock = fakeClock
	for _, c := range tests {
		fakeDocker.SetFakeContainers(dockerContainers)
		fakeClock.SetTime(startTime.Add(time.Duration(c.tick) * time.Second))

		result := runSyncPod(t, dm, fakeDocker, pod, backOff, c.expectErr)
		verifyCalls(t, fakeDocker, c.result)

		// Verify whether the correct sync pod result is generated
		if c.expectErr {
			verifySyncResults(t, []*kubecontainer.SyncResult{backoffResult}, result)
		} else {
			verifySyncResults(t, []*kubecontainer.SyncResult{startResult}, result)
		}

		if backOff.Get(stableId) != time.Duration(c.backoff)*time.Second {
			t.Errorf("At tick %s expected backoff=%s got=%s", time.Duration(c.tick)*time.Second, time.Duration(c.backoff)*time.Second, backOff.Get(stableId))
		}

		if len(fakeDocker.Created) > 0 {
			// pretend to kill the container
			fakeDocker.Created = nil
			dockerContainers[2].State.FinishedAt = startTime.Add(time.Duration(c.killDelay) * time.Second)
		}
	}
}
Example #23
// SyncPod syncs the running pod to match the desired pod.
func (r *runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	// TODO: (random-liu) Stop using running pod in SyncPod()
	// TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
	// we may stop using apiPodStatus someday.
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestartedOldVersion(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				restartPod = true
				break
			}
			continue
		}

		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.V(4).Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		restartCount := 0
		// Only kill the pod if it already exists
		podID, err := r.hyperClient.GetPodIDByName(podFullName)
		if err == nil && len(podID) > 0 {
			// Update pod restart count
			restartCount, err = r.GetPodStartCount(podID)
			if err != nil {
				glog.Errorf("Hyper: get pod startcount failed: %v", err)
				return err
			}
			restartCount += 1

			if err := r.KillPod(nil, runningPod); err != nil {
				glog.Errorf("Hyper: kill pod %s failed, error: %s", runningPod.Name, err)
				return err
			}
		}

		if err := r.RunPod(pod, restartCount, pullSecrets); err != nil {
			glog.Errorf("Hyper: run pod %s failed, error: %s", pod.Name, err)
			return err
		}
	}
	return nil
}
Example #24
// SyncPod syncs the running pod to match the specified desired pod.
func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	podFullName := kubeletUtil.FormatPodName(pod)
	if len(runningPod.Containers) == 0 {
		glog.V(4).Infof("Pod %q is not running, will start it", podFullName)
		return r.RunPod(pod, pullSecrets)
	}

	// Add references to all containers.
	unidentifiedContainers := make(map[types.UID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, &podStatus, r.readinessManager) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO: check for non-root image directives.  See ../docker/manager.go#SyncPod

		// TODO(yifan): Take care of host network change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		result, err := r.prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
		// TODO(vmarmol): examine this logic.
		if err == nil && result != probe.Success {
			glog.Infof("Pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
			restartPod = true
			break
		}

		if err != nil {
			glog.V(2).Infof("Probe container %q failed: %v", container.Name, err)
		}
		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		if err := r.KillPod(pod, runningPod); err != nil {
			return err
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			return err
		}
	}
	return nil
}
Example #25
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)

	podManager, _ := newFakePodManager()

	kb := &Kubelet{
		rootDirectory:       "/tmp/kubelet",
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		statusManager:       newStatusManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager:    kubecontainer.NewReadinessManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
	}
	kb.containerManager, _ = newContainerManager(cadvisor, "", "", "")

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	podContainers := []docker.APIContainers{
		{
			Names:  []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&api.Container{Name: "bar"}), 16) + "_foo_new_12345678_42"},
			ID:     "1234",
			Status: "running",
		},
		{
			Names:  []string{"/k8s_net_foo.new.test_abcdefgh_42"},
			ID:     "9876",
			Status: "running",
		},
	}
	kb.dockerClient = &testDocker{
		listContainersResults: []listContainersResult{
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: []docker.APIContainers{}},
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: podContainers},
			{label: "list pod container", containers: podContainers},
			{label: "list pod container", containers: podContainers},
		},
		inspectContainersResults: []inspectContainersResult{
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
		},
		t: t,
	}

	kb.containerRuntime = dockertools.NewFakeDockerManager(
		kb.dockerClient,
		kb.recorder,
		kb.readinessManager,
		kb.containerRefManager,
		dockertools.PodInfraContainerImage,
		0,
		0,
		"",
		kubecontainer.FakeOS{},
		kb.networkPlugin,
		kb,
		nil,
		newKubeletRuntimeHooks(kb.recorder))

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example #26
// getStableKey generates a key (string) to uniquely identify a
// (pod, container) tuple. The key should include the content of the
// container, so that any change to the container generates a new key.
func getStableKey(pod *v1.Pod, container *v1.Container) string {
	hash := strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	return fmt.Sprintf("%s_%s_%s_%s_%s", pod.Name, pod.Namespace, string(pod.UID), container.Name, hash)
}
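A self-contained sketch of the idea behind getStableKey, using stand-in types and a stand-in hash rather than the real kubecontainer.HashContainer: because the key embeds a hash of the container spec, editing the spec yields a new key, so any per-container state indexed by the old key (for example back-off counters) effectively starts fresh:

package main

import (
	"fmt"
	"hash/fnv"
)

type containerSpec struct {
	Name  string
	Image string
}

// stableKey mirrors the pod/namespace/UID/name/hash layout shown above with stand-in fields.
func stableKey(podName, podNamespace, podUID string, c *containerSpec) string {
	h := fnv.New64a()
	fmt.Fprintf(h, "%#v", *c)
	return fmt.Sprintf("%s_%s_%s_%s_%x", podName, podNamespace, podUID, c.Name, h.Sum64())
}

func main() {
	c := &containerSpec{Name: "bar", Image: "busybox:1.0"}
	fmt.Println(stableKey("foo", "new", "12345678", c))
	c.Image = "busybox:1.1" // any spec change produces a new key
	fmt.Println(stableKey("foo", "new", "12345678", c))
}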
Example #27
// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
	glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)

	sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
	changes := podContainerSpecChanges{
		CreateSandbox:     sandboxChanged,
		SandboxID:         sandboxID,
		Attempt:           attempt,
		ContainersToStart: make(map[int]string),
		ContainersToKeep:  make(map[kubecontainer.ContainerID]int),
		ContainersToKill:  make(map[kubecontainer.ContainerID]containerToKillInfo),
	}

	for index, container := range pod.Spec.Containers {
		if sandboxChanged {
			message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
			if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
				message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				glog.Info(message)
				changes.ContainersToStart[index] = message
			}
			continue
		}

		expectedHash := kubecontainer.HashContainer(&container)
		containerChanged := containerStatus.Hash != expectedHash
		if containerChanged {
			message := fmt.Sprintf("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				pod.Name, container.Name, containerStatus.Hash, expectedHash)
			glog.Info(message)
			changes.ContainersToStart[index] = message
			continue
		}

		liveness, found := m.livenessManager.Get(containerStatus.ID)
		if !found || liveness == proberesults.Success {
			changes.ContainersToKeep[containerStatus.ID] = index
			continue
		}
		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			glog.Info(message)
			changes.ContainersToStart[index] = message
		}
	}

	// compute containers to be killed
	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
	for _, containerStatus := range runningContainerStatuses {
		if _, keep := changes.ContainersToKeep[containerStatus.ID]; !keep {
			var podContainer *api.Container
			var killMessage string
			for i, c := range pod.Spec.Containers {
				if c.Name == containerStatus.Name {
					podContainer = &pod.Spec.Containers[i]
					killMessage = changes.ContainersToStart[i]
					break
				}
			}

			changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
				name:      containerStatus.Name,
				container: podContainer,
				message:   killMessage,
			}
		}
	}

	return changes
}