Example #1
// setApp merges the container spec with the image's manifest.
func setApp(app *appctypes.App, c *api.Container, opts *kubecontainer.RunContainerOptions, ctx *api.SecurityContext, podCtx *api.PodSecurityContext) error {
	// TODO(yifan): If ENTRYPOINT and CMD are both specified in the image,
	// we cannot override just one of these at this point as they are already mixed.
	command, args := kubecontainer.ExpandContainerCommandAndArgs(c, opts.Envs)
	exec := append(command, args...)
	if len(exec) > 0 {
		app.Exec = exec
	}

	// Set UID and GIDs.
	if err := verifyNonRoot(app, ctx); err != nil {
		return err
	}
	if ctx != nil && ctx.RunAsUser != nil {
		app.User = strconv.Itoa(int(*ctx.RunAsUser))
	}
	setSupplementaryGIDs(app, podCtx)

	// Set working directory.
	if len(c.WorkingDir) > 0 {
		app.WorkingDirectory = c.WorkingDir
	}

	// Note that we don't create the Mounts section in the pod manifest here,
	// as Mounts will be automatically generated by rkt.
	mergeMounts(app, opts.Mounts)
	mergeEnv(app, opts.Envs)
	mergePortMappings(app, opts.PortMappings)

	return setIsolators(app, c, ctx)
}
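
Every example above funnels the container spec through kubecontainer.ExpandContainerCommandAndArgs before building the runtime config. Below is a minimal, self-contained sketch of the $(VAR) substitution it performs, assuming the upstream semantics (declared variables expand, $$ escapes to a literal $, unresolved references stay literal); the expand helper is illustrative, not the upstream API.

package main

import (
	"fmt"
	"strings"
)

// expand substitutes $(NAME) references in s using env.
func expand(s string, env map[string]string) string {
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		if s[i] != '$' {
			b.WriteByte(s[i])
			continue
		}
		// "$$" escapes to a literal "$".
		if i+1 < len(s) && s[i+1] == '$' {
			b.WriteByte('$')
			i++
			continue
		}
		// "$(NAME)" expands when NAME is declared; otherwise it stays literal.
		if i+1 < len(s) && s[i+1] == '(' {
			if end := strings.IndexByte(s[i+2:], ')'); end >= 0 {
				name := s[i+2 : i+2+end]
				if v, ok := env[name]; ok {
					b.WriteString(v)
				} else {
					b.WriteString(s[i : i+3+end])
				}
				i += 2 + end
				continue
			}
		}
		// A lone "$" is kept as-is.
		b.WriteByte('$')
	}
	return b.String()
}

func main() {
	env := map[string]string{"DATA_DIR": "/var/data"}
	fmt.Println(expand("--dir=$(DATA_DIR)", env)) // --dir=/var/data
	fmt.Println(expand("$$(DATA_DIR)", env))      // $(DATA_DIR) (escaped)
	fmt.Println(expand("$(MISSING)", env))        // $(MISSING) (left as-is)
}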
Example #2
// generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeapi.ContainerConfig, error) {
	opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
	if err != nil {
		return nil, err
	}

	uid, username, err := m.getImageUser(container.Image)
	if err != nil {
		return nil, err
	}
	if uid != nil {
		// Verify RunAsNonRoot. Non-root verification only supports numeric user.
		if err := verifyRunAsNonRoot(pod, container, *uid); err != nil {
			return nil, err
		}
	} else {
		glog.Warningf("Non-root verification doesn't support non-numeric user (%s)", *username)
	}

	command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
	containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
	restartCountUint32 := uint32(restartCount)
	config := &runtimeapi.ContainerConfig{
		Metadata: &runtimeapi.ContainerMetadata{
			Name:    &container.Name,
			Attempt: &restartCountUint32,
		},
		Image:       &runtimeapi.ImageSpec{Image: &container.Image},
		Command:     command,
		Args:        args,
		WorkingDir:  &container.WorkingDir,
		Labels:      newContainerLabels(container, pod),
		Annotations: newContainerAnnotations(container, pod, restartCount),
		Devices:     makeDevices(opts),
		Mounts:      m.makeMounts(opts, container),
		LogPath:     &containerLogsPath,
		Stdin:       &container.Stdin,
		StdinOnce:   &container.StdinOnce,
		Tty:         &container.TTY,
		Linux:       m.generateLinuxContainerConfig(container, pod, uid, username),
	}

	// set environment variables
	envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
	for idx := range opts.Envs {
		e := opts.Envs[idx]
		envs[idx] = &runtimeapi.KeyValue{
			Key:   &e.Name,
			Value: &e.Value,
		}
	}
	config.Envs = envs

	return config, nil
}
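
The env loop above indexes into opts.Envs and copies each element into a fresh local before taking &e.Name and &e.Value, because the runtime API uses pointer fields. A small demonstration of why that copy matters, assuming pre-Go 1.22 loop-variable semantics; the kv type and slices are made up for illustration.

package main

import "fmt"

type kv struct{ Name, Value string }

func main() {
	src := []kv{{"A", "1"}, {"B", "2"}}

	// Buggy before Go 1.22: e is one variable reused across iterations,
	// so both stored pointers alias the same storage.
	var bad []*string
	for _, e := range src {
		bad = append(bad, &e.Name)
	}

	// Safe in every version: a fresh copy is declared per iteration,
	// which is exactly what the loop in the example above does.
	var good []*string
	for i := range src {
		e := src[i]
		good = append(good, &e.Name)
	}

	fmt.Println(*bad[0], *bad[1])   // "B B" before Go 1.22
	fmt.Println(*good[0], *good[1]) // "A B"
}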
Example #3
// generateContainerConfig generates container config for kubelet runtime api.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *api.Container, pod *api.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) {
	opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
	if err != nil {
		return nil, err
	}

	command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
	containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
	podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
	restartCountUint32 := uint32(restartCount)
	config := &runtimeApi.ContainerConfig{
		Metadata: &runtimeApi.ContainerMetadata{
			Name:    &container.Name,
			Attempt: &restartCountUint32,
		},
		Image:       &runtimeApi.ImageSpec{Image: &container.Image},
		Command:     command,
		Args:        args,
		WorkingDir:  &container.WorkingDir,
		Labels:      newContainerLabels(container, pod),
		Annotations: newContainerAnnotations(container, pod, restartCount),
		Mounts:      m.makeMounts(opts, container, podHasSELinuxLabel),
		Devices:     makeDevices(opts),
		LogPath:     &containerLogsPath,
		Stdin:       &container.Stdin,
		StdinOnce:   &container.StdinOnce,
		Tty:         &container.TTY,
		Linux:       m.generateLinuxContainerConfig(container, pod),
	}

	// set privileged and readonlyRootfs
	if container.SecurityContext != nil {
		securityContext := container.SecurityContext
		if securityContext.Privileged != nil {
			config.Privileged = securityContext.Privileged
		}
		if securityContext.ReadOnlyRootFilesystem != nil {
			config.ReadonlyRootfs = securityContext.ReadOnlyRootFilesystem
		}
	}

	// set environment variables
	envs := make([]*runtimeApi.KeyValue, len(opts.Envs))
	for idx := range opts.Envs {
		e := opts.Envs[idx]
		envs[idx] = &runtimeApi.KeyValue{
			Key:   &e.Name,
			Value: &e.Value,
		}
	}
	config.Envs = envs

	return config, nil
}
Example #4
// setApp merges the container spec with the image's manifest.
func setApp(imgManifest *appcschema.ImageManifest, c *api.Container, opts *kubecontainer.RunContainerOptions, ctx *api.SecurityContext, podCtx *api.PodSecurityContext) error {
	app := imgManifest.App

	// Set up Exec.
	var command, args []string
	cmd, ok := imgManifest.Annotations.Get(appcDockerEntrypoint)
	if ok {
		err := json.Unmarshal([]byte(cmd), &command)
		if err != nil {
			return fmt.Errorf("cannot unmarshal ENTRYPOINT %q: %v", cmd, err)
		}
	}
	ag, ok := imgManifest.Annotations.Get(appcDockerCmd)
	if ok {
		err := json.Unmarshal([]byte(ag), &args)
		if err != nil {
			return fmt.Errorf("cannot unmarshal CMD %q: %v", ag, err)
		}
	}
	userCommand, userArgs := kubecontainer.ExpandContainerCommandAndArgs(c, opts.Envs)

	if len(userCommand) > 0 {
		command = userCommand
		args = nil // If 'command' is specified, then drop the default args.
	}
	if len(userArgs) > 0 {
		args = userArgs
	}

	exec := append(command, args...)
	if len(exec) > 0 {
		app.Exec = exec
	}

	// Set UID and GIDs.
	if err := verifyNonRoot(app, ctx); err != nil {
		return err
	}
	if ctx != nil && ctx.RunAsUser != nil {
		app.User = strconv.Itoa(int(*ctx.RunAsUser))
	}
	setSupplementaryGIDs(app, podCtx)

	// If 'User' or 'Group' are still empty at this point,
	// then apply the root UID and GID.
	// TODO(yifan): Instead of using root GID, we should use
	// the GID which the user is in.
	if app.User == "" {
		app.User = "******"
	}
	if app.Group == "" {
		app.Group = "0"
	}

	// Set working directory.
	if len(c.WorkingDir) > 0 {
		app.WorkingDirectory = c.WorkingDir
	}

	// Note that we don't create the Mounts section in the pod manifest here,
	// as Mounts will be automatically generated by rkt.
	mergeMounts(app, opts.Mounts)
	mergeEnv(app, opts.Envs)
	mergePortMappings(app, opts.PortMappings)

	return setIsolators(app, c, ctx)
}
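
Example #4 spells out the Docker-style override rules: the image's ENTRYPOINT and CMD are the defaults, a user-supplied command replaces ENTRYPOINT and drops the default CMD, and user-supplied args replace CMD only. A self-contained sketch of just that merge; mergeExec is a made-up name for illustration.

package main

import "fmt"

// mergeExec applies the override rules: a user command replaces the image
// ENTRYPOINT and drops the default CMD; user args replace CMD only.
func mergeExec(imgEntrypoint, imgCmd, userCommand, userArgs []string) []string {
	command, args := imgEntrypoint, imgCmd
	if len(userCommand) > 0 {
		command = userCommand
		args = nil
	}
	if len(userArgs) > 0 {
		args = userArgs
	}
	return append(command, args...)
}

func main() {
	ep, cmd := []string{"/server"}, []string{"--port=80"}
	fmt.Println(mergeExec(ep, cmd, nil, nil))                     // [/server --port=80]
	fmt.Println(mergeExec(ep, cmd, nil, []string{"--port=9090"})) // [/server --port=9090]
	fmt.Println(mergeExec(ep, cmd, []string{"/debug"}, nil))      // [/debug]
}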
Example #5
func (r *runtime) buildHyperPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) ([]byte, error) {
	// check and pull image
	for _, c := range pod.Spec.Containers {
		if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
			return nil, err
		}
	}

	// build hyper volume spec
	specMap := make(map[string]interface{})
	volumes := make([]map[string]interface{}, 0, 1)

	volumeMap, found := r.runtimeHelper.ListVolumesForPod(pod.UID)
	if found {
		// process rbd volume globally
		for name, mounter := range volumeMap {
			glog.V(4).Infof("Hyper: volume %s, path %s, meta %s", name, mounter.GetPath(), mounter.GetMetaData())
			v := make(map[string]interface{})
			v[KEY_NAME] = name

			// Process rbd volume
			metadata := mounter.GetMetaData()
			if metadata != nil && metadata["volume_type"].(string) == "rbd" {
				v[KEY_VOLUME_DRIVE] = metadata["volume_type"]
				v["source"] = "rbd:" + metadata["name"].(string)
				monitors := make([]string, 0, 1)
				for _, host := range metadata["hosts"].([]interface{}) {
					for _, port := range metadata["ports"].([]interface{}) {
						monitors = append(monitors, fmt.Sprintf("%s:%s", host.(string), port.(string)))
					}
				}
				v["option"] = map[string]interface{}{
					"user":     metadata["auth_username"],
					"keyring":  metadata["keyring"],
					"monitors": monitors,
				}
			} else {
				glog.V(4).Infof("Hyper: volume %s %s", name, mounter.GetPath())

				v[KEY_VOLUME_DRIVE] = VOLUME_TYPE_VFS
				v[KEY_VOLUME_SOURCE] = mounter.GetPath()
			}

			volumes = append(volumes, v)
		}

		glog.V(4).Infof("Hyper volumes: %v", volumes)
	}

	if !r.disableHyperInternalService {
		services := r.buildHyperPodServices(pod)
		if services == nil {
			// services can't be nil for kubernetes, so fake one if none exist
			services = []grpctypes.UserService{
				{
					ServiceIP:   "127.0.0.2",
					ServicePort: 65534,
				},
			}
		}
		specMap["services"] = services
	}

	// build hyper containers spec
	var containers []map[string]interface{}
	var k8sHostNeeded = true
	dnsServers := make(map[string]string)
	terminationMsgLabels := make(map[string]string)
	for _, container := range pod.Spec.Containers {
		c := make(map[string]interface{})
		c[KEY_NAME] = r.buildHyperContainerFullName(
			string(pod.UID),
			string(pod.Name),
			string(pod.Namespace),
			container.Name,
			restartCount,
			container)
		c[KEY_IMAGE] = container.Image
		c[KEY_TTY] = container.TTY

		if container.WorkingDir != "" {
			c[KEY_WORKDIR] = container.WorkingDir
		}

		opts, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &container, "")
		if err != nil {
			return nil, err
		}

		command, args := kubecontainer.ExpandContainerCommandAndArgs(&container, opts.Envs)
		if len(command) > 0 {
			c[KEY_ENTRYPOINT] = command
		}
		if len(args) > 0 {
			c[KEY_COMMAND] = args
		}

		// dns
		for _, dns := range opts.DNS {
			dnsServers[dns] = dns
		}

		// envs
		envs := make([]map[string]string, 0, 1)
		for _, e := range opts.Envs {
			envs = append(envs, map[string]string{
				"env":   e.Name,
				"value": e.Value,
			})
		}
		c[KEY_ENVS] = envs

		// port-mappings
		var ports []map[string]interface{}
		for _, mapping := range opts.PortMappings {
			p := make(map[string]interface{})
			p[KEY_CONTAINER_PORT] = mapping.ContainerPort
			if mapping.HostPort != 0 {
				p[KEY_HOST_PORT] = mapping.HostPort
			}
			p[KEY_PROTOCOL] = mapping.Protocol
			ports = append(ports, p)
		}
		c[KEY_PORTS] = ports

		// NOTE: PodContainerDir is derived from TerminationMessagePath, which defaults to /dev/termination-log.
		if opts.PodContainerDir != "" && container.TerminationMessagePath != "" {
			// In the docker runtime, the container log path contains the container ID.
			// However, for the hyper runtime, we cannot get the container ID before
			// the container is launched, so we generate a random UUID that lets us
			// map a container's termination message path to a unique log file on disk.
			randomUID := util.NewUUID()
			containerLogPath := path.Join(opts.PodContainerDir, string(randomUID))
			fs, err := os.Create(containerLogPath)
			if err != nil {
				return nil, err
			}

			if err := fs.Close(); err != nil {
				return nil, err
			}
			mnt := &kubecontainer.Mount{
				// Use a random name for the termination message mount, so that
				// when a container restarts, it will not overwrite the old termination
				// message.
				Name:          fmt.Sprintf("termination-message-%s", randomUID),
				ContainerPath: container.TerminationMessagePath,
				HostPath:      containerLogPath,
				ReadOnly:      false,
			}
			opts.Mounts = append(opts.Mounts, *mnt)

			// set termination msg labels with host path
			terminationMsgLabels[container.Name] = mnt.HostPath
		}

		// volumes
		if len(opts.Mounts) > 0 {
			var containerVolumes []map[string]interface{}
			for _, volume := range opts.Mounts {
				v := make(map[string]interface{})
				v[KEY_MOUNTPATH] = volume.ContainerPath
				v[KEY_VOLUME] = volume.Name
				v[KEY_READONLY] = volume.ReadOnly
				containerVolumes = append(containerVolumes, v)

				if k8sHostNeeded {
					// Setup global hosts volume
					if volume.Name == "k8s-managed-etc-hosts" {
						k8sHostNeeded = false
						volumes = append(volumes, map[string]interface{}{
							KEY_NAME:          volume.Name,
							KEY_VOLUME_DRIVE:  VOLUME_TYPE_VFS,
							KEY_VOLUME_SOURCE: volume.HostPath,
						})
					}

					// Setup global termination msg volume
					if strings.HasPrefix(volume.Name, "termination-message") {
						k8sHostNeeded = false

						volumes = append(volumes, map[string]interface{}{
							KEY_NAME:          volume.Name,
							KEY_VOLUME_DRIVE:  VOLUME_TYPE_VFS,
							KEY_VOLUME_SOURCE: volume.HostPath,
						})
					}
				}
			}
			c[KEY_VOLUMES] = containerVolumes
		}

		containers = append(containers, c)
	}
	specMap[KEY_CONTAINERS] = containers
	specMap[KEY_VOLUMES] = volumes

	// dns
	if len(dnsServers) > 0 {
		dns := []string{}
		for d := range dnsServers {
			dns = append(dns, d)
		}
		specMap[KEY_DNS] = dns
	}

	// build hyper pod resources spec
	var podCPULimit, podMemLimit int64
	var labels map[string]string
	podResource := make(map[string]int64)
	for _, container := range pod.Spec.Containers {
		resource := container.Resources.Limits
		var containerCPULimit, containerMemLimit int64
		for name, limit := range resource {
			switch name {
			case api.ResourceCPU:
				containerCPULimit = limit.MilliValue()
			case api.ResourceMemory:
				containerMemLimit = limit.MilliValue()
			}
		}
		if containerCPULimit == 0 {
			containerCPULimit = hyperDefaultContainerCPU
		}
		if containerMemLimit == 0 {
			containerMemLimit = hyperDefaultContainerMem * 1024 * 1024 * 1000
		}
		podCPULimit += containerCPULimit
		podMemLimit += containerMemLimit

		// generate the labels heapster needs
		// TODO: keep these labels up to date if the pod changes
		labels = newLabels(&container, pod, restartCount, false)
	}

	podResource[KEY_VCPU] = (podCPULimit + 999) / 1000         // round millicores up to whole vCPUs
	podResource[KEY_MEMORY] = podMemLimit / 1000 / 1024 / 1024 // milli-bytes -> MiB
	specMap[KEY_RESOURCE] = podResource
	glog.V(5).Infof("Hyper: pod limit vcpu=%v mem=%vMiB", podResource[KEY_VCPU], podResource[KEY_MEMORY])

	// Setup labels
	podLabels := map[string]string{KEY_API_POD_UID: string(pod.UID)}
	for k, v := range pod.Labels {
		podLabels[k] = v
	}
	// append the labels heapster needs
	// NOTE(harryz): this only works for the one-container-per-pod model for now.
	for k, v := range labels {
		podLabels[k] = v
	}

	// append termination message label
	for k, v := range terminationMsgLabels {
		podLabels[k] = v
	}

	specMap[KEY_LABELS] = podLabels

	// other params required
	specMap[KEY_ID] = kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	// Cap the hostname at 63 chars (the spec allows 64 bytes, i.e. 63 chars plus the terminating NUL).
	const hostnameMaxLen = 63
	podHostname := pod.Name
	if len(podHostname) > hostnameMaxLen {
		podHostname = podHostname[:hostnameMaxLen]
	}
	specMap[KEY_HOSTNAME] = podHostname

	podData, err := json.Marshal(specMap)
	if err != nil {
		return nil, err
	}

	return podData, nil
}
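
The resource section above sums per-container limits in "milli" units (Quantity.MilliValue() yields millicores for CPU and bytes x 1000 for memory) and then rounds: millicores are rounded up to whole vCPUs, and milli-bytes are scaled down to MiB. A worked sketch of the same arithmetic, under that unit assumption; podResources is a made-up helper.

package main

import "fmt"

// podResources mirrors the conversion above: CPU millicores are rounded up
// to whole vCPUs, and memory in milli-bytes (bytes * 1000) is scaled to MiB.
func podResources(cpuMilli, memMilliBytes int64) (vcpu, memMiB int64) {
	vcpu = (cpuMilli + 999) / 1000
	memMiB = memMilliBytes / 1000 / 1024 / 1024
	return
}

func main() {
	// Two containers: 250m + 1500m CPU, 512 MiB + 256 MiB of memory.
	cpu := int64(250 + 1500)
	mem := int64(512+256) * 1024 * 1024 * 1000
	vcpu, mib := podResources(cpu, mem)
	fmt.Printf("vcpu=%d mem=%dMiB\n", vcpu, mib) // vcpu=2 mem=768MiB
}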
Example #6
func (r *runtime) buildHyperPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) ([]byte, error) {
	// check and pull image
	for _, c := range pod.Spec.Containers {
		if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
			return nil, err
		}
	}

	// build hyper volume spec
	specMap := make(map[string]interface{})
	volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID)
	if !ok {
		return nil, fmt.Errorf("cannot get the volumes for pod %q", kubecontainer.GetPodFullName(pod))
	}

	volumes := make([]map[string]interface{}, 0, 1)
	for name, volume := range volumeMap {
		glog.V(4).Infof("Hyper: volume %s, path %s, meta %s", name, volume.Builder.GetPath(), volume.Builder.GetMetaData())
		v := make(map[string]interface{})
		v[KEY_NAME] = name

		// Process rbd volume
		metadata := volume.Builder.GetMetaData()
		if metadata != nil && metadata["volume_type"].(string) == "rbd" {
			v[KEY_VOLUME_DRIVE] = metadata["volume_type"]
			v["source"] = "rbd:" + metadata["name"].(string)
			monitors := make([]string, 0, 1)
			for _, host := range metadata["hosts"].([]interface{}) {
				for _, port := range metadata["ports"].([]interface{}) {
					monitors = append(monitors, fmt.Sprintf("%s:%s", host.(string), port.(string)))
				}
			}
			v["option"] = map[string]interface{}{
				"user":     metadata["auth_username"],
				"keyring":  metadata["keyring"],
				"monitors": monitors,
			}
		} else {
			glog.V(4).Infof("Hyper: volume %s %s", name, volume.Builder.GetPath())

			v[KEY_VOLUME_DRIVE] = VOLUME_TYPE_VFS
			v[KEY_VOLUME_SOURCE] = volume.Builder.GetPath()
		}

		volumes = append(volumes, v)
	}

	glog.V(4).Infof("Hyper volumes: %v", volumes)

	if !r.disableHyperInternalService {
		services := r.buildHyperPodServices(pod)
		if services == nil {
			// services can't be nil for kubernetes, so fake one if none exist
			services = []HyperService{
				{
					ServiceIP:   "127.0.0.2",
					ServicePort: 65534,
				},
			}
		}
		specMap["services"] = services
	}

	// build hyper containers spec
	var containers []map[string]interface{}
	var k8sHostNeeded = true
	dnsServers := make(map[string]string)
	for _, container := range pod.Spec.Containers {
		c := make(map[string]interface{})
		c[KEY_NAME] = r.buildHyperContainerFullName(
			string(pod.UID),
			string(pod.Name),
			string(pod.Namespace),
			container.Name,
			restartCount,
			container)
		c[KEY_IMAGE] = container.Image
		c[KEY_TTY] = container.TTY

		if container.WorkingDir != "" {
			c[KEY_WORKDIR] = container.WorkingDir
		}

		opts, err := r.generator.GenerateRunContainerOptions(pod, &container)
		if err != nil {
			return nil, err
		}

		command, args := kubecontainer.ExpandContainerCommandAndArgs(&container, opts.Envs)
		if len(command) > 0 {
			c[KEY_ENTRYPOINT] = command
		}
		if len(args) > 0 {
			c[KEY_COMMAND] = args
		}

		// dns
		for _, dns := range opts.DNS {
			dnsServers[dns] = dns
		}

		// envs
		envs := make([]map[string]string, 0, 1)
		for _, e := range opts.Envs {
			envs = append(envs, map[string]string{
				"env":   e.Name,
				"value": e.Value,
			})
		}
		c[KEY_ENVS] = envs

		// port-mappings
		var ports []map[string]interface{}
		for _, mapping := range opts.PortMappings {
			p := make(map[string]interface{})
			p[KEY_CONTAINER_PORT] = mapping.ContainerPort
			if mapping.HostPort != 0 {
				p[KEY_HOST_PORT] = mapping.HostPort
			}
			p[KEY_PROTOCOL] = mapping.Protocol
			ports = append(ports, p)
		}
		c[KEY_PORTS] = ports

		// volumes
		if len(opts.Mounts) > 0 {
			var containerVolumes []map[string]interface{}
			for _, volume := range opts.Mounts {
				v := make(map[string]interface{})
				v[KEY_MOUNTPATH] = volume.ContainerPath
				v[KEY_VOLUME] = volume.Name
				v[KEY_READONLY] = volume.ReadOnly
				containerVolumes = append(containerVolumes, v)

				// Setup global hosts volume
				if volume.Name == "k8s-managed-etc-hosts" && k8sHostNeeded {
					k8sHostNeeded = false
					volumes = append(volumes, map[string]interface{}{
						KEY_NAME:          volume.Name,
						KEY_VOLUME_DRIVE:  VOLUME_TYPE_VFS,
						KEY_VOLUME_SOURCE: volume.HostPath,
					})
				}
			}
			c[KEY_VOLUMES] = containerVolumes
		}

		containers = append(containers, c)
	}
	specMap[KEY_CONTAINERS] = containers
	specMap[KEY_VOLUMES] = volumes

	// dns
	if len(dnsServers) > 0 {
		dns := []string{}
		for d := range dnsServers {
			dns = append(dns, d)
		}
		specMap[KEY_DNS] = dns
	}

	// build hyper pod resources spec
	var podCPULimit, podMemLimit int64
	podResource := make(map[string]int64)
	for _, container := range pod.Spec.Containers {
		resource := container.Resources.Limits
		var containerCPULimit, containerMemLimit int64
		for name, limit := range resource {
			switch name {
			case api.ResourceCPU:
				containerCPULimit = limit.MilliValue()
			case api.ResourceMemory:
				containerMemLimit = limit.MilliValue()
			}
		}
		if containerCPULimit == 0 {
			containerCPULimit = hyperDefaultContainerCPU
		}
		if containerMemLimit == 0 {
			containerMemLimit = hyperDefaultContainerMem * 1024 * 1024 * 1000
		}
		podCPULimit += containerCPULimit
		podMemLimit += containerMemLimit
	}

	podResource[KEY_VCPU] = (podCPULimit + 999) / 1000         // round millicores up to whole vCPUs
	podResource[KEY_MEMORY] = podMemLimit / 1000 / 1024 / 1024 // milli-bytes -> MiB
	specMap[KEY_RESOURCE] = podResource
	glog.V(5).Infof("Hyper: pod limit vcpu=%v mem=%vMiB", podResource[KEY_VCPU], podResource[KEY_MEMORY])

	// Setup labels
	podLabels := map[string]string{KEY_API_POD_UID: string(pod.UID)}
	for k, v := range pod.Labels {
		podLabels[k] = v
	}
	specMap[KEY_LABELS] = podLabels

	// other params required
	specMap[KEY_ID] = kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)
	specMap[KEY_TTY] = false

	// Cap the hostname at 63 chars (the spec allows 64 bytes, i.e. 63 chars plus the terminating NUL).
	const hostnameMaxLen = 63
	podHostname := pod.Name
	if len(podHostname) > hostnameMaxLen {
		podHostname = podHostname[:hostnameMaxLen]
	}
	specMap[KEY_HOSTNAME] = podHostname

	podData, err := json.Marshal(specMap)
	if err != nil {
		return nil, err
	}

	return podData, nil
}
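
Both hyper examples collect resolver addresses in a map used as a set (dnsServers[dns] = dns) and flatten it afterwards, which drops duplicates but loses ordering, since Go map iteration is unordered. A sketch of the same idiom that also preserves first-seen order; dedupe is a made-up helper.

package main

import "fmt"

// dedupe drops duplicate addresses while preserving first-seen order.
func dedupe(addrs []string) []string {
	seen := make(map[string]struct{}, len(addrs))
	out := make([]string, 0, len(addrs))
	for _, a := range addrs {
		if _, ok := seen[a]; ok {
			continue
		}
		seen[a] = struct{}{}
		out = append(out, a)
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"10.0.0.10", "10.0.0.10", "8.8.8.8"}))
	// [10.0.0.10 8.8.8.8]
}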