// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi.PodSandbox, error) {
	var filter *runtimeApi.PodSandboxFilter
	if !all {
		readyState := runtimeApi.PodSandBoxState_READY
		filter = &runtimeApi.PodSandboxFilter{
			State: &readyState,
		}
	}

	resp, err := m.runtimeService.ListPodSandbox(filter)
	if err != nil {
		glog.Errorf("ListPodSandbox failed: %v", err)
		return nil, err
	}

	result := []*runtimeApi.PodSandbox{}
	for _, s := range resp {
		if !isManagedByKubelet(s.Labels) {
			glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName(
				s.Metadata.GetName(), s.Metadata.GetNamespace()))
			continue
		}

		result = append(result, s)
	}

	return result, nil
}
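
`isManagedByKubelet` is not part of this listing. A minimal sketch, assuming the kubelet marks its sandboxes with a dedicated label (the key below is illustrative, not the real constant):

// Sketch only: assumes kubelet-managed sandboxes carry a marker label.
const kubeletManagedLabel = "io.kubernetes.managed" // assumed key

func isManagedByKubelet(labels map[string]string) bool {
	_, found := labels[kubeletManagedLabel]
	return found
}
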
// GetPodContainerID gets the pod's sandbox ID.
func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (kubecontainer.ContainerID, error) {
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)
	if len(pod.Sandboxes) == 0 {
		glog.Errorf("No sandboxes are found for pod %q", podFullName)
		return kubecontainer.ContainerID{}, fmt.Errorf("sandboxes for pod %q not found", podFullName)
	}

	// return sandboxID of the first sandbox since it is the latest one
	return pod.Sandboxes[0].ID, nil
}
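
Every example in this listing keys off `kubecontainer.BuildPodFullName`, which joins a pod's name and namespace into the kubelet's canonical pod key. It is essentially the following one-liner (a sketch of the upstream helper):

// BuildPodFullName returns "name_namespace", e.g. "nginx_default".
func BuildPodFullName(name, namespace string) string {
	return name + "_" + namespace
}
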
Example #3
// GetPodStatus currently invokes GetPods() to return the status.
// TODO(yifan): Split the get status logic from GetPods().
func (r *runtime) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
	pods, err := r.GetPods(true)
	if err != nil {
		return nil, err
	}
	p := kubecontainer.Pods(pods).FindPodByID(pod.UID)
	if len(p.Containers) == 0 {
		return nil, fmt.Errorf("cannot find status for pod: %q", kubecontainer.BuildPodFullName(pod.Name, pod.Namespace))
	}
	return &p.Status, nil
}
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in the runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
	// We currently retain a container's restart count as a container label. Each time a
	// container restarts, the kubelet reads the restart count from the registered dead
	// container, increments it to get the new restart count, and then adds a label with
	// the new restart count to the newly started container.
	// However, this method has some limitations:
	//	1. When all dead containers have been garbage collected, the container status
	//	cannot recover the historical value and will be *inaccurate*. Fortunately, the
	//	chance of this is slim.
	//	2. For old containers that have no restart count label, we can only assume
	//	their restart count is 0.
	// Since we only promised "best-effort" restart count reporting, we can live with
	// these limitations for now.
	// TODO: move this comment to SyncPod.
	podFullName := kubecontainer.BuildPodFullName(name, namespace)
	podSandboxIDs, err := m.getSandboxIDByPodUID(string(uid), nil)
	if err != nil {
		return nil, err
	}
	glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q(UID:%q)", podSandboxIDs, podFullName, string(uid))

	sandboxStatuses := make([]*runtimeApi.PodSandboxStatus, len(podSandboxIDs))
	containerStatuses := []*kubecontainer.ContainerStatus{}
	podIP := ""
	for idx, podSandboxID := range podSandboxIDs {
		podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
		if err != nil {
			glog.Errorf("PodSandboxStatus for pod (uid:%v, name:%s, namespace:%s) error: %v", uid, name, namespace, err)
			return nil, err
		}
		sandboxStatuses[idx] = podSandboxStatus

		// Only get pod IP from latest sandbox
		if idx == 0 && podSandboxStatus.GetState() == runtimeApi.PodSandBoxState_READY {
			podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
		}

		containerStatus, err := m.getKubeletContainerStatuses(podSandboxID)
		if err != nil {
			glog.Errorf("getKubeletContainerStatuses for sandbox %s failed: %v", podSandboxID, err)
			return nil, err
		}
		containerStatuses = append(containerStatuses, containerStatus...)
	}

	return &kubecontainer.PodStatus{
		ID:                uid,
		Name:              name,
		Namespace:         namespace,
		IP:                podIP,
		SandboxStatuses:   sandboxStatuses,
		ContainerStatuses: containerStatuses,
	}, nil
}
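
The comment at the top of this GetPodStatus describes a restart-count-in-a-label scheme. A minimal sketch of that round-trip follows; the label key matches what the comment implies, but both it and the helper names are illustrative (imports such as strconv elided, as elsewhere in this listing):

// Assumed label key; the kubelet stores the restart count as a string label.
const containerRestartCountLabel = "io.kubernetes.container.restartCount"

// restartCountFromLabels parses the label; old containers without it count as 0.
func restartCountFromLabels(labels map[string]string) int {
	n, err := strconv.Atoi(labels[containerRestartCountLabel])
	if err != nil {
		return 0
	}
	return n
}

// labelsForRestartedContainer yields the label for a replacement container:
// the dead container's count plus one.
func labelsForRestartedContainer(deadLabels map[string]string) map[string]string {
	return map[string]string{
		containerRestartCountLabel: strconv.Itoa(restartCountFromLabels(deadLabels) + 1),
	}
}
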
Example #5
// KillPod kills all the containers of a pod.
func (r *runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
	if len(runningPod.Name) == 0 {
		return nil
	}

	var podID string
	namespace := runningPod.Namespace
	podName := kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace)
	glog.V(4).Infof("Hyper: killing pod %q.", podName)

	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		glog.Errorf("Hyper: ListPods failed, error: %s", err)
		return err
	}

	for _, podInfo := range podInfos {
		if podInfo.PodName == podName {
			podID = podInfo.PodID
			break
		}
	}

	cmds := []string{"rm", podID}
	_, err = r.runCommand(cmds...)
	if err != nil {
		glog.Errorf("Hyper: remove pod %s failed, error: %s", podID, err)
		return err
	}

	// Teardown pod's network
	err = r.networkPlugin.TearDownPod(namespace, podName, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.TearDownPod failed, error: %v", err)
		return err
	}

	// Delete pod spec file
	specFileName := path.Join(hyperPodSpecDir, podName)
	_, err = os.Stat(specFileName)
	if err == nil {
		e := os.Remove(specFileName)
		if e != nil {
			glog.Errorf("Hyper: delete spec file for %s failed, error: %v", runningPod.Name, e)
		}
	}

	return nil
}
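
`runCommand` above shells out to the hyper CLI (here effectively running `hyper rm <podID>`). The helper is not shown in this listing; a plausible sketch, with the binary name and output handling as assumptions:

// runCommand runs the hyper binary with the given arguments and returns its
// combined output split into lines. Sketch only; "hyper" is an assumed name.
func (r *runtime) runCommand(args ...string) ([]string, error) {
	output, err := exec.Command("hyper", args...).CombinedOutput()
	if err != nil {
		return nil, err
	}
	return strings.Split(strings.TrimSpace(string(output)), "\n"), nil
}
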
Example #6
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	podData, err := r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, podFullName, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	podSpec, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}
	result, err := r.hyperClient.CreatePod(podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	podID := result["ID"].(string)

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		destroyErr := r.hyperClient.RemovePod(podID)
		if destroyErr != nil {
			glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
		}
		return err
	}

	return nil
}
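
savePodSpec and getPodSpec persist the generated spec under hyperPodSpecDir, keyed by the pod full name; the KillPod example above removes the same file. Neither is shown here; a minimal sketch, assuming plain files with 0644 permissions:

// savePodSpec writes the JSON pod spec to <hyperPodSpecDir>/<podFullName>.
func (r *runtime) savePodSpec(spec, podFullName string) error {
	return ioutil.WriteFile(path.Join(hyperPodSpecDir, podFullName), []byte(spec), 0644)
}

// getPodSpec reads the spec back for CreatePod.
func (r *runtime) getPodSpec(podFullName string) (string, error) {
	data, err := ioutil.ReadFile(path.Join(hyperPodSpecDir, podFullName))
	if err != nil {
		return "", err
	}
	return string(data), nil
}
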
Example #7
// GetPodStatus retrieves the status of the pod, including the information of
// all containers in the pod. Clients of this interface assume the containers
// statuses in a pod always have a deterministic ordering (eg: sorted by name).
func (r *runtime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
	status := &kubecontainer.PodStatus{
		ID:        uid,
		Name:      name,
		Namespace: namespace,
	}

	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		glog.Errorf("Hyper: ListPods failed, error: %s", err)
		return nil, err
	}

	podFullName := kubecontainer.BuildPodFullName(name, namespace)
	for _, podInfo := range podInfos {
		if podInfo.PodName != podFullName {
			continue
		}

		if len(podInfo.PodInfo.Status.PodIP) > 0 {
			status.IP = podInfo.PodInfo.Status.PodIP[0]
		}

		for _, containerInfo := range podInfo.PodInfo.Status.ContainerStatus {
			for _, container := range podInfo.PodInfo.Spec.Containers {
				if container.ContainerID == containerInfo.ContainerID {
					c := r.getContainerStatus(containerInfo, container.Image, container.ImageID,
						podInfo.PodInfo.Status.StartTime, podInfo.PodInfo.Spec.Labels)
					status.ContainerStatuses = append(
						status.ContainerStatuses,
						c)
				}
			}
		}
	}

	glog.V(5).Infof("Hyper: get pod %s status %s", podFullName, status)

	return status, nil
}
Example #8
// Syncs the running pod into the desired pod.
func (r *runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	// TODO: (random-liu) Stop using running pod in SyncPod()
	// TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
	// we may stop using apiPodStatus someday.
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestartedOldVersion(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				restartPod = true
				break
			}
			continue
		}

		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.V(4).Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there is any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		restartCount := 0
		// Only kill existing pod
		podID, err := r.hyperClient.GetPodIDByName(podFullName)
		if err == nil && len(podID) > 0 {
			// Update pod restart count
			restartCount, err = r.GetPodStartCount(podID)
			if err != nil {
				glog.Errorf("Hyper: get pod startcount failed: %v", err)
				return err
			}
			restartCount += 1

			if err := r.KillPod(nil, runningPod); err != nil {
				glog.Errorf("Hyper: kill pod %s failed, error: %s", runningPod.Name, err)
				return err
			}
		}

		if err := r.RunPod(pod, restartCount, pullSecrets); err != nil {
			glog.Errorf("Hyper: run pod %s failed, error: %s", pod.Name, err)
			return err
		}
	}
	return nil
}
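
The restart decision above hinges on `kubecontainer.HashContainer`, which fingerprints the container spec so that any spec change forces a kill-and-recreate. In the kubelet of this era it is essentially a deep hash of the API container struct (sketch; `util.DeepHashObject` writes a canonical dump of the struct into the hasher):

// HashContainer returns a stable fingerprint of the container spec.
func HashContainer(container *api.Container) uint64 {
	hash := adler32.New()
	util.DeepHashObject(hash, *container)
	return uint64(hash.Sum32())
}
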
Example #9
func (r *runtime) buildHyperPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) ([]byte, error) {
	// check and pull image
	for _, c := range pod.Spec.Containers {
		if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
			return nil, err
		}
	}

	// build hyper volume spec
	specMap := make(map[string]interface{})
	volumeMap, ok := r.volumeGetter.GetVolumes(pod.UID)
	if !ok {
		return nil, fmt.Errorf("cannot get the volumes for pod %q", kubecontainer.GetPodFullName(pod))
	}

	volumes := make([]map[string]interface{}, 0, 1)
	for name, volume := range volumeMap {
		glog.V(4).Infof("Hyper: volume %s, path %s, meta %s", name, volume.Builder.GetPath(), volume.Builder.GetMetaData())
		v := make(map[string]interface{})
		v[KEY_NAME] = name

		// Process rbd volume
		metadata := volume.Builder.GetMetaData()
		if metadata != nil && metadata["volume_type"].(string) == "rbd" {
			v[KEY_VOLUME_DRIVE] = metadata["volume_type"]
			v["source"] = "rbd:" + metadata["name"].(string)
			monitors := make([]string, 0, 1)
			for _, host := range metadata["hosts"].([]interface{}) {
				for _, port := range metadata["ports"].([]interface{}) {
					monitors = append(monitors, fmt.Sprintf("%s:%s", host.(string), port.(string)))
				}
			}
			v["option"] = map[string]interface{}{
				"user":     metadata["auth_username"],
				"keyring":  metadata["keyring"],
				"mointors": monitors,
			}
		} else {
			glog.V(4).Infof("Hyper: volume %s %s", name, volume.Builder.GetPath())

			v[KEY_VOLUME_DRIVE] = VOLUME_TYPE_VFS
			v[KEY_VOLUME_SOURCE] = volume.Builder.GetPath()
		}

		volumes = append(volumes, v)
	}

	glog.V(4).Infof("Hyper volumes: %v", volumes)

	services := r.buildHyperPodServices(pod)
	if services == nil {
		// services can't be null for kubernetes, so fake one if it is null
		services = []HyperService{
			{
				ServiceIP:   "127.0.0.2",
				ServicePort: 65534,
			},
		}
	}
	specMap["services"] = services

	// build hyper containers spec
	var containers []map[string]interface{}
	var k8sHostNeeded = true
	for _, container := range pod.Spec.Containers {
		c := make(map[string]interface{})
		c[KEY_NAME] = r.buildHyperContainerFullName(
			string(pod.UID),
			string(pod.Name),
			string(pod.Namespace),
			container.Name,
			restartCount,
			container)
		c[KEY_IMAGE] = container.Image
		c[KEY_TTY] = container.TTY

		containerCommands := make([]string, 0, 1)
		for _, cmd := range container.Command {
			containerCommands = append(containerCommands, cmd)
		}
		for _, arg := range container.Args {
			containerCommands = append(containerCommands, arg)
		}
		if len(containerCommands) > 0 {
			c[KEY_COMMAND] = containerCommands
		}

		if container.WorkingDir != "" {
			c[KEY_WORKDIR] = container.WorkingDir
		}

		opts, err := r.generator.GenerateRunContainerOptions(pod, &container)
		if err != nil {
			return nil, err
		}

		// dns
		if len(opts.DNS) > 0 {
			c[KEY_DNS] = opts.DNS
		}

		// envs
		envs := make([]map[string]string, 0, 1)
		for _, e := range opts.Envs {
			envs = append(envs, map[string]string{
				"env":   e.Name,
				"value": e.Value,
			})
		}
		c[KEY_ENVS] = envs

		// port-mappings
		var ports []map[string]interface{}
		for _, mapping := range opts.PortMappings {
			p := make(map[string]interface{})
			p[KEY_CONTAINER_PORT] = mapping.ContainerPort
			if mapping.HostPort != 0 {
				p[KEY_HOST_PORT] = mapping.HostPort
			}
			p[KEY_PROTOCOL] = mapping.Protocol
			ports = append(ports, p)
		}
		c[KEY_PORTS] = ports

		// volumes
		if len(opts.Mounts) > 0 {
			var containerVolumes []map[string]interface{}
			for _, volume := range opts.Mounts {
				v := make(map[string]interface{})
				v[KEY_MOUNTPATH] = volume.ContainerPath
				v[KEY_VOLUME] = volume.Name
				v[KEY_READONLY] = volume.ReadOnly
				containerVolumes = append(containerVolumes, v)

				// Setup global hosts volume
				if volume.Name == "k8s-managed-etc-hosts" && k8sHostNeeded {
					k8sHostNeeded = false
					volumes = append(volumes, map[string]interface{}{
						KEY_NAME:          volume.Name,
						KEY_VOLUME_DRIVE:  VOLUME_TYPE_VFS,
						KEY_VOLUME_SOURCE: volume.HostPath,
					})
				}
			}
			c[KEY_VOLUMES] = containerVolumes
		}

		containers = append(containers, c)
	}
	specMap[KEY_CONTAINERS] = containers
	specMap[KEY_VOLUMES] = volumes

	// build hyper pod resources spec
	var podCPULimit, podMemLimit int64
	podResource := make(map[string]int64)
	for _, container := range pod.Spec.Containers {
		resource := container.Resources.Limits
		var containerCPULimit, containerMemLimit int64
		for name, limit := range resource {
			switch name {
			case api.ResourceCPU:
				containerCPULimit = limit.MilliValue()
			case api.ResourceMemory:
				containerMemLimit = limit.MilliValue()
			}
		}
		if containerCPULimit == 0 {
			containerCPULimit = hyperDefaultContainerCPU
		}
		if containerMemLimit == 0 {
			containerMemLimit = hyperDefaultContainerMem * 1024 * 1024 * 1000
		}
		podCPULimit += containerCPULimit
		podMemLimit += containerMemLimit
	}

	podResource[KEY_VCPU] = (podCPULimit + 999) / 1000
	podResource[KEY_MEMORY] = int64(hyperBaseMemory) + ((podMemLimit)/1000/1024)/1024
	specMap[KEY_RESOURCE] = podResource
	glog.V(5).Infof("Hyper: pod limit vcpu=%v mem=%vMiB", podResource[KEY_VCPU], podResource[KEY_MEMORY])

	// other params required
	specMap[KEY_ID] = kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)
	specMap[KEY_LABELS] = map[string]string{"UID": string(pod.UID)}
	specMap[KEY_TTY] = true

	podData, err := json.Marshal(specMap)
	if err != nil {
		return nil, err
	}

	return podData, nil
}
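
A worked example of the resource math above: two containers limited to 300m and 1200m CPU give podCPULimit = 1500, so KEY_VCPU = (1500+999)/1000 = 2 vCPUs; memory limits are summed in milli-bytes and divided back down to MiB (this variant also adds hyperBaseMemory on top). The same rounding as a standalone helper, with illustrative names:

// podResourceFor mirrors the vCPU/memory rounding used in buildHyperPod.
func podResourceFor(cpuMilli, memMilliBytes int64) (vcpu, memMiB int64) {
	vcpu = (cpuMilli + 999) / 1000              // round milli-CPU up to whole vCPUs
	memMiB = memMilliBytes / 1000 / 1024 / 1024 // milli-bytes back to MiB
	return
}

// podResourceFor(1500, 2*256*1024*1024*1000) == (2, 512)
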
Example #10
// KillPod kills all the containers of a pod.
func (r *runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
	if len(runningPod.Name) == 0 {
		return nil
	}

	// preStop hook
	for _, c := range runningPod.Containers {
		var container *api.Container
		if pod != nil {
			for i, containerSpec := range pod.Spec.Containers {
				if c.Name == containerSpec.Name {
					container = &pod.Spec.Containers[i]
					break
				}
			}
		}

		gracePeriod := int64(minimumGracePeriodInSeconds)
		if pod != nil {
			switch {
			case pod.DeletionGracePeriodSeconds != nil:
				gracePeriod = *pod.DeletionGracePeriodSeconds
			case pod.Spec.TerminationGracePeriodSeconds != nil:
				gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
			}
		}

		start := unversioned.Now()
		if pod != nil && container != nil && container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
			glog.V(4).Infof("Running preStop hook for container %q", container.Name)
			done := make(chan struct{})
			go func() {
				defer close(done)
				defer util.HandleCrash()
				if err := r.runner.Run(c.ID, pod, container, container.Lifecycle.PreStop); err != nil {
					glog.Errorf("preStop hook for container %q failed: %v", container.Name, err)
				}
			}()
			select {
			case <-time.After(time.Duration(gracePeriod) * time.Second):
				glog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", container.Name, gracePeriod)
			case <-done:
				glog.V(4).Infof("preStop hook for container %q completed", container.Name)
			}
			gracePeriod -= int64(unversioned.Now().Sub(start.Time).Seconds())
		}

		// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
		if gracePeriod < minimumGracePeriodInSeconds {
			gracePeriod = minimumGracePeriodInSeconds
		}
	}

	var podID string
	namespace := runningPod.Namespace
	podName := kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace)
	glog.V(4).Infof("Hyper: killing pod %q.", podName)

	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		glog.Errorf("Hyper: ListPods failed, error: %s", err)
		return err
	}

	for _, podInfo := range podInfos {
		if podInfo.PodName == podName {
			podID = podInfo.PodID
			break
		}
	}

	cmds := []string{"rm", podID}
	_, err = r.runCommand(cmds...)
	if err != nil {
		glog.Errorf("Hyper: remove pod %s failed, error: %s", podID, err)
		return err
	}

	// Teardown pod's network
	err = r.networkPlugin.TearDownPod(namespace, podName, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.TearDownPod failed, error: %v", err)
		return err
	}

	// Delete pod spec file
	specFileName := path.Join(hyperPodSpecDir, podName)
	_, err = os.Stat(specFileName)
	if err == nil {
		e := os.Remove(specFileName)
		if e != nil {
			glog.Errorf("Hyper: delete spec file for %s failed, error: %v", runningPod.Name, e)
		}
	}

	return nil
}
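
The preStop handling above is an instance of a generic pattern: run a hook in a goroutine, but bound the wait by the grace period. Stripped to its essentials (self-contained sketch, not the kubelet's exact code):

// runWithTimeout runs fn and waits at most timeout for it to finish.
// It reports whether fn completed in time; fn keeps running if it did not.
func runWithTimeout(fn func(), timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		defer close(done)
		fn()
	}()
	select {
	case <-done:
		return true
	case <-time.After(timeout):
		return false
	}
}
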
Example #11
func (r *runtime) buildHyperPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) ([]byte, error) {
	// check and pull image
	for _, c := range pod.Spec.Containers {
		if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
			return nil, err
		}
	}

	// build hyper volume spec
	specMap := make(map[string]interface{})
	volumes := make([]map[string]interface{}, 0, 1)

	volumeMap, found := r.runtimeHelper.ListVolumesForPod(pod.UID)
	if found {
		// process rbd volume globally
		for name, mounter := range volumeMap {
			glog.V(4).Infof("Hyper: volume %s, path %s, meta %s", name, mounter.GetPath(), mounter.GetMetaData())
			v := make(map[string]interface{})
			v[KEY_NAME] = name

			// Process rbd volume
			metadata := mounter.GetMetaData()
			if metadata != nil && metadata["volume_type"].(string) == "rbd" {
				v[KEY_VOLUME_DRIVE] = metadata["volume_type"]
				v["source"] = "rbd:" + metadata["name"].(string)
				monitors := make([]string, 0, 1)
				for _, host := range metadata["hosts"].([]interface{}) {
					for _, port := range metadata["ports"].([]interface{}) {
						monitors = append(monitors, fmt.Sprintf("%s:%s", host.(string), port.(string)))
					}
				}
				v["option"] = map[string]interface{}{
					"user":     metadata["auth_username"],
					"keyring":  metadata["keyring"],
					"monitors": monitors,
				}
			} else {
				glog.V(4).Infof("Hyper: volume %s %s", name, mounter.GetPath())

				v[KEY_VOLUME_DRIVE] = VOLUME_TYPE_VFS
				v[KEY_VOLUME_SOURCE] = mounter.GetPath()
			}

			volumes = append(volumes, v)
		}

		glog.V(4).Infof("Hyper volumes: %v", volumes)
	}

	if !r.disableHyperInternalService {
		services := r.buildHyperPodServices(pod)
		if services == nil {
			// services can't be null for kubernetes, so fake one if it is null
			services = []grpctypes.UserService{
				{
					ServiceIP:   "127.0.0.2",
					ServicePort: 65534,
				},
			}
		}
		specMap["services"] = services
	}

	// build hyper containers spec
	var containers []map[string]interface{}
	var k8sHostNeeded = true
	dnsServers := make(map[string]string)
	terminationMsgLabels := make(map[string]string)
	for _, container := range pod.Spec.Containers {
		c := make(map[string]interface{})
		c[KEY_NAME] = r.buildHyperContainerFullName(
			string(pod.UID),
			string(pod.Name),
			string(pod.Namespace),
			container.Name,
			restartCount,
			container)
		c[KEY_IMAGE] = container.Image
		c[KEY_TTY] = container.TTY

		if container.WorkingDir != "" {
			c[KEY_WORKDIR] = container.WorkingDir
		}

		opts, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &container, "")
		if err != nil {
			return nil, err
		}

		command, args := kubecontainer.ExpandContainerCommandAndArgs(&container, opts.Envs)
		if len(command) > 0 {
			c[KEY_ENTRYPOINT] = command
		}
		if len(args) > 0 {
			c[KEY_COMMAND] = args
		}

		// dns
		for _, dns := range opts.DNS {
			dnsServers[dns] = dns
		}

		// envs
		envs := make([]map[string]string, 0, 1)
		for _, e := range opts.Envs {
			envs = append(envs, map[string]string{
				"env":   e.Name,
				"value": e.Value,
			})
		}
		c[KEY_ENVS] = envs

		// port-mappings
		var ports []map[string]interface{}
		for _, mapping := range opts.PortMappings {
			p := make(map[string]interface{})
			p[KEY_CONTAINER_PORT] = mapping.ContainerPort
			if mapping.HostPort != 0 {
				p[KEY_HOST_PORT] = mapping.HostPort
			}
			p[KEY_PROTOCOL] = mapping.Protocol
			ports = append(ports, p)
		}
		c[KEY_PORTS] = ports

		// NOTE: PodContainerDir comes from TerminationMessagePath, which defaults to /dev/termination-log.
		if opts.PodContainerDir != "" && container.TerminationMessagePath != "" {
			// In the docker runtime, the container log path contains the container ID.
			// However, for the hyper runtime, we cannot get the container ID before
			// the container is launched, so we generate a random UUID to map each
			// container's termination message path to a unique log file on disk.
			randomUID := util.NewUUID()
			containerLogPath := path.Join(opts.PodContainerDir, string(randomUID))
			fs, err := os.Create(containerLogPath)
			if err != nil {
				return nil, err
			}

			if err := fs.Close(); err != nil {
				return nil, err
			}
			mnt := &kubecontainer.Mount{
				// Use a random name for the termination message mount, so that
				// when a container restarts, it will not overwrite the old termination
				// message.
				Name:          fmt.Sprintf("termination-message-%s", randomUID),
				ContainerPath: container.TerminationMessagePath,
				HostPath:      containerLogPath,
				ReadOnly:      false,
			}
			opts.Mounts = append(opts.Mounts, *mnt)

			// set termination msg labels with host path
			terminationMsgLabels[container.Name] = mnt.HostPath
		}

		// volumes
		if len(opts.Mounts) > 0 {
			var containerVolumes []map[string]interface{}
			for _, volume := range opts.Mounts {
				v := make(map[string]interface{})
				v[KEY_MOUNTPATH] = volume.ContainerPath
				v[KEY_VOLUME] = volume.Name
				v[KEY_READONLY] = volume.ReadOnly
				containerVolumes = append(containerVolumes, v)

				if k8sHostNeeded {
					// Setup global hosts volume
					if volume.Name == "k8s-managed-etc-hosts" {
						k8sHostNeeded = false
						volumes = append(volumes, map[string]interface{}{
							KEY_NAME:          volume.Name,
							KEY_VOLUME_DRIVE:  VOLUME_TYPE_VFS,
							KEY_VOLUME_SOURCE: volume.HostPath,
						})
					}

					// Setup global termination msg volume
					if strings.HasPrefix(volume.Name, "termination-message") {
						k8sHostNeeded = false

						volumes = append(volumes, map[string]interface{}{
							KEY_NAME:          volume.Name,
							KEY_VOLUME_DRIVE:  VOLUME_TYPE_VFS,
							KEY_VOLUME_SOURCE: volume.HostPath,
						})
					}
				}
			}
			c[KEY_VOLUMES] = containerVolumes
		}

		containers = append(containers, c)
	}
	specMap[KEY_CONTAINERS] = containers
	specMap[KEY_VOLUMES] = volumes

	// dns
	if len(dnsServers) > 0 {
		dns := []string{}
		for d := range dnsServers {
			dns = append(dns, d)
		}
		specMap[KEY_DNS] = dns
	}

	// build hyper pod resources spec
	var podCPULimit, podMemLimit int64
	var labels map[string]string
	podResource := make(map[string]int64)
	for _, container := range pod.Spec.Containers {
		resource := container.Resources.Limits
		var containerCPULimit, containerMemLimit int64
		for name, limit := range resource {
			switch name {
			case api.ResourceCPU:
				containerCPULimit = limit.MilliValue()
			case api.ResourceMemory:
				containerMemLimit = limit.MilliValue()
			}
		}
		if containerCPULimit == 0 {
			containerCPULimit = hyperDefaultContainerCPU
		}
		if containerMemLimit == 0 {
			containerMemLimit = hyperDefaultContainerMem * 1024 * 1024 * 1000
		}
		podCPULimit += containerCPULimit
		podMemLimit += containerMemLimit

		// generate heapster needed labels
		// TODO: keep these labels up to date if the pod changes
		labels = newLabels(&container, pod, restartCount, false)
	}

	podResource[KEY_VCPU] = (podCPULimit + 999) / 1000
	podResource[KEY_MEMORY] = ((podMemLimit) / 1000 / 1024) / 1024
	specMap[KEY_RESOURCE] = podResource
	glog.V(5).Infof("Hyper: pod limit vcpu=%v mem=%vMiB", podResource[KEY_VCPU], podResource[KEY_MEMORY])

	// Setup labels
	podLabels := map[string]string{KEY_API_POD_UID: string(pod.UID)}
	for k, v := range pod.Labels {
		podLabels[k] = v
	}
	// append heapster needed labels
	// NOTE(harryz): this only works for the one-container-per-pod model for now.
	for k, v := range labels {
		podLabels[k] = v
	}

	// append termination message label
	for k, v := range terminationMsgLabels {
		podLabels[k] = v
	}

	specMap[KEY_LABELS] = podLabels

	// other params required
	specMap[KEY_ID] = kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	// Cap hostname at 63 chars (the spec allows 64 bytes, i.e. 63 chars plus the null terminator).
	const hostnameMaxLen = 63
	podHostname := pod.Name
	if len(podHostname) > hostnameMaxLen {
		podHostname = podHostname[:hostnameMaxLen]
	}
	specMap[KEY_HOSTNAME] = podHostname

	podData, err := json.Marshal(specMap)
	if err != nil {
		return nil, err
	}

	return podData, nil
}
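
For orientation, marshaling specMap yields the pod document sent to hyperd. Assuming the KEY_* constants map to the lowercase field names they suggest, the result looks roughly like the sketch below (key spellings and values are abridged and illustrative):

// Illustrative shape of the marshaled spec (assumed key spellings):
//
//	{
//	  "id":       "nginx_default",
//	  "hostname": "nginx",
//	  "resource": {"vcpu": 1, "memory": 128},
//	  "labels":   {"...": "..."},
//	  "dns":      ["10.0.0.10"],
//	  "containers": [{"name": "...", "image": "nginx", "envs": [], "ports": []}],
//	  "volumes":    [{"name": "...", "driver": "vfs", "source": "/var/lib/kubelet/..."}]
//	}
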
Example #12
// legacyLogSymlink composes the legacy container log path. It is only used for legacy cluster
// logging support.
func legacyLogSymlink(containerID string, containerName, podName, podNamespace string) string {
	return dockertools.LogSymlink(legacyContainerLogsDir, kubecontainer.BuildPodFullName(podName, podNamespace),
		containerName, containerID)
}
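
`dockertools.LogSymlink` (and the LogSymlink helper the Hyper examples below use) composes the symlink name from the log directory, pod full name, container name, and container ID; it is essentially:

// LogSymlink builds "<dir>/<podFullName>_<containerName>-<containerID>.log".
func LogSymlink(containerLogsDir, podFullName, containerName, containerID string) string {
	return path.Join(containerLogsDir,
		fmt.Sprintf("%s_%s-%s.log", podFullName, containerName, containerID))
}
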
Example #13
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	podData, err := r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, pod.Name, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	podSpec, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}
	result, err := r.hyperClient.CreatePod(podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	podID := result["ID"].(string)

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		destroyErr := r.hyperClient.RemovePod(podID)
		if destroyErr != nil {
			glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
		}
		return err
	}

	podStatus, err := r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		return err
	}
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(podStatus)

	for _, container := range pod.Spec.Containers {
		var containerID kubecontainer.ContainerID

		for _, runningContainer := range runningPod.Containers {
			if container.Name == runningContainer.Name {
				containerID = runningContainer.ID
			}
		}

		// Update container references
		ref, err := kubecontainer.GenerateContainerRef(pod, &container)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", pod.Name, container.Name, err)
		} else {
			r.containerRefManager.SetRef(containerID, ref)
		}

		// Create a symbolic link to the Hyper container log file using a name
		// which captures the full pod name, the container name and the
		// container ID. Cluster level logging will capture these symbolic
		// filenames which can be used for search terms in Elasticsearch or for
		// labels for Cloud Logging.
		containerLogFile := path.Join(hyperLogsDir, podID, fmt.Sprintf("%s-json.log", containerID.ID))
		symlinkFile := LogSymlink(r.containerLogsDir, podFullName, container.Name, containerID.ID)
		if err = r.os.Symlink(containerLogFile, symlinkFile); err != nil {
			glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
		}

		if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
			handlerErr := r.runner.Run(containerID, pod, &container, container.Lifecycle.PostStart)
			if handlerErr != nil {
				err := fmt.Errorf("PostStart handler: %v", handlerErr)
				if e := r.KillPod(pod, runningPod); e != nil {
					glog.Errorf("KillPod %v failed: %v", podFullName, e)
				}
				return err
			}
		}
	}

	return nil
}
Example #14
func logError(containerInfo *labelledContainerInfo, label string, err error) {
	glog.Errorf("Unable to get %q for container %q of pod %q: %v", label, containerInfo.Name,
		kubecontainer.BuildPodFullName(containerInfo.PodName, containerInfo.PodNamespace), err)
}
Example #15
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	var (
		err         error
		podData     []byte
		podFullName string
		podID       string
		podStatus   *kubecontainer.PodStatus
	)

	podData, err = r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	podFullName = kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)
	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	defer func() {
		if err != nil {
			specFileName := path.Join(hyperPodSpecDir, podFullName)
			// Use a local error so we do not clobber err, which gates this cleanup.
			if _, statErr := os.Stat(specFileName); statErr == nil {
				if e := os.Remove(specFileName); e != nil {
					glog.Warningf("Hyper: delete spec file for %s failed, error: %v", podFullName, e)
				}
			}

			if podID != "" {
				destroyErr := r.hyperClient.RemovePod(podID)
				if destroyErr != nil {
					glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
				}
			}

			tearDownError := r.networkPlugin.TearDownPod(pod.Namespace, pod.Name, kubecontainer.ContainerID{}, "hyper")
			if tearDownError != nil {
				glog.Warningf("Hyper: networkPlugin.TearDownPod failed: %v, kubelet will continue to rm pod %s", tearDownError, pod.Name)
			}
		}
	}()

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, pod.Name, kubecontainer.ContainerID{}, "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	specData, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}

	var podSpec grpctypes.UserPod
	err = json.Unmarshal([]byte(specData), &podSpec)
	if err != nil {
		glog.Errorf("Hyper: marshal pod %s from specData error: %v", podFullName, err)
	}

	podID, err = r.hyperClient.CreatePod(&podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		return err
	}

	podStatus, err = r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		return err
	}
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(podStatus)

	for _, container := range pod.Spec.Containers {
		var containerID kubecontainer.ContainerID

		for _, runningContainer := range runningPod.Containers {
			if container.Name == runningContainer.Name {
				containerID = runningContainer.ID
			}
		}

		// Update container references
		ref, err := kubecontainer.GenerateContainerRef(pod, &container)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", pod.Name, container.Name, err)
		} else {
			r.containerRefManager.SetRef(containerID, ref)
		}

		// Create a symbolic link to the Hyper container log file using a name
		// which captures the full pod name, the container name and the
		// container ID. Cluster level logging will capture these symbolic
		// filenames which can be used for search terms in Elasticsearch or for
		// labels for Cloud Logging.
		containerLogFile := path.Join(hyperLogsDir, podID, fmt.Sprintf("%s-json.log", containerID.ID))
		symlinkFile := LogSymlink(r.containerLogsDir, podFullName, container.Name, containerID.ID)
		if err = r.os.Symlink(containerLogFile, symlinkFile); err != nil {
			glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
		}

		if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
			msg, handlerErr := r.runner.Run(containerID, pod, &container, container.Lifecycle.PostStart)
			if handlerErr != nil {
				err = fmt.Errorf("PostStart handler: %v, error msg is: %v", handlerErr, msg)
				if e := r.KillPod(pod, runningPod, nil); e != nil {
					glog.Errorf("KillPod %v failed: %v", podFullName, e)
				}
				return err
			}
		}
	}

	return nil
}
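
Unlike the earlier RunPod variants, this one threads a single err through the whole function so the deferred closure can unwind the spec file, the hyper pod, and the network on any failure. The pattern in isolation (self-contained sketch; stepOne, stepTwo, and the undo helper are hypothetical):

// setup demonstrates cleanup-on-error: the deferred closure observes the
// final value of err and rolls back completed steps only on failure.
func setup() (err error) {
	if err = stepOne(); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			undoStepOne()
		}
	}()
	return stepTwo()
}
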
Example #16
func (pm *basicManager) GetPodByName(namespace, name string) (*api.Pod, bool) {
	podFullName := kubecontainer.BuildPodFullName(name, namespace)
	return pm.GetPodByFullName(podFullName)
}
Example #17
// KillPod kills all the containers of a pod.
func (r *runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
	var (
		podID        string
		podFullName  string
		podName      string
		podNamespace string
		err          error
	)

	podName = runningPod.Name
	podNamespace = runningPod.Namespace
	if len(podName) == 0 && pod != nil {
		podName = pod.Name
		podNamespace = pod.Namespace
	}
	if len(podName) == 0 {
		return nil
	}

	podFullName = kubecontainer.BuildPodFullName(podName, podNamespace)
	glog.V(4).Infof("Hyper: killing pod %q.", podFullName)

	defer func() {
		// Teardown pod's network
		if tearDownErr := r.networkPlugin.TearDownPod(podNamespace, podName, kubecontainer.ContainerID{}, "hyper"); tearDownErr != nil {
			glog.Warningf("Hyper: networkPlugin.TearDownPod failed, error: %v", tearDownErr)
		}

		// Delete pod spec file
		specFileName := path.Join(hyperPodSpecDir, podFullName)
		if _, statErr := os.Stat(specFileName); statErr == nil {
			if e := os.Remove(specFileName); e != nil {
				glog.Warningf("Hyper: delete spec file for %s failed, error: %v", runningPod.Name, e)
			}
		}
	}()

	// preStop hook
	for _, c := range runningPod.Containers {
		r.containerRefManager.ClearRef(c.ID)

		var container *api.Container
		if pod != nil {
			for i, containerSpec := range pod.Spec.Containers {
				if c.Name == containerSpec.Name {
					container = &pod.Spec.Containers[i]
					break
				}
			}
		}

		// TODO(harryz) not sure how to use gracePeriodOverride here
		gracePeriod := int64(minimumGracePeriodInSeconds)
		if pod != nil {
			switch {
			case pod.DeletionGracePeriodSeconds != nil:
				gracePeriod = *pod.DeletionGracePeriodSeconds
			case pod.Spec.TerminationGracePeriodSeconds != nil:
				gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
			}
		}

		start := unversioned.Now()
		if pod != nil && container != nil && container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
			glog.V(4).Infof("Running preStop hook for container %q", container.Name)
			done := make(chan struct{})
			go func() {
				defer close(done)
				defer utilruntime.HandleCrash()
				if msg, err := r.runner.Run(c.ID, pod, container, container.Lifecycle.PreStop); err != nil {
					glog.Errorf("preStop hook for container %q failed: %v, error msg is: %v", container.Name, err, msg)
				}
			}()
			select {
			case <-time.After(time.Duration(gracePeriod) * time.Second):
				glog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", container.Name, gracePeriod)
			case <-done:
				glog.V(4).Infof("preStop hook for container %q completed", container.Name)
			}
			gracePeriod -= int64(unversioned.Now().Sub(start.Time).Seconds())
		}

		// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
		if gracePeriod < minimumGracePeriodInSeconds {
			gracePeriod = minimumGracePeriodInSeconds
		}
	}

	podInfos, err := r.hyperClient.ListPods()
	if err != nil {
		glog.Errorf("Hyper: ListPods failed, error: %s", err)
		return err
	}

	for _, podInfo := range podInfos {
		if podInfo.PodName == podFullName {
			podID = podInfo.PodID

			// Remove log links
			for _, c := range podInfo.PodInfo.Status.ContainerStatus {
				_, _, _, containerName, _, _, err := r.parseHyperContainerFullName(c.Name)
				if err != nil {
					continue
				}
				symlinkFile := LogSymlink(r.containerLogsDir, podFullName, containerName, c.ContainerID)
				err = os.Remove(symlinkFile)
				if err != nil && !os.IsNotExist(err) {
					glog.Warningf("Failed to remove container log symlink %q: %v", symlinkFile, err)
				}
			}

			break
		}
	}

	if podID == "" {
		// The pod is already gone in hyper; the deferred cleanup still runs.
		return nil
	}

	err = r.hyperClient.RemovePod(podID)
	if err != nil {
		glog.Errorf("Hyper: remove pod %s failed, error: %s", podID, err)
		return err
	}

	return nil
}
Example #18
// determinePodSandboxIP determines the IP address of the given pod sandbox.
// TODO: remove determinePodSandboxIP after networking is delegated to the container runtime.
func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeApi.PodSandboxStatus) string {
	ip := ""

	if podSandbox.Network != nil {
		ip = podSandbox.Network.GetIp()
	}

	if m.networkPlugin.Name() != network.DefaultPluginName {
		// TODO: podInfraContainerID in GetPodNetworkStatus() interface should be renamed to sandboxID
		netStatus, err := m.networkPlugin.GetPodNetworkStatus(podNamespace, podName, kubecontainer.ContainerID{
			Type: m.runtimeName,
			ID:   podSandbox.GetId(),
		})
		if err != nil {
			glog.Errorf("NetworkPlugin %s failed on the status hook for pod '%s' - %v", m.networkPlugin.Name(), kubecontainer.BuildPodFullName(podName, podNamespace), err)
		} else if netStatus != nil {
			ip = netStatus.IP.String()
		}
	}

	return ip
}
Example #19
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	podData, err := r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, podFullName, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	podSpec, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}
	result, err := r.hyperClient.CreatePod(podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	podID := result["ID"].(string)

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		destroyErr := r.hyperClient.RemovePod(podID)
		if destroyErr != nil {
			glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
		}
		return err
	}

	podStatus, err := r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		return err
	}
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(podStatus)

	for _, container := range pod.Spec.Containers {
		var containerID kubecontainer.ContainerID

		for _, runningContainer := range runningPod.Containers {
			if container.Name == runningContainer.Name {
				containerID = runningContainer.ID
			}
		}

		if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
			handlerErr := r.runner.Run(containerID, pod, &container, container.Lifecycle.PostStart)
			if handlerErr != nil {
				err := fmt.Errorf("PostStart handler: %v", handlerErr)
				if e := r.KillPod(pod, runningPod); e != nil {
					glog.Errorf("KillPod %v failed: %v", podFullName, e)
				}
				return err
			}
		}
	}

	return nil
}