Example #1
// One of the following arguments must be non-nil: runningPod, status.
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
	var p kubecontainer.Pod
	if runningPod != nil {
		p = *runningPod
	} else if status != nil {
		p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
	}
	return kl.containerRuntime.KillPod(pod, p, gracePeriodOverride)
}
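A caller must supply exactly one of runningPod or status; if both are nil, killPod silently proceeds with a zero-value kubecontainer.Pod. Below is a minimal, self-contained sketch of the same dispatch pattern, using hypothetical stand-in types (Pod, PodStatus, convert) rather than the real kubecontainer ones, plus an explicit error for the both-nil case that the original leaves unchecked.

package main

import "fmt"

// Pod and PodStatus are hypothetical stand-ins for the kubecontainer types.
type Pod struct{ Name string }
type PodStatus struct{ Name string }

// convert derives a running-pod view from a status snapshot, standing in for
// kubecontainer.ConvertPodStatusToRunningPod.
func convert(status *PodStatus) Pod { return Pod{Name: status.Name} }

// kill dispatches on whichever argument is non-nil.
func kill(runningPod *Pod, status *PodStatus) error {
	var p Pod
	switch {
	case runningPod != nil:
		p = *runningPod
	case status != nil:
		p = convert(status)
	default:
		return fmt.Errorf("one of runningPod or status must be non-nil")
	}
	fmt.Printf("killing pod %q\n", p.Name)
	return nil
}

func main() {
	_ = kill(&Pod{Name: "web"}, nil)      // caller holds a running pod
	_ = kill(nil, &PodStatus{Name: "db"}) // caller holds only a status
	fmt.Println(kill(nil, nil))           // contract violation surfaces as an error
}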
Example #2
// SyncPod syncs the running pod to match the specified desired pod.
func (r *Runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	// TODO: (random-liu) Stop using running pod in SyncPod()
	// TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
	// we may stop using apiPodStatus someday.
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestartedOldVersion(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
				// https://github.com/appc/spec/issues/276.
				restartPod = true
				break
			}
			continue
		}

		// TODO: check for non-root image directives.  See ../docker/manager.go#SyncPod

		// TODO(yifan): Take care of host network change.
		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		// Kill the pod only if the pod is actually running.
		if len(runningPod.Containers) > 0 {
			if err := r.KillPod(pod, runningPod); err != nil {
				return err
			}
		}
		if err := r.RunPod(pod, pullSecrets); err != nil {
			return err
		}
	}
	return nil
}
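The restart decision above hinges on kubecontainer.HashContainer: the hash recorded when the container started is compared against the hash of the desired spec, and any mismatch forces a kill-and-recreate. Here is a self-contained sketch of that comparison; hashContainer is a simplified stand-in that hashes only name and image with FNV-1a, whereas the real helper hashes the full container spec.

package main

import (
	"fmt"
	"hash/fnv"
)

// Container is a reduced stand-in for api.Container.
type Container struct {
	Name  string
	Image string
}

// hashContainer is a simplified stand-in for kubecontainer.HashContainer.
func hashContainer(c *Container) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%s/%s", c.Name, c.Image)
	return h.Sum64()
}

func main() {
	running := Container{Name: "app", Image: "nginx:1.9"}
	desired := Container{Name: "app", Image: "nginx:1.10"}

	recorded := hashContainer(&running) // hash stored when the container started
	expected := hashContainer(&desired) // hash of the current desired spec

	// Mirrors: containerChanged := c.Hash != 0 && c.Hash != expectedHash
	if recorded != 0 && recorded != expected {
		fmt.Printf("hash changed (%d vs %d): kill and re-create\n", recorded, expected)
	}
}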
Example #3
// SyncPod syncs the running pod into the desired pod by executing following steps:
//
//  1. Compute sandbox and container changes.
//  2. Kill pod sandbox if necessary.
//  3. Kill any containers that should not be running.
//  4. Create sandbox if necessary.
//  5. Create init containers.
//  6. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
	// Step 1: Compute sandbox and container changes.
	podContainerChanges := m.computePodContainerChanges(pod, podStatus)
	glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod))
	if podContainerChanges.CreateSandbox {
		ref, err := api.GetReference(pod)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
		}
		if podContainerChanges.SandboxID != "" {
			m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.")
		} else {
			m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.")
		}

	}

	// Step 2: Kill the pod if the sandbox has changed.
	if podContainerChanges.CreateSandbox || (len(podContainerChanges.ContainersToKeep) == 0 && len(podContainerChanges.ContainersToStart) == 0) {
		if len(podContainerChanges.ContainersToKeep) == 0 && len(podContainerChanges.ContainersToStart) == 0 {
			glog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod))
		} else {
			glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
		}

		killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
		result.AddPodSyncResult(killResult)
		if killResult.Error() != nil {
			glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
			return
		}
	} else {
		// Step 3: kill any running containers in this pod which are not to keep.
		for containerID, containerInfo := range podContainerChanges.ContainersToKill {
			glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod))
			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
			result.AddSyncResult(killContainerResult)
			if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil {
				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
				glog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err)
				return
			}
		}
	}

	// Keep terminated init containers aggressively pruned before starting new ones.
	m.pruneInitContainersBeforeStart(pod, podStatus, podContainerChanges.InitContainersToKeep)

	// We pass the value of the podIP down to generatePodSandboxConfig and
	// generateContainerConfig, which in turn passes it to various other
	// functions, in order to facilitate functionality that requires this
	// value (hosts file and downward API) and avoid races determining
	// the pod IP in cases where a container requires restart but the
	// podIP isn't in the status manager yet.
	//
	// We default to the IP in the passed-in pod status, and overwrite it if the
	// sandbox needs to be (re)started.
	podIP := ""
	if podStatus != nil {
		podIP = podStatus.IP
	}

	// Step 4: Create a sandbox for the pod if necessary.
	podSandboxID := podContainerChanges.SandboxID
	if podContainerChanges.CreateSandbox && len(podContainerChanges.ContainersToStart) > 0 {
		var msg string
		var err error

		glog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod))
		createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
		result.AddSyncResult(createSandboxResult)
		podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
		if err != nil {
			createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
			glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err)
			return
		}

		setupNetworkResult := kubecontainer.NewSyncResult(kubecontainer.SetupNetwork, podSandboxID)
		result.AddSyncResult(setupNetworkResult)
		if !kubecontainer.IsHostNetworkPod(pod) {
			glog.V(3).Infof("Calling network plugin %s to setup pod for %s", m.networkPlugin.Name(), format.Pod(pod))
			// Setup pod network plugin with sandbox id
			// TODO: rename the last param to sandboxID
			err = m.networkPlugin.SetUpPod(pod.Namespace, pod.Name, kubecontainer.ContainerID{
				Type: m.runtimeName,
				ID:   podSandboxID,
			})
			if err != nil {
				message := fmt.Sprintf("Failed to setup network for pod %q using network plugin %q: %v", format.Pod(pod), m.networkPlugin.Name(), err)
				setupNetworkResult.Fail(kubecontainer.ErrSetupNetwork, message)
				glog.Error(message)

				killPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.KillPodSandbox, format.Pod(pod))
				result.AddSyncResult(killPodSandboxResult)
				if err := m.runtimeService.StopPodSandbox(podSandboxID); err != nil {
					killPodSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
					glog.Errorf("Kill sandbox %q failed for pod %q: %v", podSandboxID, format.Pod(pod), err)
				}
				return
			}

			podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
			if err != nil {
				glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod))
				result.Fail(err)
				return
			}

			// Overwrite the podIP passed in the pod status, since we just started the infra container.
			podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus)
			glog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod))
		}
	}

	// Get podSandboxConfig for containers to start.
	configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
	result.AddSyncResult(configPodSandboxResult)
	podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt)
	if err != nil {
		message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
		glog.Error(message)
		configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
		return
	}

	// Step 5: start init containers.
	status, next, done := findNextInitContainerToRun(pod, podStatus)
	if status != nil && status.ExitCode != 0 {
		// container initialization has failed, flag the pod as failed
		initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
		initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
		result.AddSyncResult(initContainerResult)
		if pod.Spec.RestartPolicy == api.RestartPolicyNever {
			utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
			return
		}
		utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %#v", format.Pod(pod), status.Name, status))
	}
	if next != nil {
		if len(podContainerChanges.ContainersToStart) == 0 {
			glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod))
			return
		}

		// If we need to start the next container, do so now then exit
		container := next
		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
		result.AddSyncResult(startContainerResult)

		isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
		if isInBackOff {
			startContainerResult.Fail(err, msg)
			glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
			return
		}

		glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
			startContainerResult.Fail(err, msg)
			utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
			return
		}

		// Successfully started the init container; the next sync will start the following one.
		glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
		return
	}
	if !done {
		// init container still running
		glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod))
		return
	}
	if podContainerChanges.InitFailed {
		glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod))
		return
	}

	// Step 6: start containers in podContainerChanges.ContainersToStart.
	for idx := range podContainerChanges.ContainersToStart {
		container := &pod.Spec.Containers[idx]
		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
		result.AddSyncResult(startContainerResult)

		isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
		if isInBackOff {
			startContainerResult.Fail(err, msg)
			glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
			continue
		}

		glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
			startContainerResult.Fail(err, msg)
			utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
			continue
		}
	}

	return
}
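Unlike the earlier runtimes, which return a single error, this SyncPod accumulates a kubecontainer.PodSyncResult so that one failed step does not hide the outcomes of the others. The sketch below shows the aggregation pattern with simplified stand-in types; the real SyncResult and PodSyncResult carry more fields than these.

package main

import (
	"errors"
	"fmt"
)

// SyncResult and PodSyncResult are simplified stand-ins for the
// kubecontainer types of the same names.
type SyncResult struct {
	Action string
	Target string
	Err    error
}

func (r *SyncResult) Fail(err error) { r.Err = err }

type PodSyncResult struct{ Results []*SyncResult }

func (p *PodSyncResult) AddSyncResult(r *SyncResult) { p.Results = append(p.Results, r) }

// Error reports the first recorded failure, or nil if every step succeeded.
func (p *PodSyncResult) Error() error {
	for _, r := range p.Results {
		if r.Err != nil {
			return fmt.Errorf("%s %s: %w", r.Action, r.Target, r.Err)
		}
	}
	return nil
}

func main() {
	var result PodSyncResult

	create := &SyncResult{Action: "CreatePodSandbox", Target: "default/web"}
	result.AddSyncResult(create) // sandbox creation succeeded

	start := &SyncResult{Action: "StartContainer", Target: "app"}
	result.AddSyncResult(start)
	start.Fail(errors.New("image pull backoff")) // container start failed

	fmt.Println(result.Error()) // StartContainer app: image pull backoff
}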
Example #4
// SyncPod syncs the running pod to match the specified desired pod.
func (r *runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
	// TODO: (random-liu) Stop using running pod in SyncPod()
	// TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
	// we may stop using apiPodStatus someday.
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	// Add references to all containers.
	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
	for _, c := range runningPod.Containers {
		unidentifiedContainers[c.ID] = c
	}

	restartPod := false
	for _, container := range pod.Spec.Containers {
		expectedHash := kubecontainer.HashContainer(&container)

		c := runningPod.FindContainerByName(container.Name)
		if c == nil {
			if kubecontainer.ShouldContainerBeRestartedOldVersion(&container, pod, &podStatus) {
				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
				restartPod = true
				break
			}
			continue
		}

		containerChanged := c.Hash != 0 && c.Hash != expectedHash
		if containerChanged {
			glog.V(4).Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.",
				podFullName, container.Name, c.Hash, expectedHash)
			restartPod = true
			break
		}

		liveness, found := r.livenessManager.Get(c.ID)
		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
			restartPod = true
			break
		}

		delete(unidentifiedContainers, c.ID)
	}

	// If there are any unidentified containers, restart the pod.
	if len(unidentifiedContainers) > 0 {
		restartPod = true
	}

	if restartPod {
		restartCount := 0
		// Kill the pod only if it already exists.
		podID, err := r.hyperClient.GetPodIDByName(podFullName)
		if err == nil && len(podID) > 0 {
			// Update the pod restart count.
			restartCount, err = r.GetPodStartCount(podID)
			if err != nil {
				glog.Errorf("Hyper: get pod start count failed: %v", err)
				return err
			}
			restartCount++

			if err := r.KillPod(nil, runningPod); err != nil {
				glog.Errorf("Hyper: kill pod %s failed, error: %s", runningPod.Name, err)
				return err
			}
		}

		if err := r.RunPod(pod, restartCount, pullSecrets); err != nil {
			glog.Errorf("Hyper: run pod %s failed, error: %s", pod.Name, err)
			return err
		}
	}
	return nil
}
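The unidentifiedContainers map implements a simple set difference: seed it with every running container, delete each one matched against the spec, and restart the pod if anything is left over. A compact, self-contained sketch of the same bookkeeping, using plain strings instead of kubecontainer.ContainerID:

package main

import "fmt"

func main() {
	running := map[string]string{ // container ID -> name
		"c1": "app",
		"c2": "sidecar",
		"c3": "leftover", // no longer in the pod spec
	}
	spec := []string{"app", "sidecar"}

	// Seed the set with every running container.
	unidentified := make(map[string]string, len(running))
	for id, name := range running {
		unidentified[id] = name
	}
	// Remove every container the spec accounts for.
	for _, want := range spec {
		for id, name := range unidentified {
			if name == want {
				delete(unidentified, id)
			}
		}
	}

	if len(unidentified) > 0 {
		fmt.Printf("restart pod: %d unidentified container(s): %v\n", len(unidentified), unidentified)
	}
}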
Example #5
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	var (
		err         error
		podData     []byte
		podFullName string
		podID       string
		podStatus   *kubecontainer.PodStatus
	)

	podData, err = r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	podFullName = kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)
	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	defer func() {
		if err != nil {
			specFileName := path.Join(hyperPodSpecDir, podFullName)
			// Use a separate variable so the deferred err is not clobbered.
			if _, statErr := os.Stat(specFileName); statErr == nil {
				e := os.Remove(specFileName)
				if e != nil {
					glog.Warningf("Hyper: delete spec file for %s failed, error: %v", podFullName, e)
				}
			}

			if podID != "" {
				destroyErr := r.hyperClient.RemovePod(podID)
				if destroyErr != nil {
					glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
				}
			}

			tearDownError := r.networkPlugin.TearDownPod(pod.Namespace, pod.Name, kubecontainer.ContainerID{}, "hyper")
			if tearDownError != nil {
				glog.Warningf("Hyper: networkPlugin.TearDownPod failed: %v, kubelet will continue to rm pod %s", tearDownError, pod.Name)
			}
		}
	}()

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, pod.Name, kubecontainer.ContainerID{}, "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	specData, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}

	var podSpec grpctypes.UserPod
	err = json.Unmarshal([]byte(specData), &podSpec)
	if err != nil {
		glog.Errorf("Hyper: marshal pod %s from specData error: %v", podFullName, err)
	}

	podID, err = r.hyperClient.CreatePod(&podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		return err
	}

	podStatus, err = r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		return err
	}
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(podStatus)

	for _, container := range pod.Spec.Containers {
		var containerID kubecontainer.ContainerID

		for _, runningContainer := range runningPod.Containers {
			if container.Name == runningContainer.Name {
				containerID = runningContainer.ID
			}
		}

		// Update container references
		ref, err := kubecontainer.GenerateContainerRef(pod, &container)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", pod.Name, container.Name, err)
		} else {
			r.containerRefManager.SetRef(containerID, ref)
		}

		// Create a symbolic link to the Hyper container log file using a name
		// which captures the full pod name, the container name and the
		// container ID. Cluster level logging will capture these symbolic
		// filenames which can be used for search terms in Elasticsearch or for
		// labels for Cloud Logging.
		containerLogFile := path.Join(hyperLogsDir, podID, fmt.Sprintf("%s-json.log", containerID.ID))
		symlinkFile := LogSymlink(r.containerLogsDir, podFullName, container.Name, containerID.ID)
		if err = r.os.Symlink(containerLogFile, symlinkFile); err != nil {
			glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
		}

		if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
			msg, handlerErr := r.runner.Run(containerID, pod, &container, container.Lifecycle.PostStart)
			if handlerErr != nil {
				err = fmt.Errorf("PostStart handler: %v, error msg is: %v", handlerErr, msg)
				if e := r.KillPod(pod, runningPod, nil); e != nil {
					glog.Errorf("KillPod %v failed: %v", podFullName, e)
				}
				return err
			}
		}
	}

	return nil
}
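The deferred block above rolls back the spec file, the hyper pod, and the network setup whenever RunPod exits with a non-nil err; it works because err is function-scoped, so every early return assigns it before the defer runs. Note that assignments to an err re-declared with := inside an inner block (as in the container loop above) never reach the closure. Here is a minimal sketch of the idiom, using a named return instead of the var declaration and hypothetical resource names:

package main

import "fmt"

func runPod(failAt string) (err error) {
	var created []string
	// The deferred closure reads the function-scoped err, so any early
	// return with a non-nil error rolls back everything created so far.
	defer func() {
		if err != nil {
			// Roll back in reverse creation order.
			for i := len(created) - 1; i >= 0; i-- {
				fmt.Printf("cleanup: removing %s\n", created[i])
			}
		}
	}()

	for _, step := range []string{"spec file", "network", "hyper pod"} {
		if step == failAt {
			return fmt.Errorf("step %q failed", step)
		}
		created = append(created, step)
	}
	return nil
}

func main() {
	fmt.Println(runPod("hyper pod")) // rolls back network and spec file
	fmt.Println(runPod(""))          // success: no cleanup runs
}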
Example #6
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	podData, err := r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, pod.Name, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	podSpec, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}
	result, err := r.hyperClient.CreatePod(podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	podID := result["ID"].(string)

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		destroyErr := r.hyperClient.RemovePod(podID)
		if destroyErr != nil {
			glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
		}
		return err
	}

	podStatus, err := r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		return err
	}
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(podStatus)

	for _, container := range pod.Spec.Containers {
		var containerID kubecontainer.ContainerID

		for _, runningContainer := range runningPod.Containers {
			if container.Name == runningContainer.Name {
				containerID = runningContainer.ID
			}
		}

		// Update container references
		ref, err := kubecontainer.GenerateContainerRef(pod, &container)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", pod.Name, container.Name, err)
		} else {
			r.containerRefManager.SetRef(containerID, ref)
		}

		// Create a symbolic link to the Hyper container log file using a name
		// which captures the full pod name, the container name and the
		// container ID. Cluster level logging will capture these symbolic
		// filenames which can be used for search terms in Elasticsearch or for
		// labels for Cloud Logging.
		containerLogFile := path.Join(hyperLogsDir, podID, fmt.Sprintf("%s-json.log", containerID.ID))
		symlinkFile := LogSymlink(r.containerLogsDir, podFullName, container.Name, containerID.ID)
		if err = r.os.Symlink(containerLogFile, symlinkFile); err != nil {
			glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
		}

		if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
			handlerErr := r.runner.Run(containerID, pod, &container, container.Lifecycle.PostStart)
			if handlerErr != nil {
				err := fmt.Errorf("PostStart handler: %v", handlerErr)
				if e := r.KillPod(pod, runningPod); e != nil {
					glog.Errorf("KillPod %v failed: %v", podFullName, e)
				}
				return err
			}
		}
	}

	return nil
}
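podID is extracted with an unchecked type assertion, so a CreatePod response without a string ID field would panic the caller. Below is a short sketch of a safer, comma-ok variant; the map literal stands in for the hyperClient.CreatePod response.

package main

import "fmt"

// podIDFrom extracts the pod ID without risking a panic: the comma-ok forms
// turn a missing key or an unexpected type into an ordinary error.
func podIDFrom(result map[string]interface{}) (string, error) {
	v, ok := result["ID"]
	if !ok {
		return "", fmt.Errorf("CreatePod response has no ID field")
	}
	id, ok := v.(string)
	if !ok {
		return "", fmt.Errorf("CreatePod ID has unexpected type %T", v)
	}
	return id, nil
}

func main() {
	fmt.Println(podIDFrom(map[string]interface{}{"ID": "pod-42"}))
	fmt.Println(podIDFrom(map[string]interface{}{"id": "pod-42"})) // wrong key: error, not panic
}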
Example #7
func (r *runtime) RunPod(pod *api.Pod, restartCount int, pullSecrets []api.Secret) error {
	podFullName := kubecontainer.BuildPodFullName(pod.Name, pod.Namespace)

	podData, err := r.buildHyperPod(pod, restartCount, pullSecrets)
	if err != nil {
		glog.Errorf("Hyper: buildHyperPod failed, error: %v", err)
		return err
	}

	err = r.savePodSpec(string(podData), podFullName)
	if err != nil {
		glog.Errorf("Hyper: savePodSpec failed, error: %v", err)
		return err
	}

	// Setup pod's network by network plugin
	err = r.networkPlugin.SetUpPod(pod.Namespace, podFullName, "", "hyper")
	if err != nil {
		glog.Errorf("Hyper: networkPlugin.SetUpPod %s failed, error: %v", pod.Name, err)
		return err
	}

	// Create and start hyper pod
	podSpec, err := r.getPodSpec(podFullName)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podFullName, err)
		return err
	}
	result, err := r.hyperClient.CreatePod(podSpec)
	if err != nil {
		glog.Errorf("Hyper: create pod %s failed, error: %v", podData, err)
		return err
	}

	podID := result["ID"].(string)

	err = r.hyperClient.StartPod(podID)
	if err != nil {
		glog.Errorf("Hyper: start pod %s (ID:%s) failed, error: %v", pod.Name, podID, err)
		destroyErr := r.hyperClient.RemovePod(podID)
		if destroyErr != nil {
			glog.Errorf("Hyper: destory pod %s (ID:%s) failed: %v", pod.Name, podID, destroyErr)
		}
		return err
	}

	podStatus, err := r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		return err
	}
	runningPod := kubecontainer.ConvertPodStatusToRunningPod(podStatus)

	for _, container := range pod.Spec.Containers {
		var containerID kubecontainer.ContainerID

		for _, runningContainer := range runningPod.Containers {
			if container.Name == runningContainer.Name {
				containerID = runningContainer.ID
			}
		}

		if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
			handlerErr := r.runner.Run(containerID, pod, &container, container.Lifecycle.PostStart)
			if handlerErr != nil {
				err := fmt.Errorf("PostStart handler: %v", handlerErr)
				if e := r.KillPod(pod, runningPod); e != nil {
					glog.Errorf("KillPod %v failed: %v", podFullName, e)
				}
				return err
			}
		}
	}

	return nil
}
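All three RunPod variants end with the same PostStart contract: run the lifecycle hook after the container starts, and if it fails, kill the freshly started pod and surface the hook error. A self-contained sketch of that flow; runHook and killPod are hypothetical stand-ins for r.runner.Run and r.KillPod.

package main

import (
	"errors"
	"fmt"
)

// runHook stands in for running a container's PostStart lifecycle handler.
func runHook(container string) error {
	if container == "bad" {
		return errors.New("hook command exited 1")
	}
	return nil
}

// killPod stands in for tearing down the freshly started pod.
func killPod(name string) error {
	fmt.Printf("killing pod %q after failed PostStart\n", name)
	return nil
}

func startContainers(podName string, containers []string) error {
	for _, c := range containers {
		if handlerErr := runHook(c); handlerErr != nil {
			err := fmt.Errorf("PostStart handler: %v", handlerErr)
			if e := killPod(podName); e != nil {
				fmt.Printf("KillPod %v failed: %v\n", podName, e)
			}
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(startContainers("web", []string{"app", "bad"}))
}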