// openPodHostports opens all hostports for the pod and returns a map from hostport to the opened socket.
func (plugin *kubenetNetworkPlugin) openPodHostports(pod *api.Pod) (map[hostport]closeable, error) {
	var retErr error
	hostportMap := make(map[hostport]closeable)
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.HostPort <= 0 {
				// Ignore
				continue
			}
			hp := hostport{
				port:     port.HostPort,
				protocol: strings.ToLower(string(port.Protocol)),
			}
			socket, err := openLocalPort(&hp)
			if err != nil {
				retErr = fmt.Errorf("Cannot open hostport %d for pod %s: %v", port.HostPort, kubecontainer.GetPodFullName(pod), err)
				break
			}
			hostportMap[hp] = socket
		}
		if retErr != nil {
			break
		}
	}
	// If we encountered any error, close all hostports that were just opened.
	if retErr != nil {
		for hp, socket := range hostportMap {
			if err := socket.Close(); err != nil {
				glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, kubecontainer.GetPodFullName(pod), err)
			}
		}
	}
	return hostportMap, retErr
}
// gatherAllHostports returns all hostports that should be open on the node.
func (plugin *kubenetNetworkPlugin) gatherAllHostports() (map[api.ContainerPort]targetPod, error) {
	podHostportMap := make(map[api.ContainerPort]targetPod)
	pods, err := plugin.host.GetRuntime().GetPods(false)
	if err != nil {
		return nil, fmt.Errorf("Failed to retrieve pods from runtime: %v", err)
	}
	for _, p := range pods {
		var podInfraContainerId kubecontainer.ContainerID
		for _, c := range p.Containers {
			if c.Name == dockertools.PodInfraContainerName {
				podInfraContainerId = c.ID
				break
			}
		}
		// Assume that if kubenet has the pod's IP, the pod is alive and its host ports should be open.
		podIP, ok := plugin.podIPs[podInfraContainerId]
		if !ok {
			// The pod has been deleted. Ignore it.
			continue
		}
		// Need the complete api.Pod object
		pod, ok := plugin.host.GetPodByName(p.Namespace, p.Name)
		if ok {
			for _, container := range pod.Spec.Containers {
				for _, port := range container.Ports {
					if port.HostPort != 0 {
						podHostportMap[port] = targetPod{podFullName: kubecontainer.GetPodFullName(pod), podIP: podIP}
					}
				}
			}
		}
	}
	return podHostportMap, nil
}
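Every example in this listing identifies a pod by the "full name" returned from kubecontainer.GetPodFullName. For orientation only, here is a minimal sketch of what such a helper typically looks like, assuming the conventional name_namespace encoding (an underscore is safe because it cannot appear in a DNS-compatible pod name or namespace); treat it as illustrative, not the verbatim upstream definition:
// GetPodFullName returns a string that uniquely identifies a pod on a node (sketch).
func GetPodFullName(pod *api.Pod) string {
	// Join name and namespace with "_", a character that cannot occur in either
	// field, so the result can be split back unambiguously.
	return pod.Name + "_" + pod.Namespace
}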
Example #3
// handleRun handles requests to run a command inside a container.
func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) {
	u, err := url.ParseRequestURI(req.RequestURI)
	if err != nil {
		s.error(w, err)
		return
	}
	podNamespace, podID, uid, container, err := parseContainerCoordinates(u.Path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	pod, ok := s.host.GetPodByName(podNamespace, podID)
	if !ok {
		http.Error(w, "Pod does not exist", http.StatusNotFound)
		return
	}
	command := strings.Split(u.Query().Get("cmd"), " ")
	data, err := s.host.RunInContainer(kubecontainer.GetPodFullName(pod), uid, container, command)
	if err != nil {
		s.error(w, err)
		return
	}
	w.Header().Add("Content-type", "text/plain")
	w.Write(data)
}
Example #4
// ProbeReadiness probes and sets the readiness of a container.
func (pb *prober) ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) {
	var ready probe.Result
	var output string
	var err error
	p := container.ReadinessProbe
	if p == nil {
		ready = probe.Success
	} else {
		ready, output, err = pb.runProbeWithRetries(p, pod, status, container, containerID, maxProbeRetries)
	}
	ctrName := fmt.Sprintf("%s:%s", kubecontainer.GetPodFullName(pod), container.Name)
	if err != nil || ready == probe.Failure {
		// Readiness failed in one way or another.
		ref, ok := pb.refManager.GetRef(containerID)
		if !ok {
			glog.Warningf("No ref for pod '%v' - '%v'", containerID, container.Name)
		}
		if err != nil {
			glog.V(1).Infof("readiness probe for %q errored: %v", ctrName, err)
			if ok {
				pb.recorder.Eventf(ref, "Unhealthy", "Readiness probe errored: %v", err)
			}
		} else { // ready != probe.Success
			glog.V(1).Infof("Readiness probe for %q failed (%v): %s", ctrName, ready, output)
			if ok {
				pb.recorder.Eventf(ref, "Unhealthy", "Readiness probe failed: %s", output)
			}
		}
		return probe.Failure, err
	}

	glog.V(3).Infof("Readiness probe for %q succeeded", ctrName)
	return ready, nil
}
Example #5
// syncBatch syncs pods statuses with the apiserver.
func (s *statusManager) syncBatch() error {
	syncRequest := <-s.podStatusChannel
	pod := syncRequest.pod
	podFullName := kubecontainer.GetPodFullName(pod)
	status := syncRequest.status

	var err error
	statusPod := &api.Pod{
		ObjectMeta: pod.ObjectMeta,
	}
	// TODO: make me easier to express from client code
	statusPod, err = s.kubeClient.Pods(statusPod.Namespace).Get(statusPod.Name)
	if err == nil {
		statusPod.Status = status
		_, err = s.kubeClient.Pods(pod.Namespace).UpdateStatus(statusPod)
		// TODO: handle conflict as a retry, make that easier too.
		if err == nil {
			glog.V(3).Infof("Status for pod %q updated successfully", kubeletUtil.FormatPodName(pod))
			return nil
		}
	}

	// We failed to update the status. To make sure we retry next time, delete
	// the cached value. This may result in an additional update, but that is ok.
	// Doing this synchronously would lead to a deadlock if the podStatusChannel
	// is full and the pod worker holding the lock is waiting on this method
	// to clear the channel. Even if this delete never runs, subsequent container
	// changes on the node should trigger updates.
	go s.DeletePodStatus(podFullName)
	return fmt.Errorf("error updating status for pod %q: %v", kubeletUtil.FormatPodName(pod), err)
}
Example #6
// getPortForward handles a new restful port forward request. It determines the
// pod name and uid and then calls ServePortForward.
func (s *Server) getPortForward(request *restful.Request, response *restful.Response) {
	params := getRequestParams(request)
	pod, ok := s.host.GetPodByName(params.podNamespace, params.podName)
	if !ok {
		response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
		return
	}
	if len(params.podUID) > 0 && pod.UID != params.podUID {
		response.WriteError(http.StatusNotFound, fmt.Errorf("pod not found"))
		return
	}

	redirect, err := s.host.GetPortForward(pod.Name, pod.Namespace, pod.UID)
	if err != nil {
		response.WriteError(streaming.HTTPStatus(err), err)
		return
	}
	if redirect != nil {
		http.Redirect(response.ResponseWriter, request.Request, redirect.String(), http.StatusFound)
		return
	}

	portforward.ServePortForward(response.ResponseWriter,
		request.Request,
		s.host,
		kubecontainer.GetPodFullName(pod),
		params.podUID,
		s.host.StreamingConnectionIdleTimeout(),
		remotecommand.DefaultStreamCreationTimeout)
}
Example #7
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
Example #8
// handleExec handles requests to run a command inside a container.
func (s *Server) handleExec(w http.ResponseWriter, req *http.Request) {
	u, err := url.ParseRequestURI(req.RequestURI)
	if err != nil {
		s.error(w, err)
		return
	}
	podNamespace, podID, uid, container, err := parseContainerCoordinates(u.Path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	pod, ok := s.host.GetPodByName(podNamespace, podID)
	if !ok {
		http.Error(w, "Pod does not exist", http.StatusNotFound)
		return
	}
	stdinStream, stdoutStream, stderrStream, errorStream, conn, tty, ok := s.createStreams(w, req)
	if conn != nil {
		defer conn.Close()
	}
	if !ok {
		return
	}
	err = s.host.ExecInContainer(kubecontainer.GetPodFullName(pod), uid, container, u.Query()[api.ExecCommandParam], stdinStream, stdoutStream, stderrStream, tty)
	if err != nil {
		msg := fmt.Sprintf("Error executing command in container: %v", err)
		glog.Error(msg)
		errorStream.Write([]byte(msg))
	}
}
Example #9
// probeLiveness probes the liveness of a container.
func (pb *prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error) {
	var live probe.Result
	var output string
	var err error
	p := container.LivenessProbe
	if p == nil {
		return probe.Success, nil
	}
	live, output, err = pb.runProbeWithRetries(p, pod, status, container, containerID, maxProbeRetries)
	ctrName := fmt.Sprintf("%s:%s", kubecontainer.GetPodFullName(pod), container.Name)
	if err != nil || live != probe.Success {
		// Liveness failed in one way or another.
		ref, ok := pb.refManager.GetRef(containerID)
		if !ok {
			glog.Warningf("No ref for pod %q - '%v'", containerID, container.Name)
		}
		if err != nil {
			glog.V(1).Infof("Liveness probe for %q errored: %v", ctrName, err)
			if ok {
				pb.recorder.Eventf(ref, "Unhealthy", "Liveness probe errored: %v", err)
			}
			return probe.Unknown, err
		} else { // live != probe.Success
			glog.V(1).Infof("Liveness probe for %q failed (%v): %s", ctrName, live, output)
			if ok {
				pb.recorder.Eventf(ref, "Unhealthy", "Liveness probe failed: %s", output)
			}
			return live, nil
		}
	}
	glog.V(3).Infof("Liveness probe for %q succeeded", ctrName)
	return probe.Success, nil
}
Example #10
func TestExecInContainerNoSuchPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner
	fakeRuntime.PodList = []*containertest.FakePod{}

	podName := "podFoo"
	podNamespace := "nsFoo"
	containerID := "containerFoo"
	err := kubelet.ExecInContainer(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
		"",
		containerID,
		[]string{"ls"},
		nil,
		nil,
		nil,
		false,
		nil,
	)
	require.Error(t, err)
	require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer")
}
Example #11
// getExec handles requests to run a command inside a container.
func (s *Server) getExec(request *restful.Request, response *restful.Response) {
	params := getRequestParams(request)
	pod, ok := s.host.GetPodByName(params.podNamespace, params.podName)
	if !ok {
		response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
		return
	}

	podFullName := kubecontainer.GetPodFullName(pod)
	redirect, err := s.host.GetExec(podFullName, params.podUID, params.containerName, params.cmd, params.streamOpts)
	if err != nil {
		response.WriteError(streaming.HTTPStatus(err), err)
		return
	}
	if redirect != nil {
		http.Redirect(response.ResponseWriter, request.Request, redirect.String(), http.StatusFound)
		return
	}

	remotecommand.ServeExec(response.ResponseWriter,
		request.Request,
		s.host,
		podFullName,
		params.podUID,
		params.containerName,
		s.host.StreamingConnectionIdleTimeout(),
		remotecommand.DefaultStreamCreationTimeout,
		remotecommand.SupportedStreamingProtocols)
}
Example #12
func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
	timeout := time.Duration(p.TimeoutSeconds) * time.Second
	if p.Exec != nil {
		glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command)
		return pb.exec.Probe(pb.newExecInContainer(container, containerID, p.Exec.Command))
	}
	if p.HTTPGet != nil {
		scheme := strings.ToLower(string(p.HTTPGet.Scheme))
		host := p.HTTPGet.Host
		if host == "" {
			host = status.PodIP
		}
		port, err := extractPort(p.HTTPGet.Port, container)
		if err != nil {
			return probe.Unknown, "", err
		}
		path := p.HTTPGet.Path
		glog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path)
		url := formatURL(scheme, host, port, path)
		return pb.http.Probe(url, timeout)
	}
	if p.TCPSocket != nil {
		port, err := extractPort(p.TCPSocket.Port, container)
		if err != nil {
			return probe.Unknown, "", err
		}
		glog.V(4).Infof("TCP-Probe PodIP: %v, Port: %v, Timeout: %v", status.PodIP, port, timeout)
		return pb.tcp.Probe(status.PodIP, port, timeout)
	}
	glog.Warningf("Failed to find probe builder for container: %v", container)
	return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", kubecontainer.GetPodFullName(pod), container.Name)
}
Example #13
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist field.ErrorList
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				// TODO: when validation becomes versioned, this gets a bit
				// more complicated.
				errlist = append(errlist, field.Duplicate(field.NewPath("metadata", "name"), pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := errlist.ToAggregate()
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
Example #14
func (fmc *fakeMirrorClient) CreateMirrorPod(pod *api.Pod) error {
	fmc.mirrorPodLock.Lock()
	defer fmc.mirrorPodLock.Unlock()
	podFullName := kubecontainer.GetPodFullName(pod)
	fmc.mirrorPods.Insert(podFullName)
	fmc.createCounts[podFullName]++
	return nil
}
Example #15
func (h *fakeHandler) SyncHostports(natInterfaceName string, runningPods []*hostport.RunningPod) error {
	for _, r := range runningPods {
		if r.IP.To4() == nil {
			return fmt.Errorf("Invalid or missing pod %s IP", kubecontainer.GetPodFullName(r.Pod))
		}
	}

	return nil
}
Example #16
func TestNewStatus(t *testing.T) {
	syncer := newTestStatusManager()
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 1)

	status, _ := syncer.GetPodStatus(kubecontainer.GetPodFullName(testPod))
	if status.StartTime.IsZero() {
		t.Errorf("SetPodStatus did not set a proper start time value")
	}
}
Example #17
// getContainerLogs handles containerLogs request against the Kubelet
func (s *Server) getContainerLogs(request *restful.Request, response *restful.Response) {
	podNamespace := request.PathParameter("podNamespace")
	podID := request.PathParameter("podID")
	containerName := request.PathParameter("containerName")

	if len(podID) == 0 {
		// TODO: Why return JSON when the rest return plaintext errors?
		response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing podID."}`))
		return
	}
	if len(containerName) == 0 {
		// TODO: Why return JSON when the rest return plaintext errors?
		response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing container name."}`))
		return
	}
	if len(podNamespace) == 0 {
		// TODO: Why return JSON when the rest return plaintext errors?
		response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing podNamespace."}`))
		return
	}

	follow, _ := strconv.ParseBool(request.QueryParameter("follow"))
	previous, _ := strconv.ParseBool(request.QueryParameter("previous"))
	tail := request.QueryParameter("tail")

	pod, ok := s.host.GetPodByName(podNamespace, podID)
	if !ok {
		response.WriteError(http.StatusNotFound, fmt.Errorf("Pod %q does not exist", podID))
		return
	}
	// Check if containerName is valid.
	containerExists := false
	for _, container := range pod.Spec.Containers {
		if container.Name == containerName {
			containerExists = true
		}
	}
	if !containerExists {
		response.WriteError(http.StatusNotFound, fmt.Errorf("Container %q not found in Pod %q", containerName, podID))
		return
	}

	if _, ok := response.ResponseWriter.(http.Flusher); !ok {
		response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher", response))
		return
	}
	fw := flushwriter.Wrap(response)
	response.Header().Set("Transfer-Encoding", "chunked")
	response.WriteHeader(http.StatusOK)
	err := s.host.GetKubeletContainerLogs(kubecontainer.GetPodFullName(pod), containerName, tail, follow, previous, fw, fw)
	if err != nil {
		response.WriteError(http.StatusInternalServerError, err)
		return
	}
}
Example #18
func podStatusData(pod *api.Pod, status api.PodStatus) ([]byte, string, error) {
	podFullName := container.GetPodFullName(pod)
	data, err := json.Marshal(api.PodStatusResult{
		ObjectMeta: api.ObjectMeta{
			Name:     podFullName,
			SelfLink: "/podstatusresult",
		},
		Status: status,
	})
	return data, podFullName, err
}
Example #19
func (pm *basicManager) UpdatePod(pod *api.Pod) {
	pm.lock.Lock()
	defer pm.lock.Unlock()
	podFullName := kubecontainer.GetPodFullName(pod)
	if IsMirrorPod(pod) {
		pm.mirrorPodByUID[pod.UID] = pod
		pm.mirrorPodByFullName[podFullName] = pod
	} else {
		pm.podByUID[pod.UID] = pod
		pm.podByFullName[podFullName] = pod
	}
}
Example #20
// getPortForward handles a new restful port forward request. It determines the
// pod name and uid and then calls ServePortForward.
func (s *Server) getPortForward(request *restful.Request, response *restful.Response) {
	podNamespace, podID, uid := getPodCoordinates(request)
	pod, ok := s.host.GetPodByName(podNamespace, podID)
	if !ok {
		response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
		return
	}

	podName := kubecontainer.GetPodFullName(pod)

	ServePortForward(response.ResponseWriter, request.Request, s.host, podName, uid, s.host.StreamingConnectionIdleTimeout(), defaultStreamCreationTimeout)
}
Example #21
func (pm *basicManager) DeletePod(pod *api.Pod) {
	pm.lock.Lock()
	defer pm.lock.Unlock()
	podFullName := kubecontainer.GetPodFullName(pod)
	if IsMirrorPod(pod) {
		delete(pm.mirrorPodByUID, pod.UID)
		delete(pm.mirrorPodByFullName, podFullName)
	} else {
		delete(pm.podByUID, pod.UID)
		delete(pm.podByFullName, podFullName)
	}
}
Example #22
// openHostports opens all hostports for the pod and records the opened sockets in h.hostPortMap.
func (h *handler) openHostports(pod *api.Pod) error {
	var retErr error
	ports := make(map[hostport]closeable)
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.HostPort <= 0 {
				// Ignore
				continue
			}
			hp := hostport{
				port:     port.HostPort,
				protocol: strings.ToLower(string(port.Protocol)),
			}
			socket, err := h.portOpener(&hp)
			if err != nil {
				retErr = fmt.Errorf("Cannot open hostport %d for pod %s: %v", port.HostPort, kubecontainer.GetPodFullName(pod), err)
				break
			}
			ports[hp] = socket
		}
		if retErr != nil {
			break
		}
	}
	// If we encountered any error, close all hostports that were just opened.
	if retErr != nil {
		for hp, socket := range ports {
			if err := socket.Close(); err != nil {
				glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, kubecontainer.GetPodFullName(pod), err)
			}
		}
		return retErr
	}

	for hostPort, socket := range ports {
		h.hostPortMap[hostPort] = socket
	}

	return nil
}
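Both hostport examples (the kubenet variant at the top and openHostports above) lean on a small hostport key type, a closeable socket abstraction, and a hostPortMap field on the handler. The declarations below are an assumed sketch reconstructed from how the examples use them, not the exact upstream types:
// hostport keys the map of opened sockets (sketch; choose the port type to match
// api.ContainerPort.HostPort in your API version).
type hostport struct {
	port     int32  // HostPort taken from the container spec
	protocol string // "tcp" or "udp", lower-cased
}

// closeable is the minimal interface the examples need, so tests can inject fake sockets.
type closeable interface {
	Close() error
}

// handler state assumed by openHostports (sketch).
type handler struct {
	hostPortMap map[hostport]closeable             // sockets currently held open on the node
	portOpener  func(*hostport) (closeable, error) // injectable opener: a real listener wrapper or a test fake
}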
Example #23
// gatherAllHostports returns all hostports that should be open on the node,
// given the list of pods running on that node and ignoring host-network
// pods (which do not need hostport <-> container port mapping).
func gatherAllHostports(activePods []*ActivePod) (map[api.ContainerPort]targetPod, error) {
	podHostportMap := make(map[api.ContainerPort]targetPod)
	for _, r := range activePods {
		if r.IP.To4() == nil {
			return nil, fmt.Errorf("Invalid or missing pod %s IP", kubecontainer.GetPodFullName(r.Pod))
		}

		// Do not handle hostports for host-network pods.
		if r.Pod.Spec.SecurityContext != nil && r.Pod.Spec.SecurityContext.HostNetwork {
			continue
		}

		for _, container := range r.Pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.HostPort != 0 {
					podHostportMap[port] = targetPod{podFullName: kubecontainer.GetPodFullName(r.Pod), podIP: r.IP.String()}
				}
			}
		}
	}
	return podHostportMap, nil
}
Example #24
// isPodRunning returns true if all containers of the pod are running.
func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) {
	status, err := kl.containerRuntime.GetPodStatus(pod)
	if err != nil {
		glog.Infof("Failed to get the status of pod %q: %v", kubecontainer.GetPodFullName(pod), err)
		return false, err
	}
	for _, st := range status.ContainerStatuses {
		if st.State.Running == nil {
			glog.Infof("Container %q not running: %#v", st.Name, st.State)
			return false, nil
		}
	}
	return true, nil
}
Example #25
func TestSyncPodWithPullPolicy(t *testing.T) {
	dm, fakeDocker := newTestDockerManager()
	puller := dm.dockerPuller.(*FakeDockerPuller)
	puller.HasImages = []string{"existing_one", "want:latest"}
	dm.podInfraContainerImage = "pod_infra_image"

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "bar", Image: "pull_always_image", ImagePullPolicy: api.PullAlways},
				{Name: "bar2", Image: "pull_if_not_present_image", ImagePullPolicy: api.PullIfNotPresent},
				{Name: "bar3", Image: "existing_one", ImagePullPolicy: api.PullIfNotPresent},
				{Name: "bar4", Image: "want:latest", ImagePullPolicy: api.PullIfNotPresent},
				{Name: "bar5", Image: "pull_never_image", ImagePullPolicy: api.PullNever},
			},
		},
	}

	expectedResults := []*kubecontainer.SyncResult{
		// Sync result for infra container
		{kubecontainer.StartContainer, PodInfraContainerName, nil, ""},
		{kubecontainer.SetupNetwork, kubecontainer.GetPodFullName(pod), nil, ""},
		// Sync result for user containers
		{kubecontainer.StartContainer, "bar", nil, ""},
		{kubecontainer.StartContainer, "bar2", nil, ""},
		{kubecontainer.StartContainer, "bar3", nil, ""},
		{kubecontainer.StartContainer, "bar4", nil, ""},
		{kubecontainer.StartContainer, "bar5", kubecontainer.ErrImageNeverPull,
			"Container image \"pull_never_image\" is not present with pull policy of Never"},
	}

	result := runSyncPod(t, dm, fakeDocker, pod, nil, true)
	verifySyncResults(t, expectedResults, result)

	fakeDocker.Lock()
	defer fakeDocker.Unlock()

	pulledImageSorted := puller.ImagesPulled[:]
	sort.Strings(pulledImageSorted)
	assert.Equal(t, []string{"pod_infra_image", "pull_always_image", "pull_if_not_present_image"}, pulledImageSorted)

	if len(fakeDocker.Created) != 5 {
		t.Errorf("Unexpected containers created %v", fakeDocker.Created)
	}
}
Example #26
func (pm *basicManager) updatePodsInternal(pods ...*api.Pod) {
	for _, pod := range pods {
		podFullName := kubecontainer.GetPodFullName(pod)
		if IsMirrorPod(pod) {
			pm.mirrorPodByUID[pod.UID] = pod
			pm.mirrorPodByFullName[podFullName] = pod
			if p, ok := pm.podByFullName[podFullName]; ok {
				pm.translationByUID[pod.UID] = p.UID
			}
		} else {
			pm.podByUID[pod.UID] = pod
			pm.podByFullName[podFullName] = pod
		}
	}
}
Example #27
// If the UID belongs to a mirror pod, maps it to the UID of its static pod.
// Otherwise, return the original UID. All public-facing functions should
// perform this translation for UIDs because a user may provide a mirror pod UID,
// which is not recognized by internal Kubelet functions.
func (pm *basicManager) TranslatePodUID(uid types.UID) types.UID {
	if uid == "" {
		return uid
	}

	pm.lock.RLock()
	defer pm.lock.RUnlock()
	if mirrorPod, ok := pm.mirrorPodByUID[uid]; ok {
		podFullName := kubecontainer.GetPodFullName(mirrorPod)
		if pod, ok := pm.podByFullName[podFullName]; ok {
			return pod.UID
		}
	}
	return uid
}
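The basicManager snippets (UpdatePod, DeletePod, updatePodsInternal, TranslatePodUID) all read and write the same handful of maps under pm.lock. For readability, here is an assumed sketch of that state; the real kubelet struct carries more fields and, in newer trees, uses *v1.Pod instead of *api.Pod:
// basicManager state assumed by the pod-manager examples (sketch, not verbatim).
type basicManager struct {
	lock sync.RWMutex // guards all maps below

	// Regular pods, indexed by UID and by full name.
	podByUID      map[types.UID]*api.Pod
	podByFullName map[string]*api.Pod

	// Mirror pods for static pods, indexed the same two ways.
	mirrorPodByUID      map[types.UID]*api.Pod
	mirrorPodByFullName map[string]*api.Pod

	// Maps a mirror pod's UID to the UID of the static pod it mirrors.
	translationByUID map[types.UID]types.UID
}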
Example #28
// getRun handles requests to run a command inside a container.
func (s *Server) getRun(request *restful.Request, response *restful.Response) {
	podNamespace, podID, uid, container := getContainerCoordinates(request)
	pod, ok := s.host.GetPodByName(podNamespace, podID)
	if !ok {
		response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
		return
	}
	command := strings.Split(request.QueryParameter("cmd"), " ")
	data, err := s.host.RunInContainer(kubecontainer.GetPodFullName(pod), uid, container, command)
	if err != nil {
		response.WriteError(http.StatusInternalServerError, err)
		return
	}
	response.Write(data)
}
Example #29
func TestChangedStatusKeepsStartTime(t *testing.T) {
	syncer := newTestStatusManager()
	now := util.Now()
	firstStatus := getRandomPodStatus()
	firstStatus.StartTime = &now
	syncer.SetPodStatus(testPod, firstStatus)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 2)
	finalStatus, _ := syncer.GetPodStatus(kubecontainer.GetPodFullName(testPod))
	if finalStatus.StartTime.IsZero() {
		t.Errorf("StartTime should not be zero")
	}
	if !finalStatus.StartTime.Time.Equal(now.Time) {
		t.Errorf("Expected %v, but got %v", now.Time, finalStatus.StartTime.Time)
	}
}
Example #30
func (pm *basicManager) DeletePod(pod *v1.Pod) {
	pm.lock.Lock()
	defer pm.lock.Unlock()
	if pm.secretManager != nil {
		pm.secretManager.UnregisterPod(pod)
	}
	podFullName := kubecontainer.GetPodFullName(pod)
	if IsMirrorPod(pod) {
		delete(pm.mirrorPodByUID, pod.UID)
		delete(pm.mirrorPodByFullName, podFullName)
		delete(pm.translationByUID, pod.UID)
	} else {
		delete(pm.podByUID, pod.UID)
		delete(pm.podByFullName, podFullName)
	}
}