// probe probes the container.
func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
	var probeSpec *api.Probe
	switch probeType {
	case readiness:
		probeSpec = container.ReadinessProbe
	case liveness:
		probeSpec = container.LivenessProbe
	default:
		return results.Failure, fmt.Errorf("Unknown probe type: %q", probeType)
	}

	ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name)
	if probeSpec == nil {
		glog.Warningf("%s probe for %s is nil", probeType, ctrName)
		return results.Success, nil
	}

	result, output, err := pb.runProbeWithRetries(probeSpec, pod, status, container, containerID, maxProbeRetries)
	if err != nil || result != probe.Success {
		// Probe failed in one way or another.
		ref, hasRef := pb.refManager.GetRef(containerID)
		if !hasRef {
			glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName)
		}
		if err != nil {
			glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
			if hasRef {
				pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
			}
		} else { // result != probe.Success
			glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
			if hasRef {
				pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
			}
		}
		return results.Failure, err
	}

	glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName)
	return results.Success, nil
}
// Run records the handler invocation so tests can inspect it later, and
// returns the configured error, if any.
func (hr *FakeHandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) {
	hr.Lock()
	defer hr.Unlock()

	if hr.Err != nil {
		return "", hr.Err
	}

	switch {
	case handler.Exec != nil:
		hr.HandlerRuns = append(hr.HandlerRuns, fmt.Sprintf("exec on pod: %v, container: %v: %v", format.Pod(pod), container.Name, containerID.String()))
	case handler.HTTPGet != nil:
		hr.HandlerRuns = append(hr.HandlerRuns, fmt.Sprintf("http-get on pod: %v, container: %v: %v", format.Pod(pod), container.Name, containerID.String()))
	case handler.TCPSocket != nil:
		hr.HandlerRuns = append(hr.HandlerRuns, fmt.Sprintf("tcp-socket on pod: %v, container: %v: %v", format.Pod(pod), container.Name, containerID.String()))
	default:
		return "", fmt.Errorf("Invalid handler: %v", handler)
	}
	return "", nil
}
func TestSetContainerReadiness(t *testing.T) {
	cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"}
	cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"}
	containerStatuses := []api.ContainerStatus{
		{
			Name:        "c1",
			ContainerID: cID1.String(),
			Ready:       false,
		}, {
			Name:        "c2",
			ContainerID: cID2.String(),
			Ready:       false,
		},
	}
	status := api.PodStatus{
		ContainerStatuses: containerStatuses,
		Conditions: []api.PodCondition{{
			Type:   api.PodReady,
			Status: api.ConditionFalse,
		}},
	}
	pod := getTestPod()
	pod.Spec.Containers = []api.Container{{Name: "c1"}, {Name: "c2"}}

	// Verify expected readiness of containers & pod.
	verifyReadiness := func(step string, status *api.PodStatus, c1Ready, c2Ready, podReady bool) {
		for _, c := range status.ContainerStatuses {
			switch c.ContainerID {
			case cID1.String():
				if c.Ready != c1Ready {
					t.Errorf("[%s] Expected readiness of c1 to be %v but was %v", step, c1Ready, c.Ready)
				}
			case cID2.String():
				if c.Ready != c2Ready {
					t.Errorf("[%s] Expected readiness of c2 to be %v but was %v", step, c2Ready, c.Ready)
				}
			default:
				t.Fatalf("[%s] Unexpected container: %+v", step, c)
			}
		}
		if status.Conditions[0].Type != api.PodReady {
			t.Fatalf("[%s] Unexpected condition: %+v", step, status.Conditions[0])
		} else if ready := (status.Conditions[0].Status == api.ConditionTrue); ready != podReady {
			t.Errorf("[%s] Expected readiness of pod to be %v but was %v", step, podReady, ready)
		}
	}

	m := newTestManager(&fake.Clientset{})
	// Add test pod because the container spec has been changed.
	m.podManager.AddPod(pod)

	t.Log("Setting readiness before status should fail.")
	m.SetContainerReadiness(pod.UID, cID1, true)
	verifyUpdates(t, m, 0)
	if status, ok := m.GetPodStatus(pod.UID); ok {
		t.Errorf("Unexpected PodStatus: %+v", status)
	}

	t.Log("Setting initial status.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("initial", &status, false, false, false)

	t.Log("Setting unchanged readiness should do nothing.")
	m.SetContainerReadiness(pod.UID, cID1, false)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("unchanged", &status, false, false, false)

	t.Log("Setting container readiness should generate update but not pod readiness.")
	m.SetContainerReadiness(pod.UID, cID1, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("c1 ready", &status, true, false, false)

	t.Log("Setting both containers to ready should update pod readiness.")
	m.SetContainerReadiness(pod.UID, cID2, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("all ready", &status, true, true, true)

	t.Log("Setting non-existent container readiness should fail.")
	m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("ignore non-existent", &status, true, true, true)
}