// This test assumes that the client implementation backs off exponentially, for an individual request. func TestBackoffLifecycle(t *testing.T) { count := 0 testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { count++ t.Logf("Attempt %d", count) if count == 5 || count == 9 { w.WriteHeader(http.StatusOK) return } else { w.WriteHeader(http.StatusGatewayTimeout) return } })) // TODO: Uncomment when fix #19254 // defer testServer.Close() c := testRESTClient(t, testServer) // Test backoff recovery and increase. This correlates to the constants // which are used in the server implementation returning StatusOK above. seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0} request := c.Verb("POST").Prefix("backofftest").Suffix("abc") request.backoffMgr = &URLBackoff{ Backoff: util.NewBackOff( time.Duration(1)*time.Second, time.Duration(200)*time.Second)} for _, sec := range seconds { start := time.Now() request.DoRaw() finish := time.Since(start) t.Logf("%v finished in %v", sec, finish) if finish < time.Duration(sec)*time.Second || finish >= time.Duration(sec+5)*time.Second { t.Fatalf("%v not in range %v", finish, sec) } } }
// TestURLBackoffFunctionality generally tests the URLBackoff wrapper. We avoid duplicating tests from backoff and request. func TestURLBackoffFunctionality(t *testing.T) { myBackoff := &URLBackoff{ Backoff: util.NewBackOff(1*time.Second, 60*time.Second), } // Now test that backoff increases, then recovers. // 200 and 300 should both result in clearing the backoff. // all others like 429 should result in increased backoff. seconds := []int{0, 1, 2, 4, 8, 0, 1, 2} returnCodes := []int{ 429, 500, 501, 502, 300, 500, 501, 502, } if len(seconds) != len(returnCodes) { t.Fatalf("responseCode to backoff arrays should be the same length... sanity check failed.") } for i, sec := range seconds { backoffSec := myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) if backoffSec < time.Duration(sec)*time.Second || backoffSec > time.Duration(sec+5)*time.Second { t.Errorf("Backoff out of range %v: %v %v", i, sec, backoffSec) } myBackoff.UpdateBackoff(parse("http://1.2.3.4:100/responseCodeForFuncTest"), nil, returnCodes[i]) } if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) == 0 { t.Errorf("The final return code %v should have resulted in a backoff ! ", returnCodes[7]) } }
// readExpBackoffConfig handles the internal logic of determining what the // backoff policy is. By default if no information is available, NoBackoff. // TODO Generalize this see #17727 . func readExpBackoffConfig() BackoffManager { backoffBase := os.Getenv(envBackoffBase) backoffDuration := os.Getenv(envBackoffDuration) backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) if errBase != nil || errDuration != nil { return &NoBackoff{} } return &URLBackoff{ Backoff: util.NewBackOff( time.Duration(backoffBaseInt)*time.Second, time.Duration(backoffDurationInt)*time.Second)} }
func TestURLBackoffFunctionalityCollisions(t *testing.T) { myBackoff := &URLBackoff{ Backoff: util.NewBackOff(1*time.Second, 60*time.Second), } // Add some noise and make sure backoff for a clean URL is zero. myBackoff.UpdateBackoff(parse("http://100.200.300.400:8080"), nil, 500) myBackoff.UpdateBackoff(parse("http://1.2.3.4:8080"), nil, 500) if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) > 0 { t.Errorf("URLs are colliding in the backoff map!") } }
// readExpBackoffConfig handles the internal logic of determining what the // backoff policy is. By default if no information is available, NoBackoff. // TODO Generalize this see #17727 . func readExpBackoffConfig() BackoffManager { backoffBase := os.Getenv(envBackoffBase) backoffDuration := os.Getenv(envBackoffDuration) backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) if errBase != nil || errDuration != nil { glog.V(2).Infof("Configuring no exponential backoff.") return &NoBackoff{} } else { glog.V(2).Infof("Configuring exponential backoff as %v, %v", backoffBaseInt, backoffDurationInt) return &URLBackoff{ Backoff: util.NewBackOff( time.Duration(backoffBaseInt)*time.Second, time.Duration(backoffDurationInt)*time.Second)} } }
// runSyncPod is a helper function to retrieve the running pods from the fake // docker client and runs SyncPod for the given pod. func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod, backOff *util.Backoff, expectErr bool) kubecontainer.PodSyncResult { podStatus, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { t.Errorf("unexpected error: %v", err) } fakeDocker.ClearCalls() if backOff == nil { backOff = util.NewBackOff(time.Second, time.Minute) } // api.PodStatus is not used in SyncPod now, pass in an empty one. result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff) err = result.Error() if err != nil && !expectErr { t.Errorf("unexpected error: %v", err) } else if err == nil && expectErr { t.Errorf("expected error didn't occur") } return result }
func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManager, *FakeDockerClient) { fakeDocker := NewFakeDockerClient() fakeRecorder := &record.FakeRecorder{} containerRefManager := kubecontainer.NewRefManager() networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil)) dockerManager := NewFakeDockerManager( fakeDocker, fakeRecorder, proberesults.NewManager(), containerRefManager, &cadvisorapi.MachineInfo{}, kubetypes.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, networkPlugin, &fakeRuntimeHelper{}, fakeHTTPClient, util.NewBackOff(time.Second, 300*time.Second)) return dockerManager, fakeDocker }
// TestSyncPodBackoff drives SyncPod through a crash-loop scenario on a fake
// clock and checks that the per-container backoff grows, gates container
// restarts, and eventually resets.
func TestSyncPodBackoff(t *testing.T) {
	var fakeClock = util.NewFakeClock(time.Now())
	startTime := fakeClock.Now()

	dm, fakeDocker := newTestDockerManager()
	// "good" keeps running; "bad" exits and triggers the backoff logic.
	containers := []api.Container{
		{Name: "good"},
		{Name: "bad"},
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "podfoo",
			Namespace: "nsnew",
		},
		Spec: api.PodSpec{
			Containers: containers,
		},
	}

	// Key under which the backoff for the "bad" container is tracked.
	stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678"
	// Fake docker state: infra + "good" running, "bad" exited with code 42.
	dockerContainers := []*docker.Container{
		{
			ID:   "9876",
			Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				StartedAt: startTime,
				Running:   true,
			},
		},
		{
			ID:   "1234",
			Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				StartedAt: startTime,
				Running:   true,
			},
		},
		{
			ID:   "5678",
			Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				ExitCode:   42,
				StartedAt:  startTime,
				FinishedAt: fakeClock.Now(),
			},
		},
	}

	// Expected fake-docker call sequences for a restart vs. a gated attempt.
	startCalls := []string{"inspect_container", "create", "start", "inspect_container"}
	backOffCalls := []string{"inspect_container"}
	startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
	backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
	// tick: fake-clock offset (seconds) at which SyncPod runs.
	// backoff: expected value of backOff.Get(stableId) after the run.
	// killDelay: offset (seconds) used as the "bad" container's FinishedAt
	// when it was (re)created during this iteration.
	tests := []struct {
		tick      int
		backoff   int
		killDelay int
		result    []string
		expectErr bool
	}{
		{1, 1, 1, startCalls, false},
		{2, 2, 2, startCalls, false},
		{3, 2, 3, backOffCalls, true},
		{4, 4, 4, startCalls, false},
		{5, 4, 5, backOffCalls, true},
		{6, 4, 6, backOffCalls, true},
		{7, 4, 7, backOffCalls, true},
		{8, 8, 129, startCalls, false},
		// After a long quiet period (tick 130) the backoff resets to 1s.
		{130, 1, 0, startCalls, false},
	}

	backOff := util.NewBackOff(time.Second, time.Minute)
	backOff.Clock = fakeClock
	for _, c := range tests {
		fakeDocker.SetFakeContainers(dockerContainers)
		fakeClock.SetTime(startTime.Add(time.Duration(c.tick) * time.Second))

		result := runSyncPod(t, dm, fakeDocker, pod, backOff, c.expectErr)
		verifyCalls(t, fakeDocker, c.result)

		// Verify whether the correct sync pod result is generated
		if c.expectErr {
			verifySyncResults(t, []*kubecontainer.SyncResult{backoffResult}, result)
		} else {
			verifySyncResults(t, []*kubecontainer.SyncResult{startResult}, result)
		}

		if backOff.Get(stableId) != time.Duration(c.backoff)*time.Second {
			t.Errorf("At tick %s expected backoff=%s got=%s", time.Duration(c.tick)*time.Second, time.Duration(c.backoff)*time.Second, backOff.Get(stableId))
		}

		if len(fakeDocker.Created) > 0 {
			// pretend kill the container
			fakeDocker.Created = nil
			dockerContainers[2].State.FinishedAt = startTime.Add(time.Duration(c.killDelay) * time.Second)
		}
	}
}
// TestSerializedPuller table-tests the serialized image puller: which fake
// runtime functions get called and which error PullImage returns for each
// combination of pull policy, image presence, and injected failures.
// expectedErr has one entry per 1s fake-clock tick, so repeated pull failures
// can be observed transitioning into ErrImagePullBackOff.
func TestSerializedPuller(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:            "test_pod",
			Namespace:       "test-ns",
			UID:             "bar",
			ResourceVersion: "42",
			SelfLink:        "/api/v1/pods/foo",
		}}

	cases := []struct {
		containerImage  string        // image requested by the container
		policy          api.PullPolicy
		calledFunctions []string      // fake runtime calls asserted each tick
		inspectErr      error         // injected IsImagePresent error
		pullerErr       error         // injected PullImage error
		expectedErr     []error       // expected PullImage result per tick
	}{
		{ // pull missing image
			containerImage:  "missing_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil}},

		{ // image present, dont pull
			containerImage:  "present_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil, nil, nil}},
		// image present, pull it
		{containerImage: "present_image",
			policy:          api.PullAlways,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil, nil, nil}},
		// missing image, error PullNever
		{containerImage: "missing_image",
			policy:          api.PullNever,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
		// missing image, unable to inspect
		{containerImage: "missing_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      errors.New("unknown inspectError"),
			pullerErr:       nil,
			expectedErr:     []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
		// missing image, unable to fetch
		{containerImage: "typo_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       errors.New("404"),
			// Failures alternate with backoff as the fake clock advances.
			expectedErr:     []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}},
	}

	for i, c := range cases {
		container := &api.Container{
			Name:            "container_name",
			Image:           c.containerImage,
			ImagePullPolicy: c.policy,
		}
		// Fresh backoff and fake clock per case so cases don't interfere.
		backOff := util.NewBackOff(time.Second, time.Minute)
		fakeClock := util.NewFakeClock(time.Now())
		backOff.Clock = fakeClock

		fakeRuntime := &FakeRuntime{}
		fakeRecorder := &record.FakeRecorder{}
		puller := NewSerializedImagePuller(fakeRecorder, fakeRuntime, backOff)

		// Only "present_image" exists in the fake runtime's image list.
		fakeRuntime.ImageList = []Image{{"present_image", nil, 0}}
		fakeRuntime.Err = c.pullerErr
		fakeRuntime.InspectErr = c.inspectErr

		for tick, expected := range c.expectedErr {
			fakeClock.Step(time.Second)
			err, _ := puller.PullImage(pod, container, nil)
			fakeRuntime.AssertCalls(c.calledFunctions)
			assert.Equal(t, expected, err, "in test %d tick=%d", i, tick)
		}
	}
}
// Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { glog.V(4).Infof("Disabling backoff strategy") b.Backoff = util.NewBackOff(0*time.Second, 0*time.Second) }