func TestUpdateSelectorControllerRef(t *testing.T) {
	manager, fakePodControl := setupManagerWithGCEnabled()
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(2, labelMap)
	// put 2 pods in the podStore
	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
	// update the RS so that its selector no longer matches the pods
	updatedRS := *rs
	updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
	// put the updatedRS into the store. This is consistent with the behavior of
	// the Informer: the Informer updates the store before calling the handler
	// (updateRS() in this case).
	manager.rsStore.Store.Add(&updatedRS)
	manager.updateRS(rs, &updatedRS)
	// verify that the rs is added to the queue
	rsKey := getKey(rs, t)
	queueRS, _ := manager.queue.Get()
	if queueRS != rsKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
	}
	manager.queue.Done(queueRS)
	err := manager.syncReplicaSet(rsKey)
	if err != nil {
		t.Fatal(err)
	}
	// expect 2 patches to be sent to remove the controllerRef for the pods.
	// expect 2 creates because rs.Spec.Replicas=2 and there exists no
	// matching pod.
	validateSyncReplicaSet(t, fakePodControl, 2, 0, 2)
	fakePodControl.Clear()
}
func TestValidateResponse(t *testing.T) {
	valid := &http.Response{
		Request:    &http.Request{},
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(strings.NewReader("OK")),
	}
	if err := validateResponse(valid); err != nil {
		t.Errorf("ValidateResponse with valid response returned error %+v", err)
	}

	invalid := &http.Response{
		Request:    &http.Request{},
		StatusCode: http.StatusBadRequest,
		Body: ioutil.NopCloser(strings.NewReader(`{
			"error": {
				"statuscode": 400,
				"statusdesc": "Bad Request",
				"errormessage": "This is an error"
			}
		}`)),
	}
	want := &PingdomError{400, "Bad Request", "This is an error"}
	if err := validateResponse(invalid); !reflect.DeepEqual(err, want) {
		t.Errorf("ValidateResponse with invalid response returned %+v, want %+v", err, want)
	}
}
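// PingdomError is referenced by TestValidateResponse above but not defined in
// this excerpt. A minimal sketch consistent with the composite literal
// &PingdomError{400, "Bad Request", "This is an error"} and the JSON error
// body (an assumption, not necessarily the library's exact definition):
type PingdomError struct {
	StatusCode int    `json:"statuscode"`
	StatusDesc string `json:"statusdesc"`
	Message    string `json:"errormessage"`
}

// Error satisfies the error interface so validateResponse can return it.
func (e *PingdomError) Error() string {
	return fmt.Sprintf("%d %s: %s", e.StatusCode, e.StatusDesc, e.Message)
}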
func TestQueue(t *testing.T) {
	queue := NewQueue(10)
	t.Logf("backing slice element 0 at %p", &queue.bs[0])
	t.Logf("backing slice element 1 at %p", &queue.bs[1])
	queue.Push(tt{a: 10})
	t.Logf("element 0 after push: %v", queue.bs[0])
}
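// The Queue and tt types exercised by TestQueue are not shown in this
// excerpt. A minimal sketch, assuming Queue is a fixed-capacity ring buffer
// over a backing slice bs (hypothetical; the real implementation may differ):
type tt struct {
	a int
}

type Queue struct {
	bs   []tt // backing storage; the test logs addresses of bs[0] and bs[1]
	head int  // index of the oldest element
	tail int  // index where the next element is written
	size int  // number of elements currently stored
}

func NewQueue(n int) *Queue {
	return &Queue{bs: make([]tt, n)}
}

// Push appends v, overwriting the oldest element when the buffer is full.
func (q *Queue) Push(v tt) {
	q.bs[q.tail] = v
	q.tail = (q.tail + 1) % len(q.bs)
	if q.size == len(q.bs) {
		q.head = (q.head + 1) % len(q.bs)
	} else {
		q.size++
	}
}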
func TestGraphNodeConfigModuleExpandFlatten(t *testing.T) {
	mod := testModule(t, "graph-node-module-flatten")

	node := &GraphNodeConfigModule{
		Path:   []string{RootModuleName, "child"},
		Module: &config.Module{},
		Tree:   nil,
	}

	g, err := node.Expand(&BasicGraphBuilder{
		Steps: []GraphTransformer{
			&ConfigTransformer{Module: mod},
		},
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	fg := g.(GraphNodeFlatGraph)

	actual := strings.TrimSpace(fg.FlattenGraph().String())
	expected := strings.TrimSpace(testGraphNodeModuleExpandFlattenStr)
	if actual != expected {
		t.Fatalf("bad:\n\n%s", actual)
	}
}
func TestEventerOnce(t *testing.T) {
	e := NewEventer()
	e.AddEvent("test")

	sem := make(chan bool)
	e.Once("test", func(data interface{}) {
		sem <- true
	})

	go func() {
		e.Publish("test", true)
	}()

	select {
	case <-sem:
	case <-time.After(10 * time.Millisecond):
		t.Errorf("Once was not called")
	}

	go func() {
		e.Publish("test", true)
	}()

	select {
	case <-sem:
		t.Errorf("Once was called twice")
	case <-time.After(10 * time.Millisecond):
	}
}
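// NewEventer and its Once/Publish methods are assumed by TestEventerOnce but
// not defined here. A minimal sketch of the semantics the test relies on --
// Once registers a handler that fires at most one time (an illustration, not
// the package's actual implementation):
type Eventer struct {
	mu       sync.Mutex
	handlers map[string][]func(interface{})
}

func NewEventer() *Eventer {
	return &Eventer{handlers: make(map[string][]func(interface{}))}
}

// AddEvent registers an event name so handlers can be attached to it.
func (e *Eventer) AddEvent(name string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if _, ok := e.handlers[name]; !ok {
		e.handlers[name] = []func(interface{}){}
	}
}

// Once attaches fn so that it runs on the first Publish only.
func (e *Eventer) Once(name string, fn func(interface{})) {
	var once sync.Once
	e.mu.Lock()
	defer e.mu.Unlock()
	e.handlers[name] = append(e.handlers[name], func(data interface{}) {
		once.Do(func() { fn(data) })
	})
}

// Publish invokes every handler attached to the named event.
func (e *Eventer) Publish(name string, data interface{}) {
	e.mu.Lock()
	hs := append([]func(interface{}){}, e.handlers[name]...)
	e.mu.Unlock()
	for _, h := range hs {
		h(data)
	}
}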
func startSleepCommand(t *testing.T) *exec.Cmd {
	cmd := exec.Command("sh", "-c", "sleep 100")
	if err := cmd.Start(); err != nil {
		t.Error(err)
	}
	return cmd
}
func TestConnQueryScan(t *testing.T) {
	t.Parallel()

	conn := mustConnect(t, *defaultConnConfig)
	defer closeConn(t, conn)

	var sum, rowCount int32

	rows, err := conn.Query("select generate_series(1,$1)", 10)
	if err != nil {
		t.Fatalf("conn.Query failed: %v", err)
	}
	defer rows.Close()

	for rows.Next() {
		var n int32
		if err := rows.Scan(&n); err != nil {
			t.Fatalf("rows.Scan failed: %v", err)
		}
		sum += n
		rowCount++
	}

	if rows.Err() != nil {
		t.Fatalf("conn.Query failed: %v", rows.Err())
	}

	if rowCount != 10 {
		t.Error("Select called onDataRow wrong number of times")
	}
	if sum != 55 {
		t.Error("Wrong values returned")
	}
}
func TestInspectContainer(t *testing.T) {
	server := DockerServer{}
	addContainers(&server, 2)
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID)
	request, _ := http.NewRequest("GET", path, nil)
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
	}
	expected := server.containers[0]
	var got docker.Container
	err := json.NewDecoder(recorder.Body).Decode(&got)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got.Config, expected.Config) {
		t.Errorf("InspectContainer: wrong config. Want %#v. Got %#v.", expected.Config, got.Config)
	}
	if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) {
		t.Errorf("InspectContainer: wrong network settings. Want %#v. Got %#v.", expected.NetworkSettings, got.NetworkSettings)
	}
	// Normalize fields that legitimately differ before comparing the rest.
	got.State.StartedAt = expected.State.StartedAt
	got.State.FinishedAt = expected.State.FinishedAt
	got.Config = expected.Config
	got.Created = expected.Created
	got.NetworkSettings = expected.NetworkSettings
	if !reflect.DeepEqual(got, *expected) {
		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
	}
}
// Tests the isReqAuthenticated function and validates the S3 errors it returns.
func TestIsReqAuthenticated(t *testing.T) {
	path, err := newTestConfig("us-east-1")
	if err != nil {
		t.Fatalf("unable to initialize config file, %s", err)
	}
	defer removeAll(path)

	serverConfig.SetCredential(credential{"myuser", "mypassword"})

	// List of test cases for validating http request authentication.
	testCases := []struct {
		req     *http.Request
		s3Error APIErrorCode
	}{
		// When request is nil, internal error is returned.
		{nil, ErrInternalError},
		// When request is unsigned, access denied is returned.
		{mustNewRequest("GET", "http://localhost:9000", 0, nil, t), ErrAccessDenied},
		// When request is properly signed, but has a bad Content-MD5 header.
		{mustNewSignedRequest("PUT", "http://localhost:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
		// When request is properly signed, error is none.
		{mustNewSignedRequest("GET", "http://localhost:9000", 0, nil, t), ErrNone},
	}

	// Validates all testcases.
	for _, testCase := range testCases {
		if testCase.s3Error == ErrBadDigest {
			testCase.req.Header.Set("Content-Md5", "garbage")
		}
		if s3Error := isReqAuthenticated(testCase.req); s3Error != testCase.s3Error {
			t.Fatalf("Unexpected s3error returned wanted %d, got %d", testCase.s3Error, s3Error)
		}
	}
}
// Provides a fully populated http request instance; fails the test otherwise.
func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req, err := newTestRequest(method, urlStr, contentLength, body)
	if err != nil {
		t.Fatalf("Unable to initialize new http request %s", err)
	}
	return req
}
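// mustNewSignedRequest is used by TestIsReqAuthenticated above but not shown
// in this excerpt. A plausible sketch: it builds the same request and then
// applies an AWS Signature V4 signature with the configured credentials. The
// signRequestV4 helper and the GetCredential accessor are hypothetical here;
// the real signer lives elsewhere in the codebase.
func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	cred := serverConfig.GetCredential() // assumed accessor for the credential set in the test
	if err := signRequestV4(req, cred.AccessKeyID, cred.SecretAccessKey); err != nil {
		t.Fatalf("Unable to sign request: %v", err)
	}
	return req
}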
// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection.
func TestIsRequestPresignedSignatureV4(t *testing.T) {
	testCases := []struct {
		inputQueryKey   string
		inputQueryValue string
		expectedResult  bool
	}{
		// Test case - 1.
		// Test case with no query key set.
		{"", "", false},
		// Test case - 2.
		// Test case with query key "X-Amz-Credential" set.
		{"X-Amz-Credential", "", true},
		// Test case - 3.
		{"X-Amz-Content-Sha256", "", false},
	}

	for i, testCase := range testCases {
		// creating an input HTTP request.
		// Only the query parameters are relevant for this particular test.
		inputReq, err := http.NewRequest("GET", "http://example.com", nil)
		if err != nil {
			t.Fatalf("Error initializing input HTTP request: %v", err)
		}
		q := inputReq.URL.Query()
		q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
		inputReq.URL.RawQuery = q.Encode()

		actualResult := isRequestPresignedSignatureV4(inputReq)
		if testCase.expectedResult != actualResult {
			t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
		}
	}
}
func TestOverlappingRSs(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
		manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
		manager.podStoreSynced = alwaysReady

		// Create 9 ReplicaSets with staggered creation timestamps, shuffle them
		// randomly, and insert them into the ReplicaSet controller's store.
		var controllers []*extensions.ReplicaSet
		for j := 1; j < 10; j++ {
			rsSpec := newReplicaSet(1, labelMap)
			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			rsSpec.Name = string(uuid.NewUUID())
			controllers = append(controllers, rsSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rsStore.Store.Add(shuffledControllers[j])
		}

		// Add a pod and make sure only the oldest ReplicaSet is synced
		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
		rsKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRS, _ := manager.queue.Get()
		if queueRS != rsKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
		}
	}
}
func TestRSManagerNotReady(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 2, 0)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

	// Simulates the ReplicaSet reflector running before the pod reflector. We don't
	// want to end up creating replicas in this case until the pod reflector
	// has synced, so the ReplicaSet controller should just requeue the ReplicaSet.
	rsSpec := newReplicaSet(1, map[string]string{"foo": "bar"})
	manager.rsStore.Store.Add(rsSpec)
	rsKey := getKey(rsSpec, t)
	manager.syncReplicaSet(rsKey)
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)

	queueRS, _ := manager.queue.Get()
	if queueRS != rsKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
	}

	manager.podStoreSynced = alwaysReady
	manager.syncReplicaSet(rsKey)
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
}
func TestSyncReplicaSetDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")

	// Creates a replica and sets expectations
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// Expectations prevent replica creation but not a status update
	rsSpec.Status.Replicas = 0
	rsSpec.Status.ReadyReplicas = 0
	fakePodControl.Clear()
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)

	// Get the key for the controller
	rsKey, err := controller.KeyFunc(rsSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(rsKey)
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// This replica should not need a lowering of expectations, since the previous create failed
	fakePodControl.Clear()
	fakePodControl.Err = nil
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// 1 PUT for the ReplicaSet status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
func TestDeleteFinalStateUnknown(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	received := make(chan string)
	manager.syncHandler = func(key string) error {
		received <- key
		return nil
	}

	// The DeletedFinalStateUnknown object should cause the ReplicaSet manager to insert
	// the controller matching the selectors of the deleted pod into the work queue.
	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(1, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod")
	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

	go manager.worker()

	expected := getKey(rsSpec, t)
	select {
	case key := <-received:
		if key != expected {
			t.Errorf("Unexpected sync all for ReplicaSet %v, expected %v", key, expected)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
	}
}
func TestWatchExit(t *testing.T) {
	if skipTest(t) {
		return
	}
	tw := newTestWatcher(t)

	cmd := startSleepCommand(t)
	childPid := cmd.Process.Pid

	// watch for exit event of our child process
	if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil {
		t.Error(err)
	}

	// kill our child process, triggers exit event
	syscall.Kill(childPid, syscall.SIGTERM)
	cmd.Wait()

	tw.close()

	expectEvents(t, 0, "forks", tw.events.forks)
	expectEvents(t, 0, "execs", tw.events.execs)
	if expectEvents(t, 1, "exits", tw.events.exits) {
		expectEventPid(t, "exit", childPid, tw.events.exits[0])
	}
}
// General purpose Watcher wrapper for all tests
func newTestWatcher(t *testing.T) *testWatcher {
	watcher, err := NewWatcher()
	if err != nil {
		t.Fatal(err)
	}
	events := &anyEvent{
		done: make(chan bool, 1),
	}
	tw := &testWatcher{
		t:       t,
		watcher: watcher,
		events:  events,
	}
	go func() {
		for {
			select {
			case <-events.done:
				return
			case ev := <-watcher.Fork:
				events.forks = append(events.forks, ev.ParentPid)
			case ev := <-watcher.Exec:
				events.execs = append(events.execs, ev.Pid)
			case ev := <-watcher.Exit:
				events.exits = append(events.exits, ev.Pid)
			case err := <-watcher.Error:
				events.errors = append(events.errors, err)
			}
		}
	}()
	return tw
}
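// The testWatcher and anyEvent types used above are not defined in this
// excerpt. A minimal sketch consistent with how the tests use them (assumed,
// not the package's actual definitions):
type anyEvent struct {
	forks  []int // parent pids from fork events
	execs  []int // pids from exec events
	exits  []int // pids from exit events
	errors []error
	done   chan bool
}

type testWatcher struct {
	t       *testing.T
	watcher *Watcher
	events  *anyEvent
}

// close gives in-flight events a moment to drain, stops the watcher, and
// shuts down the collector goroutine started by newTestWatcher.
func (tw *testWatcher) close() {
	time.Sleep(100 * time.Millisecond)
	tw.watcher.Close()
	tw.events.done <- true
}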
// TestIsRequestUnsignedPayload - Test validates the unsigned payload detection logic.
func TestIsRequestUnsignedPayload(t *testing.T) {
	testCases := []struct {
		inputAmzContentHeader string
		expectedResult        bool
	}{
		// Test case - 1.
		// Test case with "X-Amz-Content-Sha256" header set to empty value.
		{"", false},
		// Test case - 2.
		// Test case with "X-Amz-Content-Sha256" header set to "UNSIGNED-PAYLOAD".
		// The payload is flagged as unsigned when "X-Amz-Content-Sha256" is set to "UNSIGNED-PAYLOAD".
		{"UNSIGNED-PAYLOAD", true},
		// Test case - 3.
		// Test case with "X-Amz-Content-Sha256" header set to a random value.
		{"abcd", false},
	}

	// creating an input HTTP request.
	// Only the headers are relevant for this particular test.
	inputReq, err := http.NewRequest("GET", "http://example.com", nil)
	if err != nil {
		t.Fatalf("Error initializing input HTTP request: %v", err)
	}

	for i, testCase := range testCases {
		inputReq.Header.Set("X-Amz-Content-Sha256", testCase.inputAmzContentHeader)
		actualResult := isRequestUnsignedPayload(inputReq)
		if testCase.expectedResult != actualResult {
			t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
		}
	}
}
func runCommand(t *testing.T, name string) *exec.Cmd {
	cmd := exec.Command(name)
	if err := cmd.Run(); err != nil {
		t.Error(err)
	}
	return cmd
}
func RandIdentityOrFatal(t *testing.T) Identity {
	p, err := RandPeerNetParams()
	if err != nil {
		t.Fatal(err)
	}
	return &identity{*p}
}
func TestServerURLNoListener(t *testing.T) {
	server := DockerServer{}
	url := server.URL()
	if url != "" {
		t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url)
	}
}
func TestMatchFn(t *testing.T) {
	tests := []matchTest{
		{
			[]OpCode{PUSH1, PUSH1, MSTORE, JUMP},
			[]OpCode{PUSH1, MSTORE},
			1,
		},
		{
			[]OpCode{PUSH1, PUSH1, MSTORE, JUMP},
			[]OpCode{PUSH1, MSTORE, PUSH1},
			0,
		},
		{
			[]OpCode{},
			[]OpCode{PUSH1},
			0,
		},
	}

	for i, test := range tests {
		var matchCount int
		MatchFn(test.input, test.match, func(i int) bool {
			matchCount++
			return true
		})
		if matchCount != test.matches {
			t.Errorf("match count failed on test[%d]: expected %d matches, got %d", i, test.matches, matchCount)
		}
	}
}
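// matchTest is not defined in this excerpt; a shape consistent with the
// composite literals above (assumed):
type matchTest struct {
	input   []OpCode // opcode sequence to scan
	match   []OpCode // pattern to search for
	matches int      // expected number of times the pattern is found
}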
func newTestHost(t *testing.T, fakeKubeClient client.Interface) volume.VolumeHost {
	tempDir, err := ioutil.TempDir("/tmp", "persistent_volume_test.")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	return volume.NewFakeVolumeHost(tempDir, fakeKubeClient, testProbeVolumePlugins())
}
func TestExpandPolynomial(t *testing.T) {
	pol := Pol(0x3DA3358B4DC173)
	s := pol.Expand()
	if s != "x^53+x^52+x^51+x^50+x^48+x^47+x^45+x^41+x^40+x^37+x^36+x^34+x^32+x^31+x^27+x^25+x^24+x^22+x^19+x^18+x^16+x^15+x^14+x^8+x^6+x^5+x^4+x+1" {
		t.Fatalf("wrong result, got: %s", s)
	}
}
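// For reference, a sketch of the behavior Expand is expected to have,
// assuming Pol packs a polynomial over GF(2) into a uint64 where bit i set
// means the term x^i is present (an illustration, not the library's actual
// implementation):
func expandPol(p uint64) string {
	var terms []string
	for i := 63; i >= 0; i-- {
		if p&(1<<uint(i)) == 0 {
			continue
		}
		switch i {
		case 0:
			terms = append(terms, "1")
		case 1:
			terms = append(terms, "x")
		default:
			terms = append(terms, fmt.Sprintf("x^%d", i))
		}
	}
	return strings.Join(terms, "+")
}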
// HandleListObjectsInfoSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
// responds with a `List` response when full info is requested.
func HandleListObjectsInfoSuccessfully(t *testing.T) {
	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
		th.TestMethod(t, r, "GET")
		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
		th.TestHeader(t, r, "Accept", "application/json")

		w.Header().Set("Content-Type", "application/json")
		r.ParseForm()
		marker := r.Form.Get("marker")
		switch marker {
		case "":
			fmt.Fprintf(w, `[
				{
					"hash": "451e372e48e0f6b1114fa0724aa79fa1",
					"last_modified": "2009-11-10 23:00:00 +0000 UTC",
					"bytes": 14,
					"name": "goodbye",
					"content_type": "application/octet-stream"
				},
				{
					"hash": "451e372e48e0f6b1114fa0724aa79fa1",
					"last_modified": "2009-11-10 23:00:00 +0000 UTC",
					"bytes": 14,
					"name": "hello",
					"content_type": "application/octet-stream"
				}
			]`)
		case "hello":
			fmt.Fprintf(w, `[]`)
		default:
			t.Fatalf("Unexpected marker: [%s]", marker)
		}
	})
}
func expectEvents(t *testing.T, num int, name string, pids []int) bool {
	if len(pids) != num {
		t.Errorf("Expected %d %s events, got=%v", num, name, pids)
		return false
	}
	return true
}
func TestSecureSecretboxKey(t *testing.T) {
	key1, ok := secretbox.GenerateKey()
	if !ok {
		t.Fatal("pwkey: failed to generate test key")
	}

	skey, ok := SecureSecretboxKey(testPass, key1)
	if !ok {
		t.Fatal("pwkey: failed to secure secretbox key")
	}

	key, ok := RecoverSecretboxKey(testPass, skey)
	if !ok {
		t.Fatal("pwkey: failed to recover secretbox key")
	}

	if !bytes.Equal(key[:], key1[:]) {
		t.Fatal("pwkey: recovered key doesn't match original")
	}

	if _, ok = RecoverSecretboxKey([]byte("Password"), skey); ok {
		t.Fatal("pwkey: recover should fail with bad password")
	}
}
func expectEventPid(t *testing.T, name string, expect int, pid int) bool {
	if expect != pid {
		t.Errorf("Expected %s pid=%d, received=%d", name, expect, pid)
		return false
	}
	return true
}
func TestWatchFork(t *testing.T) {
	if skipTest(t) {
		return
	}
	pid := os.Getpid()
	tw := newTestWatcher(t)

	// no watches added yet, so this fork event will not be captured
	runCommand(t, "date")

	// watch fork events for this process
	if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil {
		t.Error(err)
	}

	// this fork event will be captured,
	// the exec and exit events will not be captured
	runCommand(t, "cal")

	tw.close()

	if expectEvents(t, 1, "forks", tw.events.forks) {
		expectEventPid(t, "fork", pid, tw.events.forks[0])
	}
	expectEvents(t, 0, "execs", tw.events.execs)
	expectEvents(t, 0, "exits", tw.events.exits)
}
func TestCreateSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for i := 0; i < testCreateSnapshots; i++ {
		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
	}

	snapshots, err := restic.LoadAllSnapshots(repo)
	if err != nil {
		t.Fatal(err)
	}

	if len(snapshots) != testCreateSnapshots {
		t.Fatalf("got %d snapshots, expected %d", len(snapshots), testCreateSnapshots)
	}

	sn := snapshots[0]
	if sn.Time.Before(testSnapshotTime) || sn.Time.After(testSnapshotTime.Add(testCreateSnapshots*time.Second)) {
		t.Fatalf("timestamp %v is outside of the allowed time range", sn.Time)
	}

	if sn.Tree == nil {
		t.Fatalf("tree id is nil")
	}

	if sn.Tree.IsNull() {
		t.Fatalf("snapshot has zero tree ID")
	}

	checker.TestCheckRepo(t, repo)
}