func TestUpdateSelectorControllerRef(t *testing.T) {
	manager, fakePodControl := setupManagerWithGCEnabled()
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(2, labelMap)
	// put 2 pods in the podStore
	newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
	// update the RS so that its selector no longer matches the pods
	updatedRS := *rs
	updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
	// put the updatedRS into the store. This is consistent with the behavior of
	// the Informer: the Informer updates the store before calling the handler
	// (updateRS() in this case).
	manager.rsStore.Store.Add(&updatedRS)
	manager.updateRS(rs, &updatedRS)
	// verify that the rs is added to the queue
	rsKey := getKey(rs, t)
	queueRS, _ := manager.queue.Get()
	if queueRS != rsKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
	}
	manager.queue.Done(queueRS)
	err := manager.syncReplicaSet(rsKey)
	if err != nil {
		t.Fatal(err)
	}
	// expect 2 patches to be sent to remove the controllerRef from the pods.
	// expect 2 creates because rs.Spec.Replicas=2 and no matching pod exists.
	validateSyncReplicaSet(t, fakePodControl, 2, 0, 2)
	fakePodControl.Clear()
}
func TestConnQueryEncoder(t *testing.T) {
	t.Parallel()

	conn := mustConnect(t, *defaultConnConfig)
	defer closeConn(t, conn)

	n := pgx.NullInt64{Int64: 1, Valid: true}

	rows, err := conn.Query("select $1::int8", &n)
	if err != nil {
		t.Fatalf("conn.Query failed: %v", err)
	}

	ok := rows.Next()
	if !ok {
		t.Fatal("rows.Next terminated early")
	}

	var m pgx.NullInt64
	err = rows.Scan(&m)
	if err != nil {
		t.Fatalf("rows.Scan failed: %v", err)
	}
	rows.Close()

	if !m.Valid {
		t.Error("m should be valid, but it wasn't")
	}
	if m.Int64 != 1 {
		t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
	}

	ensureConnValid(t, conn)
}
// TestMemInfo tests parseMemInfo with a static meminfo string
func TestMemInfo(t *testing.T) {
	const input = `
MemTotal:      1 kB
MemFree:       2 kB
SwapTotal:     3 kB
SwapFree:      4 kB
Malformed1:
Malformed2:    1
Malformed3:    2 MB
Malformed4:    X kB
`
	meminfo, err := parseMemInfo(strings.NewReader(input))
	if err != nil {
		t.Fatal(err)
	}
	if meminfo.MemTotal != 1*units.KiB {
		t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
	}
	if meminfo.MemFree != 2*units.KiB {
		t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
	}
	if meminfo.SwapTotal != 3*units.KiB {
		t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
	}
	if meminfo.SwapFree != 4*units.KiB {
		t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
	}
}
func testnext(program string, testcases []nextTest, initialLocation string, t *testing.T) {
	withTestProcess(program, t, func(p *Process, fixture protest.Fixture) {
		bp, err := p.SetBreakpointByLocation(initialLocation)
		assertNoError(err, t, "SetBreakpoint()")
		assertNoError(p.Continue(), t, "Continue()")
		p.ClearBreakpoint(bp.Addr)
		p.CurrentThread.SetPC(bp.Addr)

		f, ln := currentLineNumber(p, t)
		for _, tc := range testcases {
			if ln != tc.begin {
				t.Fatalf("Program not stopped at correct spot expected %d was %s:%d", tc.begin, filepath.Base(f), ln)
			}

			assertNoError(p.Next(), t, "Next() returned an error")

			f, ln = currentLineNumber(p, t)
			if ln != tc.end {
				t.Fatalf("Program did not continue to correct next location expected %d was %s:%d", tc.end, filepath.Base(f), ln)
			}
		}

		if len(p.Breakpoints) != 0 {
			t.Fatal("Not all breakpoints were cleaned up", len(p.Breakpoints))
		}
	})
}
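// currentLineNumber is used by testnext above but is not defined in this
// section. A minimal sketch of what it is assumed to do, built only from calls
// that appear elsewhere in these tests (p.PC and p.goSymTable.PCToLine); the
// real helper may differ:
func currentLineNumber(p *Process, t *testing.T) (string, int) {
	// Resolve the current program counter to a source file and line.
	pc, err := p.PC()
	if err != nil {
		t.Fatal(err)
	}
	f, l, _ := p.goSymTable.PCToLine(pc)
	return f, l
}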
func TestConfigFileOtherFields(t *testing.T) {
	yc := struct {
		ProxyCfgFile string `json:"proxy"`
	}{
		"readonly",
	}

	b, err := yaml.Marshal(&yc)
	if err != nil {
		t.Fatal(err)
	}

	tmpfile := mustCreateCfgFile(t, b)
	defer os.Remove(tmpfile.Name())

	args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())}

	cfg := newConfig()
	err = cfg.parse(args)
	if err != nil {
		t.Fatal(err)
	}

	validateOtherFlags(t, cfg)
}
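// mustCreateCfgFile is referenced above but not shown here. A minimal sketch,
// assuming it writes the marshaled config to a temporary file and returns the
// *os.File so the caller can use Name() and remove it later (hypothetical; the
// real helper may differ):
func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
	tmpfile, err := ioutil.TempFile("", "servercfg")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tmpfile.Write(b); err != nil {
		t.Fatal(err)
	}
	if err := tmpfile.Close(); err != nil {
		t.Fatal(err)
	}
	return tmpfile
}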
func TestListImages(t *testing.T) {
	server := DockerServer{}
	addImages(&server, 2, false)
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	request, _ := http.NewRequest("GET", "/images/json?all=1", nil)
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
	}
	expected := make([]docker.APIImages, 2)
	for i, image := range server.images {
		expected[i] = docker.APIImages{
			ID:      image.ID,
			Created: image.Created.Unix(),
		}
	}
	var got []docker.APIImages
	err := json.NewDecoder(recorder.Body).Decode(&got)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got, expected) {
		t.Errorf("ListImages. Want %#v. Got %#v.", expected, got)
	}
}
// Test that a connection stays valid when query results are read incorrectly
func TestConnQueryReadTooManyValues(t *testing.T) {
	t.Parallel()

	conn := mustConnect(t, *defaultConnConfig)
	defer closeConn(t, conn)

	// Read too many values
	rows, err := conn.Query("select generate_series(1,$1)", 10)
	if err != nil {
		t.Fatalf("conn.Query failed: %v", err)
	}

	rowsRead := 0
	for rows.Next() {
		var n, m int32
		rows.Scan(&n, &m)
		rowsRead++
	}

	if rowsRead != 1 {
		t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
	}

	if rows.Err() == nil {
		t.Fatal("Expected Rows to have an error after an improper read but it didn't")
	}

	ensureConnValid(t, conn)
}
func TestGetDiff(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)

	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	diffPath, err := d.Get("1")
	if err != nil {
		t.Fatal(err)
	}

	// Add a file to the diff path with a fixed size
	size := int64(1024)

	f, err := os.Create(path.Join(diffPath, "test_file"))
	if err != nil {
		t.Fatal(err)
	}
	if err := f.Truncate(size); err != nil {
		t.Fatal(err)
	}
	f.Close()

	a, err := d.Diff("1")
	if err != nil {
		t.Fatal(err)
	}
	if a == nil {
		t.Fatalf("Archive should not be nil")
	}
}
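// newDriver is shared by the graphdriver tests in this section (TestGetDiff,
// TestStatus, TestMountWithParent) but is not shown. A minimal sketch,
// assuming a package-level tmp directory and an Init constructor that returns
// the driver interface (hypothetical; the real helper may differ):
func newDriver(t *testing.T) *Driver {
	if err := os.MkdirAll(tmp, 0755); err != nil {
		t.Fatal(err)
	}
	d, err := Init(tmp, nil)
	if err != nil {
		t.Fatal(err)
	}
	return d.(*Driver)
}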
func TestCreateContainer(t *testing.T) {
	server := DockerServer{}
	server.imgIDs = map[string]string{"base": "a1234"}
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, "PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Dns":null, "Image":"base", "Volumes":{}, "VolumesFrom":""}`
	request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body))
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusCreated {
		t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code)
	}
	var returned docker.Container
	err := json.NewDecoder(recorder.Body).Decode(&returned)
	if err != nil {
		t.Fatal(err)
	}
	stored := server.containers[0]
	if returned.ID != stored.ID {
		t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID)
	}
	if stored.State.Running {
		t.Errorf("CreateContainer should not set container to running state.")
	}
}
func TestCookieEncodeDecode(t *testing.T) {
	hashKey := "testhashKey"
	blockkey := generateRandomKey(16)
	block, err := aes.NewCipher(blockkey)
	if err != nil {
		t.Fatal("NewCipher:", err)
	}
	securityName := string(generateRandomKey(20))
	val := make(map[interface{}]interface{})
	val["name"] = "mikeqian"
	val["gender"] = "male"
	str, err := encodeCookie(block, hashKey, securityName, val)
	if err != nil {
		t.Fatal("encodeCookie:", err)
	}
	dst := make(map[interface{}]interface{})
	dst, err = decodeCookie(block, hashKey, securityName, str, 3600)
	if err != nil {
		t.Fatal("decodeCookie:", err)
	}
	if dst["name"] != "mikeqian" {
		t.Fatal("dst get map error")
	}
	if dst["gender"] != "male" {
		t.Fatal("dst get map error")
	}
}
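// generateRandomKey is assumed to return n cryptographically random bytes; it
// is used above both as a 16-byte AES block key and as a security name. A
// minimal sketch (hypothetical; the real helper may differ):
func generateRandomKey(n int) []byte {
	k := make([]byte, n)
	if _, err := io.ReadFull(rand.Reader, k); err != nil {
		// Returning nil on failure makes the caller above fail in aes.NewCipher.
		return nil
	}
	return k
}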
func RandIdentityOrFatal(t *testing.T) Identity {
	p, err := RandPeerNetParams()
	if err != nil {
		t.Fatal(err)
	}
	return &identity{*p}
}
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
	ctx, cancel := context.WithCancel(context.Background())
	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
		t.Fatalf("expected non-nil watcher channel")
	}
	if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil {
		t.Fatal(err)
	}

	cancel()
	select {
	case <-time.After(time.Second):
		t.Fatalf("took too long to cancel")
	case v, ok := <-wctx.ch:
		if !ok {
			// closed before getting put; OK
			break
		}
		// got the PUT; should close next
		select {
		case <-time.After(time.Second):
			t.Fatalf("took too long to close")
		case v, ok = <-wctx.ch:
			if ok {
				t.Fatalf("expected watcher channel to close, got %v", v)
			}
		}
	}
}
// Issue 5071
func TestPipeLookPathLeak(t *testing.T) {
	fd0, lsof0 := numOpenFDS(t)
	for i := 0; i < 4; i++ {
		cmd := exec.Command("something-that-does-not-exist-binary")
		cmd.StdoutPipe()
		cmd.StderrPipe()
		cmd.StdinPipe()
		if err := cmd.Run(); err == nil {
			t.Fatal("unexpected success")
		}
	}
	for triesLeft := 3; triesLeft >= 0; triesLeft-- {
		open, lsof := numOpenFDS(t)
		fdGrowth := open - fd0
		if fdGrowth > 2 {
			if triesLeft > 0 {
				// Work around what appears to be a race with Linux's
				// proc filesystem (as used by lsof). It seems to only
				// be eventually consistent. Give it awhile to settle.
				// See golang.org/issue/7808
				time.Sleep(100 * time.Millisecond)
				continue
			}
			t.Errorf("leaked %d fds; want ~0; have:\n%s\noriginally:\n%s", fdGrowth, lsof, lsof0)
		}
		break
	}
}
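// numOpenFDS is referenced above but not defined in this section. A minimal
// sketch, assuming it counts the test process's open file descriptors by
// parsing lsof output and returns both the count and the raw output for
// diagnostics (hypothetical; the real helper may differ):
func numOpenFDS(t *testing.T) (n int, lsof []byte) {
	lsof, err := exec.Command("lsof", "-n", "-p", strconv.Itoa(os.Getpid())).Output()
	if err != nil {
		t.Skip("skipping test; error finding or running lsof")
	}
	return bytes.Count(lsof, []byte("\n")), lsof
}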
func TestCullInvalidConnections(t *testing.T) {
	d := newDefaultDaemon()
	// Is fine
	d.ExpectingIntroductions[addr] = time.Now()
	// Is expired
	d.ExpectingIntroductions[addrb] = util.ZeroTime()
	// Is not in pool
	d.ExpectingIntroductions[addrc] = util.ZeroTime()
	d.Peers.Peers.AddPeer(addr)
	d.Peers.Peers.AddPeer(addrb)
	d.Peers.Peers.AddPeer(addrc)
	d.Pool.Pool.Addresses[addr] = gnetConnection(addr)
	d.Pool.Pool.Addresses[addrb] = gnetConnection(addrb)
	d.Pool.Pool.Addresses[addrb].Id = 2
	d.Pool.Pool.Pool[1] = d.Pool.Pool.Addresses[addr]
	d.Pool.Pool.Pool[2] = d.Pool.Pool.Addresses[addrb]

	assert.NotPanics(t, d.cullInvalidConnections)

	assert.Equal(t, len(d.ExpectingIntroductions), 1)
	assert.Equal(t, len(d.Peers.Peers.Peerlist), 2)
	assert.Equal(t, len(d.Pool.Pool.DisconnectQueue), 1)
	if len(d.Pool.Pool.DisconnectQueue) == 0 {
		t.Fatal("Pool.Pool.DisconnectQueue is empty, receive would block")
	}
	de := <-d.Pool.Pool.DisconnectQueue
	assert.Equal(t, de.ConnId, 2)
	assert.Equal(t, de.Reason, DisconnectIntroductionTimeout)

	shutdown(d)
}
func TestSendPings(t *testing.T) {
	d := newDefaultDaemon()
	defer shutdown(d)
	c := gnetConnection(addr)
	go d.Pool.Pool.ConnectionWriteLoop(c)
	d.Pool.Pool.Pool[1] = c
	assert.NotPanics(t, d.Pool.sendPings)
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 1)
	if len(d.Pool.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-d.Pool.Pool.SendResults
	assert.Equal(t, sr.Connection, c)
	assert.Nil(t, sr.Error)
	_, ok := sr.Message.(*PingMessage)
	assert.True(t, ok)
	assert.False(t, c.LastSent.IsZero())

	// No pings should be sent, since we just pinged
	lastSent := c.LastSent
	assert.NotPanics(t, d.Pool.sendPings)
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 0)
	assert.Equal(t, c.LastSent, lastSent)
}
func TestStatus(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
	defer d.Cleanup()

	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	status := d.Status()
	if status == nil || len(status) == 0 {
		t.Fatal("Status should not be nil or empty")
	}
	rootDir := status[0]
	dirs := status[1]
	if rootDir[0] != "Root Dir" {
		t.Fatalf("Expected Root Dir got %s", rootDir[0])
	}
	if rootDir[1] != d.rootPath() {
		t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1])
	}
	if dirs[0] != "Dirs" {
		t.Fatalf("Expected Dirs got %s", dirs[0])
	}
	if dirs[1] != "1" {
		t.Fatalf("Expected 1 got %s", dirs[1])
	}
}
func TestCreateSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for i := 0; i < testCreateSnapshots; i++ {
		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
	}

	snapshots, err := restic.LoadAllSnapshots(repo)
	if err != nil {
		t.Fatal(err)
	}

	if len(snapshots) != testCreateSnapshots {
		t.Fatalf("got %d snapshots, expected %d", len(snapshots), testCreateSnapshots)
	}

	sn := snapshots[0]
	if sn.Time.Before(testSnapshotTime) || sn.Time.After(testSnapshotTime.Add(testCreateSnapshots*time.Second)) {
		t.Fatalf("timestamp %v is outside of the allowed time range", sn.Time)
	}

	if sn.Tree == nil {
		t.Fatalf("tree id is nil")
	}

	if sn.Tree.IsNull() {
		t.Fatalf("snapshot has zero tree ID")
	}

	checker.TestCheckRepo(t, repo)
}
func TestExpandPolynomial(t *testing.T) {
	pol := Pol(0x3DA3358B4DC173)
	s := pol.Expand()
	if s != "x^53+x^52+x^51+x^50+x^48+x^47+x^45+x^41+x^40+x^37+x^36+x^34+x^32+x^31+x^27+x^25+x^24+x^22+x^19+x^18+x^16+x^15+x^14+x^8+x^6+x^5+x^4+x+1" {
		t.Fatal("wrong result")
	}
}
func TestJWTFetch_BadResponse(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
	}))
	defer ts.Close()

	conf := &Config{
		Email:      "*****@*****.**",
		PrivateKey: dummyPrivateKey,
		TokenURL:   ts.URL,
	}
	tok, err := conf.TokenSource(oauth2.NoContext).Token()
	if err != nil {
		t.Fatal(err)
	}
	if tok == nil {
		t.Fatalf("token is nil")
	}
	if tok.Valid() {
		t.Errorf("token is valid. want invalid.")
	}
	if tok.AccessToken != "" {
		t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken)
	}
	if want := "bearer"; tok.TokenType != want {
		t.Errorf("TokenType = %q; want %q", tok.TokenType, want)
	}
	scope := tok.Extra("scope")
	if want := "user"; scope != want {
		t.Errorf("token scope = %q; want %q", scope, want)
	}
}
func TestMountWithParent(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)

	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}
	if err := d.Create("2", "1"); err != nil {
		t.Fatal(err)
	}

	defer func() {
		if err := d.Cleanup(); err != nil {
			t.Fatal(err)
		}
	}()

	mntPath, err := d.Get("2")
	if err != nil {
		t.Fatal(err)
	}
	if mntPath == "" {
		t.Fatal("mntPath should not be empty string")
	}

	expected := path.Join(tmp, "mnt", "2")
	if mntPath != expected {
		t.Fatalf("Expected %s got %s", expected, mntPath)
	}
}
func TestCheckinBackendConfigurationNewRequestWithStripeAccount(t *testing.T) {
	c := &stripe.BackendConfiguration{URL: stripe.APIURL}
	p := &stripe.Params{StripeAccount: TestMerchantID}

	req, err := c.NewRequest("", "", "", "", nil, p)
	if err != nil {
		t.Fatal(err)
	}

	if req.Header.Get("Stripe-Account") != TestMerchantID {
		t.Fatalf("Expected Stripe-Account %v but got %v.", TestMerchantID, req.Header.Get("Stripe-Account"))
	}

	// Also test the deprecated Account field. This should behave identically
	// to the exercise above.
	p = &stripe.Params{Account: TestMerchantID}

	req, err = c.NewRequest("", "", "", "", nil, p)
	if err != nil {
		t.Fatal(err)
	}

	if req.Header.Get("Stripe-Account") != TestMerchantID {
		t.Fatalf("Expected Stripe-Account %v but got %v.", TestMerchantID, req.Header.Get("Stripe-Account"))
	}
}
func TestIntegration_ConditionalDelete(t *testing.T) {
	ctx := context.Background()
	client, bucket := testConfig(ctx, t)
	defer client.Close()

	o := client.Bucket(bucket).Object("conddel" + suffix)

	wc := o.NewWriter(ctx)
	wc.ContentType = "text/plain"
	if _, err := wc.Write([]byte("foo")); err != nil {
		t.Fatal(err)
	}
	if err := wc.Close(); err != nil {
		t.Fatal(err)
	}

	gen := wc.Attrs().Generation
	metaGen := wc.Attrs().MetaGeneration

	if err := o.WithConditions(Generation(gen - 1)).Delete(ctx); err == nil {
		t.Fatalf("Unexpected successful delete with Generation")
	}
	if err := o.WithConditions(IfMetaGenerationMatch(metaGen + 1)).Delete(ctx); err == nil {
		t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch")
	}
	if err := o.WithConditions(IfMetaGenerationNotMatch(metaGen)).Delete(ctx); err == nil {
		t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch")
	}
	if err := o.WithConditions(Generation(gen)).Delete(ctx); err != nil {
		t.Fatalf("final delete failed: %v", err)
	}
}
// General purpose Watcher wrapper for all tests
func newTestWatcher(t *testing.T) *testWatcher {
	watcher, err := NewWatcher()
	if err != nil {
		t.Fatal(err)
	}

	events := &anyEvent{
		done: make(chan bool, 1),
	}

	tw := &testWatcher{
		t:       t,
		watcher: watcher,
		events:  events,
	}

	go func() {
		for {
			select {
			case <-events.done:
				return
			case ev := <-watcher.Fork:
				events.forks = append(events.forks, ev.ParentPid)
			case ev := <-watcher.Exec:
				events.execs = append(events.execs, ev.Pid)
			case ev := <-watcher.Exit:
				events.exits = append(events.exits, ev.Pid)
			case err := <-watcher.Error:
				events.errors = append(events.errors, err)
			}
		}
	}()

	return tw
}
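// The testWatcher and anyEvent types used by newTestWatcher are not shown in
// this section. A minimal sketch of the fields the wrapper above relies on
// (layout inferred from usage; the real types may carry more fields):
type anyEvent struct {
	forks  []int
	execs  []int
	exits  []int
	errors []error
	done   chan bool
}

type testWatcher struct {
	t       *testing.T
	watcher *Watcher
	events  *anyEvent
}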
func TestRemoveImageWithMultipleTags(t *testing.T) {
	server := DockerServer{}
	addImages(&server, 1, true)
	server.buildMuxer()
	imgID := server.images[0].ID
	imgName := "docker/python-" + imgID
	server.imgIDs["docker/python-wat"] = imgID
	recorder := httptest.NewRecorder()
	path := fmt.Sprintf("/images/%s", imgName)
	request, _ := http.NewRequest("DELETE", path, nil)
	server.ServeHTTP(recorder, request)
	_, ok := server.imgIDs[imgName]
	if ok {
		t.Error("RemoveImage: did not remove image tag name.")
	}
	id, ok := server.imgIDs["docker/python-wat"]
	if !ok {
		t.Error("RemoveImage: removed the wrong tag name.")
	}
	if id != imgID {
		t.Error("RemoveImage: disassociated the wrong ID from the tag")
	}
	if len(server.images) < 1 {
		t.Fatal("RemoveImage: removed the image, but should keep it")
	}
	if server.images[0].ID != imgID {
		t.Error("RemoveImage: changed the ID of the image!")
	}
}
func TestInspectContainer(t *testing.T) {
	server := DockerServer{}
	addContainers(&server, 2)
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID)
	request, _ := http.NewRequest("GET", path, nil)
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
	}
	expected := server.containers[0]
	var got docker.Container
	err := json.NewDecoder(recorder.Body).Decode(&got)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got.Config, expected.Config) {
		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
	}
	if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) {
		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
	}
	got.State.StartedAt = expected.State.StartedAt
	got.State.FinishedAt = expected.State.FinishedAt
	got.Config = expected.Config
	got.Created = expected.Created
	got.NetworkSettings = expected.NetworkSettings
	if !reflect.DeepEqual(got, *expected) {
		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
	}
}
func TestCreateExecContainer(t *testing.T) {
	server := DockerServer{}
	addContainers(&server, 2)
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	body := `{"Cmd": ["bash", "-c", "ls"]}`
	path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID)
	request, _ := http.NewRequest("POST", path, strings.NewReader(body))
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
	}

	serverExec := server.execs[0]
	var got docker.Exec
	err := json.NewDecoder(recorder.Body).Decode(&got)
	if err != nil {
		t.Fatal(err)
	}
	if got.ID != serverExec.ID {
		t.Errorf("CreateExec: wrong value. Want %#v. Got %#v.", serverExec.ID, got.ID)
	}

	expected := docker.ExecInspect{
		ID: got.ID,
		ProcessConfig: docker.ExecProcessConfig{
			EntryPoint: "bash",
			Arguments:  []string{"-c", "ls"},
		},
		Container: *server.containers[0],
	}

	if !reflect.DeepEqual(*serverExec, expected) {
		t.Errorf("CreateExec: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, *serverExec)
	}
}
func TestListContainers(t *testing.T) {
	server := DockerServer{}
	addContainers(&server, 2)
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
	}
	expected := make([]docker.APIContainers, 2)
	for i, container := range server.containers {
		expected[i] = docker.APIContainers{
			ID:      container.ID,
			Image:   container.Image,
			Command: strings.Join(container.Config.Cmd, " "),
			Created: container.Created.Unix(),
			Status:  container.State.String(),
			Ports:   container.NetworkSettings.PortMappingAPI(),
		}
	}
	var got []docker.APIContainers
	err := json.NewDecoder(recorder.Body).Decode(&got)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got, expected) {
		t.Errorf("ListContainers. Want %#v. Got %#v.", expected, got)
	}
}
func TestTopContainer(t *testing.T) {
	server := DockerServer{}
	addContainers(&server, 1)
	server.containers[0].State.Running = true
	server.buildMuxer()
	recorder := httptest.NewRecorder()
	path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID)
	request, _ := http.NewRequest("GET", path, nil)
	server.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
	}
	var got docker.TopResult
	err := json.NewDecoder(recorder.Body).Decode(&got)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got.Titles, []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}) {
		t.Fatalf("TopContainer: Unexpected titles, got: %#v", got.Titles)
	}
	if len(got.Processes) != 1 {
		t.Fatalf("TopContainer: Unexpected process len, got: %d", len(got.Processes))
	}
	if got.Processes[0][len(got.Processes[0])-1] != "ls -la .." {
		t.Fatalf("TopContainer: Unexpected command name, got: %s", got.Processes[0][len(got.Processes[0])-1])
	}
}
func TestBufReader(t *testing.T) {
	reader, writer := io.Pipe()
	bufreader := NewBufReader(reader)

	// Write everything down to a Pipe
	// Usually, a pipe should block but because of the buffered reader,
	// the writes will go through
	done := make(chan bool)
	go func() {
		writer.Write([]byte("hello world"))
		writer.Close()
		done <- true
	}()

	// Drain the reader *after* everything has been written, just to verify
	// it is indeed buffering
	<-done
	output, err := ioutil.ReadAll(bufreader)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(output, []byte("hello world")) {
		t.Error(string(output))
	}
}
func TestBreakpointInSeperateGoRoutine(t *testing.T) {
	withTestProcess("testthreads", t, func(p *Process, fixture protest.Fixture) {
		fn := p.goSymTable.LookupFunc("main.anotherthread")
		if fn == nil {
			t.Fatal("No fn exists")
		}

		_, err := p.SetBreakpoint(fn.Entry)
		if err != nil {
			t.Fatal(err)
		}

		err = p.Continue()
		if err != nil {
			t.Fatal(err)
		}

		pc, err := p.PC()
		if err != nil {
			t.Fatal(err)
		}

		f, l, _ := p.goSymTable.PCToLine(pc)
		if filepath.Base(f) != "testthreads.go" || l != 8 {
			t.Fatalf("Program did not hit breakpoint, stopped at %s:%d", f, l)
		}
	})
}