func mustConnect(t testing.TB, config pgx.ConnConfig) *pgx.Conn {
	conn, err := pgx.Connect(config)
	if err != nil {
		t.Fatalf("Unable to establish connection: %v", err)
	}
	return conn
}
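// A minimal usage sketch for mustConnect; the test name and connection
// parameters below are hypothetical and would normally come from the test
// environment. mustExec is the helper defined later in this collection.
func TestConnect(t *testing.T) {
	conn := mustConnect(t, pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_test", Database: "pgx_test"})
	defer conn.Close()
	mustExec(t, conn, "SELECT 1")
}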
// Assert drains the Events channel and compares the actual events with those
// expected to have been generated by the operations performed on the nodes in
// the cluster (restart, kill, ...). In the event of a mismatch, the passed
// testing.TB receives a fatal error.
func (l *LocalCluster) Assert(ctx context.Context, t testing.TB) {
	const almostZero = 50 * time.Millisecond
	filter := func(ch chan Event, wait time.Duration) *Event {
		select {
		case act := <-ch:
			return &act
		case <-time.After(wait):
		}
		return nil
	}

	var events []Event
	for {
		exp := filter(l.expectedEvents, almostZero)
		if exp == nil {
			break
		}
		act := filter(l.events, 15*time.Second)
		if act == nil || *exp != *act {
			t.Fatalf("expected event %v, got %v (after %v)", exp, act, events)
		}
		events = append(events, *exp)
	}
	if cur := filter(l.events, almostZero); cur != nil {
		t.Fatalf("unexpected extra event %v (after %v)", cur, events)
	}
	if log.V(2) {
		log.Infof(ctx, "asserted %v", events)
	}
}
func run(t testing.TB, commands []Command) {
	e := empiretest.NewEmpire(t)
	s := empiretest.NewServer(t, e)
	defer s.Close()

	token, err := e.AccessTokensCreate(&empire.AccessToken{
		User: &empire.User{Name: "fake", GitHubToken: "token"},
	})
	if err != nil {
		t.Fatal(err)
	}

	for _, cmd := range commands {
		got := cli(t, token.Token, s.URL, cmd.Command)

		if want, ok := cmd.Output.(string); ok {
			if want != "" {
				want = want + "\n"
			}
			if got != want {
				t.Fatalf("%q != %q", got, want)
			}
		} else if regex, ok := cmd.Output.(*regexp.Regexp); ok {
			if !regex.MatchString(got) {
				t.Fatalf("%q != %q", got, regex.String())
			}
		}
	}
}
func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...interface{}) (commandTag pgx.CommandTag) {
	var err error
	if commandTag, err = conn.Exec(sql, arguments...); err != nil {
		t.Fatalf("Exec unexpectedly failed with %v: %v", sql, err)
	}
	return
}
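// A minimal usage sketch for mustExec; the table name and call site are
// hypothetical. The variadic arguments pass straight through to Exec, and the
// returned command tag can be inspected for the affected row count.
func insertWidget(t testing.TB, conn *pgx.Conn, name string) {
	tag := mustExec(t, conn, "INSERT INTO widgets(name) VALUES($1)", name)
	if tag.RowsAffected() != 1 {
		t.Fatalf("expected 1 row affected, got %d", tag.RowsAffected())
	}
}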
func mustNewRR(tb testing.TB, s string) dns.RR {
	rr, err := dns.NewRR(s)
	if err != nil {
		tb.Fatalf("invalid RR %q: %v", s, err)
	}
	return rr
}
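// A minimal usage sketch for mustNewRR; the record below is an arbitrary
// example in zone-file syntax, which is what dns.NewRR parses.
func TestParseRecord(t *testing.T) {
	rr := mustNewRR(t, "example.org. 3600 IN A 192.0.2.1")
	if rr.Header().Rrtype != dns.TypeA {
		t.Fatalf("expected an A record, got %v", rr.Header().Rrtype)
	}
}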
func saveFile(t testing.TB, be Saver, filename string, n int) {
	f, err := os.Open(filename)
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, n)
	m, err := io.ReadFull(f, data)
	if m != n {
		t.Fatalf("read wrong number of bytes from %v: want %v, got %v", filename, n, m)
	}
	if err != nil {
		t.Fatal(err)
	}

	if err = f.Close(); err != nil {
		t.Fatal(err)
	}

	h := backend.Handle{Type: backend.Data, Name: backend.Hash(data).String()}

	err = be.Save(h, data)
	if err != nil {
		t.Fatal(err)
	}

	err = os.Remove(filename)
	if err != nil {
		t.Fatal(err)
	}
}
// CheckGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func CheckGossip(
	ctx context.Context, t testing.TB, c cluster.Cluster, d time.Duration, f CheckGossipFunc,
) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/gossip/local", &infoStatus); err != nil {
				return errors.Wrapf(err, "failed to get gossip status from node %d", i)
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}

		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
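// A sketch of a CheckGossipFunc, assuming (from the call above) that the type
// receives the gossip info map and returns an error until the desired
// condition holds; the "cluster-id" key is an illustrative example.
func hasClusterID(infos map[string]gossip.Info) error {
	if _, ok := infos["cluster-id"]; !ok {
		return errors.New("cluster-id not yet gossiped")
	}
	return nil
}

// Usage: CheckGossip(ctx, t, c, 30*time.Second, hasClusterID)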
func run(t testing.TB, commands []Command) {
	e := empiretest.NewEmpire(t)
	s := empiretest.NewServer(t, e)
	defer s.Close()

	token, err := e.AccessTokensCreate(&empire.AccessToken{
		User: &empire.User{Name: "fake", GitHubToken: "token"},
	})
	if err != nil {
		t.Fatal(err)
	}

	for _, cmd := range commands {
		got := cli(t, token.Token, s.URL, cmd.Command)

		want := cmd.Output
		if want != "" {
			want = want + "\n"
		}

		if got != want {
			t.Fatalf("%q != %q", got, want)
		}
	}
}
func loadTestPointers(tb testing.TB) []geo.Pointer {
	f, err := os.Open("../testdata/points.csv.gz")
	if err != nil {
		tb.Fatalf("unable to open test file: %v", err)
	}
	defer f.Close()

	gzReader, err := gzip.NewReader(f)
	if err != nil {
		tb.Fatalf("unable to create gz reader: %v", err)
	}
	defer gzReader.Close()

	// read in events
	var pointers []geo.Pointer
	scanner := bufio.NewScanner(gzReader)
	for scanner.Scan() {
		parts := strings.Split(scanner.Text(), ",")
		lat, _ := strconv.ParseFloat(parts[0], 64)
		lng, _ := strconv.ParseFloat(parts[1], 64)
		if lat == 0 || lng == 0 {
			tb.Errorf("latlng not parsed correctly, %s %s", parts[0], parts[1])
		}

		pointers = append(pointers, &event{
			Location: geo.NewPoint(lng, lat),
		})
	}

	return pointers
}
// AssertEqualf verifies that two objects are equal and calls FailNow() to
// immediately cancel the test case.
//
// It must be called from the main goroutine. Other goroutines must call
// ExpectEqual* flavors.
//
// This function enables specifying an arbitrary string on failure.
//
// Equality is determined via reflect.DeepEqual().
func AssertEqualf(t testing.TB, expected, actual interface{}, format string, items ...interface{}) {
	// This is cheesy, as there's no way to figure out if the test was properly
	// started by the test framework.
	found := false
	root := ""
	for i := 1; ; i++ {
		if _, file, _, ok := runtime.Caller(i); ok {
			if filepath.Base(file) == "testing.go" {
				found = true
				break
			}
			root = file
		} else {
			break
		}
	}
	if !found {
		t.Logf(Decorate("ut.AssertEqual*() function MUST be called from within main test goroutine, use ut.ExpectEqual*() instead; found %s."), root)
		// TODO(maruel): Warning: this will be enforced soon.
		//t.Fail()
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf(Decorate(format), items...)
	}
}
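// A minimal usage sketch for AssertEqualf; the test body is hypothetical.
// Because the helper can call Fatalf, it must run on the main test goroutine.
func TestRenderCount(t *testing.T) {
	got := strconv.Itoa(42)
	AssertEqualf(t, "42", got, "unexpected rendering of 42: %q", got)
}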
func newScheduler(t testing.TB) *docker.Scheduler {
	s, err := docker.NewSchedulerFromEnv()
	if err != nil {
		t.Fatalf("Could not build docker scheduler: %v", err)
	}
	return s
}
func createClient(t testing.TB, urlStr string) *Client {
	client, _, err := Dial(urlStr, nil) // use default ClientMessageHandler
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	return client
}
func withTestTerminal(name string, t testing.TB, fn func(*FakeTerminal)) {
	os.Setenv("TERM", "dumb")
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("couldn't start listener: %s\n", err)
	}
	defer listener.Close()
	server := rpc2.NewServer(&service.Config{
		Listener:    listener,
		ProcessArgs: []string{test.BuildFixture(name).Path},
	}, false)
	if err := server.Run(); err != nil {
		t.Fatal(err)
	}
	client := rpc2.NewClient(listener.Addr().String())
	defer func() {
		client.Detach(true)
	}()
	ft := &FakeTerminal{
		t:    t,
		Term: New(client, nil),
	}
	fn(ft)
}
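// A minimal usage sketch for withTestTerminal; the fixture name is
// hypothetical, and MustExec is assumed to be a FakeTerminal helper that runs
// a command and fatals the test on error.
func TestContinueCommand(t *testing.T) {
	withTestTerminal("testprog", t, func(term *FakeTerminal) {
		term.MustExec("continue")
	})
}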
func dial(t testing.TB) net.Conn {
	c, err := net.Dial("unix", sockpath)
	if err != nil {
		t.Fatalf("Couldn't open %s: %s.", sockpath, err)
	}
	return c
}
// NewRequest is like http.NewRequest, but calls t.Fatal on error.
func NewRequest(t testing.TB, method, url string, body io.Reader) *http.Request {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		t.Fatalf("Bug in test: cannot construct http.Request from method=%q, url=%q, body=%#v: %s",
			method, url, body, err)
	}
	return req
}
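// A minimal usage sketch for NewRequest; the URL and healthHandler (a
// hypothetical http.HandlerFunc) are illustrative, showing the helper in an
// httptest-style handler test.
func TestHealthEndpoint(t *testing.T) {
	req := NewRequest(t, "GET", "http://example.com/healthz", nil)
	rec := httptest.NewRecorder()
	healthHandler(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("got status %d, want %d", rec.Code, http.StatusOK)
	}
}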
func fatal(t testing.TB, msg string) {
	// Hack to override the default line number printing.
	// This is the standard trick used by gocheck and others.
	// See https://code.google.com/p/go/issues/detail?id=4899
	// for one better solution for the future.
	t.Fatalf("\r\t%s: %s", caller(), msg)
}
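// caller is not shown in this snippet; a plausible sketch, assuming it
// reports the file:line of fatal's caller via runtime.Caller:
func caller() string {
	_, file, line, ok := runtime.Caller(2) // skip caller() itself and fatal()
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", filepath.Base(file), line)
}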
func closeDB(t testing.TB, db *DB) {
	if e := recover(); e != nil {
		fmt.Printf("Panic: %v\n", e)
		panic(e)
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	for node, i := db.freeConn.Front(), 0; node != nil; node, i = node.Next(), i+1 {
		dc := node.Value.(*driverConn)
		if n := len(dc.openStmt); n > 0 {
			// Just a sanity check. This is legal in
			// general, but if we make the tests clean up
			// their statements first, then we can safely
			// verify this is always zero here, and any
			// other value is a leak.
			t.Errorf("while closing db, freeConn %d/%d had %d open stmts; want 0", i, db.freeConn.Len(), n)
		}
	}
	err := db.Close()
	if err != nil {
		t.Fatalf("error closing DB: %v", err)
	}
	db.mu.Lock()
	count := db.numOpen
	db.mu.Unlock()
	if count != 0 {
		t.Fatalf("%d connections still open after closing DB", count)
	}
}
func start(t testing.TB) *Goctl {
	gc := NewGoctl(sockpath)
	if err := gc.Start(); err != nil {
		t.Fatalf("Couldn't start: %s.", err)
	}
	return &gc
}
func NewSSLTestServer(t testing.TB, protocol uint8) *TestServer {
	pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
	if err != nil {
		t.Fatalf("Failed to read CA cert: %v", err)
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(pem) {
		t.Fatalf("Failed parsing or appending certs")
	}
	mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
	if err != nil {
		t.Fatalf("could not load cert: %v", err)
	}
	config := &tls.Config{
		Certificates: []tls.Certificate{mycert},
		RootCAs:      certPool,
	}
	listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
	if err != nil {
		t.Fatal(err)
	}

	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}

	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		quit:       make(chan struct{}),
	}
	go srv.serve()
	return srv
}
func newAsm(t testing.TB) *Assembler {
	buf, e := gojit.Alloc(gojit.PageSize)
	if e != nil {
		t.Fatalf("alloc: %v", e)
	}
	return &Assembler{buf, 0, CgoABI}
}
// DriverTestDiffApply tests that diffing and applying produces the same layer
func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) {
	driver := GetDriver(t, drivername, driverOptions...)
	defer PutDriver(t)
	base := stringid.GenerateRandomID()
	upper := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		t.Fatal(err)
	}
	if err := addManyFiles(driver, base, fileCount, 3); err != nil {
		t.Fatal(err)
	}
	if err := driver.Create(upper, base, "", nil); err != nil {
		t.Fatal(err)
	}
	if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
		t.Fatal(err)
	}
	diffSize, err := driver.DiffSize(upper, "")
	if err != nil {
		t.Fatal(err)
	}
	diff := stringid.GenerateRandomID()
	if err := driver.Create(diff, base, "", nil); err != nil {
		t.Fatal(err)
	}
	if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
		t.Fatal(err)
	}
	arch, err := driver.Diff(upper, base)
	if err != nil {
		t.Fatal(err)
	}
	buf := bytes.NewBuffer(nil)
	if _, err := buf.ReadFrom(arch); err != nil {
		t.Fatal(err)
	}
	if err := arch.Close(); err != nil {
		t.Fatal(err)
	}
	applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes()))
	if err != nil {
		t.Fatal(err)
	}
	if applyDiffSize != diffSize {
		t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize)
	}
	if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
		t.Fatal(err)
	}
}
func assertNoError(err error, t testing.TB, s string) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fname := filepath.Base(file)
		t.Fatalf("failed assertion at %s:%d: %s - %s\n", fname, line, s, err)
	}
}
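// A minimal usage sketch for assertNoError; the opened path is hypothetical.
// Because the helper reports its caller's file:line, a failure points at
// this call site rather than at the helper.
func TestOpenConfig(t *testing.T) {
	f, err := os.Open("testdata/config.json")
	assertNoError(err, t, "opening test config")
	defer f.Close()
}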
// ShouldntFail checks if any of the supplied parameters are non-nil errors and
// it fatally fails the test if they are.
func ShouldntFail(t testing.TB, errors ...error) {
	for idx, err := range errors {
		if err != nil {
			t.Fatalf("An unexpected error occurred in statement %d: %s", idx+1, err)
		}
	}
}
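// A minimal usage sketch for ShouldntFail, collecting several fallible steps
// into one assertion; the file operations are hypothetical.
func TestCopyFile(t *testing.T) {
	src, err1 := os.Open("testdata/in.txt")
	dst, err2 := os.Create(filepath.Join(t.TempDir(), "out.txt"))
	ShouldntFail(t, err1, err2)
	_, err3 := io.Copy(dst, src)
	ShouldntFail(t, err3, src.Close(), dst.Close())
}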
func tmpDir(t testing.TB) string {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir
}
// Assert that the specified flags are set.
func assertFlags(t testing.TB, m common.MapStr, flags []string) {
	for _, expected := range flags {
		var key string
		switch expected {
		default:
			t.Fatalf("Unknown flag '%s' specified in test.", expected)
		case "aa":
			key = "dns.flags.authoritative"
		case "ra":
			key = "dns.flags.recursion_allowed"
		case "rd":
			key = "dns.flags.recursion_desired"
		case "tc":
			key = "dns.flags.truncated_response"
		}
		f := mapValue(t, m, key)
		flag, ok := f.(bool)
		if !ok {
			t.Fatalf("%s value is not a bool.", key)
		}
		assert.True(t, flag, "Flag %s should be true.", key)
	}
}
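// A minimal usage sketch for assertFlags; the MapStr literal is hypothetical
// and mirrors the nested structure behind the dotted keys the helper looks up.
func TestResponseFlags(t *testing.T) {
	m := common.MapStr{
		"dns": common.MapStr{
			"flags": common.MapStr{"authoritative": true, "recursion_desired": true},
		},
	}
	assertFlags(t, m, []string{"aa", "rd"})
}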
// Retrieves nested MapStr values.
func mapValueHelper(t testing.TB, m common.MapStr, keys []string) interface{} {
	key := keys[0]
	if len(keys) == 1 {
		return m[key]
	}
	if len(keys) > 1 {
		value, exists := m[key]
		if !exists {
			t.Fatalf("%s is missing from MapStr %v.", key, m)
		}
		switch typ := value.(type) {
		default:
			t.Fatalf("Expected %s to return a MapStr but got %v.", key, value)
		case common.MapStr:
			return mapValueHelper(t, typ, keys[1:])
		case []common.MapStr:
			var values []interface{}
			for _, m := range typ {
				values = append(values, mapValueHelper(t, m, keys[1:]))
			}
			return values
		}
	}
	panic("mapValueHelper cannot be called with an empty array of keys")
}
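// mapValue itself is not shown here; a plausible sketch, assuming it simply
// splits the dotted key (as used by assertFlags above) and delegates to
// mapValueHelper:
func mapValue(t testing.TB, m common.MapStr, key string) interface{} {
	return mapValueHelper(t, m, strings.Split(key, "."))
}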
func archiveWithDedup(t testing.TB) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping TestArchiverDedup")
	}

	var cnt struct {
		before, after, after2 struct {
			packs, dataBlobs, treeBlobs uint
		}
	}

	// archive a few files
	sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn.ID().Str())

	// get archive stats
	cnt.before.packs = repo.Count(backend.Data)
	cnt.before.dataBlobs = repo.Index().Count(pack.Data)
	cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)

	// archive the same files again, without parent snapshot
	sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn2.ID().Str())

	// get archive stats again
	cnt.after.packs = repo.Count(backend.Data)
	cnt.after.dataBlobs = repo.Index().Count(pack.Data)
	cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)

	// if there are more data blobs, something is wrong
	if cnt.after.dataBlobs > cnt.before.dataBlobs {
		t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
			cnt.before.dataBlobs, cnt.after.dataBlobs)
	}

	// archive the same files again, with a parent snapshot
	sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID())
	t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())

	// get archive stats again
	cnt.after2.packs = repo.Count(backend.Data)
	cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
	cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)

	// if there are more data blobs, something is wrong
	if cnt.after2.dataBlobs > cnt.before.dataBlobs {
		t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
			cnt.before.dataBlobs, cnt.after2.dataBlobs)
	}
}
// VerifyVastElementFromFile verifies Validate errors for the object
// unmarshaled from the given file.
func VerifyVastElementFromFile(t testing.TB, file string, element Validator, expectedError error) {
	xmlData, err := ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("Cannot read XML file: %v.\n", err)
	}
	VerifyVastElementFromBytes(t, xmlData, element, expectedError)
}
// GetFromStateCF lets tests get a value from the StateCF column family.
func (testDB *TestDBWrapper) GetFromStateCF(t testing.TB, key []byte) []byte {
	openchainDB := GetDBHandle()
	value, err := openchainDB.GetFromStateCF(key)
	if err != nil {
		t.Fatalf("Error while getting from db. Error: %s", err)
	}
	return value
}
// WriteToDB lets tests persist a given write batch to the db.
func (testDB *TestDBWrapper) WriteToDB(t testing.TB, writeBatch *gorocksdb.WriteBatch) {
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	err := GetDBHandle().DB.Write(opt, writeBatch)
	if err != nil {
		t.Fatalf("Error while writing to db. Error: %s", err)
	}
}