func RandPeerIDFatal(t testing.TB) peer.ID {
	p, err := RandPeerID()
	if err != nil {
		t.Fatal(err)
	}
	return p
}
// ShouldntFail checks if any of the supplied parameters are non-nil errors and
// it fatally fails the test if they are.
func ShouldntFail(t testing.TB, errors ...error) {
	for idx, err := range errors {
		if err != nil {
			t.Fatalf("An unexpected error occurred in statement %d: %s", idx+1, err)
		}
	}
}
// ExpectNEQ fails the test and displays 'msg' if exp is equal to act.
func ExpectNEQ(tb testing.TB, exp, act interface{}, msg string) {
	if reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("%s:%d: %s\n\n\texp: %#v\n\n\tgot: %#v\n\n", filepath.Base(file), line, msg, exp, act)
		tb.Fail()
	}
}
func assertNoError(err error, t testing.TB, s string) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fname := filepath.Base(file)
		t.Fatalf("failed assertion at %s:%d: %s - %s\n", fname, line, s, err)
	}
}
func cmdKeyRemove(t testing.TB, global GlobalOptions, IDs []string) {
	cmd := &CmdKey{global: &global}

	t.Logf("remove %d keys: %q\n", len(IDs), IDs)
	for _, id := range IDs {
		OK(t, cmd.Execute([]string{"rm", id}))
	}
}
// MustRunCmd wraps RunCmd, failing t if RunCmd returns an error.
func MustRunCmd(t testing.TB, c *exec.Cmd) string {
	out, err := RunCmd(c)
	if err != nil {
		t.Fatal(err)
	}
	return out
}
// notNilUp is like notNil, but is used inside helper functions to ensure that
// the file and line number reported by failures correspond to a frame one or
// more levels up the stack.
func notNilUp(obtained interface{}, t testing.TB, caller int) {
	if _isNil(obtained) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}
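// Hypothetical usage sketch (not part of the original source): a wrapper
// helper that passes caller=1 so failures are attributed to the test's call
// site rather than to this helper. The name requireConn is invented here.
func requireConn(t testing.TB, conn interface{}) {
	notNilUp(conn, t, 1) // skip this helper's frame when reporting file:line
}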
func testLabelValues(t testing.TB) {
	var scenarios = []struct {
		in  LabelValues
		out LabelValues
	}{
		{
			in:  LabelValues{"ZZZ", "zzz"},
			out: LabelValues{"ZZZ", "zzz"},
		},
		{
			in:  LabelValues{"aaa", "AAA"},
			out: LabelValues{"AAA", "aaa"},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if expected != scenario.in[j] {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}
func testDistribution(t testing.TB, counts map[string]int, min, max float64) {
	for k, v := range counts {
		if float64(v) < min || float64(v) > max {
			t.Errorf("Key %v has value %v which is out of range %v-%v", k, v, min, max)
		}
	}
}
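// Hypothetical usage sketch (not part of the original source): with three
// hosts and roughly 300 samples, a uniform selection should keep every
// per-host count within about ±20% of the expected 100. The sample data and
// test name below are invented for illustration.
func TestHostSelectionIsBalanced(t *testing.T) {
	counts := map[string]int{"host-a": 104, "host-b": 97, "host-c": 99}
	testDistribution(t, counts, 80, 120)
}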
// NewClient implements the Cluster interface.
func (f *Farmer) NewClient(ctx context.Context, t testing.TB, i int) *client.DB {
	conn, err := f.RPCContext.GRPCDial(f.Addr(ctx, i, base.DefaultPort))
	if err != nil {
		t.Fatal(err)
	}
	return client.NewDB(client.NewSender(conn))
}
func run(t testing.TB, commands []Command) {
	e := empiretest.NewEmpire(t)
	s := empiretest.NewServer(t, e)
	defer s.Close()

	token, err := e.AccessTokensCreate(&empire.AccessToken{
		User: &empire.User{Name: "fake", GitHubToken: "token"},
	})
	if err != nil {
		t.Fatal(err)
	}

	for _, cmd := range commands {
		got := cli(t, token.Token, s.URL, cmd.Command)

		want := cmd.Output
		if want != "" {
			want = want + "\n"
		}

		if got != want {
			t.Fatalf("%q != %q", got, want)
		}
	}
}
func assertDnsMessage(t testing.TB, q DnsTestMsg) {
	dns, err := decodeDnsData(TransportUdp, q.rawData)
	if err != nil {
		t.Error("failed to decode dns data")
	}

	mapStr := common.MapStr{}
	addDnsToMapStr(mapStr, dns, true, true)

	if q.question != nil {
		for k, v := range q.question {
			assert.NotNil(t, mapStr["question"].(common.MapStr)[k])
			assert.Equal(t, v, mapStr["question"].(common.MapStr)[k])
		}
	}
	if len(q.answers) > 0 {
		assertRRs(t, q.answers, mapStr["answer"].([]common.MapStr))
	}
	if len(q.authorities) > 0 {
		assertRRs(t, q.authorities, mapStr["authorities"].([]common.MapStr))
	}
	if len(q.additionals) > 0 {
		assertRRs(t, q.additionals, mapStr["additionals"].([]common.MapStr))
	}
	if q.opt != nil {
		for k, v := range q.opt {
			assert.NotNil(t, mapStr["opt"].(common.MapStr)[k])
			assert.Equal(t, v, mapStr["opt"].(common.MapStr)[k])
		}
	}
}
func testQueryType(t testing.TB, fn func(*queryTest), itype indexType) {
	defer index.SetVerboseCorpusLogging(true)
	index.SetVerboseCorpusLogging(false)

	idx := index.NewMemoryIndex() // string key-value pairs in memory, as if they were on disk
	var err error
	var corpus *index.Corpus
	if itype == indexCorpusBuild {
		if corpus, err = idx.KeepInMemory(); err != nil {
			t.Fatal(err)
		}
	}
	qt := &queryTest{
		t:     t,
		id:    indextest.NewIndexDeps(idx),
		itype: itype,
	}
	qt.id.Fataler = t
	qt.newHandler = func() *Handler {
		h := NewHandler(idx, qt.id.SignerBlobRef)
		if itype == indexCorpusScan {
			if corpus, err = idx.KeepInMemory(); err != nil {
				t.Fatal(err)
			}
			idx.PreventStorageAccessForTesting()
		}
		if corpus != nil {
			h.SetCorpus(corpus)
		}
		return h
	}
	fn(qt)
}
// WaitForStores waits for all of the store descriptors to be gossiped. Servers
// other than the first "bootstrap" their stores asynchronously, but we'd like
// to wait for all of the stores to be initialized before returning the
// TestCluster.
func (tc *TestCluster) WaitForStores(t testing.TB, g *gossip.Gossip) {
	// Register a gossip callback for the store descriptors.
	var storesMu syncutil.Mutex
	stores := map[roachpb.StoreID]struct{}{}
	storesDone := make(chan error)
	storesDoneOnce := storesDone
	unregister := g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix),
		func(_ string, content roachpb.Value) {
			storesMu.Lock()
			defer storesMu.Unlock()
			if storesDoneOnce == nil {
				return
			}

			var desc roachpb.StoreDescriptor
			if err := content.GetProto(&desc); err != nil {
				storesDoneOnce <- err
				return
			}

			stores[desc.StoreID] = struct{}{}
			if len(stores) == len(tc.Servers) {
				close(storesDoneOnce)
				storesDoneOnce = nil
			}
		})
	defer unregister()

	// Wait for the store descriptors to be gossiped.
	for err := range storesDone {
		if err != nil {
			t.Fatal(err)
		}
	}
}
// Retrieves nested MapStr values.
func mapValueHelper(t testing.TB, m common.MapStr, keys []string) interface{} {
	key := keys[0]
	if len(keys) == 1 {
		return m[key]
	}

	if len(keys) > 1 {
		value, exists := m[key]
		if !exists {
			t.Fatalf("%s is missing from MapStr %v.", key, m)
		}

		switch typ := value.(type) {
		default:
			t.Fatalf("Expected %s to return a MapStr but got %v.", key, value)
		case common.MapStr:
			return mapValueHelper(t, typ, keys[1:])
		case []common.MapStr:
			var values []interface{}
			for _, m := range typ {
				values = append(values, mapValueHelper(t, m, keys[1:]))
			}
			return values
		}
	}

	panic("mapValueHelper cannot be called with an empty array of keys")
}
func NetPipe(t testing.TB) (net.Conn, net.Conn) {
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()

	client_future := utils.NewFuture()
	go func() {
		client_future.Set(net.Dial(l.Addr().Network(), l.Addr().String()))
	}()

	var errs utils.ErrorGroup
	server_conn, err := l.Accept()
	errs.Add(err)
	client_conn, err := client_future.Get()
	errs.Add(err)
	err = errs.Finalize()
	if err != nil {
		if server_conn != nil {
			server_conn.Close()
		}
		if client_conn != nil {
			client_conn.(net.Conn).Close()
		}
		t.Fatal(err)
	}
	return server_conn, client_conn.(net.Conn)
}
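// Hypothetical usage sketch (not part of the original source): NetPipe yields
// the two ends of a real TCP connection over loopback; both ends should be
// closed by the test. The echo test below is invented and assumes the "io"
// and "testing" imports.
func TestEchoOverNetPipe(t *testing.T) {
	server, client := NetPipe(t)
	defer server.Close()
	defer client.Close()

	go func() { _, _ = io.Copy(server, server) }() // echo everything back

	if _, err := client.Write([]byte("ping")); err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 4)
	if _, err := io.ReadFull(client, buf); err != nil {
		t.Fatal(err)
	}
}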
// Assert that the specified flags are set.
func assertFlags(t testing.TB, m common.MapStr, flags []string) {
	for _, expected := range flags {
		var key string
		switch expected {
		default:
			t.Fatalf("Unknown flag '%s' specified in test.", expected)
		case "aa":
			key = "dns.flags.authoritative"
		case "ra":
			key = "dns.flags.recursion_allowed"
		case "rd":
			key = "dns.flags.recursion_desired"
		case "tc":
			key = "dns.flags.truncated_response"
		}

		f := mapValue(t, m, key)
		flag, ok := f.(bool)
		if !ok {
			t.Fatalf("%s value is not a bool.", key)
		}
		assert.True(t, flag, "Flag %s should be true.", key)
	}
}
func tmpDir(t testing.TB) string {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir
}
func makePostgres(t testing.TB) (graph.QuadStore, graph.Options, func()) {
	var conf dock.Config
	conf.Image = "postgres:9.5"
	conf.OpenStdin = true
	conf.Tty = true
	conf.Env = []string{`POSTGRES_PASSWORD=postgres`}

	addr, closer := dock.RunAndWait(t, conf, func(addr string) bool {
		conn, err := pq.Open(`postgres://postgres:postgres@` + addr + `/postgres?sslmode=disable`)
		if err != nil {
			return false
		}
		conn.Close()
		return true
	})
	addr = `postgres://postgres:postgres@` + addr + `/postgres?sslmode=disable`

	if err := createSQLTables(addr, nil); err != nil {
		closer()
		t.Fatal(err)
	}
	qs, err := newQuadStore(addr, nil)
	if err != nil {
		closer()
		t.Fatal(err)
	}
	return qs, nil, func() {
		qs.Close()
		closer()
	}
}
// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers.
func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) {
	driver := GetDriver(t, drivername, driverOptions...)
	defer PutDriver(t)

	base := stringid.GenerateRandomID()
	if err := driver.Create(base, "", "", nil); err != nil {
		t.Fatal(err)
	}

	content := []byte("test content")
	if err := addFile(driver, base, "testfile.txt", content); err != nil {
		t.Fatal(err)
	}

	topLayer, err := addManyLayers(driver, base, layerCount)
	if err != nil {
		t.Fatal(err)
	}

	err = checkManyLayers(driver, topLayer, layerCount)
	if err != nil {
		t.Fatal(err)
	}

	if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil {
		t.Fatal(err)
	}
}
func tbLoadData(tb testing.TB, filename string) []byte {
	data, err := ioutil.ReadFile("../testdata/" + filename)
	if err != nil {
		tb.Fatal(err)
	}
	return data
}
// Expect fails the test and displays 'msg' if the condition is false.
func Expect(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("%s:%d: "+msg+"\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
		tb.Fail()
	}
}
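// Hypothetical usage sketch (not part of the original source): the caller
// evaluates the condition, and msg acts as a format string for the trailing
// arguments. The test name and data below are invented.
func TestQueueLength(t *testing.T) {
	q := []int{1, 2, 3}
	Expect(t, len(q) == 3, "expected queue length 3, got %d", len(q))
}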
func innerTest(client *FlumeClient, t testing.TB) {
	// header: {businessName=feed, type=list}
	// body: 100311 list {"view_self":0,"remoteid":"5445285","timestamp":1403512030,"flume_timestamp":"2014-06-23 16:27:10","business_type":"feed"}
	body := "{\"view_self\":0,\"remoteid\":\"5445285\",\"timestamp\":1403512030,\"flume_timestamp\":\"2014-06-23 16:27:10\",\"business_type\":\"feed\"}"

	var demo LogDemo
	err := json.Unmarshal([]byte(body), &demo)
	if nil != err {
		t.Fail()
		return
	}

	data, err := json.Marshal(demo)
	if nil != err {
		t.Fail()
		return
	}

	event := NewFlumeEvent("feed", "list", data)
	events := []*flume.ThriftFlumeEvent{event}

	for i := 0; i < 1; i++ {
		err := client.AppendBatch(events)
		if nil == err {
			// Only attempt the single append if the batch succeeded.
			err = client.Append(event)
		}
		if nil != err {
			t.Log(err.Error())
			t.Fail()
		} else {
			t.Logf("%d, send succeeded", i)
		}
	}
}
// AfterTest should be called (generally with "defer leaktest.AfterTest(t)")
// from each test which uses goroutines. This waits for all goroutines
// on a blacklist to terminate and provides more precise error reporting
// than TestMainWithLeakCheck alone.
func AfterTest(t testing.TB) {
	http.DefaultTransport.(*http.Transport).CloseIdleConnections()
	if testing.Short() {
		return
	}

	var bad string
	badSubstring := map[string]string{
		").readLoop(":  "a Transport",
		").writeLoop(": "a Transport",
		"created by net/http/httptest.(*Server).Start": "an httptest.Server",
		"timeoutHandler":                        "a TimeoutHandler",
		"net.(*netFD).connect(":                 "a timing out dial",
		").noteClientGone(":                     "a closenotifier sender",
		"created by net/rpc.NewClientWithCodec": "an rpc client",
	}

	var stacks string
	for i := 0; i < 4; i++ {
		bad = ""
		stacks = strings.Join(interestingGoroutines(), "\n\n")
		for substr, what := range badSubstring {
			if strings.Contains(stacks, substr) {
				bad = what
			}
		}
		if bad == "" {
			return
		}
		// Bad stuff found, but goroutines might just still be
		// shutting down, so give it some time.
		time.Sleep(10 * time.Millisecond)
	}
	t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks)
}
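// Hypothetical usage sketch (not part of the original source), following the
// pattern named in the doc comment above: defer the leak check at the top of
// each test that spawns goroutines. The test name is invented.
func TestSomethingWithGoroutines(t *testing.T) {
	defer leaktest.AfterTest(t)
	// ... test body that starts goroutines ...
}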
// WithTestServer creates a new TestServer, runs the passed function, and then
// verifies that no resources were leaked.
func WithTestServer(t testing.TB, chanOpts *ChannelOpts, f func(*TestServer)) {
	chanOpts = chanOpts.Copy()
	runCount := chanOpts.RunCount
	if runCount < 1 {
		runCount = 1
	}

	for i := 0; i < runCount; i++ {
		if t.Failed() {
			return
		}

		// Run without the relay, unless OnlyRelay was set.
		if !chanOpts.OnlyRelay {
			noRelayOpts := chanOpts.Copy()
			noRelayOpts.DisableRelay = true
			withServer(t, noRelayOpts, f)
		}

		// Run with the relay, unless the user has disabled it.
		if !chanOpts.DisableRelay {
			withServer(t, chanOpts.Copy(), f)
		}
	}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
		tb.FailNow()
	}
}
// testClient returns a *Client connected to a locally running sftp-server;
// the *exec.Cmd returned must be defer Wait'd.
func testClient(t testing.TB, readonly bool, delay time.Duration) (*Client, *exec.Cmd) {
	if !*testIntegration {
		t.Skip("skipping integration test")
	}

	cmd := exec.Command(*testSftp, "-e", "-R", "-l", debuglevel) // log to stderr, read only
	if !readonly {
		cmd = exec.Command(*testSftp, "-e", "-l", debuglevel) // log to stderr
	}
	cmd.Stderr = os.Stdout

	pw, err := cmd.StdinPipe()
	if err != nil {
		t.Fatal(err)
	}
	if delay > NO_DELAY {
		pw = newDelayedWriter(pw, delay)
	}
	pr, err := cmd.StdoutPipe()
	if err != nil {
		t.Fatal(err)
	}

	if err := cmd.Start(); err != nil {
		t.Skipf("could not start sftp-server process: %v", err)
	}

	sftp, err := NewClientPipe(pr, pw)
	if err != nil {
		t.Fatal(err)
	}

	return sftp, cmd
}
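// Hypothetical usage sketch (not part of the original source), per the doc
// comment above: Wait the returned *exec.Cmd and close the client when the
// test finishes. The test name, Lstat call, and path are assumptions for
// illustration.
func TestClientStat(t *testing.T) {
	sftp, cmd := testClient(t, true, NO_DELAY) // read-only, no artificial delay
	defer cmd.Wait()
	defer sftp.Close()

	if _, err := sftp.Lstat("/etc/passwd"); err != nil {
		t.Fatal(err)
	}
}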
func newAsm(t testing.TB) *Assembler {
	buf, e := gojit.Alloc(gojit.PageSize)
	if e != nil {
		t.Fatalf("alloc: %s", e.Error())
	}
	return &Assembler{buf, 0, CgoABI}
}
// ok fails the test if err is not nil.
func ok(tb testing.TB, err error) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
		tb.FailNow()
	}
}
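// Hypothetical usage sketch (not part of the original source) combining the
// ok and equals helpers defined above; assumes the "strconv" and "testing"
// imports, and the test name is invented.
func TestParseCount(t *testing.T) {
	n, err := strconv.Atoi("42")
	ok(t, err)       // fail fast on an unexpected error
	equals(t, 42, n) // deep-equality check reported with the caller's file:line
}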
func NewTestServer(t testing.TB, protocol uint8) *TestServer {
	laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}

	listen, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		t.Fatal(err)
	}

	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}

	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		quit:       make(chan struct{}),
	}

	go srv.serve()

	return srv
}