// MustRunCmd wraps RunCmd, failing t if RunCmd returns an error. func MustRunCmd(t testing.TB, c *exec.Cmd) string { out, err := RunCmd(c) if err != nil { t.Fatal(err) } return out }
// req parses the raw HTTP request text v into an *http.Request,
// failing t if the text is not a well-formed request.
func req(t testing.TB, v string) *http.Request {
	r, err := http.ReadRequest(bufio.NewReader(strings.NewReader(v)))
	if err != nil {
		t.Fatal(err)
	}
	return r
}
func RandPeerIDFatal(t testing.TB) peer.ID { p, err := RandPeerID() if err != nil { t.Fatal(err) } return p }
func open(t testing.TB) restic.Backend { if OpenFn == nil { t.Fatal("OpenFn not set") } if CreateFn == nil { t.Fatalf("CreateFn not set") } if !butInitialized { be, err := CreateFn() if err != nil { t.Fatalf("Create returned unexpected error: %v", err) } but = be butInitialized = true } if but == nil { var err error but, err = OpenFn() if err != nil { t.Fatalf("Open returned unexpected error: %v", err) } } return but }
func run(t testing.TB, commands []Command) { e := empiretest.NewEmpire(t) s := empiretest.NewServer(t, e) defer s.Close() token, err := e.AccessTokensCreate(&empire.AccessToken{ User: &empire.User{Name: "fake", GitHubToken: "token"}, }) if err != nil { t.Fatal(err) } for _, cmd := range commands { got := cli(t, token.Token, s.URL, cmd.Command) want := cmd.Output if want != "" { want = want + "\n" } if got != want { t.Fatalf("%q != %q", got, want) } } }
func makePostgres(t testing.TB) (graph.QuadStore, graph.Options, func()) { var conf dock.Config conf.Image = "postgres:9.5" conf.OpenStdin = true conf.Tty = true conf.Env = []string{`POSTGRES_PASSWORD=postgres`} addr, closer := dock.RunAndWait(t, conf, func(addr string) bool { conn, err := pq.Open(`postgres://postgres:postgres@` + addr + `/postgres?sslmode=disable`) if err != nil { return false } conn.Close() return true }) addr = `postgres://postgres:postgres@` + addr + `/postgres?sslmode=disable` if err := createSQLTables(addr, nil); err != nil { closer() t.Fatal(err) } qs, err := newQuadStore(addr, nil) if err != nil { closer() t.Fatal(err) } return qs, nil, func() { qs.Close() closer() } }
func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) if err != nil { t.Fatal(err) } if fi.Mode()&os.ModeType != mode&os.ModeType { t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) } if fi.Mode()&os.ModePerm != mode&os.ModePerm { t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) } if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) } if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) } if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) } if stat, ok := fi.Sys().(*syscall.Stat_t); ok { if stat.Uid != uid { t.Fatalf("%s no owned by uid %d", path, uid) } if stat.Gid != gid { t.Fatalf("%s not owned by gid %d", path, gid) } } }
// WaitForStores waits for all of the store descriptors to be gossiped. Servers
// other than the first "bootstrap" their stores asynchronously, but we'd like
// to wait for all of the stores to be initialized before returning the
// TestCluster.
func (tc *TestCluster) WaitForStores(t testing.TB, g *gossip.Gossip) {
	// Register a gossip callback for the store descriptors.
	var storesMu syncutil.Mutex
	stores := map[roachpb.StoreID]struct{}{}
	storesDone := make(chan error)
	// storesDoneOnce aliases storesDone and is nilled out (under storesMu)
	// once completion has been signaled, so the callback sends or closes at
	// most once even if gossip keeps invoking it afterwards.
	storesDoneOnce := storesDone
	unregister := g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix),
		func(_ string, content roachpb.Value) {
			storesMu.Lock()
			defer storesMu.Unlock()
			if storesDoneOnce == nil {
				// Completion already signaled; ignore further callbacks.
				return
			}
			var desc roachpb.StoreDescriptor
			if err := content.GetProto(&desc); err != nil {
				// Forward the decode error to the waiting loop below.
				storesDoneOnce <- err
				return
			}
			stores[desc.StoreID] = struct{}{}
			if len(stores) == len(tc.Servers) {
				// Every server's store has been seen; closing the channel
				// ends the range loop below without an error.
				close(storesDoneOnce)
				storesDoneOnce = nil
			}
		})
	defer unregister()

	// Wait for the store descriptors to be gossiped.
	for err := range storesDone {
		if err != nil {
			t.Fatal(err)
		}
	}
}
// newMetricSet instantiates a new MetricSet using the given configuration. // The ModuleFactory and MetricSetFactory are obtained from the global // Registry. func newMetricSet(t testing.TB, config interface{}) mb.MetricSet { c, err := common.NewConfigFrom(config) if err != nil { t.Fatal(err) } m, err := mb.NewModules([]*common.Config{c}, mb.Registry) if err != nil { t.Fatal(err) } if !assert.Len(t, m, 1) { t.FailNow() } var metricSet mb.MetricSet for _, v := range m { if !assert.Len(t, v, 1) { t.FailNow() } metricSet = v[0] break } if !assert.NotNil(t, metricSet) { t.FailNow() } return metricSet }
func ringN(t testing.TB, n int) ([]*torus.Server, *temp.Server) { servers, mds := createN(t, n) var peers torus.PeerInfoList for _, s := range servers { peers = append(peers, &models.PeerInfo{ UUID: s.MDS.UUID(), TotalBlocks: StorageSize / BlockSize, }) } rep := 2 ringType := ring.Ketama if n == 1 { rep = 1 ringType = ring.Single } newRing, err := ring.CreateRing(&models.Ring{ Type: uint32(ringType), Peers: peers, ReplicationFactor: uint32(rep), Version: uint32(2), }) if err != nil { t.Fatal(err) } err = mds.SetRing(newRing) if err != nil { t.Fatal(err) } return servers, mds }
func newConveyor(t testing.TB) *Conveyor { db := sqlx.MustConnect("postgres", databaseURL) if err := core.Reset(db); err != nil { t.Fatal(err) } c := core.New(db) c.BuildQueue = core.NewBuildQueue(100) c.Logger = logs.Discard ch := make(chan core.BuildContext) c.BuildQueue.Subscribe(ch) w := worker.New(c, worker.Options{ Builder: builder.BuilderFunc(func(ctx context.Context, w io.Writer, options builder.BuildOptions) (string, error) { io.WriteString(w, "Pulling base image\n") return "remind101/acme-inc:1234", nil }), BuildRequests: ch, }) go w.Start() return &Conveyor{ Conveyor: c, worker: w, } }
// newDB opens a connection handle to the local empire test database,
// failing t if the driver rejects the data source name.
func newDB(t testing.TB) *sql.DB {
	conn, err := sql.Open("postgres", "postgres://localhost/empire?sslmode=disable")
	if err != nil {
		t.Fatal(err)
	}
	return conn
}
func tearDown(t testing.TB) { sigar.Procd = "/proc" err := os.RemoveAll(procd) if err != nil { t.Fatal(err) } }
func assertNotCheckpointed(t testing.TB, checkpointC chan k.Record) { select { case <-checkpointC: t.Fatal("Expected no checkpoint") default: } }
func createBase(t testing.TB, driver graphdriver.Driver, name string) { // We need to be able to set any perms oldmask := syscall.Umask(0) defer syscall.Umask(oldmask) if err := driver.CreateReadWrite(name, "", nil); err != nil { t.Fatal(err) } dir, err := driver.Get(name, "") if err != nil { t.Fatal(err) } defer driver.Put(name) subdir := path.Join(dir, "a subdir") if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { t.Fatal(err) } if err := os.Chown(subdir, 1, 2); err != nil { t.Fatal(err) } file := path.Join(dir, "a file") if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { t.Fatal(err) } }
// VerifyNoImports verifies that a package doesn't depend (directly or // indirectly) on forbidden packages. The forbidden packages are specified as // either exact matches or prefix matches. // If GOPATH isn't set, it is an indication that the source is not available and // the test is skipped. func VerifyNoImports( t testing.TB, pkgPath string, cgo bool, forbiddenPkgs, forbiddenPrefixes []string, ) { // Skip test if source is not available. if build.Default.GOPATH == "" { t.Skip("GOPATH isn't set") } imports, err := TransitiveImports(pkgPath, true) if err != nil { t.Fatal(err) } for _, forbidden := range forbiddenPkgs { if _, ok := imports[forbidden]; ok { t.Errorf("Package %s includes %s, which is forbidden", pkgPath, forbidden) } } for _, forbiddenPrefix := range forbiddenPrefixes { for k := range imports { if strings.HasPrefix(k, forbiddenPrefix) { t.Errorf("Package %s includes %s, which is forbidden", pkgPath, k) } } } }
func NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer { laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) } listen, err := net.ListenTCP("tcp", laddr) if err != nil { t.Fatal(err) } headerSize := 8 if protocol > protoVersion2 { headerSize = 9 } ctx, cancel := context.WithCancel(ctx) srv := &TestServer{ Address: listen.Addr().String(), listen: listen, t: t, protocol: protocol, headerSize: headerSize, ctx: ctx, cancel: cancel, } go srv.closeWatch() go srv.serve() return srv }
// testQueryType runs fn against a fresh in-memory index configured for the
// given index type. For indexCorpusBuild the corpus is attached before any
// data is written; for indexCorpusScan it is built lazily when the handler
// is created.
func testQueryType(t testing.TB, fn func(*queryTest), itype indexType) {
	// Silence verbose corpus logging for the test and restore it afterwards.
	defer index.SetVerboseCorpusLogging(true)
	index.SetVerboseCorpusLogging(false)

	idx := index.NewMemoryIndex() // string key-value pairs in memory, as if they were on disk
	var err error
	var corpus *index.Corpus
	if itype == indexCorpusBuild {
		if corpus, err = idx.KeepInMemory(); err != nil {
			t.Fatal(err)
		}
	}
	qt := &queryTest{
		t:     t,
		id:    indextest.NewIndexDeps(idx),
		itype: itype,
	}
	qt.id.Fataler = t
	qt.newHandler = func() *Handler {
		h := NewHandler(idx, qt.id.SignerBlobRef)
		// Scan mode: build the corpus now, then forbid further storage
		// access so queries must be answered from the corpus alone.
		if itype == indexCorpusScan {
			if corpus, err = idx.KeepInMemory(); err != nil {
				t.Fatal(err)
			}
			idx.PreventStorageAccessForTesting()
		}
		// corpus is non-nil for both corpus modes at this point.
		if corpus != nil {
			h.SetCorpus(corpus)
		}
		return h
	}
	fn(qt)
}
func run(t testing.TB, commands []Command) { e := empiretest.NewEmpire(t) s := empiretest.NewServer(t, e) defer s.Close() token, err := e.AccessTokensCreate(&empire.AccessToken{ User: &empire.User{Name: "fake", GitHubToken: "token"}, }) if err != nil { t.Fatal(err) } for _, cmd := range commands { got := cli(t, token.Token, s.URL, cmd.Command) if want, ok := cmd.Output.(string); ok { if want != "" { want = want + "\n" } if got != want { t.Fatalf("%q != %q", got, want) } } else if regex, ok := cmd.Output.(*regexp.Regexp); ok { if !regex.MatchString(got) { t.Fatalf("%q != %q", got, regex.String()) } } } }
// NewClient implements the Cluster interface. func (f *Farmer) NewClient(ctx context.Context, t testing.TB, i int) *client.DB { conn, err := f.RPCContext.GRPCDial(f.Addr(ctx, i, base.DefaultPort)) if err != nil { t.Fatal(err) } return client.NewDB(client.NewSender(conn)) }
func testDataStore(t testing.TB, max int64) *DataStore { d, err := New(max, logger) if err != nil { t.Fatal("New(%v) failed: %v", max, err) } return d }
// tbLoadData reads the named fixture file from ../testdata, failing tb on
// any read error.
func tbLoadData(tb testing.TB, filename string) []byte {
	contents, err := ioutil.ReadFile("../testdata/" + filename)
	if err != nil {
		tb.Fatal(err)
	}
	return contents
}
func NewApp(t testing.TB, dataDir string) *server.App { app, err := server.New(dataDir) if err != nil { t.Fatal(err) } return app }
func NetPipe(t testing.TB) (net.Conn, net.Conn) { l, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } defer l.Close() client_future := utils.NewFuture() go func() { client_future.Set(net.Dial(l.Addr().Network(), l.Addr().String())) }() var errs utils.ErrorGroup server_conn, err := l.Accept() errs.Add(err) client_conn, err := client_future.Get() errs.Add(err) err = errs.Finalize() if err != nil { if server_conn != nil { server_conn.Close() } if client_conn != nil { client_conn.(net.Conn).Close() } t.Fatal(err) } return server_conn, client_conn.(net.Conn) }
// NewEmpire returns a new Empire instance suitable for testing. It ensures that // the database is clean before returning. func NewEmpire(t testing.TB) *empire.Empire { opts := empire.Options{ DB: DatabaseURL, AWSConfig: nil, Docker: empire.DockerOptions{ Auth: &docker.AuthConfigurations{ Configs: map[string]docker.AuthConfiguration{ "https://index.docker.io/v1/": docker.AuthConfiguration{ Username: "", Password: "", }, }, }, }, } e, err := empire.New(opts) if err != nil { t.Fatal(err) } if err := e.Reset(); err != nil { t.Fatal(err) } return e }
// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) { driver := GetDriver(t, drivername, driverOptions...) defer PutDriver(t) base := stringid.GenerateRandomID() if err := driver.Create(base, "", "", nil); err != nil { t.Fatal(err) } content := []byte("test content") if err := addFile(driver, base, "testfile.txt", content); err != nil { t.Fatal(err) } topLayer, err := addManyLayers(driver, base, layerCount) if err != nil { t.Fatal(err) } err = checkManyLayers(driver, topLayer, layerCount) if err != nil { t.Fatal(err) } if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil { t.Fatal(err) } }
func NewFakeUserOrBust(tb testing.TB, prefix string) (fu *FakeUser) { var err error if fu, err = NewFakeUser(prefix); err != nil { tb.Fatal(err) } return fu }
func NewSSLTestServer(t testing.TB, protocol uint8) *TestServer { pem, err := ioutil.ReadFile("testdata/pki/ca.crt") certPool := x509.NewCertPool() if !certPool.AppendCertsFromPEM(pem) { t.Fatalf("Failed parsing or appending certs") } mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key") if err != nil { t.Fatalf("could not load cert") } config := &tls.Config{ Certificates: []tls.Certificate{mycert}, RootCAs: certPool, } listen, err := tls.Listen("tcp", "127.0.0.1:0", config) if err != nil { t.Fatal(err) } headerSize := 8 if protocol > protoVersion2 { headerSize = 9 } srv := &TestServer{ Address: listen.Addr().String(), listen: listen, t: t, protocol: protocol, headerSize: headerSize, quit: make(chan struct{}), } go srv.serve() return srv }
// request builds a GET request for url, failing t if the URL is invalid.
func request(t testing.TB, url string) *http.Request {
	r, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	return r
}
func NewTestServer(t testing.TB, protocol uint8) *TestServer { laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) } listen, err := net.ListenTCP("tcp", laddr) if err != nil { t.Fatal(err) } headerSize := 8 if protocol > protoVersion2 { headerSize = 9 } srv := &TestServer{ Address: listen.Addr().String(), listen: listen, t: t, protocol: protocol, headerSize: headerSize, quit: make(chan struct{}), } go srv.serve() return srv }