// testClient returns a *Client connected to a locally running sftp-server.
// The returned *exec.Cmd must be defer Wait'd.
func testClient(t testing.TB, readonly bool, delay time.Duration) (*Client, *exec.Cmd) {
	if !*testIntegration {
		t.Skip("skipping integration test")
	}
	cmd := exec.Command(*testSftp, "-e", "-R", "-l", debuglevel) // log to stderr, read only
	if !readonly {
		cmd = exec.Command(*testSftp, "-e", "-l", debuglevel) // log to stderr
	}
	cmd.Stderr = os.Stdout
	pw, err := cmd.StdinPipe()
	if err != nil {
		t.Fatal(err)
	}
	if delay > NO_DELAY {
		pw = newDelayedWriter(pw, delay)
	}
	pr, err := cmd.StdoutPipe()
	if err != nil {
		t.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		t.Skipf("could not start sftp-server process: %v", err)
	}
	sftp, err := NewClientPipe(pr, pw)
	if err != nil {
		t.Fatal(err)
	}
	return sftp, cmd
}
// VerifyNoImports verifies that a package doesn't depend (directly or
// indirectly) on forbidden packages. The forbidden packages are specified as
// either exact matches or prefix matches.
// If GOPATH isn't set, it is an indication that the source is not available and
// the test is skipped.
func VerifyNoImports(
	t testing.TB, pkgPath string, cgo bool, forbiddenPkgs, forbiddenPrefixes []string,
) {
	// Skip test if source is not available.
	if build.Default.GOPATH == "" {
		t.Skip("GOPATH isn't set")
	}

	imports, err := TransitiveImports(pkgPath, true)
	if err != nil {
		t.Fatal(err)
	}

	for _, forbidden := range forbiddenPkgs {
		if _, ok := imports[forbidden]; ok {
			t.Errorf("Package %s includes %s, which is forbidden", pkgPath, forbidden)
		}
	}
	for _, forbiddenPrefix := range forbiddenPrefixes {
		for k := range imports {
			if strings.HasPrefix(k, forbiddenPrefix) {
				t.Errorf("Package %s includes %s, which is forbidden", pkgPath, k)
			}
		}
	}
}
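// Illustrative only: a minimal sketch of how a test might call VerifyNoImports.
// The package path and the forbidden lists below are hypothetical placeholders,
// not taken from the original source.
func TestNoForbiddenImportsSketch(t *testing.T) {
	VerifyNoImports(
		t,
		"example.com/myapp/server", // hypothetical package under test
		false,                      // cgo flag
		[]string{"testing"},        // exact-match forbidden packages
		[]string{"example.com/myapp/internal/experimental"}, // forbidden prefixes
	)
}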
// withTestEnvironment creates a test environment and calls f with it. After f
// has returned, the temporary directory is removed.
func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) {
	if !RunIntegrationTest {
		t.Skip("integration tests disabled")
	}

	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
	OK(t, err)

	env := testEnvironment{
		base:     tempdir,
		cache:    filepath.Join(tempdir, "cache"),
		repo:     filepath.Join(tempdir, "repo"),
		testdata: filepath.Join(tempdir, "testdata"),
	}

	OK(t, os.MkdirAll(env.testdata, 0700))
	OK(t, os.MkdirAll(env.cache, 0700))
	OK(t, os.MkdirAll(env.repo, 0700))

	f(&env, configureRestic(t, env.cache, env.repo))

	if !TestCleanup {
		t.Logf("leaving temporary directory %v used for test", tempdir)
		return
	}

	RemoveAll(t, tempdir)
}
func archiveWithDedup(t testing.TB) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping TestArchiverDedup")
	}

	var cnt struct {
		before, after, after2 struct {
			packs, dataBlobs, treeBlobs uint
		}
	}

	// archive a few files
	sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn.ID().Str())

	// get archive stats
	cnt.before.packs = repo.Count(backend.Data)
	cnt.before.dataBlobs = repo.Index().Count(pack.Data)
	cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)

	// archive the same files again, without parent snapshot
	sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn2.ID().Str())

	// get archive stats again
	cnt.after.packs = repo.Count(backend.Data)
	cnt.after.dataBlobs = repo.Index().Count(pack.Data)
	cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)

	// if there are more data blobs, something is wrong
	if cnt.after.dataBlobs > cnt.before.dataBlobs {
		t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
			cnt.before.dataBlobs, cnt.after.dataBlobs)
	}

	// archive the same files again, with a parent snapshot
	sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID())
	t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())

	// get archive stats again
	cnt.after2.packs = repo.Count(backend.Data)
	cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
	cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)

	// if there are more data blobs, something is wrong
	if cnt.after2.dataBlobs > cnt.before.dataBlobs {
		t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
			cnt.before.dataBlobs, cnt.after2.dataBlobs)
	}
}
func checkTestCacheAvailable(tb testing.TB, cache *Cache) {
	conn, err := cache.Pool.Dial()
	if err != nil {
		cache.Close()
		tb.Skip(err)
	}
	conn.Close()
}
func skipIfNotUnix(tb testing.TB) {
	switch runtime.GOOS {
	case "android", "nacl", "plan9", "windows":
		tb.Skipf("%s does not support unix sockets", runtime.GOOS)
	}
	if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
		tb.Skip("iOS does not support unix, unixgram")
	}
}
func newScheduler(t testing.TB) *ecs.Scheduler {
	creds := &credentials.EnvProvider{}
	if _, err := creds.Retrieve(); err != nil {
		t.Skip("Skipping ECS test because AWS_ environment variables are not present.")
	}
	config := defaults.DefaultConfig.WithCredentials(credentials.NewCredentials(creds))
	return ecs.NewScheduler(config)
}
func checkTestCacheAvailable(tb testing.TB, cache *Cache) {
	err := cache.Client.Set(&memcache_impl.Item{
		Key:   "ping",
		Value: []byte("ping"),
	})
	if err != nil {
		tb.Skip(err)
	}
}
func NewReplicaSetHarness(n uint, t testing.TB) *ReplicaSetHarness {
	if disableSlowTests {
		t.Skip("disabled because it's slow")
	}

	mgoRS := mgotest.NewReplicaSet(n, t)
	return &ReplicaSetHarness{
		Harness:       newHarnessInternal(mgoRS.Addrs()[n-1], mgoRS, t),
		MgoReplicaSet: mgoRS,
	}
}
// PutDriver decrements the driver's reference count and removes the driver
// once it is no longer used.
func PutDriver(t testing.TB) {
	if drv == nil {
		t.Skip("No driver to put!")
	}
	drv.refCount--
	if drv.refCount == 0 {
		cleanup(t, drv)
		drv = nil
	}
}
func parseAccount(tb testing.TB) *Account {
	acc := &Account{}
	if data, err := ioutil.ReadFile("account.txt"); err == nil {
		if err := acc.Parse(strings.TrimSpace(string(data))); err == nil {
			tb.Logf("Using account %+v", *acc)
			return acc
		}
	}
	tb.Skip("Please place your Pinterest account in account.txt (username: password)")
	return nil
}
func testInit(dir string, t testing.TB) graphdriver.Driver {
	d, err := Init(dir, nil, nil, nil)
	if err != nil {
		if err == graphdriver.ErrNotSupported {
			t.Skip(err)
		} else {
			t.Fatal(err)
		}
	}
	return d
}
func (o *postgresOpener) Open(t testing.TB) (*Orm, interface{}) {
	u, err := user.Current()
	if err != nil {
		t.Fatal(err)
	}
	exec.Command("dropdb", "gotest").Run()
	if err := exec.Command("createdb", "gotest").Run(); err != nil {
		t.Skip("cannot create gotest postgres database, skipping test")
	}
	return newOrm(t, fmt.Sprintf("postgres://dbname=gotest user=%v password=%v", u.Username, u.Username), true), nil
}
func newLocalhostServer(tb testing.TB) *Client {
	c, err := net.Dial("tcp", testServer)
	if err != nil {
		tb.Skipf("skipping test; no server running at %s", testServer)
		return nil
	}
	c.Write([]byte("flush_all\r\n"))
	c.Close()

	client, err := New(testServer)
	if err != nil {
		tb.Fatal(err)
	}
	return client
}
func testRun(b testing.TB, decode decodeFunc) {
	if !fastjpeg.Available() {
		b.Skip("Skipping benchmark, djpeg unavailable.")
	}
	im, _, err := decode(bytes.NewReader(jpegBytes))
	if err != nil {
		b.Fatal(err)
	}
	rect := im.Bounds()
	w, h := 128, 128
	im = resize.Resize(im, rect, w, h)
	err = jpeg.Encode(ioutil.Discard, im, nil)
	if err != nil {
		b.Fatal(err)
	}
}
// ensureVersion tries to set up gpgme with a specific version, or skips the
// test with msg if no matching gpg binary can be found.
func ensureVersion(t testing.TB, version, msg string) {
	if isVersion(t, version) {
		return
	}
	for _, bin := range gpgBins {
		path, err := exec.LookPath(bin)
		if err != nil {
			continue
		}
		if err := SetEngineInfo(ProtocolOpenPGP, path, testGPGHome); err != nil {
			continue
		}
		if isVersion(t, version) {
			return
		}
	}
	t.Skip(msg)
}
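// Illustrative only: a sketch of how ensureVersion above might be used to gate
// a test on a gpg version; the version string and message are hypothetical.
func TestRequiresModernGPGSketch(t *testing.T) {
	ensureVersion(t, "2.1", "skipping: gpg >= 2.1 not available")
	// Hypothetical: the rest of the test relies on gpg 2.1 behaviour.
}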
// withTestEnvironment creates a test environment and calls f with it. After f
// has returned, the temporary directory is removed.
func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) {
	if !RunIntegrationTest {
		t.Skip("integration tests disabled")
	}

	repository.TestUseLowSecurityKDFParameters(t)

	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
	OK(t, err)

	env := testEnvironment{
		base:     tempdir,
		cache:    filepath.Join(tempdir, "cache"),
		repo:     filepath.Join(tempdir, "repo"),
		testdata: filepath.Join(tempdir, "testdata"),
	}

	OK(t, os.MkdirAll(env.testdata, 0700))
	OK(t, os.MkdirAll(env.cache, 0700))
	OK(t, os.MkdirAll(env.repo, 0700))

	gopts := GlobalOptions{
		Repo:     env.repo,
		Quiet:    true,
		password: TestPassword,
		stdout:   os.Stdout,
		stderr:   os.Stderr,
	}

	// always overwrite global options
	globalOptions = gopts

	f(&env, gopts)

	if !TestCleanupTempDirs {
		t.Logf("leaving temporary directory %v used for test", tempdir)
		return
	}

	RemoveAll(t, tempdir)
}
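// Illustrative only: a sketch of the callback pattern used by withTestEnvironment
// above. The test body is hypothetical; the helper prepares the directories,
// passes them to the closure, and removes the temporary directory afterwards
// (unless cleanup is disabled).
func TestWithEnvironmentSketch(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		// Hypothetical: exercise commands against env.repo with gopts here.
		t.Logf("repository prepared at %v", env.repo)
	})
}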
func newUnixServer(tb testing.TB) (*exec.Cmd, *Client) {
	sock := fmt.Sprintf("/tmp/test-gomemcache-%d.sock", os.Getpid())
	os.Remove(sock)
	cmd := exec.Command("memcached", "-s", sock)
	if err := cmd.Start(); err != nil {
		tb.Skip("skipping test; couldn't find memcached")
		return nil, nil
	}

	// Wait a bit for the socket to appear.
	for i := 0; i < 10; i++ {
		if _, err := os.Stat(sock); err == nil {
			break
		}
		time.Sleep(time.Duration(25*i) * time.Millisecond)
	}

	c, err := New(sock)
	if err != nil {
		tb.Fatal(err)
	}
	return cmd, c
}
func setupRedis(tb testing.TB, ttl time.Duration) (redis.Conn, *redigostore.RedigoStore) {
	pool := getPool()
	c := pool.Get()

	if _, err := redis.String(c.Do("PING")); err != nil {
		c.Close()
		tb.Skip("redis server not available on localhost port 6379")
	}

	if _, err := redis.String(c.Do("SELECT", redisTestDB)); err != nil {
		c.Close()
		tb.Fatal(err)
	}

	st, err := redigostore.New(pool, redisTestPrefix, redisTestDB)
	if err != nil {
		c.Close()
		tb.Fatal(err)
	}

	return c, st
}
func chooseInterfaces(tb testing.TB) (*net.TCPAddr, *net.TCPAddr) {
	ibAddrs := InterfaceAddrs()
	var addri, addrj net.Addr
	for i := 0; i < len(ibAddrs); i++ {
		if iface := InterfaceForAddr(ibAddrs[i]); iface == nil || !iface.Active() {
			continue
		}
		addri = ibAddrs[i]
	}
	for j := len(ibAddrs) - 1; j >= 0; j-- {
		if iface := InterfaceForAddr(ibAddrs[j]); iface == nil || !iface.Active() {
			continue
		}
		addrj = ibAddrs[j]
	}
	if addri == nil || addrj == nil {
		tb.Skip("no interfaces to test with")
	}
	laddr := &net.TCPAddr{IP: addri.(*net.IPNet).IP}
	raddr := &net.TCPAddr{IP: addrj.(*net.IPNet).IP}
	return laddr, raddr
}
// testClient returns a *Client connected to a locally running sftp-server.
// The returned *exec.Cmd must be defer Wait'd.
func testClient(t testing.TB, readonly bool) (*Client, *exec.Cmd) {
	if !*testIntegration {
		t.Skip("skipping integration test")
	}
	cmd := exec.Command(*testSftp, "-e", "-R", "-l", debuglevel) // log to stderr, read only
	if !readonly {
		cmd = exec.Command(*testSftp, "-e", "-l", debuglevel) // log to stderr
	}
	cmd.Stderr = os.Stdout
	pw, err := cmd.StdinPipe()
	if err != nil {
		t.Fatal(err)
	}
	pr, err := cmd.StdoutPipe()
	if err != nil {
		t.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		t.Skipf("could not start sftp-server process: %v", err)
	}
	sftp, err := NewClientPipe(pr, pw)
	if err != nil {
		t.Fatal(err)
	}
	if err := sftp.sendInit(); err != nil {
		defer cmd.Wait()
		t.Fatal(err)
	}
	if err := sftp.recvVersion(); err != nil {
		defer cmd.Wait()
		t.Fatal(err)
	}
	return sftp, cmd
}
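// Illustrative only: a sketch of using the testClient helper above, deferring
// cmd.Wait as its doc comment requires and closing the client when done.
func TestClientSketch(t *testing.T) {
	sftp, cmd := testClient(t, true) // read-only server
	defer cmd.Wait()
	defer sftp.Close()
	// Hypothetical: exercise read-only operations against the sftp-server here.
}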
// SkipUnlessLocal calls t.Skip if not running against a local cluster.
func SkipUnlessLocal(t testing.TB) {
	if *flagRemote {
		t.Skip("skipping since not run against local cluster")
	}
}
// SkipUnlessRemote calls t.Skip if not running against a remote cluster.
func SkipUnlessRemote(t testing.TB) {
	if !*flagRemote {
		t.Skip("skipping since not run against remote cluster")
	}
}
// SkipUnlessPrivileged calls t.Skip if not running with the privileged flag.
func SkipUnlessPrivileged(t testing.TB) {
	if !*flagPrivileged {
		t.Skip("skipping since not run in privileged mode")
	}
}
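// Illustrative only: a sketch showing how the Skip helpers above gate a test
// body; the test itself is hypothetical.
func TestLocalOnlySketch(t *testing.T) {
	SkipUnlessLocal(t)
	// Hypothetical: code below runs only when not targeting a remote cluster.
}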
func checkDocker(t testing.TB) {
	if testing.Short() {
		t.Skip("Skipping docker tests because they take a long time")
	}
}
func testAllPuzzles(t testing.TB) {
	filename := os.Getenv("SUDOKU_PUZZLE_FILENAME")
	if len(filename) < 1 {
		t.Skip("Provide SUDOKU_PUZZLE_FILENAME env to test all puzzles")
	}

	file, err := os.Open(filename)
	if err != nil {
		t.Error(err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	var total, longest time.Duration
	var count int

OUTER_LOOP:
	for scanner.Scan() {
		line := scanner.Text()
		if len(line) != 81 {
			t.Logf("line length not 81 [%s]", line)
			continue
		}

		in := &bytes.Buffer{}
		for k, v := range line {
			if k > 0 {
				if k%9 == 0 {
					in.WriteRune('\n')
				} else {
					in.WriteRune(' ')
				}
			}
			switch v {
			case '0', '.', '_':
				in.WriteRune('_')
			case '1', '2', '3', '4', '5', '6', '7', '8', '9':
				in.WriteRune(v)
			case '\n':
				// no-op
			default:
				t.Logf("Invalid field value [%s]", string(v))
				continue OUTER_LOOP
			}
		}

		var puzz puzzle
		if err := puzz.init(in); err != nil {
			t.Error(err)
			continue
		}

		before := time.Now()
		puzz, _ = solve(puzz, ioutil.Discard)
		duration := time.Since(before)
		total += duration
		if duration > longest {
			longest = duration
		}
		count++

		if err := puzz.solved(); err == nil {
			t.Logf("puzzle solved in %.3f [%s]", duration.Seconds(), line[:81])
		} else {
			t.Errorf("puzzle not solved in %.3f [%s] [%s]", duration.Seconds(), line[:81], err)
		}
	}

	t.Logf("%d puzzles solved in %.3f seconds [%.3f per] [%.3f longest]",
		count, total.Seconds(), total.Seconds()/float64(count), longest.Seconds())
}