// TestCreateSnapshot creates a snapshot filled with fake data. The
// fake data is generated deterministically from the timestamp `at`, which is
// also used as the snapshot's timestamp.
func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID {
	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
	snapshot, err := NewSnapshot([]string{fakedir})
	if err != nil {
		t.Fatal(err)
	}
	snapshot.Time = at

	treeID := saveTree(t, repo, at.UnixNano())
	snapshot.Tree = &treeID

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
	if err != nil {
		t.Fatal(err)
	}

	t.Logf("saved snapshot %v", id.Str())

	err = repo.Flush()
	if err != nil {
		t.Fatal(err)
	}

	err = repo.SaveIndex()
	if err != nil {
		t.Fatal(err)
	}

	return id
}
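// Example usage (a minimal sketch): because the fake data is derived from
// `at`, the same timestamp yields reproducible snapshot content. The test
// name below is hypothetical; repository.TestRepository is assumed to be
// available as used elsewhere in these helpers.
func TestCreateSnapshotDeterministic(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	at := time.Date(2016, 1, 2, 15, 4, 5, 0, time.UTC)
	id := TestCreateSnapshot(t, repo, at)
	t.Logf("created deterministic snapshot %v for timestamp %v", id.Str(), at)
}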
func loadIDSet(t testing.TB, filename string) restic.BlobSet {
	f, err := os.Open(filename)
	if err != nil {
		t.Logf("unable to open golden file %v: %v", filename, err)
		return restic.NewBlobSet()
	}

	sc := bufio.NewScanner(f)

	blobs := restic.NewBlobSet()
	for sc.Scan() {
		var h restic.BlobHandle
		err := json.Unmarshal([]byte(sc.Text()), &h)
		if err != nil {
			t.Errorf("file %v contained invalid blob: %#v", filename, err)
			continue
		}

		blobs.Insert(h)
	}

	if err = f.Close(); err != nil {
		t.Errorf("closing file %v failed with error %v", filename, err)
	}

	return blobs
}
func cmdKeyRemove(t testing.TB, global GlobalOptions, IDs []string) {
	cmd := &CmdKey{global: &global}
	t.Logf("remove %d keys: %q\n", len(IDs), IDs)
	for _, id := range IDs {
		OK(t, cmd.Execute([]string{"rm", id}))
	}
}
func ConstructRandomStateDelta(
	t testing.TB,
	chaincodeIDPrefix string,
	numChaincodes int,
	maxKeySuffix int,
	numKeysToInsert int,
	kvSize int) *StateDelta {
	delta := NewStateDelta()
	s2 := rand.NewSource(time.Now().UnixNano())
	r2 := rand.New(s2)

	for i := 0; i < numKeysToInsert; i++ {
		chaincodeID := chaincodeIDPrefix + "_" + strconv.Itoa(r2.Intn(numChaincodes))
		key := "key_" + strconv.Itoa(r2.Intn(maxKeySuffix))
		valueSize := kvSize - len(key)
		if valueSize < 1 {
			panic(fmt.Errorf("valueSize cannot be less than one. ValueSize=%d", valueSize))
		}
		value := testutil.ConstructRandomBytes(t, valueSize)
		delta.Set(chaincodeID, key, value, nil)
	}

	for _, chaincodeDelta := range delta.ChaincodeStateDeltas {
		sortedKeys := chaincodeDelta.getSortedKeys()
		smallestKey := sortedKeys[0]
		largestKey := sortedKeys[len(sortedKeys)-1]
		t.Logf("chaincode=%s, numKeys=%d, smallestKey=%s, largestKey=%s",
			chaincodeDelta.ChaincodeID, len(sortedKeys), smallestKey, largestKey)
	}
	return delta
}
// AssertEqualf verifies that two objects are equal and calls FailNow() to
// immediately cancel the test case.
//
// It must be called from the main goroutine. Other goroutines must call
// ExpectEqual* flavors.
//
// This function enables specifying an arbitrary format string on failure.
//
// Equality is determined via reflect.DeepEqual().
func AssertEqualf(t testing.TB, expected, actual interface{}, format string, items ...interface{}) {
	// This is cheesy, as there's no way to figure out if the test was properly
	// started by the test framework.
	found := false
	root := ""
	for i := 1; ; i++ {
		if _, file, _, ok := runtime.Caller(i); ok {
			if filepath.Base(file) == "testing.go" {
				found = true
				break
			}
			root = file
		} else {
			break
		}
	}
	if !found {
		t.Logf(Decorate("ut.AssertEqual*() function MUST be called from within main test goroutine, use ut.ExpectEqual*() instead; found %s."), root)
		// TODO(maruel): Warning: this will be enforced soon.
		//t.Fail()
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf(Decorate(format), items...)
	}
}
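// Example usage (a minimal sketch; the test below is hypothetical and assumed
// to live in the same package as AssertEqualf): the helper aborts the test
// immediately on mismatch, so it must run on the main test goroutine.
func TestAssertEqualfUsage(t *testing.T) {
	got := strings.ToUpper("restic")
	AssertEqualf(t, "RESTIC", got, "unexpected value: %q", got)
}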
// saveTree saves a tree of fake files in the repo and returns the ID.
func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID {
	rnd := rand.NewSource(seed)
	numNodes := int(rnd.Int63() % 64)
	t.Logf("create %v nodes", numNodes)

	var tree Tree
	for i := 0; i < numNodes; i++ {
		seed := rnd.Int63() % maxSeed
		size := rnd.Int63() % maxFileSize

		node := &Node{
			Name: fmt.Sprintf("file-%v", seed),
			Type: "file",
			Mode: 0644,
			Size: uint64(size),
		}

		node.Content = saveFile(t, repo, fakeFile(t, seed, size))
		tree.Nodes = append(tree.Nodes, node)
	}

	id, err := repo.SaveJSON(pack.Tree, tree)
	if err != nil {
		t.Fatal(err)
	}

	return id
}
func innerTest(client *FlumeClient, t testing.TB) {
	//header: {businessName=feed, type=list}.
	//body: 100311 list {"view_self":0,"remoteid":"5445285","timestamp":1403512030,"flume_timestamp":"2014-06-23 16:27:10","business_type":"feed"}
	body := "{\"view_self\":0,\"remoteid\":\"5445285\",\"timestamp\":1403512030,\"flume_timestamp\":\"2014-06-23 16:27:10\",\"business_type\":\"feed\"}"

	var demo LogDemo
	err := json.Unmarshal([]byte(body), &demo)
	if nil != err {
		t.Fail()
		return
	}

	data, err := json.Marshal(demo)
	if nil != err {
		t.Fail()
		return
	}

	header := make(map[string]string, 2)
	header["businessName"] = "feed"
	header["type"] = "list"

	for i := 0; i < 1; i++ {
		err := client.Append(header, data)
		if nil != err {
			t.Log(err.Error())
			t.Fail()
		} else {
			t.Logf("%d, send succ ", i)
		}
	}
}
func innerTest(client *FlumeClient, t testing.TB) {
	//header: {businessName=feed, type=list}.
	//body: 100311 list {"view_self":0,"remoteid":"5445285","timestamp":1403512030,"flume_timestamp":"2014-06-23 16:27:10","business_type":"feed"}
	body := "{\"view_self\":0,\"remoteid\":\"5445285\",\"timestamp\":1403512030,\"flume_timestamp\":\"2014-06-23 16:27:10\",\"business_type\":\"feed\"}"

	var demo LogDemo
	err := json.Unmarshal([]byte(body), &demo)
	if nil != err {
		t.Fail()
		return
	}

	data, err := json.Marshal(demo)
	if nil != err {
		t.Fail()
		return
	}

	event := NewFlumeEvent("feed", "list", data)
	events := []*flume.ThriftFlumeEvent{event}

	for i := 0; i < 1; i++ {
		// Check the error from AppendBatch separately instead of letting the
		// following Append call overwrite it.
		err := client.AppendBatch(events)
		if nil != err {
			t.Log(err.Error())
			t.Fail()
		}

		err = client.Append(event)
		if nil != err {
			t.Log(err.Error())
			t.Fail()
		} else {
			t.Logf("%d, send succ ", i)
		}
	}
}
// withTestEnvironment creates a test environment and calls f with it. After f has
// returned, the temporary directory is removed.
func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) {
	if !RunIntegrationTest {
		t.Skip("integration tests disabled")
	}

	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
	OK(t, err)

	env := testEnvironment{
		base:     tempdir,
		cache:    filepath.Join(tempdir, "cache"),
		repo:     filepath.Join(tempdir, "repo"),
		testdata: filepath.Join(tempdir, "testdata"),
	}

	OK(t, os.MkdirAll(env.testdata, 0700))
	OK(t, os.MkdirAll(env.cache, 0700))
	OK(t, os.MkdirAll(env.repo, 0700))

	f(&env, configureRestic(t, env.cache, env.repo))

	if !TestCleanup {
		t.Logf("leaving temporary directory %v used for test", tempdir)
		return
	}

	RemoveAll(t, tempdir)
}
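// Example usage (a minimal sketch; the test name is hypothetical): the
// callback receives a fresh temporary environment plus matching
// GlobalOptions, and the directory is removed afterwards unless TestCleanup
// is disabled.
func TestEnvironmentLayout(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
		t.Logf("temporary repo at %v, testdata at %v", env.repo, env.testdata)
	})
}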
func testRunInit(t testing.TB, opts GlobalOptions) {
	repository.TestUseLowSecurityKDFParameters(t)
	restic.TestSetLockTimeout(t, 0)

	OK(t, runInit(opts, nil))
	t.Logf("repository initialized at %v", opts.Repo)
}
func AssertPanic(t testing.TB, msg string) {
	x := recover()
	if x == nil {
		t.Fatal(msg)
	} else {
		t.Logf("A panic was caught successfully. Actual msg = %s", x)
	}
}
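// Example usage (a minimal sketch; the test and mustBePositive are
// hypothetical): because AssertPanic calls recover(), it only has an effect
// when invoked via defer in the function whose panic it should catch.
func TestPanicsOnNegativeInput(t *testing.T) {
	defer AssertPanic(t, "expected a panic for negative input")
	mustBePositive(-1) // assumed to panic on negative values
}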
func cleanupTempdir(t testing.TB, tempdir string) {
	if !TestCleanup {
		t.Logf("leaving temporary directory %v used for test", tempdir)
		return
	}

	RemoveAll(t, tempdir)
}
func fatal(tb testing.TB, userMsgAndArgs []interface{}, msgFmt string, msgArgs ...interface{}) {
	// Spread the user-supplied message and arguments into the variadic
	// logMessage call so they are formatted individually.
	logMessage(tb, userMsgAndArgs...)
	_, file, line, ok := runtime.Caller(2)
	if ok {
		tb.Logf("%s:%d", file, line)
	}
	tb.Fatalf(msgFmt, msgArgs...)
}
func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID backend.ID) {
	cmd := &CmdBackup{global: &global}
	cmd.Parent = parentID.String()

	t.Logf("backing up %v", target)

	OK(t, cmd.Execute(target))
}
func logMessage(tb testing.TB, msgAndArgs ...interface{}) {
	if len(msgAndArgs) == 1 {
		tb.Logf(msgAndArgs[0].(string))
	}
	if len(msgAndArgs) > 1 {
		tb.Logf(msgAndArgs[0].(string), msgAndArgs[1:]...)
	}
}
func testQueryTypes(t testing.TB, types []indexType, fn func(*queryTest)) {
	defer test.TLog(t)()
	for _, it := range types {
		if *queryType == "" || *queryType == it.String() {
			t.Logf("Testing: --querytype=%s ...", it)
			testQueryType(t, fn, it)
		}
	}
}
func AssertEquals(t testing.TB, actual interface{}, expected interface{}) {
	t.Logf("%s: AssertEquals [%#v] and [%#v]", getCallerInfo(), actual, expected)
	if expected == nil && isNil(actual) {
		return
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("Values are not equal.\n Actual=[%#v], \n Expected=[%#v]\n %s", actual, expected, getCallerInfo())
	}
}
// MountedT mounts the filesystem at a temporary directory,
// directing its debug log to the testing logger.
//
// See Mounted for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedT(t testing.TB, filesys fs.FS, options ...fuse.MountOption) (*Mount, error) {
	conf := &fs.Config{}
	if debug {
		conf.Debug = func(msg interface{}) {
			t.Logf("FUSE: %s", msg)
		}
	}
	return Mounted(filesys, conf, options...)
}
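// Example usage (a minimal sketch; the test name is hypothetical and
// `filesys` stands in for some fs.FS implementation provided by the test):
// MountedT returns a *Mount whose temporary mountpoint is used for the
// duration of the test and must be closed afterwards.
func TestMountedTUsage(t *testing.T) {
	var filesys fs.FS // assumed: the filesystem implementation under test
	mnt, err := MountedT(t, filesys)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	t.Logf("filesystem mounted at %v", mnt.Dir)
}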
func TeardownRepo(t testing.TB, repo *repository.Repository) {
	if !TestCleanup {
		l := repo.Backend().(*local.Local)
		t.Logf("leaving local backend at %s\n", l.Location())
		return
	}

	OK(t, repo.Delete())
}
func showStack(t testing.TB) {
	pc := make([]uintptr, 10)
	n := runtime.Callers(4, pc)
	for i := 0; i < n; i++ {
		f := runtime.FuncForPC(pc[i])
		file, line := f.FileLine(pc[i])
		t.Logf(" %s (%s line %d)", f.Name(), file, line)
	}
}
// CleanDB closes the existing db and removes the db dir.
// It can be called before starting a test so that data from other tests does not interfere.
func (testDB *TestDBWrapper) CleanDB(t testing.TB) {
	// cleaning up test db here so that each test does not have to call it explicitly
	// at the end of the test
	testDB.cleanup()
	testDB.removeDBPath()
	t.Logf("Creating testDB")
	testDB.performCleanup = true
}
// makeTmpDir creates a temporary directory underneath baseTempDir.
func makeTmpDir(t testing.TB, suffix string) string {
	dir, err := ioutil.TempDir(baseTempDir, suffix)
	if err != nil {
		t.Fatal(err)
	}
	if *logTmpDirs {
		t.Logf("Using temp dir %s", dir)
	}
	return dir
}
func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID, excludes []string) {
	cmd := &CmdBackup{global: &global, Excludes: excludes}
	if parentID != nil {
		cmd.Parent = parentID.String()
	}

	t.Logf("backing up %v", target)

	OK(t, cmd.Execute(target))
}
func archiveDirectory(b testing.TB) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	arch := archiver.New(repo)

	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil, nil)
	OK(b, err)

	b.Logf("snapshot archived as %v", id)
}
// makeSwitchPair is a convenience function for creating two switches connected to each other.
func makeSwitchPair(t testing.TB, initSwitch func(*Switch) *Switch) (*Switch, *Switch) {
	s1PrivKey := crypto.GenPrivKeyEd25519()
	s2PrivKey := crypto.GenPrivKeyEd25519()

	// Create two switches that will be interconnected.
	s1 := initSwitch(NewSwitch())
	s1.SetNodeInfo(&NodeInfo{
		PubKey:  s1PrivKey.PubKey().(crypto.PubKeyEd25519),
		Moniker: "switch1",
		Network: "testing",
		Version: "123.123.123",
	})
	s1.SetNodePrivKey(s1PrivKey)
	s2 := initSwitch(NewSwitch())
	s2.SetNodeInfo(&NodeInfo{
		PubKey:  s2PrivKey.PubKey().(crypto.PubKeyEd25519),
		Moniker: "switch2",
		Network: "testing",
		Version: "123.123.123",
	})
	s2.SetNodePrivKey(s2PrivKey)

	// Start switches and reactors
	s1.Start()
	s2.Start()

	// Create a listener for s1
	l := NewDefaultListener("tcp", ":8001", true)

	// Dial the listener & add the connection to s2.
	lAddr := l.ExternalAddress()
	connOut, err := lAddr.Dial()
	if err != nil {
		t.Fatalf("Could not connect to listener address %v", lAddr)
	} else {
		t.Logf("Created a connection to listener address %v", lAddr)
	}
	connIn, ok := <-l.Connections()
	if !ok {
		t.Fatalf("Could not get inbound connection from listener")
	}

	go s1.AddPeerWithConnection(connIn, false) // AddPeer is blocking, requires handshake.
	s2.AddPeerWithConnection(connOut, true)

	// Wait for things to happen, peers to get added...
	time.Sleep(100 * time.Millisecond)

	// Close the server, no longer needed.
	l.Stop()

	return s1, s2
}
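// Example usage (a minimal sketch; the test name is hypothetical and the
// Stop calls assume the switches expose the usual service lifecycle): the
// initSwitch callback lets a test attach its reactors before the two switches
// are started and connected.
func TestSwitchPairUsage(t *testing.T) {
	s1, s2 := makeSwitchPair(t, func(sw *Switch) *Switch {
		// Attach any reactors needed by the test here before returning sw.
		return sw
	})
	defer s1.Stop()
	defer s2.Stop()

	t.Logf("connected switches %v and %v", s1, s2)
}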
func parseAccount(tb testing.TB) *Account {
	acc := &Account{}
	if data, err := ioutil.ReadFile("account.txt"); err == nil {
		if err := acc.Parse(strings.TrimSpace(string(data))); err == nil {
			tb.Logf("Using account %+v", *acc)
			return acc
		}
	}
	tb.Skip("Please place your Pinterest account in account.txt (username: password)")
	return nil
}
func archiveDirectory(b testing.TB) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	arch := restic.NewArchiver(repo)

	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
	OK(b, err)

	b.Logf("snapshot archived as %v", id)
}
// MountedT mounts the filesystem at a temporary directory,
// directing its debug log to the testing logger.
//
// See Mounted for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedT(t testing.TB, filesys fs.FS, options ...fuse.MountOption) (*Mount, error) {
	srv := &fs.Server{
		FS: filesys,
	}
	if debug {
		srv.Debug = func(msg interface{}) {
			t.Logf("FUSE: %s", msg)
		}
	}
	return Mounted(srv, options...)
}
// MountedFuncT mounts a filesystem at a temporary directory,
// directing its debug log to the testing logger.
//
// See MountedFunc for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedFuncT(t testing.TB, fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
	if conf == nil {
		conf = &fs.Config{}
	}
	if debug && conf.Debug == nil {
		conf.Debug = func(msg interface{}) {
			t.Logf("FUSE: %s", msg)
		}
	}
	return MountedFunc(fn, conf, options...)
}
func readFile(t testing.TB, filename string) []byte {
	t.Logf("opening file %s", filename)
	p := filepath.Join("testdata", filepath.FromSlash(filename))
	data, err := ioutil.ReadFile(p)
	if err != nil {
		if os.IsNotExist(err) {
			t.Skipf("missing file %s", p)
		}
		t.Fatal(err)
	}
	return data
}