Example 1
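A test harness for the kvfile-backed index: each initIndex call creates a temporary directory, opens kvfile storage in it, and registers a cleanup that closes the storage and removes the directory.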
func (tester) test(t *testing.T, tfn func(*testing.T, func() *index.Index)) {
	var cleanup []func()
	defer func() {
		for _, fn := range cleanup {
			fn()
		}
	}()

	initIndex := func() *index.Index {
		td, err := ioutil.TempDir("", "kvfile-test")
		if err != nil {
			t.Fatal(err)
		}
		is, closer, err := kvfile.NewStorage(filepath.Join(td, "kvfile"))
		if err != nil {
			os.RemoveAll(td)
			t.Fatal(err)
		}
		cleanup = append(cleanup, func() {
			closer.Close()
			os.RemoveAll(td)
		})
		return index.New(is)
	}

	tfn(t, initIndex)
}
Example 2
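Builds an index from a jsonconfig block: it resolves the configured blobSource from the loader, sanitizes the optional namespace, and wires the index's BlobSource and KeyFetcher.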
func indexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	is := &indexStorage{}
	var (
		blobPrefix = config.RequiredString("blobSource")
		ns         = config.OptionalString("namespace", "")
	)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}
	is.ns, err = sanitizeNamespace(ns)
	if err != nil {
		return nil, err
	}

	ix, err := index.New(is)
	if err != nil {
		return nil, err
	}
	ix.BlobSource = sto
	ix.KeyFetcher = ix.BlobSource // TODO(bradfitz): global search? something else?
	return ix, nil
}
Example 3
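A benchmark that populates an in-memory sorted key/value store once (ten files with planned permanodes), then times index.NewCorpusFromStorage over it.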
func BenchmarkCorpusFromStorage(b *testing.B) {
	defer test.TLog(b)()
	buildKvOnce.Do(func() {
		kvForBenchmark = sorted.NewMemoryKeyValue()
		idx := index.New(kvForBenchmark)
		id := indextest.NewIndexDeps(idx)
		id.Fataler = b
		for i := 0; i < 10; i++ {
			fileRef, _ := id.UploadFile("file.txt", fmt.Sprintf("some file %d", i), time.Unix(1382073153, 0))
			pn := id.NewPlannedPermanode(fmt.Sprint(i))
			id.SetAttribute(pn, "camliContent", fileRef.String())
		}
	})
	defer index.SetVerboseCorpusLogging(true)
	index.SetVerboseCorpusLogging(false)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_, err := index.NewCorpusFromStorage(kvForBenchmark)
		if err != nil {
			b.Fatal(err)
		}
	}
}
Example 4
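Builds a PostgreSQL-backed index from config; note that it closes the index if resolving the blob source fails afterwards.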
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	blobPrefix := config.RequiredString("blobSource")
	postgresConf, err := postgres.ConfigFromJSON(config)
	if err != nil {
		return nil, err
	}
	kv, err := postgres.NewKeyValue(postgresConf)
	if err != nil {
		return nil, err
	}

	ix, err := index.New(kv)
	if err != nil {
		return nil, err
	}

	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		ix.Close()
		return nil, err
	}
	ix.BlobSource = sto
	// Good enough, for now:
	ix.KeyFetcher = ix.BlobSource

	return ix, nil
}
Example 5
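A benchmark helper that wipes the sorted store file, rebuilds the index over a localdisk blob source, and times a full Reindex.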
func reindex(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("reindexing")
	if err := os.RemoveAll(dbfile); err != nil {
		b.Fatal(err)
	}
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)

	b.ResetTimer()
	if err := idx.Reindex(); err != nil {
		b.Fatal(err)
	}
	return idx
}
Example 6
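Creates a fresh PostgreSQL test database, installs the schema and the replaceintometa helper, and returns an index over the new storage.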
func makeIndex() *index.Index {
	dbname := "camlitest_" + os.Getenv("USER")
	closeAllSessions(dbname)
	do(rootdb, "DROP DATABASE IF EXISTS "+dbname)
	do(rootdb, "CREATE DATABASE "+dbname)
	var err error

	testdb, err = sql.Open("postgres", "user=postgres password=postgres host=localhost sslmode=require dbname="+dbname)
	if err != nil {
		panic("opening test database: " + err.Error())
	}
	for _, tableSql := range postgres.SQLCreateTables() {
		do(testdb, tableSql)
	}
	for _, statement := range postgres.SQLDefineReplace() {
		do(testdb, statement)
	}

	doQuery(testdb, fmt.Sprintf(`SELECT replaceintometa('version', '%d')`, postgres.SchemaVersion()))
	s, err := postgres.NewStorage("localhost", "postgres", "postgres", dbname, "require")
	if err != nil {
		panic(err)
	}
	return index.New(s)
}
Example 7
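The kvfile flavor of newFromConfig: opens the file-backed index storage, then wires the index to the configured blob source.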
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	blobPrefix := config.RequiredString("blobSource")
	file := config.RequiredString("file")
	if err := config.Validate(); err != nil {
		return nil, err
	}

	is, closer, err := NewStorage(file)
	if err != nil {
		return nil, err
	}

	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		closer.Close()
		return nil, err
	}

	ix := index.New(is)
	ix.BlobSource = sto

	// Good enough, for now:
	ix.KeyFetcher = ix.BlobSource

	return ix, nil
}
Example 8
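A SQLite test harness: it creates a temporary database file, installs the schema at the current version, and returns index constructors bound to that file.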
func (sqliteTester) test(t *testing.T, tfn func(*testing.T, func() *index.Index)) {
	once.Do(checkDB)
	f, err := ioutil.TempFile("", "sqlite-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())
	makeIndex := func() *index.Index {
		db, err := sql.Open("sqlite3", f.Name())
		if err != nil {
			t.Fatalf("opening test database: %v", err)
			return nil
		}
		for _, tableSql := range sqlite.SQLCreateTables() {
			do(db, tableSql)
		}
		do(db, fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, sqlite.SchemaVersion()))
		s, err := sqlite.NewStorage(f.Name())
		if err != nil {
			panic(err)
		}
		return index.New(s)
	}
	tfn(t, makeIndex)
}
Example 9
// MustNew wraps index.New and fails with a Fatal error on t if New
// returns an error.
func MustNew(t *testing.T, s sorted.KeyValue) *index.Index {
	ix, err := index.New(s)
	if err != nil {
		t.Fatalf("Error creating index: %v", err)
	}
	return ix
}
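A minimal usage sketch for MustNew (hypothetical test name; assumes the same sorted package used in the other examples):

func TestWithMustNew(t *testing.T) {
	// MustNew fails the test for us if index.New returns an error.
	ix := MustNew(t, sorted.NewMemoryKeyValue())
	_ = ix // ... exercise the index ...
}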
Example 10
// killReindex starts a reindexing in a new process, and kills that process
// after killTime. It then (naively, for now) verifies that the kv store file
// was not corrupted, by reinitializing a (possibly incomplete) index (with a
// corpus) from it. It returns true if the indexing completed before the
// process was killed, false otherwise.
func killReindex(b *testing.B, dbfile string, killTime time.Duration,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) bool {
	cmd := exec.Command(os.Args[0], "-test.run=TestChildIndexer")
	cmd.Env = append(cmd.Env, "TEST_BE_CHILD=1", "TEST_BE_CHILD_DBFILE="+dbfile)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		b.Fatal(err)
	}

	waitc := make(chan error)
	go func() {
		waitc <- cmd.Wait()
	}()
	fullIndex := false
	select {
	case err := <-waitc:
		if err == nil {
			// indexer finished before we killed it
			fullIndex = true
			b.Logf("Finished indexing before being killed at %v", killTime)
			break
		}
		// TODO(mpl): do better
		if err.Error() != "signal: killed" {
			b.Fatalf("unexpected (not killed) error from indexer process: %v %v %v", err, stdout.String(), stderr.String())
		}
	case <-time.After(killTime):
		if err := cmd.Process.Kill(); err != nil {
			b.Fatal(err)
		}
		err := <-waitc
		// TODO(mpl): do better
		if err != nil && err.Error() != "signal: killed" {
			b.Fatalf("unexpected (not killed) error from indexer process: %v %v %v", err, stdout.String(), stderr.String())
		}
	}

	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	if _, err := idx.KeepInMemory(); err != nil {
		b.Fatal(err)
	}
	if err := idx.Close(); err != nil {
		b.Fatal(err)
	}
	return fullIndex
}
Example 11
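A benchmark helper that opens an index over the given store and counts the blobs streamed by EnumerateBlobMeta.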
func enumerateMeta(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) int {
	b.Logf("enumerating meta blobs")
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	defer idx.Close()

	ch := make(chan camtypes.BlobMeta, 100)
	go func() {
		// Report rather than b.Fatal: FailNow must not be called
		// from a goroutine other than the benchmark's.
		if err := idx.EnumerateBlobMeta(nil, ch); err != nil {
			b.Error(err)
		}
	}()
	n := 0
	for range ch {
		n++
	}
	b.Logf("Enumerated %d meta blobs", n)
	return n
}
Example 12
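Wraps a MongoDB collection in the mongoKeys sorted key/value implementation and builds an index on top of it.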
func newMongoIndex(mgw *MongoWrapper) (*index.Index, error) {
	db, err := mgw.getCollection()
	if err != nil {
		return nil, err
	}
	mongoStorage := &mongoKeys{db: db}
	return index.New(mongoStorage), nil
}
Example 13
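Tests that indexing a claim whose signer's public key is unavailable records a missing-blob row, and that the row disappears once the key is supplied and the index reindexes.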
func TestIndexingClaimMissingPubkey(t *testing.T) {
	s := sorted.NewMemoryKeyValue()
	idx, err := index.New(s)
	if err != nil {
		t.Fatal(err)
	}

	id := indextest.NewIndexDeps(idx)
	id.Fataler = t

	goodKeyFetcher := id.Index.KeyFetcher
	emptyFetcher := new(test.Fetcher)

	pn := id.NewPermanode()

	// Prevent the index from being able to find the public key:
	idx.KeyFetcher = emptyFetcher

	// This previously failed to upload, since the signer's public key was
	// unavailable.
	claimRef := id.SetAttribute(pn, "tag", "foo")

	t.Logf(" Claim is %v", claimRef)
	t.Logf("Signer is %v", id.SignerBlobRef)

	// Verify that populateClaim noted the missing public key blob:
	{
		key := fmt.Sprintf("missing|%s|%s", claimRef, id.SignerBlobRef)
		if got, err := s.Get(key); got == "" || err != nil {
			t.Errorf("key %q missing (err: %v); want 1", key, err)
		}
	}

	// Now make it available again:
	idx.KeyFetcher = idx.Exp_BlobSource()

	if err := copyBlob(id.SignerBlobRef, idx.Exp_BlobSource().(*test.Fetcher), goodKeyFetcher); err != nil {
		t.Errorf("Error copying public key to BlobSource: %v", err)
	}
	if err := copyBlob(id.SignerBlobRef, idx, goodKeyFetcher); err != nil {
		t.Errorf("Error uploading public key to indexer: %v", err)
	}

	idx.Exp_AwaitReindexing(t)

	// Verify that the missing public key row is gone now that the
	// claim has been reindexed:
	{
		key := fmt.Sprintf("missing|%s|%s", claimRef, id.SignerBlobRef)
		if got, err := s.Get(key); got != "" || err == nil {
			t.Errorf("row %q still exists", key)
		}
	}
}
Example 14
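Tests out-of-order indexing: a file schema blob arrives before its data chunks, "missing" rows track the gaps, and all of them clear once every chunk has been received.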
func TestOutOfOrderIndexing(t *testing.T) {
	tf := new(test.Fetcher)
	s := sorted.NewMemoryKeyValue()

	ix, err := index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	ix.BlobSource = tf

	t.Logf("file ref = %v", fileBlobRef)
	t.Logf("missing data chunks = %v, %v, %v", chunk1ref, chunk2ref, chunk3ref)

	add := func(b *test.Blob) {
		tf.AddBlob(b)
		if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil {
			t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err)
		}
	}

	add(fileBlob)

	{
		key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk1ref)
		if got, err := s.Get(key); got == "" || err != nil {
			t.Errorf("key %q missing (err: %v); want 1", key, err)
		}
	}

	add(chunk1)
	add(chunk2)

	ix.Exp_AwaitReindexing(t)

	{
		key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk3ref)
		if got, err := s.Get(key); got == "" || err != nil {
			t.Errorf("key %q missing (err: %v); want 1", key, err)
		}
	}

	add(chunk3)

	ix.Exp_AwaitReindexing(t)

	foreachSorted(t, s, func(k, v string) {
		if strings.HasPrefix(k, "missing|") {
			t.Errorf("Shouldn't have missing key: %q", k)
		}
	})
}
Example 15
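The MySQL flavor of newFromConfig: it pings the storage and verifies the schema version, with extra hints for devcam users, before building the index.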
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		blobPrefix = config.RequiredString("blobSource")
		host       = config.OptionalString("host", "localhost")
		user       = config.RequiredString("user")
		password   = config.OptionalString("password", "")
		database   = config.RequiredString("database")
	)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}
	isto, err := NewStorage(host, user, password, database)
	if err != nil {
		return nil, err
	}
	is := isto.(*myIndexStorage)
	if err := is.ping(); err != nil {
		return nil, err
	}

	version, err := is.SchemaVersion()
	if err != nil {
		return nil, fmt.Errorf("error getting schema version (need to init database?): %v", err)
	}
	if version != requiredSchemaVersion {
		if version == 20 && requiredSchemaVersion == 21 {
			fmt.Fprint(os.Stderr, fixSchema20to21)
		}
		if os.Getenv("CAMLI_DEV_CAMLI_ROOT") != "" {
			// Good signal that we're using the devcam server, so help out
			// the user with a more useful tip:
			return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion)
		}
		return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)",
			version, requiredSchemaVersion)
	}

	ix := index.New(is)
	ix.BlobSource = sto
	// Good enough, for now:
	ix.KeyFetcher = ix.BlobSource

	return ix, nil
}
Example 16
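The SQLite flavor of newFromConfig: it refuses to run against a missing or empty database file and verifies the schema version before building the index.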
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		blobPrefix = config.RequiredString("blobSource")
		file       = config.RequiredString("file")
	)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}
	fi, err := os.Stat(file)
	if os.IsNotExist(err) || (err == nil && fi.Size() == 0) {
		return nil, fmt.Errorf(`You need to initialize your SQLite index database with: camtool dbinit --dbname=%s --dbtype=sqlite`, file)
	}
	isto, err := NewStorage(file)
	if err != nil {
		return nil, err
	}
	is := isto.(*storage)

	version, err := is.SchemaVersion()
	if err != nil {
		return nil, fmt.Errorf("error getting schema version (need to init database with 'camtool dbinit %s'?): %v", file, err)
	}

	if err := is.ping(); err != nil {
		return nil, err
	}

	if version != requiredSchemaVersion {
		if os.Getenv("CAMLI_ADVERTISED_PASSWORD") != "" {
			// Good signal that we're using the dev-server script, so help out
			// the user with a more useful tip:
			return nil, fmt.Errorf("database schema version is %d; expect %d (run \"./dev-server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion)
		}
		return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)",
			version, requiredSchemaVersion)
	}

	ix := index.New(is)
	ix.BlobSource = sto
	// Good enough, for now:
	ix.KeyFetcher = ix.BlobSource

	return ix, nil
}
Example 17
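A generic test harness: each initIndex call builds an index over a fresh sorted key/value store from newSorted, deferring the store's cleanup.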
func (tester) test(t *testing.T, tfn func(*testing.T, func() *index.Index)) {
	defer test.TLog(t)()
	var cleanups []func()
	defer func() {
		for _, fn := range cleanups {
			fn()
		}
	}()

	initIndex := func() *index.Index {
		kv, cleanup := newSorted(t)
		cleanups = append(cleanups, cleanup)
		return index.New(kv)
	}

	tfn(t, initIndex)
}
Example 18
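The PostgreSQL test harness; a mutex guards the cleanups slice, since makeIndex may be called concurrently.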
func (postgresTester) test(t *testing.T, tfn func(*testing.T, func() *index.Index)) {
	var mu sync.Mutex // guards cleanups
	var cleanups []func()
	defer func() {
		mu.Lock() // never unlocked
		for _, fn := range cleanups {
			fn()
		}
	}()
	makeIndex := func() *index.Index {
		s, cleanup := newSorted(t)
		mu.Lock()
		cleanups = append(cleanups, cleanup)
		mu.Unlock()
		return index.New(s)
	}
	tfn(t, makeIndex)
}
Example 19
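Not a real test: the benchmarks re-run the test binary with TEST_BE_CHILD=1 so this reindexing work happens in a child process they can kill (see killReindex in Example 10).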
func TestChildIndexer(t *testing.T) {
	if os.Getenv("TEST_BE_CHILD") != "1" {
		t.Skip("not a real test; used as a child process by the benchmarks")
	}
	dbfile := os.Getenv("TEST_BE_CHILD_DBFILE")
	if dbfile == "" {
		log.Fatal("empty TEST_BE_CHILD_DBFILE")
	}
	if err := os.RemoveAll(dbfile); err != nil {
		log.Fatal(err)
	}
	var kv sorted.KeyValue
	var err error
	switch {
	case strings.HasSuffix(dbfile, "leveldb.db"):
		kv, err = leveldb.NewStorage(dbfile)
	case strings.HasSuffix(dbfile, "kvfile.db"):
		kv, err = kvfile.NewStorage(dbfile)
	case strings.HasSuffix(dbfile, "sqlite.db"):
		kv, err = sqlite.NewStorage(dbfile)
	default:
		log.Fatalf("unknown sorted provider for %v", dbfile)
	}
	if err != nil {
		log.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		log.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		log.Fatal(err)
	}
	idx.InitBlobSource(bs)
	defer func() {
		if err := idx.Close(); err != nil {
			log.Fatal(err)
		}
	}()
	if err := idx.Reindex(); err != nil {
		log.Fatal(err)
	}
}
Example 20
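A standalone main: it streams blobs from a directory-based store and feeds them, in parallel, into a LevelDB-backed index.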
func main() {
	flag.Parse()

	if *blobDir == "" || *indexDir == "" {
		flag.Usage()
		return
	}
	s, err := dir.New(*blobDir)
	if err != nil {
		log.Fatal(err)
	}
	src, ok := s.(blobserver.BlobStreamer)
	if !ok {
		log.Fatalf("%v is not a BlobStreamer", s)
	}

	db, err := leveldb.NewStorage(*indexDir)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	dst, err := index.New(db)
	if err != nil {
		log.Fatal(err)
	}

	fe := FetcherEnumerator{
		FetcherEnumerator: s,
		c1:                make(map[string]*blob.Blob),
		start:             time.Now(),
	}
	dst.InitBlobSource(&fe)

	ch := make(chan blobserver.BlobAndToken)
	go fe.PrintStats()
	for i := 0; i < *parallel; i++ {
		go fe.Index(ch, dst)
	}
	ctx := context.New()
	if err := src.StreamBlobs(ctx, ch, *streamStart); err != nil {
		log.Fatal(err)
	}
}
Example 21
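The MongoDB test harness, with the same mutex-guarded cleanup pattern as the PostgreSQL one.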
func (mongoTester) test(t *testing.T, tfn func(*testing.T, func() *index.Index)) {
	defer test.TLog(t)()
	var mu sync.Mutex // guards cleanups
	var cleanups []func()
	defer func() {
		mu.Lock() // never unlocked
		for _, fn := range cleanups {
			fn()
		}
	}()
	initIndex := func() *index.Index {
		kv, cleanup := newSorted(t)
		mu.Lock()
		cleanups = append(cleanups, cleanup)
		mu.Unlock()
		return index.New(kv)
	}
	tfn(t, initIndex)
}
Example 22
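Creates a fresh MySQL test database, installs the schema, and returns an index over the new storage.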
func makeIndex() *index.Index {
	dbname := "camlitest_" + osutil.Username()
	do(rootdb, "DROP DATABASE IF EXISTS "+dbname)
	do(rootdb, "CREATE DATABASE "+dbname)

	db, err := sql.Open("mymysql", dbname+"/root/root")
	if err != nil {
		panic("opening test database: " + err.Error())
	}
	for _, tableSql := range mysql.SQLCreateTables() {
		do(db, tableSql)
	}

	do(db, fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, mysql.SchemaVersion()))
	s, err := mysql.NewStorage("localhost", "root", "root", dbname)
	if err != nil {
		panic(err)
	}
	return index.New(s)
}
Example 23
// populate fills the blobserver and, through the sync handler, the index at the same time.
func populate(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("populating %v", dbfile)
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bsRoot := filepath.Join(filepath.Dir(dbfile), "bs")
	if err := os.MkdirAll(bsRoot, 0700); err != nil {
		b.Fatal(err)
	}
	dataDir, err := os.Open("testdata")
	if err != nil {
		b.Fatal(err)
	}
	fis, err := dataDir.Readdir(-1)
	if err != nil {
		b.Fatal(err)
	}
	if len(fis) == 0 {
		b.Fatalf("no files in %s dir", "testdata")
	}

	ks := doKeyStuff(b)

	bs, err := localdisk.New(bsRoot)
	if err != nil {
		b.Fatal(err)
	}
	if _, err := blobserver.Receive(bs, ks.pubKeyRef, strings.NewReader(ks.pubKey)); err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	sh := server.NewSyncHandler("/bs/", "/index/", bs, idx, sorted.NewMemoryKeyValue())

	b.ResetTimer()
	for _, v := range fis {
		f, err := os.Open(filepath.Join(dataDir.Name(), v.Name()))
		if err != nil {
			b.Fatal(err)
		}
		td := &trackDigestReader{r: f}
		fm := schema.NewFileMap(v.Name())
		fm.SetModTime(v.ModTime())
		fileRef, err := schema.WriteFileMap(bs, fm, td)
		if err != nil {
			b.Fatal(err)
		}
		f.Close()

		unsigned := schema.NewPlannedPermanode(td.Sum())
		unsigned.SetSigner(ks.pubKeyRef)
		sr := &jsonsign.SignRequest{
			UnsignedJSON: unsigned.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: time.Unix(0, 0),
		}
		signed, err := sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		pn := blob.SHA1FromString(signed)
		// N.B: use blobserver.Receive so that the blob hub gets notified, and the blob gets enqueued into the index
		if _, err := blobserver.Receive(bs, pn, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}

		contentAttr := schema.NewSetAttributeClaim(pn, "camliContent", fileRef.String())
		claimTime, ok := fm.ModTime()
		if !ok {
			b.Fatalf("no modtime in file map for %v", v.Name())
		}
		contentAttr.SetClaimDate(claimTime)
		contentAttr.SetSigner(ks.pubKeyRef)
		sr = &jsonsign.SignRequest{
			UnsignedJSON: contentAttr.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: claimTime,
		}
		signed, err = sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		cl := blob.SHA1FromString(signed)
		if _, err := blobserver.Receive(bs, cl, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}
	}
	sh.IdleWait()

	return idx
}
Example 24
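Tests that index.New rebuilds its needs/neededBy maps from persisted "missing" rows, and that the maps shrink as each chunk is marked indexed, until the file blob is ready.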
func TestInitNeededMaps(t *testing.T) {
	s := sorted.NewMemoryKeyValue()

	// Start out knowing that the data chunks are all gone:
	s.Set("schemaversion", fmt.Sprint(index.Exp_schemaVersion()))
	s.Set(index.Exp_missingKey(fileBlobRef, chunk1ref), "1")
	s.Set(index.Exp_missingKey(fileBlobRef, chunk2ref), "1")
	s.Set(index.Exp_missingKey(fileBlobRef, chunk3ref), "1")
	ix, err := index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	{
		needs, neededBy, _ := ix.NeededMapsForTest()
		needsWant := map[blob.Ref][]blob.Ref{
			fileBlobRef: []blob.Ref{chunk1ref, chunk2ref, chunk3ref},
		}
		neededByWant := map[blob.Ref][]blob.Ref{
			chunk1ref: []blob.Ref{fileBlobRef},
			chunk2ref: []blob.Ref{fileBlobRef},
			chunk3ref: []blob.Ref{fileBlobRef},
		}
		if !reflect.DeepEqual(needs, needsWant) {
			t.Errorf("needs = %v; want %v", needs, needsWant)
		}
		if !reflect.DeepEqual(neededBy, neededByWant) {
			t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
		}
	}

	ix.Exp_noteBlobIndexed(chunk2ref)

	{
		needs, neededBy, ready := ix.NeededMapsForTest()
		needsWant := map[blob.Ref][]blob.Ref{
			fileBlobRef: []blob.Ref{chunk1ref, chunk3ref},
		}
		neededByWant := map[blob.Ref][]blob.Ref{
			chunk1ref: []blob.Ref{fileBlobRef},
			chunk3ref: []blob.Ref{fileBlobRef},
		}
		if !reflect.DeepEqual(needs, needsWant) {
			t.Errorf("needs = %v; want %v", needs, needsWant)
		}
		if !reflect.DeepEqual(neededBy, neededByWant) {
			t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
		}
		if len(ready) != 0 {
			t.Errorf("ready = %v; want nothing", ready)
		}
	}

	ix.Exp_noteBlobIndexed(chunk1ref)

	{
		needs, neededBy, ready := ix.NeededMapsForTest()
		needsWant := map[blob.Ref][]blob.Ref{
			fileBlobRef: []blob.Ref{chunk3ref},
		}
		neededByWant := map[blob.Ref][]blob.Ref{
			chunk3ref: []blob.Ref{fileBlobRef},
		}
		if !reflect.DeepEqual(needs, needsWant) {
			t.Errorf("needs = %v; want %v", needs, needsWant)
		}
		if !reflect.DeepEqual(neededBy, neededByWant) {
			t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
		}
		if len(ready) != 0 {
			t.Errorf("ready = %v; want nothing", ready)
		}
	}

	ix.Exp_noteBlobIndexed(chunk3ref)

	{
		needs, neededBy, ready := ix.NeededMapsForTest()
		needsWant := map[blob.Ref][]blob.Ref{}
		neededByWant := map[blob.Ref][]blob.Ref{}
		if !reflect.DeepEqual(needs, needsWant) {
			t.Errorf("needs = %v; want %v", needs, needsWant)
		}
		if !reflect.DeepEqual(neededBy, neededByWant) {
			t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
		}
		if !ready[fileBlobRef] {
			t.Error("fileBlobRef not ready")
		}
	}
	dumpSorted(t, s)
}
Example 25
// TestFixMissingWholeref tests that we add the missing wholeRef entries in
// FileInfo rows when going from a version 4 to a version 5 index.
func TestFixMissingWholeref(t *testing.T) {
	tf := new(test.Fetcher)
	s := sorted.NewMemoryKeyValue()

	ix, err := index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	ix.InitBlobSource(tf)

	// populate with a file
	add := func(b *test.Blob) {
		tf.AddBlob(b)
		if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil {
			t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err)
		}
	}
	add(chunk1)
	add(chunk2)
	add(chunk3)
	add(fileBlob)

	// revert the row to the old form, by stripping the wholeRef suffix
	key := "fileinfo|" + fileBlobRef.String()
	val5, err := s.Get(key)
	if err != nil {
		t.Fatalf("could not get %v: %v", key, err)
	}
	parts := strings.SplitN(val5, "|", 4)
	val4 := strings.Join(parts[:3], "|")
	if err := s.Set(key, val4); err != nil {
		t.Fatalf("could not set (%v, %v): %v", key, val4, err)
	}

	// revert the index version to 4 to trigger the fix
	if err := s.Set("schemaversion", "4"); err != nil {
		t.Fatal(err)
	}

	// init broken index
	ix, err = index.New(s)
	if err != index.Exp_ErrMissingWholeRef {
		t.Fatalf("wrong error upon index initialization: got %v, wanted %v", err, index.Exp_ErrMissingWholeRef)
	}
	// and fix it
	if err := ix.Exp_FixMissingWholeRef(tf); err != nil {
		t.Fatal(err)
	}

	// init fixed index
	ix, err = index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	// and check that the value is now actually fixed
	fi, err := ix.GetFileInfo(fileBlobRef)
	if err != nil {
		t.Fatal(err)
	}
	if fi.WholeRef.String() != parts[3] {
		t.Fatalf("index fileInfo wholeref was not fixed: got %q, wanted %v", fi.WholeRef, parts[3])
	}
}
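Across all of these examples the skeleton is the same: obtain a sorted.KeyValue, hand it to index.New, then point the index at a blob source. A minimal sketch of that pattern (hypothetical helper name; assumes the two-value index.New signature used in Examples 13, 14, and 25, with a test.Fetcher standing in for a real blob server):

func newMemoryIndex() (*index.Index, error) {
	kv := sorted.NewMemoryKeyValue() // any sorted.KeyValue backend would do
	ix, err := index.New(kv)
	if err != nil {
		return nil, err
	}
	// The index fetches blobs from this source when (re)indexing.
	ix.InitBlobSource(new(test.Fetcher))
	return ix, nil
}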