Example 1
func TestArchiver(t *testing.T) {
	src := new(test.Fetcher)
	blobHello := &test.Blob{Contents: "Hello"}
	blobWorld := &test.Blob{Contents: "World" + strings.Repeat("!", 1024)}

	golden := map[blob.Ref]string{
		blobHello.BlobRef(): blobHello.Contents,
		blobWorld.BlobRef(): blobWorld.Contents,
	}

	a := &Archiver{
		Source:                 src,
		DeleteSourceAfterStore: true,
	}

	src.AddBlob(blobHello)
	a.Store = func([]byte, []blob.SizedRef) error {
		return errors.New("Store shouldn't be called")
	}
	a.MinZipSize = 400 // empirically: the zip will be 416 bytes
	if err := a.RunOnce(); err != ErrSourceTooSmall {
		t.Fatalf("RunOnce with just Hello = %v; want ErrSourceTooSmall", err)
	}

	src.AddBlob(blobWorld)
	var zipData []byte
	var inZip []blob.SizedRef
	a.Store = func(zip []byte, brs []blob.SizedRef) error {
		zipData = zip
		inZip = brs
		return nil
	}
	if err := a.RunOnce(); err != nil {
		t.Fatalf("RunOnce with Hello and World = %v", err)
	}
	if zipData == nil {
		t.Error("no zip data stored")
	}
	if len(src.BlobrefStrings()) != 0 {
		t.Errorf("source still has blobs = %q; want none", src.BlobrefStrings)
	}
	if len(inZip) != 2 {
		t.Errorf("expected 2 blobs reported as in zip to Store; got %v", inZip)
	}

	got := map[blob.Ref]string{}
	if err := foreachZipEntry(zipData, func(br blob.Ref, all []byte) {
		got[br] = string(all)
	}); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(golden, got) {
		t.Errorf("zip contents didn't match. got: %v; want %v", got, golden)
	}
}
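
The test above relies on a foreachZipEntry helper that this listing doesn't show. A minimal sketch of what it might look like, assuming each zip entry is named after its blobref (the package's real helper may differ):

func foreachZipEntry(zipData []byte, fn func(br blob.Ref, contents []byte)) error {
	zr, err := zip.NewReader(bytes.NewReader(zipData), int64(len(zipData)))
	if err != nil {
		return err
	}
	for _, f := range zr.File {
		// Assumption: the entry name is the stringified blobref.
		br, ok := blob.Parse(f.Name)
		if !ok {
			return fmt.Errorf("zip entry %q is not a valid blobref", f.Name)
		}
		rc, err := f.Open()
		if err != nil {
			return err
		}
		contents, err := ioutil.ReadAll(rc)
		rc.Close()
		if err != nil {
			return err
		}
		fn(br, contents)
	}
	return nil
}

(Imports assumed: archive/zip, bytes, fmt, io/ioutil, and the blob package.)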
Example 2
func TestOutOfOrderIndexing(t *testing.T) {
	tf := new(test.Fetcher)
	s := sorted.NewMemoryKeyValue()

	ix, err := index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	ix.BlobSource = tf

	t.Logf("file ref = %v", fileBlobRef)
	t.Logf("missing data chunks = %v, %v, %v", chunk1ref, chunk2ref, chunk3ref)

	add := func(b *test.Blob) {
		tf.AddBlob(b)
		if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil {
			t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err)
		}
	}

	add(fileBlob)

	{
		key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk1ref)
		if got, err := s.Get(key); got == "" || err != nil {
			t.Errorf("key %q missing (err: %v); want 1", key, err)
		}
	}

	add(chunk1)
	add(chunk2)

	ix.Exp_AwaitReindexing(t)

	{
		key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk3ref)
		if got, err := s.Get(key); got == "" || err != nil {
			t.Errorf("key %q missing (err: %v); want 1", key, err)
		}
	}

	add(chunk3)

	ix.Exp_AwaitReindexing(t)

	foreachSorted(t, s, func(k, v string) {
		if strings.HasPrefix(k, "missing|") {
			t.Errorf("Shouldn't have missing key: %q", k)
		}
	})
}
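
Example 2 ends by walking every row of the sorted.KeyValue with a foreachSorted helper that is also not shown here. A plausible sketch using only the sorted.KeyValue iterator API (an assumption; the test's actual helper may differ):

func foreachSorted(t *testing.T, s sorted.KeyValue, fn func(k, v string)) {
	// An empty start/end iterates the full key range.
	it := s.Find("", "")
	for it.Next() {
		fn(it.Key(), it.Value())
	}
	if err := it.Close(); err != nil {
		t.Fatal(err)
	}
}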
Example 3
// Tests a bunch of rounds on a bunch of data.
func TestArchiverStress(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	src := new(test.Fetcher)
	fileRef, err := schema.WriteFileFromReader(src, "random", io.LimitReader(randReader{}, 10<<20))
	if err != nil {
		t.Fatal(err)
	}
	n0 := src.NumBlobs()
	t.Logf("Wrote %v in %d blobs", fileRef, n0)

	refs0 := src.BlobrefStrings()

	var zips [][]byte
	archived := map[blob.Ref]bool{}
	a := &Archiver{
		Source:                 src,
		MinZipSize:             1 << 20,
		DeleteSourceAfterStore: true,
		Store: func(zipd []byte, brs []blob.SizedRef) error {
			zips = append(zips, zipd)
			for _, sbr := range brs {
				if archived[sbr.Ref] {
					t.Error("duplicate archive of %v", sbr.Ref)
				}
				archived[sbr.Ref] = true
			}
			return nil
		},
	}
	for {
		err := a.RunOnce()
		if err == ErrSourceTooSmall {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
	}

	if len(archived) == 0 {
		t.Errorf("unexpected small number of archived blobs = %d", len(archived))
	}
	if len(zips) < 2 {
		t.Errorf("unexpected small number of zip files = %d", len(zips))
	}
	if n1 := src.NumBlobs() + len(archived); n0 != n1 {
		t.Errorf("original %d blobs != %d after + %d archived (%d)", n0, src.NumBlobs(), len(archived), n1)
	}

	// And restore:
	for _, zipd := range zips {
		if err := foreachZipEntry(zipd, func(br blob.Ref, contents []byte) {
			tb := &test.Blob{Contents: string(contents)}
			if tb.BlobRef() != br {
				t.Fatal("corrupt zip callback")
			}
			src.AddBlob(tb)
		}); err != nil {
			t.Fatal(err)
		}
	}

	refs1 := src.BlobrefStrings()
	if !reflect.DeepEqual(refs0, refs1) {
		t.Error("Restore error.")
	}
}
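
The stress test assumes a randReader type providing an endless stream of pseudo-random bytes, so the 10 MB file written by schema.WriteFileFromReader doesn't collapse into a handful of blobs. A possible shape (hypothetical; the listing doesn't include the real definition):

// randReader is an io.Reader that fills p with pseudo-random bytes and
// never returns an error or io.EOF; io.LimitReader caps it above.
type randReader struct{}

func (randReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = byte(rand.Intn(256))
	}
	return len(p), nil
}

(Import assumed: math/rand.)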
Example 4
// Tests that we add the missing wholeRef entries in FileInfo rows when going
// from a version 4 to a version 5 index.
func TestFixMissingWholeref(t *testing.T) {
	tf := new(test.Fetcher)
	s := sorted.NewMemoryKeyValue()

	ix, err := index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	ix.InitBlobSource(tf)

	// populate with a file
	add := func(b *test.Blob) {
		tf.AddBlob(b)
		if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil {
			t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err)
		}
	}
	add(chunk1)
	add(chunk2)
	add(chunk3)
	add(fileBlob)

	// revert the row to the old form, by stripping the wholeRef suffix
	key := "fileinfo|" + fileBlobRef.String()
	val5, err := s.Get(key)
	if err != nil {
		t.Fatalf("could not get %v: %v", key, err)
	}
	parts := strings.SplitN(val5, "|", 4)
	val4 := strings.Join(parts[:3], "|")
	if err := s.Set(key, val4); err != nil {
		t.Fatalf("could not set (%v, %v): %v", key, val4, err)
	}

	// revert index version at 4 to trigger the fix
	if err := s.Set("schemaversion", "4"); err != nil {
		t.Fatal(err)
	}

	// init broken index
	ix, err = index.New(s)
	if err != index.Exp_ErrMissingWholeRef {
		t.Fatalf("wrong error upon index initialization: got %v, wanted %v", err, index.Exp_ErrMissingWholeRef)
	}
	// and fix it
	if err := ix.Exp_FixMissingWholeRef(tf); err != nil {
		t.Fatal(err)
	}

	// init fixed index
	ix, err = index.New(s)
	if err != nil {
		t.Fatal(err)
	}
	// and check that the value is now actually fixed
	fi, err := ix.GetFileInfo(fileBlobRef)
	if err != nil {
		t.Fatal(err)
	}
	if fi.WholeRef.String() != parts[3] {
		t.Fatalf("index fileInfo wholeref was not fixed: got %q, wanted %v", fi.WholeRef, parts[3])
	}
}
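
For readers following along, the row this test manipulates has the following assumed layout, inferred from how the test strips and restores the fourth |-separated field (the field names are guesses, not the index package's documented format):

// key:      "fileinfo|" + fileBlobRef.String()
// v5 value: "<size>|<filename>|<mimeType>|<wholeRef>"
// v4 value: "<size>|<filename>|<mimeType>"            (no wholeRef)

Splitting with strings.SplitN(val5, "|", 4) and rejoining the first three parts is exactly what reproduces the version-4 form.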
Example 5
func TestForeachChunkAllSchemaBlobs(t *testing.T) {
	sto := new(test.Fetcher) // in-memory blob storage
	foo := &test.Blob{"foo"}
	bar := &test.Blob{"bar"}
	sto.AddBlob(foo)
	sto.AddBlob(bar)

	// Make a "bytes" schema blob referencing the "foo" and "bar" chunks.
	// Verify it works.
	bytesBlob := &test.Blob{`{"camliVersion": 1,
"camliType": "bytes",
"parts": [
   {"blobRef": "` + foo.BlobRef().String() + `", "size": 3},
   {"blobRef": "` + bar.BlobRef().String() + `", "size": 3}
]}`}
	sto.AddBlob(bytesBlob)

	var fr *FileReader
	mustRead := func(name string, br blob.Ref, want string) {
		var err error
		fr, err = NewFileReader(sto, br)
		if err != nil {
			t.Fatalf("%s: %v", name, err)
		}
		all, err := ioutil.ReadAll(fr)
		if err != nil {
			t.Fatalf("%s: %v", name, err)
		}
		if string(all) != want {
			t.Errorf("%s: read contents %q; want %q", name, all, want)
		}
	}
	mustRead("bytesBlob", bytesBlob.BlobRef(), "foobar")

	// Now make another bytes schema blob embedding the previous one.
	bytesBlob2 := &test.Blob{`{"camliVersion": 1,
"camliType": "bytes",
"parts": [
   {"bytesRef": "` + bytesBlob.BlobRef().String() + `", "size": 6}
]}`}
	sto.AddBlob(bytesBlob2)
	mustRead("bytesBlob2", bytesBlob2.BlobRef(), "foobar")

	sawSchema := map[blob.Ref]bool{}
	sawData := map[blob.Ref]bool{}
	if err := fr.ForeachChunk(func(path []blob.Ref, p BytesPart) error {
		for _, sref := range path {
			sawSchema[sref] = true
		}
		sawData[p.BlobRef] = true
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	want := []struct {
		name string
		tb   *test.Blob
		m    map[blob.Ref]bool
	}{
		{"bytesBlob", bytesBlob, sawSchema},
		{"bytesBlob2", bytesBlob2, sawSchema},
		{"foo", foo, sawData},
		{"bar", bar, sawData},
	}
	for _, tt := range want {
		if b := tt.tb.BlobRef(); !tt.m[b] {
			t.Errorf("didn't see %s (%s)", tt.name, b)
		}
	}
}
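
A small follow-on usage sketch, assuming the same fr (still pointing at bytesBlob2): since ForeachChunk reports each leaf's BytesPart, summing the part sizes should equal the 6 bytes of "foobar". This fragment would sit at the end of the test above.

	var total uint64
	if err := fr.ForeachChunk(func(path []blob.Ref, p BytesPart) error {
		total += p.Size // accumulate each leaf chunk's size
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	if total != 6 {
		t.Errorf("sum of chunk sizes = %d; want 6", total)
	}

(Assumes BytesPart carries a Size field, consistent with the "size" values in the schema blobs above.)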