func TestStreamSkipRemovedBlobs(t *testing.T) {
	// Note: This is the only streaming test that makes use of the
	// index (for RemoveBlobs() to succeed). The others do create
	// an indexed storage but they do not use the index to stream
	// (nor should they use it). The streaming in this test is
	// done by reading the underlying diskpacks.
	s, cleanup := newTempDiskpacked(t)
	defer cleanup()

	uploadTestBlobs(t, s, testPack1)

	ref, ok := blob.Parse(testPack1[0].digest)
	if !ok {
		t.Fatalf("blob.Parse: %s", testPack1[0].digest)
	}
	err := s.RemoveBlobs([]blob.Ref{ref})
	if err != nil {
		t.Fatalf("RemoveBlobs: %v", err)
	}

	diskpackedSto := s.(*storage)
	expected := len(testPack1) - 1 // We've deleted 1.
	storagetest.TestStreamer(t, diskpackedSto, storagetest.WantN(expected))
}
func TestStreamer(t *testing.T) {
	s := new(memory.Storage)
	phrases := []string{"foo", "bar", "baz", "quux"}
	for _, str := range phrases {
		(&test.Blob{str}).MustUpload(t, s)
	}
	storagetest.TestStreamer(t, s, storagetest.WantN(len(phrases)))
}
func TestStaticStreamer(t *testing.T) {
	var blobs []*blob.Blob
	var want []blob.SizedRef
	for i := 0; i < 5; i++ {
		tb := &test.Blob{strconv.Itoa(i)}
		b := tb.Blob()
		blobs = append(blobs, b)
		want = append(want, b.SizedRef())
	}
	bs := staticStreamer(blobs)
	storagetest.TestStreamer(t, bs, storagetest.WantSizedRefs(want))
}
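// staticStreamer is referenced above but not defined in this excerpt. A
// minimal sketch of such a test helper follows, assuming the usual imports
// (context, errors, strconv, blob, blobserver); the token format and error
// handling here are assumptions, not necessarily the canonical definition.
// It streams a fixed slice of blobs, using each blob's index in the slice as
// its continuation token, so resuming with token i re-sends s[i:].
type staticStreamer []*blob.Blob

func (s staticStreamer) StreamBlobs(ctx context.Context, dest chan<- blobserver.BlobAndToken, contToken string) error {
	defer close(dest)
	start := 0
	if contToken != "" {
		i, err := strconv.Atoi(contToken)
		if err != nil || i < 0 || i >= len(s) {
			return errors.New("staticStreamer: invalid continuation token")
		}
		start = i
	}
	for i := start; i < len(s); i++ {
		select {
		case dest <- blobserver.BlobAndToken{Blob: s[i], Token: strconv.Itoa(i)}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}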
func testStreamBlobs(t *testing.T,
	small blobserver.Storage,
	large subFetcherStorage,
	populate func(*testing.T, *storage) []storagetest.StreamerTestOpt) {
	s := &storage{
		small: small,
		large: large,
		meta:  sorted.NewMemoryKeyValue(),
		log:   test.NewLogger(t, "blobpacked: "),
	}
	s.init()
	wants := populate(t, s)
	storagetest.TestStreamer(t, s, wants...)
}
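// testStreamBlobs is a shared helper rather than a test: concrete tests call
// it with a populate callback that fills the storage and returns the
// expectations for the streamer. The caller below is purely illustrative
// (the test name and the empty-storage expectation are assumptions added
// here, not part of the original suite): it runs the streamer against an
// unpopulated storage and expects zero blobs.
func TestStreamBlobsEmpty(t *testing.T) {
	testStreamBlobs(t, new(test.Fetcher), new(test.Fetcher),
		func(t *testing.T, s *storage) []storagetest.StreamerTestOpt {
			return []storagetest.StreamerTestOpt{storagetest.WantN(0)}
		})
}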
// Tests the streaming of all blobs in a storage, with hash verification.
func TestBasicStreaming(t *testing.T) {
	s, clean := newTestStorage(t, pack{testPack1})
	defer clean()
	expected := len(testPack1)
	blobs := streamAll(t, s)
	if len(blobs) != expected {
		t.Fatalf("Wrong blob count: Expected %d, got %d", expected, len(blobs))
	}
	wantRefs := make([]blob.SizedRef, len(blobs))
	for i, b := range blobs {
		wantRefs[i] = b.SizedRef()
	}
	storagetest.TestStreamer(t, s, storagetest.WantSizedRefs(wantRefs))
}
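// streamAll is not defined in this excerpt. A hypothetical sketch of the
// helper (the implementation details are assumptions): it drains StreamBlobs
// from the beginning (empty continuation token) and returns every blob seen,
// failing the test if streaming reports an error.
func streamAll(t *testing.T, s *storage) []*blob.Blob {
	var blobs []*blob.Blob
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	dest := make(chan blobserver.BlobAndToken)
	errc := make(chan error, 1)
	go func() { errc <- s.StreamBlobs(ctx, dest, "") }()

	for bt := range dest {
		blobs = append(blobs, bt.Blob)
	}
	if err := <-errc; err != nil {
		t.Fatalf("StreamBlobs = %v", err)
	}
	return blobs
}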
func TestMultiStreamer(t *testing.T) {
	var streamers []blobserver.BlobStreamer
	var want []blob.SizedRef
	n := 0
	for st := 0; st < 3; st++ {
		var blobs []*blob.Blob
		for i := 0; i < 3; i++ {
			n++
			tb := &test.Blob{strconv.Itoa(n)}
			b := tb.Blob()
			want = append(want, b.SizedRef()) // overall
			blobs = append(blobs, b)          // this sub-streamer
		}
		streamers = append(streamers, staticStreamer(blobs))
	}
	storagetest.TestStreamer(t, blobserver.NewMultiBlobStreamer(streamers...), storagetest.WantSizedRefs(want))
}
func TestStreamBlobs(t *testing.T) {
	small := new(test.Fetcher)
	s := &storage{
		small: small,
		large: new(test.Fetcher),
		meta:  sorted.NewMemoryKeyValue(),
		log:   test.NewLogger(t, "blobpacked: "),
	}
	s.init()

	all := map[blob.Ref]bool{}
	const nBlobs = 10
	for i := 0; i < nBlobs; i++ {
		b := &test.Blob{strconv.Itoa(i)}
		b.MustUpload(t, small)
		all[b.BlobRef()] = true
	}

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	token := "" // beginning

	got := map[blob.Ref]bool{}
	dest := make(chan blobserver.BlobAndToken, 16)
	done := make(chan bool)
	go func() {
		defer close(done)
		for bt := range dest {
			got[bt.Blob.Ref()] = true
		}
	}()
	err := s.StreamBlobs(ctx, dest, token)
	if err != nil {
		t.Fatalf("StreamBlobs = %v", err)
	}
	<-done
	if !reflect.DeepEqual(got, all) {
		t.Errorf("Got blobs %v; want %v", got, all)
	}

	storagetest.TestStreamer(t, s, storagetest.WantN(nBlobs))
}
// Tests that we can correctly switch over to the next pack if we
// still need to stream more blobs when a pack reaches EOF.
func TestStreamMultiplePacks(t *testing.T) {
	s, clean := newTestStorage(t, pack{testPack1}, pack{testPack2})
	defer clean()
	storagetest.TestStreamer(t, s, storagetest.WantN(len(testPack1)+len(testPack2)))
}