func TestReceiveIsSchema(t *testing.T) { ld := test.NewLoader() sto := newCond(t, ld, map[string]interface{}{ "write": map[string]interface{}{ "if": "isSchema", "then": "/good-schema/", "else": "/good-other/", }, "read": "/good-other/", }) otherBlob := &test.Blob{Contents: "stuff"} schemaBlob := &test.Blob{Contents: `{"camliVersion": 1, "camliType": "foo"}`} ssb := mustReceive(t, sto, schemaBlob) osb := mustReceive(t, sto, otherBlob) ssto, _ := ld.GetStorage("/good-schema/") osto, _ := ld.GetStorage("/good-other/") if _, err := blobserver.StatBlob(ssto, ssb.Ref); err != nil { t.Errorf("schema blob didn't end up on schema storage") } if _, err := blobserver.StatBlob(osto, osb.Ref); err != nil { t.Errorf("other blob didn't end up on other storage") } }
// uploadPublicKey ensures the handler's public key blob exists on
// h.pubKeyDest, uploading it if necessary. It is idempotent: once the
// key is known to be uploaded (h.pubKeyUploaded), later calls return
// immediately. Safe for concurrent use; h.pubKeyUploadMu guards the
// h.pubKeyUploaded flag.
func (h *Handler) uploadPublicKey() error {
	// Fast path: cheap read-locked check of the done flag.
	h.pubKeyUploadMu.RLock()
	if h.pubKeyUploaded {
		h.pubKeyUploadMu.RUnlock()
		return nil
	}
	h.pubKeyUploadMu.RUnlock()
	// NOTE(review): h.pubKeyDest is read here between RUnlock and Lock,
	// i.e. without the mutex held — presumably it is set once at init
	// and never mutated afterward; verify that before relying on it.
	sto := h.pubKeyDest
	// Slow path: take the write lock and re-check the flag, since
	// another goroutine may have finished the upload in the gap
	// (classic double-checked locking).
	h.pubKeyUploadMu.Lock()
	defer h.pubKeyUploadMu.Unlock()
	if h.pubKeyUploaded {
		return nil
	}
	// If the destination already has the key blob, just record that.
	_, err := blobserver.StatBlob(sto, h.pubKeyBlobRef)
	if err == nil {
		h.pubKeyUploaded = true
		return nil
	}
	// Not there (or stat failed); attempt the upload and log the outcome.
	_, err = blobserver.Receive(sto, h.pubKeyBlobRef, strings.NewReader(h.pubKey))
	log.Printf("uploadPublicKey(%T, %v) = %v", sto, h.pubKeyBlobRef, err)
	if err == nil {
		h.pubKeyUploaded = true
	}
	return err
}
func (h *Handler) uploadPublicKey(sto blobserver.Storage, key string) error { _, err := blobserver.StatBlob(sto, h.pubKeyBlobRef) if err == nil { return nil } _, err = sto.ReceiveBlob(h.pubKeyBlobRef, strings.NewReader(key)) return err }
func serverHasBlob(bs blobserver.BlobStatter, br blob.Ref) (have bool, err error) { _, err = blobserver.StatBlob(bs, br) if err == nil { have = true } else if err == os.ErrNotExist { err = nil } return }
// see if storage proxies through to small for Fetch, Stat, and Enumerate. func TestSmallFallback(t *testing.T) { small := new(test.Fetcher) s := &storage{ small: small, large: new(test.Fetcher), meta: sorted.NewMemoryKeyValue(), log: test.NewLogger(t, "blobpacked: "), } s.init() b1 := &test.Blob{"foo"} b1.MustUpload(t, small) wantSB := b1.SizedRef() // Fetch rc, _, err := s.Fetch(b1.BlobRef()) if err != nil { t.Errorf("failed to Get blob: %v", err) } else { rc.Close() } // Stat. sb, err := blobserver.StatBlob(s, b1.BlobRef()) if err != nil { t.Errorf("failed to Stat blob: %v", err) } else if sb != wantSB { t.Errorf("Stat = %v; want %v", sb, wantSB) } // Enumerate saw := false ctx, cancel := context.WithCancel(context.TODO()) defer cancel() if err := blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error { if sb != wantSB { return fmt.Errorf("saw blob %v; want %v", sb, wantSB) } saw = true return nil }); err != nil { t.Errorf("EnuerateAll: %v", err) } if !saw { t.Error("didn't see blob in Enumerate") } }
func TestReceiveGood(t *testing.T) { sto := newReplica(t, map[string]interface{}{ "backends": []interface{}{"/good-1/", "/good-2/"}, }) tb := &test.Blob{Contents: "stuff"} sb := mustReceive(t, sto, tb) if len(sto.replicas) != 2 { t.Fatalf("replicas = %d; want 2", len(sto.replicas)) } for i, rep := range sto.replicas { got, err := blobserver.StatBlob(rep, sb.Ref) if err != nil { t.Errorf("Replica %s got stat error %v", sto.replicaPrefixes[i], err) } else if got != sb { t.Errorf("Replica %s got %+v; want %+v", sto.replicaPrefixes[i], got, sb) } } }
func TestReceiveOneGoodOneFail(t *testing.T) { sto := newReplica(t, map[string]interface{}{ "backends": []interface{}{"/good-1/", "/fail-1/"}, "minWritesForSuccess": float64(1), }) tb := &test.Blob{Contents: "stuff"} sb := mustReceive(t, sto, tb) if len(sto.replicas) != 2 { t.Fatalf("replicas = %d; want 2", len(sto.replicas)) } for i, rep := range sto.replicas { got, err := blobserver.StatBlob(rep, sb.Ref) pfx := sto.replicaPrefixes[i] if (i == 0) != (err == nil) { t.Errorf("For replica %s, unexpected error: %v", pfx, err) } if err == nil && got != sb { t.Errorf("Replica %s got %+v; want %+v", sto.replicaPrefixes[i], got, sb) } } }
// foreachZipBlob calls fn for each blob in the zip pack blob
// identified by zipRef. If fn returns a non-nil error,
// foreachZipBlob stops enumerating with that error.
//
// Schema blobs are reported first (each stored as its own
// "camlistore/<ref>.json" zip entry), then data blobs, whose refs and
// zip-relative offsets come from the pack's JSON manifest.
func (s *storage) foreachZipBlob(zipRef blob.Ref, fn func(BlobAndPos) error) error {
	// Stat first: zip.NewReader needs the total size of the pack blob.
	sb, err := blobserver.StatBlob(s.large, zipRef)
	if err != nil {
		return err
	}
	zr, err := zip.NewReader(blob.ReaderAt(s.large, zipRef), int64(sb.Size))
	if err != nil {
		return zipOpenError{zipRef, err}
	}
	var maniFile *zip.File // or nil if not found
	var firstOff int64     // offset of first file (the packed data chunks)
	// First pass: record the data offset of the first entry and locate
	// the manifest. The break assumes the manifest comes after the
	// data chunks in entry order.
	for i, f := range zr.File {
		if i == 0 {
			firstOff, err = f.DataOffset()
			if err != nil {
				return err
			}
		}
		if f.Name == zipManifestPath {
			maniFile = f
			break
		}
	}
	if maniFile == nil {
		return errors.New("no camlistore manifest file found in zip")
	}
	// apply fn to all the schema blobs
	for _, f := range zr.File {
		if !strings.HasPrefix(f.Name, "camlistore/") || f.Name == zipManifestPath ||
			!strings.HasSuffix(f.Name, ".json") {
			continue
		}
		// Entry name encodes the blobref: "camlistore/<ref>.json".
		brStr := strings.TrimSuffix(strings.TrimPrefix(f.Name, "camlistore/"), ".json")
		br, ok := blob.Parse(brStr)
		if ok {
			off, err := f.DataOffset()
			if err != nil {
				return err
			}
			if err := fn(BlobAndPos{
				SizedRef: blob.SizedRef{Ref: br, Size: uint32(f.UncompressedSize64)},
				Offset:   off,
			}); err != nil {
				return err
			}
		}
	}
	maniRC, err := maniFile.Open()
	if err != nil {
		return err
	}
	defer maniRC.Close()
	var mf Manifest
	if err := json.NewDecoder(maniRC).Decode(&mf); err != nil {
		return err
	}
	if !mf.WholeRef.Valid() || mf.WholeSize == 0 || !mf.DataBlobsOrigin.Valid() {
		return errors.New("incomplete blobpack manifest JSON")
	}
	// apply fn to all the data blobs
	for _, bap := range mf.DataBlobs {
		// Manifest offsets are relative to the start of the packed
		// data; shift them to absolute offsets within the zip.
		bap.Offset += firstOff
		if err := fn(bap); err != nil {
			return err
		}
	}
	return nil
}