Example #1
// NewRootedCamliFileSystem returns a CamliFileSystem with root as its
// base.
func NewRootedCamliFileSystem(fetcher blob.SeekFetcher, root blob.Ref) (*CamliFileSystem, error) {
	fs := newCamliFileSystem(fetcher)

	blob, err := fs.fetchSchemaMeta(root)
	if err != nil {
		return nil, err
	}
	if blob.Type() != "directory" {
		return nil, fmt.Errorf("Blobref must be of a directory, got a %v", blob.Type())
	}
	n := &node{fs: fs, blobref: root, meta: blob}
	n.populateAttr()
	fs.root = n
	return fs, nil
}
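A minimal caller sketch for the constructor above; mountRoot, its fetcher argument, and the blobref literal are illustrative assumptions rather than code taken from the examples.

// mountRoot is a hypothetical helper: it parses a known directory blobref
// and builds a CamliFileSystem rooted at it. The blobref below is a
// placeholder; a real caller would supply the ref of an existing
// "directory" schema blob.
func mountRoot(fetcher blob.SeekFetcher) (*CamliFileSystem, error) {
	root := blob.MustParse("sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
	return NewRootedCamliFileSystem(fetcher, root)
}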
Example #2
// populateMutationMap populates keys & values that will be committed
// into the returned map.
//
// the blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutationMap(fetcher *missTrackFetcher, br blob.Ref, sniffer *BlobSniffer) (*mutationMap, error) {
	// TODO(mpl): shouldn't we remove these two from the map (so they don't get committed) when
// e.g. in populateClaim we detect a bogus claim (which does not yield an error)?
	mm := &mutationMap{
		kv: map[string]string{
			"have:" + br.String(): fmt.Sprintf("%d", sniffer.Size()),
			"meta:" + br.String(): fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()),
		},
	}

	if blob, ok := sniffer.SchemaBlob(); ok {
		switch blob.Type() {
		case "claim":
			if err := ix.populateClaim(blob, mm); err != nil {
				return nil, err
			}
		case "file":
			if err := ix.populateFile(fetcher, blob, mm); err != nil {
				return nil, err
			}
		case "directory":
			if err := ix.populateDir(fetcher, blob, mm); err != nil {
				return nil, err
			}
		}
	}

	return mm, nil
}
Example #3
// populateMutation populates keys & values into the provided BatchMutation.
//
// the blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutation(br blob.Ref, sniffer *BlobSniffer, bm BatchMutation) error {
	bm.Set("have:"+br.String(), fmt.Sprintf("%d", sniffer.Size()))
	bm.Set("meta:"+br.String(), fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()))

	if blob, ok := sniffer.SchemaBlob(); ok {
		switch blob.Type() {
		case "claim":
			if err := ix.populateClaim(blob, bm); err != nil {
				return err
			}
		case "permanode":
			//if err := mi.populatePermanode(blobRef, camli, bm); err != nil {
			//return err
			//}
		case "file":
			if err := ix.populateFile(blob, bm); err != nil {
				return err
			}
		case "directory":
			if err := ix.populateDir(blob, bm); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #4
func TestBlobFromReader(t *testing.T) {
	br := blob.MustParse("sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
	blob, err := BlobFromReader(br, strings.NewReader(`{"camliVersion": 1, "camliType": "foo"}  `))
	if err != nil {
		t.Error(err)
	} else if blob.Type() != "foo" {
		t.Errorf("got type %q; want foo", blob.Type())
	}

	blob, err = BlobFromReader(br, strings.NewReader(`{"camliVersion": 1, "camliType": "foo"}  X  `))
	if err == nil {
		// TODO(bradfitz): fix this somehow. Currently encoding/json's
		// decoder over-reads.
		// See: https://code.google.com/p/go/issues/detail?id=1955 ,
		// which was "fixed", but not really.
		t.Logf("TODO(bradfitz): make sure bogus non-whitespace after the JSON object causes an error.")
	}
}
Example #5
// consolidated logic for determining a node to mount based on an arbitrary blobref
func (fs *CamliFileSystem) newNodeFromBlobRef(root blob.Ref) (fusefs.Node, error) {
	blob, err := fs.fetchSchemaMeta(root)
	if err != nil {
		return nil, err
	}

	switch blob.Type() {
	case "directory":
		n := &node{fs: fs, blobref: root, meta: blob}
		n.populateAttr()
		return n, nil

	case "permanode":
		// other mutDirs listed in the default filesystem have names and are displayed
		return &mutDir{fs: fs, permanode: root, name: "-"}, nil
	}

	return nil, fmt.Errorf("Blobref must be of a directory or permanode got a %v", blob.Type())
}
Example #6
func isSchemaPicker(thenSto, elseSto blobserver.Storage) storageFunc {
	return func(br blob.Ref, src io.Reader) (dest blobserver.Storage, newSrc io.Reader, err error) {
		var buf bytes.Buffer
		blob, err := schema.BlobFromReader(br, io.TeeReader(src, &buf))
		newSrc = io.MultiReader(bytes.NewReader(buf.Bytes()), src)
		if err != nil || blob.Type() == "" {
			return elseSto, newSrc, nil
		}
		return thenSto, newSrc, nil
	}
}
Example #7
func isSchemaPicker(thenSto, elseSto blobserver.Storage) storageFunc {
	return func(src io.Reader) (dest blobserver.Storage, overRead []byte, err error) {
		var buf bytes.Buffer
		tee := io.TeeReader(src, &buf)
		blob, err := schema.BlobFromReader(dummyRef, tee)
		if err != nil || blob.Type() == "" {
			return elseSto, buf.Bytes(), nil
		}
		return thenSto, buf.Bytes(), nil
	}
}
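Both pickers above sniff the schema type without losing data from src: Example #6 tees what the sniffer reads into a buffer and replays it ahead of the rest of the stream via io.MultiReader, while Example #7 hands the buffered over-read back to the caller. A standalone, standard-library-only sketch of that tee-and-replay idea (the input string is made up):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := io.Reader(strings.NewReader(`{"camliVersion": 1}the rest of the stream`))

	// Sniff a prefix of src while keeping a copy of every byte consumed.
	var buf bytes.Buffer
	sniffed := make([]byte, 19)
	if _, err := io.ReadFull(io.TeeReader(src, &buf), sniffed); err != nil {
		fmt.Println("sniff failed:", err)
		return
	}

	// Replay the sniffed prefix ahead of the unread remainder, so a
	// downstream consumer still sees the original stream intact.
	newSrc := io.MultiReader(bytes.NewReader(buf.Bytes()), src)
	all, _ := ioutil.ReadAll(newSrc)
	fmt.Printf("%s\n", all)
}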
Example #8
// populateMutationMap populates keys & values that will be committed
// into the returned map.
//
// the blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutationMap(fetcher *missTrackFetcher, br blob.Ref, sniffer *BlobSniffer) (*mutationMap, error) {
	mm := &mutationMap{
		kv: map[string]string{
			"meta:" + br.String(): fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()),
		},
	}
	var err error
	if blob, ok := sniffer.SchemaBlob(); ok {
		switch blob.Type() {
		case "claim":
			err = ix.populateClaim(fetcher, blob, mm)
		case "file":
			err = ix.populateFile(fetcher, blob, mm)
		case "directory":
			err = ix.populateDir(fetcher, blob, mm)
		}
	}
	if err != nil && err != errMissingDep {
		return nil, err
	}
	var haveVal string
	if err == errMissingDep {
		haveVal = fmt.Sprintf("%d", sniffer.Size())
	} else {
		haveVal = fmt.Sprintf("%d|indexed", sniffer.Size())
	}
	mm.kv["have:"+br.String()] = haveVal
	ix.mu.Lock()
	defer ix.mu.Unlock()
	if len(fetcher.missing) == 0 {
		// If err == nil, we're good. Else (err == errMissingDep), we
		// know the error did not come from a fetching miss (because
		// len(fetcher.missing) == 0), but from an index miss. Therefore
		// we know the miss has already been noted and will be dealt with
		// later, so we can also pretend everything's fine.
		return mm, nil
	}
	return mm, err
}
Example #9
// Errors returned are:
//    os.ErrNotExist -- blob not found
//    os.ErrInvalid -- not JSON or a camli schema blob
func (fs *CamliFileSystem) fetchSchemaMeta(br blob.Ref) (*schema.Blob, error) {
	blobStr := br.String()
	if blob, ok := fs.blobToSchema.Get(blobStr); ok {
		return blob.(*schema.Blob), nil
	}

	rc, _, err := fs.fetcher.Fetch(br)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	blob, err := schema.BlobFromReader(br, rc)
	if err != nil {
		log.Printf("Error parsing %s as schema blob: %v", br, err)
		return nil, os.ErrInvalid
	}
	if blob.Type() == "" {
		log.Printf("blob %s is JSON but lacks camliType", br)
		return nil, os.ErrInvalid
	}
	fs.blobToSchema.Add(blobStr, blob)
	return blob, nil
}
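A hypothetical caller sketch for fetchSchemaMeta, branching on the two documented sentinel errors; describeSchemaBlob and its logging choices are assumptions for illustration, not part of the examples.

// describeSchemaBlob is a hypothetical helper showing how a caller can
// distinguish the documented error values of fetchSchemaMeta.
func describeSchemaBlob(fs *CamliFileSystem, br blob.Ref) {
	blob, err := fs.fetchSchemaMeta(br)
	switch err {
	case nil:
		log.Printf("blob %s has camliType %q", br, blob.Type())
	case os.ErrNotExist:
		log.Printf("blob %s not found", br)
	case os.ErrInvalid:
		log.Printf("blob %s is not a valid camli schema blob", br)
	default:
		log.Printf("fetching %s: %v", br, err)
	}
}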
Example #10
// Unauthenticated user.  Be paranoid.
func handleGetViaSharing(rw http.ResponseWriter, req *http.Request,
	blobRef blob.Ref, fetcher blob.Fetcher) error {
	if !httputil.IsGet(req) {
		return &shareError{code: invalidMethod, response: badRequest, message: "Invalid method"}
	}

	rw.Header().Set("Access-Control-Allow-Origin", "*")

	viaPathOkay := false
	startTime := time.Now()
	defer func() {
		if !viaPathOkay {
			// Insert a delay, to hide timing attacks probing
			// for the existence of blobs.
			sleep := fetchFailureDelay - (time.Now().Sub(startTime))
			timeSleep(sleep)
		}
	}()
	viaBlobs := make([]blob.Ref, 0)
	if via := req.FormValue("via"); via != "" {
		for _, vs := range strings.Split(via, ",") {
			if br, ok := blob.Parse(vs); ok {
				viaBlobs = append(viaBlobs, br)
			} else {
				return &shareError{code: invalidVia, response: badRequest, message: "Malformed blobref in via param"}
			}
		}
	}

	fetchChain := make([]blob.Ref, 0)
	fetchChain = append(fetchChain, viaBlobs...)
	fetchChain = append(fetchChain, blobRef)
	isTransitive := false
	for i, br := range fetchChain {
		switch i {
		case 0:
			file, size, err := fetcher.Fetch(br)
			if err != nil {
				return unauthorized(shareFetchFailed, "Fetch chain 0 of %s failed: %v", br, err)
			}
			defer file.Close()
			if size > schema.MaxSchemaBlobSize {
				return unauthorized(shareBlobTooLarge, "Fetch chain 0 of %s too large", br)
			}
			blob, err := schema.BlobFromReader(br, file)
			if err != nil {
				return unauthorized(shareReadFailed, "Can't create a blob from %v: %v", br, err)
			}
			share, ok := blob.AsShare()
			if !ok {
				return unauthorized(shareBlobInvalid, "Fetch chain 0 of %s wasn't a valid Share (is %q)", br, blob.Type())
			}
			if share.IsExpired() {
				return unauthorized(shareExpired, "Share is expired")
			}
			if len(fetchChain) > 1 && fetchChain[1].String() != share.Target().String() {
				return unauthorized(shareTargetInvalid,
					"Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q",
					br, fetchChain[1], share.Target())
			}
			isTransitive = share.IsTransitive()
			if len(fetchChain) > 2 && !isTransitive {
				return unauthorized(shareNotTransitive, "Share is not transitive")
			}
		case len(fetchChain) - 1:
			// Last one is fine (as long as its path up to here has been proven, and it's
			// not the first thing in the chain)
			continue
		default:
			rc, _, err := fetcher.Fetch(br)
			if err != nil {
				return unauthorized(viaChainFetchFailed, "Fetch chain %d of %s failed: %v", i, br, err)
			}
			defer rc.Close()
			lr := io.LimitReader(rc, schema.MaxSchemaBlobSize)
			slurpBytes, err := ioutil.ReadAll(lr)
			if err != nil {
				return unauthorized(viaChainReadFailed,
					"Fetch chain %d of %s failed in slurp: %v", i, br, err)
			}
			sought := fetchChain[i+1]
			if !bytesHaveSchemaLink(br, slurpBytes, sought) {
				return unauthorized(viaChainInvalidLink,
					"Fetch chain %d of %s failed; no reference to %s", i, br, sought)
			}
		}
	}

	if assemble, _ := strconv.ParseBool(req.FormValue("assemble")); assemble {
		if !isTransitive {
			return unauthorized(assembleNonTransitive, "Cannot assemble non-transitive share")
		}
		dh := &DownloadHandler{
			Fetcher: fetcher,
			// TODO(aa): It would be nice to specify a local cache here, as the UI handler does.
		}
		dh.ServeHTTP(rw, req, blobRef)
	} else {
		gethandler.ServeBlobRef(rw, req, blobRef, fetcher)
	}
	viaPathOkay = true
	return nil
}
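The "via" parameter parsed above is a comma-separated chain of blobrefs that starts at the share blob and runs through any intermediate blobs; the requested blob itself is appended server-side as the last hop. A hypothetical client-side sketch of assembling that query (buildShareQuery and the flag handling are assumptions, not part of the handler):

// buildShareQuery is a hypothetical helper that assembles the query
// parameters the handler above expects: "via" as a comma-separated chain
// from the share blob through any intermediate blobs, plus an optional
// "assemble" flag (only honored for transitive shares).
func buildShareQuery(shareRef blob.Ref, intermediates []blob.Ref, assemble bool) url.Values {
	chain := []string{shareRef.String()}
	for _, br := range intermediates {
		chain = append(chain, br.String())
	}
	v := url.Values{}
	v.Set("via", strings.Join(chain, ","))
	if assemble {
		v.Set("assemble", "1")
	}
	return v
}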