Esempio n. 1
0
// NewFileReader returns a new FileReader reading the contents of fileBlobRef,
// fetching blobs from fetcher.  The fileBlobRef must be of a "bytes" or "file"
// schema blob.
//
// The caller should call Close on the FileReader when done reading.
func NewFileReader(fetcher blob.Fetcher, fileBlobRef blob.Ref) (*FileReader, error) {
	// TODO(bradfitz): rename this into bytes reader? but for now it's still
	//                 named FileReader, but can also read a "bytes" schema.
	if !fileBlobRef.Valid() {
		return nil, errors.New("schema/filereader: NewFileReader blobref invalid")
	}
	body, _, err := fetcher.Fetch(fileBlobRef)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
	}
	defer body.Close()
	ss, err := parseSuperset(body)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err)
	}
	ss.BlobRef = fileBlobRef
	switch ss.Type {
	case "file", "bytes":
		// Both schema types are readable as a flat byte stream.
	default:
		return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type)
	}
	fr, err := ss.NewFileReader(fetcher)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err)
	}
	return fr, nil
}
Esempio n. 2
0
// copyBlob streams blob br from src into dst.
func copyBlob(br blob.Ref, dst blobserver.BlobReceiver, src blob.Fetcher) error {
	body, _, err := src.Fetch(br)
	if err != nil {
		return err
	}
	defer body.Close()
	if _, err := dst.ReceiveBlob(br, body); err != nil {
		return err
	}
	return nil
}
Esempio n. 3
0
// fetch returns a ReadCloser for blob br from src, logging the fetch when
// verbose mode is enabled. The caller is responsible for closing the reader.
func fetch(src blob.Fetcher, br blob.Ref) (r io.ReadCloser, err error) {
	if *flagVerbose {
		log.Printf("Fetching %s", br.String())
	}
	r, _, err = src.Fetch(br)
	if err != nil {
		// Error strings are lowercase and unpunctuated by Go convention;
		// %v renders the wrapped error's message.
		return nil, fmt.Errorf("fetching %s: %v", br, err)
	}
	return r, nil
}
Esempio n. 4
0
// slurpBlob fetches blob br from sto and returns its entire contents,
// failing the test immediately on any error.
func slurpBlob(t *testing.T, sto blob.Fetcher, br blob.Ref) []byte {
	body, _, err := sto.Fetch(br)
	if err != nil {
		t.Fatal(err)
	}
	defer body.Close()
	contents, err := ioutil.ReadAll(body)
	if err != nil {
		t.Fatal(err)
	}
	return contents
}
Esempio n. 5
0
// setFromBlobRef fetches blobRef from fetcher and decodes its JSON contents
// into ss, recording blobRef in ss.BlobRef.
func (ss *superset) setFromBlobRef(fetcher blob.Fetcher, blobRef blob.Ref) error {
	if !blobRef.Valid() {
		return errors.New("schema/dirreader: blobref invalid")
	}
	ss.BlobRef = blobRef
	body, _, err := fetcher.Fetch(blobRef)
	if err != nil {
		return fmt.Errorf("schema/dirreader: fetching schema blob %s: %v", blobRef, err)
	}
	defer body.Close()
	if err = json.NewDecoder(body).Decode(ss); err != nil {
		return fmt.Errorf("schema/dirreader: decoding schema blob %s: %v", blobRef, err)
	}
	return nil
}
Esempio n. 6
0
// schemaFromBlobRef parses ref as a blob ref, fetches the blob from bs, and
// parses it as a schema blob. The returned error identifies which step failed.
func schemaFromBlobRef(bs blob.Fetcher, ref string) (*schema.Blob, error) {
	br, ok := blob.Parse(ref)
	if !ok {
		// Bug fix: ref was missing from the argument list, so the message
		// rendered as `%!q(MISSING): unparseable blob ref`.
		return nil, fmt.Errorf("%q: unparseable blob ref", ref)
	}
	body, _, err := bs.Fetch(br)
	if err != nil {
		// TODO(dichro): delete this from index?
		return nil, fmt.Errorf("%s: previously indexed; now missing", br)
	}
	defer body.Close()
	s, ok := parseSchema(br, body)
	if !ok {
		return nil, fmt.Errorf("%s: previously schema; now unparseable", br)
	}
	return s, nil
}
Esempio n. 7
0
// printHierarchy prints an indented tree for each node in nodes (blob refs as
// strings), recursing into each node's parents as reported by fsck.Parents.
// depth controls the two-space indentation level; suffix accumulates a
// path-like description built while descending through "file" and
// "directory" schema blobs.
func printHierarchy(fsck *db.DB, bs blob.Fetcher, depth int, suffix string, nodes []string) {
	prefix := ""
	for i := 0; i < depth; i++ {
		prefix = prefix + "  "
	}
	for _, node := range nodes {
		nextSuffix := suffix
		ref := blob.MustParse(node)
		// camliType is the human-readable annotation printed next to the
		// node; it defaults to "unknown" and is overwritten below when the
		// blob can be fetched and parsed as a schema blob.
		camliType := "unknown"
		if body, _, err := bs.Fetch(ref); err != nil {
			// Fetch failed: surface the error in place of the type.
			camliType = fmt.Sprintf("Fetch(): %s", err)
		} else {
			if s, ok := parseSchema(ref, body); ok {
				fileName := s.FileName()
				switch t := s.Type(); t {
				case "file":
					// this blob is a "file" that just happens to contain a
					// camlistore blob in its contents. This happens because I
					// may have camput my blobs directory once or twice :P
					if len(suffix) > 0 {
						nextSuffix = fmt.Sprintf("%s -> %s", fileName, nextSuffix)
					} else {
						nextSuffix = fileName
					}
					camliType = fmt.Sprintf("%s: %q", t, nextSuffix)
				case "directory":
					nextSuffix = fmt.Sprintf("%s/%s", fileName, suffix)
					camliType = fmt.Sprintf("%s: %q", t, nextSuffix)
				default:
					camliType = t
				}
			}
			body.Close()
		}

		// Marker legend: "*" = error looking up parents, "-" = leaf
		// (no parents), "+" = interior node, which recurses one level deeper.
		switch next, err := fsck.Parents(node); {
		case err != nil:
			fmt.Printf("%s* %s: %s\n", prefix, node, err)
		case len(next) == 0:
			fmt.Printf("%s- %s (%s)\n", prefix, node, camliType)
		default:
			fmt.Printf("%s+ %s (%s)\n", prefix, node, camliType)
			printHierarchy(fsck, bs, depth+1, nextSuffix, next)
		}
	}
}
Esempio n. 8
0
// ServeBlobRef serves a blob identified by blobRef over HTTP, fetching its
// contents from fetcher. Small blobs and ranged requests are slurped into
// memory so the content can be seeked; small all-UTF-8 blobs are served as
// text/plain for browser debuggability, everything else as octet-stream.
func ServeBlobRef(rw http.ResponseWriter, req *http.Request, blobRef blob.Ref, fetcher blob.Fetcher) {
	rc, size, err := fetcher.Fetch(blobRef)
	switch err {
	case nil:
		break
	case os.ErrNotExist:
		// NOTE(review): direct comparison matches only the exact sentinel;
		// a wrapped not-exist error would fall to ServeError instead —
		// confirm fetchers return os.ErrNotExist unwrapped.
		rw.WriteHeader(http.StatusNotFound)
		fmt.Fprintf(rw, "Blob %q not found", blobRef)
		return
	default:
		httputil.ServeError(rw, req, err)
		return
	}
	defer rc.Close()
	rw.Header().Set("Content-Type", "application/octet-stream")

	// Default path: stream the blob. The fake seeker reports size so
	// http.ServeContent can set Content-Length without real seeking.
	var content io.ReadSeeker = readerutil.NewFakeSeeker(rc, int64(size))
	rangeHeader := req.Header.Get("Range") != ""
	const small = 32 << 10
	var b *blob.Blob
	if rangeHeader || size < small {
		// Slurp to memory, so we can actually seek on it (for Range support),
		// or if we're going to be showing it in the browser (below).
		b, err = blob.FromReader(blobRef, rc, size)
		if err != nil {
			httputil.ServeError(rw, req, err)
			return
		}
		content = b.Open()
	}
	if !rangeHeader && size < small {
		// If it's small and all UTF-8, assume it's text and
		// just render it in the browser.  This is more for
		// demos/debuggability than anything else.  It isn't
		// part of the spec.
		// (b is non-nil here: size < small forced the slurp above.)
		if b.IsUTF8() {
			rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
		}
	}
	http.ServeContent(rw, req, "", dummyModTime, content)
}
Esempio n. 9
0
// Unauthenticated user.  Be paranoid.
//
// handleGetViaSharing serves blobRef to an unauthenticated client if the
// client proves a valid share chain via the "via" query parameter: a
// comma-separated list of blob refs starting at a share schema blob, where
// each blob in the chain must textually reference the next, ending at
// blobRef itself. Failed attempts are delayed to blunt timing probes for
// blob existence.
func handleGetViaSharing(conn http.ResponseWriter, req *http.Request,
	blobRef blob.Ref, fetcher blob.Fetcher) error {
	if !httputil.IsGet(req) {
		return &shareError{code: invalidMethod, response: badRequest, message: "Invalid method"}
	}

	conn.Header().Set("Access-Control-Allow-Origin", "*")

	viaPathOkay := false
	startTime := time.Now()
	defer func() {
		if !viaPathOkay {
			// Insert a delay, to hide timing attacks probing
			// for the existence of blobs.
			sleep := fetchFailureDelay - (time.Now().Sub(startTime))
			time.Sleep(sleep)
		}
	}()
	// viaBlobs holds the intermediate hops the client claims lead from the
	// share root to blobRef.
	viaBlobs := make([]blob.Ref, 0)
	if via := req.FormValue("via"); via != "" {
		for _, vs := range strings.Split(via, ",") {
			if br, ok := blob.Parse(vs); ok {
				viaBlobs = append(viaBlobs, br)
			} else {
				return &shareError{code: invalidVia, response: badRequest, message: "Malformed blobref in via param"}
			}
		}
	}

	// fetchChain = [share root, intermediate hops..., blobRef].
	fetchChain := make([]blob.Ref, 0)
	fetchChain = append(fetchChain, viaBlobs...)
	fetchChain = append(fetchChain, blobRef)
	isTransitive := false
	for i, br := range fetchChain {
		switch i {
		case 0:
			// First hop must be a valid, unexpired share blob whose target
			// is the next hop (when there is one).
			file, size, err := fetcher.Fetch(br)
			if err != nil {
				return unauthorized(shareFetchFailed, "Fetch chain 0 of %s failed: %v", br, err)
			}
			defer file.Close()
			if size > schema.MaxSchemaBlobSize {
				return unauthorized(shareBlobTooLarge, "Fetch chain 0 of %s too large", br)
			}
			blob, err := schema.BlobFromReader(br, file)
			if err != nil {
				return unauthorized(shareReadFailed, "Can't create a blob from %v: %v", br, err)
			}
			share, ok := blob.AsShare()
			if !ok {
				return unauthorized(shareBlobInvalid, "Fetch chain 0 of %s wasn't a valid Share", br)
			}
			if share.IsExpired() {
				return unauthorized(shareExpired, "Share is expired")
			}
			if len(fetchChain) > 1 && fetchChain[1].String() != share.Target().String() {
				return unauthorized(shareTargetInvalid,
					"Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q",
					br, fetchChain[1], share.Target())
			}
			isTransitive = share.IsTransitive()
			if len(fetchChain) > 2 && !isTransitive {
				return unauthorized(shareNotTransitive, "Share is not transitive")
			}
		case len(fetchChain) - 1:
			// Last one is fine (as long as its path up to here has been proven, and it's
			// not the first thing in the chain)
			continue
		default:
			// Intermediate hop: its (size-limited) contents must mention the
			// next hop's blobref, proving a link in the chain.
			file, _, err := fetcher.Fetch(br)
			if err != nil {
				return unauthorized(viaChainFetchFailed, "Fetch chain %d of %s failed: %v", i, br, err)
			}
			defer file.Close()
			lr := io.LimitReader(file, schema.MaxSchemaBlobSize)
			slurpBytes, err := ioutil.ReadAll(lr)
			if err != nil {
				return unauthorized(viaChainReadFailed,
					"Fetch chain %d of %s failed in slurp: %v", i, br, err)
			}
			saught := fetchChain[i+1].String()
			if bytes.Index(slurpBytes, []byte(saught)) == -1 {
				return unauthorized(viaChainInvalidLink,
					"Fetch chain %d of %s failed; no reference to %s", i, br, saught)
			}
		}
	}

	// Chain verified: serve the blob, assembled if requested (and allowed).
	if assemble, _ := strconv.ParseBool(req.FormValue("assemble")); assemble {
		if !isTransitive {
			return unauthorized(assembleNonTransitive, "Cannot assemble non-transitive share")
		}
		dh := &DownloadHandler{
			Fetcher: fetcher,
			// TODO(aa): It would be nice to specify a local cache here, as the UI handler does.
		}
		dh.ServeHTTP(conn, req, blobRef)
	} else {
		gethandler.ServeBlobRef(conn, req, blobRef, fetcher)
	}
	viaPathOkay = true
	return nil
}