Example #1
// bytesHaveSchemaLink reports whether bb is a valid Camlistore schema
// blob and has target somewhere in a schema field used to represent a
// Merkle-tree-ish file or directory.
func bytesHaveSchemaLink(br blob.Ref, bb []byte, target blob.Ref) bool {
	// Fast path for no:
	if !bytes.Contains(bb, []byte(target.String())) {
		return false
	}
	b, err := schema.BlobFromReader(br, bytes.NewReader(bb))
	if err != nil {
		return false
	}
	typ := b.Type()
	switch typ {
	case "file", "blob":
		for _, bp := range b.ByteParts() {
			if bp.BlobRef.Valid() {
				return bp.BlobRef == target
			}
			if bp.BytesRef.Valid() {
				return bp.BytesRef == target
			}
		}
	case "directory":
		if d, ok := b.DirectoryEntries(); ok {
			return d == target
		}
	case "static-set":
		for _, m := range b.StaticSetMembers() {
			if m == target {
				return true
			}
		}
	}
	return false
}
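The helper above only inspects bytes it is handed; a caller still has to fetch and size-limit those bytes itself. The following is a minimal sketch (not from the original source) of such a caller, reusing the blob.StreamingFetcher interface and the io/ioutil helpers that appear in the later examples; verifyHop and maxSize are illustrative names.

// verifyHop is a hypothetical caller of bytesHaveSchemaLink: it fetches at
// most maxSize bytes of the parent blob and reports whether that schema blob
// links to child.
func verifyHop(fetcher blob.StreamingFetcher, parent, child blob.Ref, maxSize int64) (bool, error) {
	rc, _, err := fetcher.FetchStreaming(parent)
	if err != nil {
		return false, err
	}
	defer rc.Close()
	bb, err := ioutil.ReadAll(io.LimitReader(rc, maxSize))
	if err != nil {
		return false, err
	}
	// Delegate the actual schema-link check to bytesHaveSchemaLink above.
	return bytesHaveSchemaLink(parent, bb, child), nil
}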
Example #2
func NewFromShareRoot(shareBlobURL string) (c *Client, target *blobref.BlobRef, err error) {
	var root string
	if m := shareURLRx.FindStringSubmatch(shareBlobURL); m == nil {
		return nil, nil, fmt.Errorf("Unknown URL base; doesn't contain /camli/")
	} else {
		c = New(m[1])
		c.discoOnce.Do(func() { /* nothing */ })
		c.prefixOnce.Do(func() { /* nothing */ })
		c.prefixv = m[1]
		c.authMode = auth.None{}
		c.via = make(map[string]string)
		root = m[2]
	}
	res, err := http.Get(shareBlobURL)
	if err != nil {
		return nil, nil, fmt.Errorf("Error fetching %s: %v", shareBlobURL, err)
	}
	defer res.Body.Close()
	blob, err := schema.BlobFromReader(blobref.Parse(root), res.Body)
	if err != nil {
		return nil, nil, fmt.Errorf("Error parsing JSON from %s: %v", shareBlobURL, err)
	}
	if blob.ShareAuthType() != "haveref" {
		return nil, nil, fmt.Errorf("Unknown share authType of %q", blob.ShareAuthType())
	}
	target = blob.ShareTarget()
	if target == nil {
		return nil, nil, fmt.Errorf("No target.")
	}
	c.via[target.String()] = root
	return c, target, nil
}
Example #3
func (c *Client) FetchSchemaBlob(b blob.Ref) (*schema.Blob, error) {
	rc, _, err := c.Fetch(b)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return schema.BlobFromReader(b, rc)
}
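FetchSchemaBlob is a thin fetch-parse-close wrapper. A hypothetical caller (not from the original source) that merely reports a blob's camliType could look like the sketch below; it assumes the usual client and log imports and that br refers to a schema blob.

// printCamliType is an illustrative helper, not part of the client package:
// it fetches br as a schema blob and logs its camliType.
func printCamliType(c *client.Client, br blob.Ref) error {
	b, err := c.FetchSchemaBlob(br)
	if err != nil {
		return err
	}
	log.Printf("%v has camliType %q", br, b.Type())
	return nil
}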
Example #4
func isSchemaPicker(thenSto, elseSto blobserver.Storage) storageFunc {
	return func(br blob.Ref, src io.Reader) (dest blobserver.Storage, newSrc io.Reader, err error) {
		var buf bytes.Buffer
		blob, err := schema.BlobFromReader(br, io.TeeReader(src, &buf))
		newSrc = io.MultiReader(bytes.NewReader(buf.Bytes()), src)
		if err != nil || blob.Type() == "" {
			return elseSto, newSrc, nil
		}
		return thenSto, newSrc, nil
	}
}
Example #5
func isSchemaPicker(thenSto, elseSto blobserver.Storage) storageFunc {
	return func(src io.Reader) (dest blobserver.Storage, overRead []byte, err error) {
		var buf bytes.Buffer
		tee := io.TeeReader(src, &buf)
		blob, err := schema.BlobFromReader(dummyRef, tee)
		if err != nil || blob.Type() == "" {
			return elseSto, buf.Bytes(), nil
		}
		return thenSto, buf.Bytes(), nil
	}
}
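The two isSchemaPicker variants differ only in how the sniffed bytes are handed back: the first wraps them into a replacement reader, the second returns them as an over-read slice. Below is a small sketch (not from the original source) of driving the second variant directly; pickStorage and its inputs are illustrative.

// pickStorage builds the picker and feeds it one blob's bytes, returning the
// chosen storage plus whatever the picker over-read while sniffing.
func pickStorage(thenSto, elseSto blobserver.Storage, blobBytes []byte) (blobserver.Storage, []byte, error) {
	pick := isSchemaPicker(thenSto, elseSto)
	return pick(bytes.NewReader(blobBytes))
}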
Example #6
func (sn *BlobSniffer) bufferIsCamliJSON() bool {
	buf := sn.header
	if !schema.LikelySchemaBlob(buf) {
		return false
	}
	blob, err := schema.BlobFromReader(sn.br, bytes.NewReader(buf))
	if err != nil {
		return false
	}
	sn.meta = blob
	return true
}
Example #7
// NewFromShareRoot uses shareBlobURL to set up and return a client that
// will be used to fetch shared blobs.
func NewFromShareRoot(shareBlobURL string, opts ...ClientOption) (c *Client, target blob.Ref, err error) {
	var root string
	m := shareURLRx.FindStringSubmatch(shareBlobURL)
	if m == nil {
		return nil, blob.Ref{}, fmt.Errorf("Unknown share URL base")
	}
	c = New(m[1])
	c.discoOnce.Do(func() { /* nothing */ })
	c.prefixOnce.Do(func() { /* nothing */ })
	c.prefixv = m[1]
	c.isSharePrefix = true
	c.authMode = auth.None{}
	c.via = make(map[string]string)
	root = m[2]

	for _, v := range opts {
		v.modifyClient(c)
	}
	c.SetHTTPClient(&http.Client{Transport: c.TransportForConfig(nil)})

	req := c.newRequest("GET", shareBlobURL, nil)
	res, err := c.doReqGated(req)
	if err != nil {
		return nil, blob.Ref{}, fmt.Errorf("Error fetching %s: %v", shareBlobURL, err)
	}
	defer res.Body.Close()
	b, err := schema.BlobFromReader(blob.ParseOrZero(root), res.Body)
	if err != nil {
		return nil, blob.Ref{}, fmt.Errorf("Error parsing JSON from %s: %v", shareBlobURL, err)
	}
	if b.ShareAuthType() != schema.ShareHaveRef {
		return nil, blob.Ref{}, fmt.Errorf("Unknown share authType of %q", b.ShareAuthType())
	}
	target = b.ShareTarget()
	if !target.Valid() {
		return nil, blob.Ref{}, fmt.Errorf("No target.")
	}
	c.via[target.String()] = root
	return c, target, nil
}
Example #8
// NewFromShareRoot uses shareBlobURL to set up and return a client that
// will be used to fetch shared blobs.
func NewFromShareRoot(shareBlobURL string, opts ...ClientOption) (c *Client, target blob.Ref, err error) {
	var root string
	m := shareURLRx.FindStringSubmatch(shareBlobURL)
	if m == nil {
		return nil, blob.Ref{}, fmt.Errorf("Unknown share URL base")
	}
	c = New(m[1], opts...)
	c.discoOnce.Do(noop)
	c.prefixOnce.Do(noop)
	c.prefixv = m[1]
	c.isSharePrefix = true
	c.authMode = auth.None{}
	c.via = make(map[blob.Ref]blob.Ref)
	root = m[2]

	req := c.newRequest("GET", shareBlobURL, nil)
	res, err := c.expect2XX(req)
	if err != nil {
		return nil, blob.Ref{}, fmt.Errorf("error fetching %s: %v", shareBlobURL, err)
	}
	defer res.Body.Close()
	var buf bytes.Buffer
	rootbr, ok := blob.Parse(root)
	if !ok {
		return nil, blob.Ref{}, fmt.Errorf("invalid root blob ref for sharing: %q", root)
	}
	b, err := schema.BlobFromReader(rootbr, io.TeeReader(res.Body, &buf))
	if err != nil {
		return nil, blob.Ref{}, fmt.Errorf("error parsing JSON from %s: %v , with response: %q", shareBlobURL, err, buf.Bytes())
	}
	if b.ShareAuthType() != schema.ShareHaveRef {
		return nil, blob.Ref{}, fmt.Errorf("unknown share authType of %q", b.ShareAuthType())
	}
	target = b.ShareTarget()
	if !target.Valid() {
		return nil, blob.Ref{}, fmt.Errorf("no target.")
	}
	c.via[target] = rootbr
	return c, target, nil
}
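A sketch (not from the original source) of how a camget-style caller might use NewFromShareRoot: build the client from the share URL, then fetch the shared target through that client, which has recorded the root in c.via so the share chain can be presented on later fetches. fetchShared and its use of ioutil are illustrative.

// fetchShared resolves a share URL to a client plus target ref, then slurps
// the target blob through that client.
func fetchShared(shareBlobURL string) ([]byte, error) {
	c, target, err := NewFromShareRoot(shareBlobURL)
	if err != nil {
		return nil, err
	}
	rc, _, err := c.Fetch(target)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}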
Example #9
// cached returns a ReadCloser for the given blobref, which may
// point to either a blob representing the entire thumbnail (max
// 16MB) or a file schema blob referencing the thumbnail's chunks.
//
// The ReadCloser should be closed when done reading.
func (ih *ImageHandler) cached(br blob.Ref) (io.ReadCloser, error) {
	rsc, _, err := ih.Cache.FetchStreaming(br)
	if err != nil {
		return nil, err
	}
	slurp, err := ioutil.ReadAll(rsc)
	rsc.Close()
	if err != nil {
		return nil, err
	}
	// In the common case, when the scaled image itself is less than 16 MB, it's
	// all together in one blob.
	if strings.HasPrefix(magic.MIMEType(slurp), "image/") {
		thumbCacheHitFull.Add(1)
		if imageDebug {
			log.Printf("Image Cache: hit: %v\n", br)
		}
		return ioutil.NopCloser(bytes.NewReader(slurp)), nil
	}

	// For large scaled images, the cached blob is a file schema blob referencing
	// the sub-chunks.
	fileBlob, err := schema.BlobFromReader(br, bytes.NewReader(slurp))
	if err != nil {
		log.Printf("Failed to parse non-image thumbnail cache blob %v: %v", br, err)
		return nil, err
	}
	fetchSeeker := blob.SeekerFromStreamingFetcher(ih.Cache)
	fr, err := fileBlob.NewFileReader(fetchSeeker)
	if err != nil {
		log.Printf("cached(%d) NewFileReader = %v", br, err)
		return nil, err
	}
	thumbCacheHitFile.Add(1)
	if imageDebug {
		log.Printf("Image Cache: fileref hit: %v\n", br)
	}
	return fr, nil
}
Example #10
func (s *storage) ReceiveBlob(br blob.Ref, source io.Reader) (sb blob.SizedRef, err error) {
	buf := pools.BytesBuffer()
	defer pools.PutBuffer(buf)

	if _, err := io.Copy(buf, source); err != nil {
		return sb, err
	}
	size := uint32(buf.Len())
	isFile := false
	fileBlob, err := schema.BlobFromReader(br, bytes.NewReader(buf.Bytes()))
	if err == nil && fileBlob.Type() == "file" {
		isFile = true
	}
	meta, err := s.getMetaRow(br)
	if err != nil {
		return sb, err
	}
	if meta.exists {
		sb = blob.SizedRef{Size: size, Ref: br}
	} else {
		sb, err = s.small.ReceiveBlob(br, buf)
		if err != nil {
			return sb, err
		}
	}
	if !isFile || meta.isPacked() || fileBlob.PartsSize() < packThreshold {
		return sb, nil
	}

	// Pack the blob.
	s.packGate.Start()
	defer s.packGate.Done()
	// We ignore the return value from packFile since we can't
	// really recover. At least be happy that we have all the
	// data on 'small' already. packFile will log at least.
	s.packFile(br)
	return sb, nil
}
Example #11
// Errors returned are:
//    os.ErrNotExist -- blob not found
//    os.ErrInvalid -- not JSON or a camli schema blob
func (fs *CamliFileSystem) fetchSchemaMeta(br blob.Ref) (*schema.Blob, error) {
	blobStr := br.String()
	if blob, ok := fs.blobToSchema.Get(blobStr); ok {
		return blob.(*schema.Blob), nil
	}

	rc, _, err := fs.fetcher.Fetch(br)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	blob, err := schema.BlobFromReader(br, rc)
	if err != nil {
		log.Printf("Error parsing %s as schema blob: %v", br, err)
		return nil, os.ErrInvalid
	}
	if blob.Type() == "" {
		log.Printf("blob %s is JSON but lacks camliType", br)
		return nil, os.ErrInvalid
	}
	fs.blobToSchema.Add(blobStr, blob)
	return blob, nil
}
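Since fetchSchemaMeta folds parse failures into os.ErrInvalid and documents os.ErrNotExist for missing blobs, callers can branch on those two sentinels. A hypothetical wrapper (not from the original source) is sketched below; schemaTypeOf and the fmt error messages are illustrative.

// schemaTypeOf distinguishes the documented error cases of fetchSchemaMeta.
func (fs *CamliFileSystem) schemaTypeOf(br blob.Ref) (string, error) {
	b, err := fs.fetchSchemaMeta(br)
	switch err {
	case nil:
		return b.Type(), nil
	case os.ErrNotExist:
		return "", fmt.Errorf("blob %v not found", br)
	case os.ErrInvalid:
		return "", fmt.Errorf("blob %v is not a camli schema blob", br)
	default:
		return "", err
	}
}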
Example #12
// Unauthenticated user.  Be paranoid.
func handleGetViaSharing(conn http.ResponseWriter, req *http.Request,
	blobRef blob.Ref, fetcher blob.StreamingFetcher) error {
	if !httputil.IsGet(req) {
		return &shareError{code: invalidMethod, response: badRequest, message: "Invalid method"}
	}

	viaPathOkay := false
	startTime := time.Now()
	defer func() {
		if !viaPathOkay {
			// Insert a delay, to hide timing attacks probing
			// for the existence of blobs.
			sleep := fetchFailureDelay - (time.Now().Sub(startTime))
			time.Sleep(sleep)
		}
	}()
	viaBlobs := make([]blob.Ref, 0)
	if via := req.FormValue("via"); via != "" {
		for _, vs := range strings.Split(via, ",") {
			if br, ok := blob.Parse(vs); ok {
				viaBlobs = append(viaBlobs, br)
			} else {
				return &shareError{code: invalidVia, response: badRequest, message: "Malformed blobref in via param"}
			}
		}
	}

	fetchChain := make([]blob.Ref, 0)
	fetchChain = append(fetchChain, viaBlobs...)
	fetchChain = append(fetchChain, blobRef)
	isTransitive := false
	for i, br := range fetchChain {
		switch i {
		case 0:
			file, size, err := fetcher.FetchStreaming(br)
			if err != nil {
				return unauthorized(shareFetchFailed, "Fetch chain 0 of %s failed: %v", br, err)
			}
			defer file.Close()
			if size > schema.MaxSchemaBlobSize {
				return unauthorized(shareBlobTooLarge, "Fetch chain 0 of %s too large", br)
			}
			blob, err := schema.BlobFromReader(br, file)
			if err != nil {
				return unauthorized(shareReadFailed, "Can't create a blob from %v: %v", br, err)
			}
			share, ok := blob.AsShare()
			if !ok {
				return unauthorized(shareBlobInvalid, "Fetch chain 0 of %s wasn't a valid Share", br)
			}
			if share.IsExpired() {
				return unauthorized(shareExpired, "Share is expired")
			}
			if len(fetchChain) > 1 && fetchChain[1].String() != share.Target().String() {
				return unauthorized(shareTargetInvalid,
					"Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q",
					br, fetchChain[1], share.Target())
			}
			isTransitive = share.IsTransitive()
			if len(fetchChain) > 2 && !isTransitive {
				return unauthorized(shareNotTransitive, "Share is not transitive")
			}
		case len(fetchChain) - 1:
			// Last one is fine (as long as its path up to here has been proven, and it's
			// not the first thing in the chain)
			continue
		default:
			file, _, err := fetcher.FetchStreaming(br)
			if err != nil {
				return unauthorized(viaChainFetchFailed, "Fetch chain %d of %s failed: %v", i, br, err)
			}
			defer file.Close()
			lr := io.LimitReader(file, schema.MaxSchemaBlobSize)
			slurpBytes, err := ioutil.ReadAll(lr)
			if err != nil {
				return unauthorized(viaChainReadFailed,
					"Fetch chain %d of %s failed in slurp: %v", i, br, err)
			}
			saught := fetchChain[i+1].String()
			if bytes.Index(slurpBytes, []byte(saught)) == -1 {
				return unauthorized(viaChainInvalidLink,
					"Fetch chain %d of %s failed; no reference to %s", i, br, saught)
			}
		}
	}

	if assemble, _ := strconv.ParseBool(req.FormValue("assemble")); assemble {
		if !isTransitive {
			return unauthorized(assembleNonTransitive, "Cannot assemble non-transitive share")
		}
		dh := &DownloadHandler{
			Fetcher: fetcher,
			// TODO(aa): It would be nice to specify a local cache here, as the UI handler does.
		}
		dh.ServeHTTP(conn, req, blobRef)
	} else {
		gethandler.ServeBlobRef(conn, req, blobRef, fetcher)
	}
	viaPathOkay = true
	return nil
}
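On the client side, the via chain that this handler verifies is just a comma-separated list of blobrefs in the via query parameter, with the requested blob itself addressed by the URL path. Below is a sketch (not from the original source) of building such a request URL; shareFetchURL is illustrative and assumes the /camli/<ref> blob path seen elsewhere in these examples.

// shareFetchURL builds a URL of the shape the handler above expects:
// GET <base>/camli/<want>?via=<ref1>,<ref2>,...
func shareFetchURL(base string, want blob.Ref, via []blob.Ref) string {
	u := base + "/camli/" + want.String()
	if len(via) == 0 {
		return u
	}
	parts := make([]string, len(via))
	for i, br := range via {
		parts[i] = br.String()
	}
	return u + "?" + url.Values{"via": {strings.Join(parts, ",")}}.Encode()
}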
Example #13
// Unauthenticated user.  Be paranoid.
func handleGetViaSharing(conn http.ResponseWriter, req *http.Request,
	blobRef blob.Ref, fetcher blob.StreamingFetcher) {
	if req.Method != "GET" && req.Method != "HEAD" {
		httputil.BadRequestError(conn, "Invalid method")
		return
	}

	viaPathOkay := false
	startTime := time.Now()
	defer func() {
		if !viaPathOkay {
			// Insert a delay, to hide timing attacks probing
			// for the existence of blobs.
			sleep := fetchFailureDelay - (time.Now().Sub(startTime))
			time.Sleep(sleep)
		}
	}()
	viaBlobs := make([]blob.Ref, 0)
	if via := req.FormValue("via"); via != "" {
		for _, vs := range strings.Split(via, ",") {
			if br, ok := blob.Parse(vs); ok {
				viaBlobs = append(viaBlobs, br)
			} else {
				httputil.BadRequestError(conn, "Malformed blobref in via param")
				return
			}
		}
	}

	fetchChain := make([]blob.Ref, 0)
	fetchChain = append(fetchChain, viaBlobs...)
	fetchChain = append(fetchChain, blobRef)
	for i, br := range fetchChain {
		switch i {
		case 0:
			file, size, err := fetcher.FetchStreaming(br)
			if err != nil {
				log.Printf("Fetch chain 0 of %s failed: %v", br.String(), err)
				auth.SendUnauthorized(conn, req)
				return
			}
			defer file.Close()
			if size > schema.MaxSchemaBlobSize {
				log.Printf("Fetch chain 0 of %s too large", br.String())
				auth.SendUnauthorized(conn, req)
				return
			}
			blob, err := schema.BlobFromReader(br, file)
			if err != nil {
				log.Printf("Can't create a blob from %v: %v", br.String(), err)
				auth.SendUnauthorized(conn, req)
				return
			}
			share, ok := blob.AsShare()
			if !ok {
				log.Printf("Fetch chain 0 of %s wasn't a valid Share", br.String())
				auth.SendUnauthorized(conn, req)
				return
			}
			if len(fetchChain) > 1 && fetchChain[1].String() != share.Target().String() {
				log.Printf("Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q",
					br.String(), fetchChain[1].String(), share.Target().String())
				auth.SendUnauthorized(conn, req)
				return
			}
		case len(fetchChain) - 1:
			// Last one is fine (as long as its path up to here has been proven, and it's
			// not the first thing in the chain)
			continue
		default:
			file, _, err := fetcher.FetchStreaming(br)
			if err != nil {
				log.Printf("Fetch chain %d of %s failed: %v", i, br.String(), err)
				auth.SendUnauthorized(conn, req)
				return
			}
			defer file.Close()
			lr := io.LimitReader(file, schema.MaxSchemaBlobSize)
			slurpBytes, err := ioutil.ReadAll(lr)
			if err != nil {
				log.Printf("Fetch chain %d of %s failed in slurp: %v", i, br.String(), err)
				auth.SendUnauthorized(conn, req)
				return
			}
			saught := fetchChain[i+1].String()
			if bytes.Index(slurpBytes, []byte(saught)) == -1 {
				log.Printf("Fetch chain %d of %s failed; no reference to %s",
					i, br.String(), saught)
				auth.SendUnauthorized(conn, req)
				return
			}
		}
	}

	viaPathOkay = true

	gethandler.ServeBlobRef(conn, req, blobRef, fetcher)
}