Example #1
func (x *Index) GetImageInfo(fileRef *blobref.BlobRef) (*search.ImageInfo, error) {
	// it might be that the key does not exist because image.DecodeConfig failed earlier
	// (because of unsupported JPEG features like progressive mode).
	key := keyImageSize.Key(fileRef.String())
	dim, err := x.s.Get(key)
	if err == ErrNotFound {
		err = os.ErrNotExist
	}
	if err != nil {
		return nil, err
	}
	valPart := strings.Split(dim, "|")
	if len(valPart) != 2 {
		return nil, fmt.Errorf("index: bogus key %q = %q", key, dim)
	}
	width, err := strconv.Atoi(valPart[0])
	if err != nil {
		return nil, fmt.Errorf("index: bogus integer at position 0 in key %q: %q", key, valPart[0])
	}
	height, err := strconv.Atoi(valPart[1])
	if err != nil {
		return nil, fmt.Errorf("index: bogus integer at position 1 in key %q: %q", key, valPart[1])
	}

	imgInfo := &search.ImageInfo{
		Width:  width,
		Height: height,
	}
	return imgInfo, nil
}
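Because ErrNotFound is mapped to os.ErrNotExist above, callers can use os.IsNotExist to tell a missing index row apart from other failures. A minimal, hypothetical in-package helper sketching that usage (the idx and fileRef values, plus the usual os/log/blobref imports, are assumed):

func logImageInfo(idx *Index, fileRef *blobref.BlobRef) {
	info, err := idx.GetImageInfo(fileRef)
	switch {
	case os.IsNotExist(err):
		// No size row was indexed, e.g. image.DecodeConfig failed at index time.
		log.Printf("no image info for %s", fileRef)
	case err != nil:
		log.Printf("index error for %s: %v", fileRef, err)
	default:
		log.Printf("%s is %d x %d", fileRef, info.Width, info.Height)
	}
}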
Example #2
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) *Builder {
	bb := base(1, "share")
	bb.m["authType"] = authType
	bb.m["target"] = target.String()
	bb.m["transitive"] = transitive
	return bb
}
Example #3
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) map[string]interface{} {
	m := newCamliMap(1, "share")
	m["authType"] = authType
	m["target"] = target.String()
	m["transitive"] = transitive
	return m
}
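For illustration only: assuming newCamliMap seeds the standard camliVersion and camliType fields (its definition is not shown here), the map returned by NewShareRef marshals to JSON along these lines. The authType and target values below are made-up placeholders:

func printShareJSON() {
	// Hypothetical stand-in for newCamliMap(1, "share").
	m := map[string]interface{}{
		"camliVersion": 1,
		"camliType":    "share",
	}
	m["authType"] = "haveref"
	m["target"] = "sha1-0000000000000000000000000000000000000000"
	m["transitive"] = false
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out)) // map keys come out alphabetically sorted
}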
Example #4
func newClaim(permaNode *blobref.BlobRef, t time.Time, claimType string) *Builder {
	bb := newMap(1, "claim")
	bb.m["permaNode"] = permaNode.String()
	bb.m["claimType"] = claimType
	bb.SetClaimDate(t)
	return bb
}
Example #5
// populateMutation populates keys & values into the provided BatchMutation.
//
// The blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutation(br *blobref.BlobRef, sniffer *BlobSniffer, bm BatchMutation) error {
	bm.Set("have:"+br.String(), fmt.Sprintf("%d", sniffer.Size()))
	bm.Set("meta:"+br.String(), fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MimeType()))

	if camli, ok := sniffer.Superset(); ok {
		switch camli.Type {
		case "claim":
			if err := ix.populateClaim(br, camli, sniffer, bm); err != nil {
				return err
			}
		case "permanode":
			//if err := mi.populatePermanode(blobRef, camli, bm); err != nil {
			//return err
			//}
		case "file":
			if err := ix.populateFile(br, camli, bm); err != nil {
				return err
			}
		case "directory":
			if err := ix.populateDir(br, camli, bm); err != nil {
				return err
			}
		}
	}
	return nil
}
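The meta row written above packs the blob's size and MIME type as "<size>|<mimetype>". A hypothetical helper for the reverse parse, using only the standard library:

func parseMetaRow(v string) (size int64, mime string, err error) {
	pipe := strings.Index(v, "|")
	if pipe < 0 {
		return 0, "", fmt.Errorf("index: malformed meta row %q", v)
	}
	size, err = strconv.ParseInt(v[:pipe], 10, 64)
	if err != nil {
		return 0, "", fmt.Errorf("index: malformed size in meta row %q", v)
	}
	return size, v[pipe+1:], nil
}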
Example #6
func (c *FlatHaveCache) NoteBlobExists(br *blobref.BlobRef, size int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if size < 0 {
		panic("negative size")
	}
	k := br.String()
	if c.m[k] == size {
		// dup
		return
	}
	c.m[k] = size

	if c.af == nil {
		var err error
		c.af, err = os.OpenFile(c.filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			log.Printf("opening have-cache for append: %v", err)
			return
		}
	}
	// TODO: flocking. see leveldb-go.
	c.af.Seek(0, os.SEEK_END)
	c.af.Write([]byte(fmt.Sprintf("%s %d\n", k, size)))
}
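NoteBlobExists appends one "<blobref> <size>" pair per line, so reloading the cache only needs a line scanner. A sketch of such a loader under that format assumption; it is not the package's actual loading code:

func loadHaveCacheFile(path string) (map[string]int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	m := make(map[string]int64)
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		// Each line is "<blobref> <size>", as written by NoteBlobExists.
		fields := strings.Fields(sc.Text())
		if len(fields) != 2 {
			continue // skip torn or malformed lines
		}
		size, err := strconv.ParseInt(fields[1], 10, 64)
		if err != nil || size < 0 {
			continue
		}
		m[fields[0]] = size
	}
	return m, sc.Err()
}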
Example #7
// Errors returned are:
//    os.ErrNotExist -- blob not found
//    os.ErrInvalid -- not JSON or a camli schema blob
func (fs *CamliFileSystem) fetchSchemaSuperset(br *blobref.BlobRef) (*schema.Superset, error) {
	blobStr := br.String()
	if ss, ok := fs.blobToSchema.Get(blobStr); ok {
		return ss.(*schema.Superset), nil
	}

	rsc, _, err := fs.fetcher.Fetch(br)
	if err != nil {
		return nil, err
	}
	defer rsc.Close()
	jd := json.NewDecoder(rsc)
	ss := new(schema.Superset)
	err = jd.Decode(ss)
	if err != nil {
		log.Printf("Error parsing %s as schema blob: %v", br, err)
		return nil, os.ErrInvalid
	}
	if ss.Type == "" {
		log.Printf("blob %s is JSON but lacks camliType", br)
		return nil, os.ErrInvalid
	}
	ss.BlobRef = br
	fs.blobToSchema.Add(blobStr, ss)
	return ss, nil
}
Example #8
func (c *SQLiteHaveCache) StatBlobCache(br *blobref.BlobRef) (size int64, ok bool) {
	if br == nil {
		return
	}
	// TODO(mpl): is it enough that we know it's a valid blobref to avoid any injection risk?
	query := blobSizeQuery + fmt.Sprintf("'%v';\n", br.String())
	c.mu.Lock()
	defer c.mu.Unlock()
	err := c.startSQLiteChild()
	if err != nil {
		log.Fatalf("Could not start sqlite child process: %v", err)
	}
	_, err = c.w.Write([]byte(query))
	if err != nil {
		log.Fatalf("failed to query have cache: %v", err)
	}
	out, err := c.r.ReadString('\n')
	if err != nil {
		log.Fatalf("failed to read have cache query result: %v", err)
	}
	out = strings.TrimRight(out, "\n")
	if out == noResult {
		return
	}
	size, err = strconv.ParseInt(out, 10, 64)
	if err != nil {
		log.Fatalf("Bogus blob size in %v table: %v", haveTableName, err)
	}
	return size, true
}
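The TODO above is about building SQL by string concatenation. If the cache talked to SQLite through database/sql instead of a child process (a different setup than the one shown here), the lookup could bind the blobref as a parameter. A sketch, with "have" standing in for the real table and the column names assumed:

func statBlobSize(db *sql.DB, ref string) (size int64, ok bool) {
	// "have" stands in for haveTableName; blobref/size columns are assumptions.
	err := db.QueryRow("SELECT size FROM have WHERE blobref = ?", ref).Scan(&size)
	switch {
	case err == sql.ErrNoRows:
		return 0, false
	case err != nil:
		log.Fatalf("have-cache query: %v", err)
	}
	return size, true
}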
Example #9
func NewClaim(permaNode *blobref.BlobRef, claimType string) map[string]interface{} {
	m := newCamliMap(1, "claim")
	m["permaNode"] = permaNode.String()
	m["claimType"] = claimType
	m["claimDate"] = RFC3339FromTime(time.Now())
	return m
}
Example #10
// scaledCached reads the scaled version of the image in file,
// if it is in cache. On success, the image format is returned.
func (ih *ImageHandler) scaledCached(buf *bytes.Buffer, file *blobref.BlobRef) (format string, err error) {
	name := cacheKey(file.String(), ih.MaxWidth, ih.MaxHeight)
	br, err := ih.sc.Get(name)
	if err != nil {
		return format, fmt.Errorf("%v: %v", name, err)
	}
	fr, err := ih.cached(br)
	if err != nil {
		return format, fmt.Errorf("No cache hit for %v: %v", br, err)
	}
	_, err = io.Copy(buf, fr)
	if err != nil {
		return format, fmt.Errorf("error reading cached thumbnail %v: %v", name, err)
	}
	mime := magic.MimeType(buf.Bytes())
	if mime == "" {
		return format, fmt.Errorf("error with cached thumbnail %v: unknown mime type", name)
	}
	pieces := strings.Split(mime, "/")
	if len(pieces) < 2 {
		return format, fmt.Errorf("error with cached thumbnail %v: bogus mime type", name)
	}
	if pieces[0] != "image" {
		return format, fmt.Errorf("error with cached thumbnail %v: not an image", name)
	}
	return pieces[1], nil
}
Example #11
func (h *SimpleBlobHub) NotifyBlobReceived(blob *blobref.BlobRef) {
	h.l.Lock()
	defer h.l.Unlock()

	// Channels to notify; stays nil unless there are listeners.
	var notify []chan *blobref.BlobRef

	// Append global listeners
	for ch := range h.listeners {
		notify = append(notify, ch)
	}

	// Append blob-specific listeners
	if h.blobListeners != nil {
		blobstr := blob.String()
		if set, ok := h.blobListeners[blobstr]; ok {
			for ch := range set {
				notify = append(notify, ch)
			}
		}
	}

	// Run in a separate goroutine so NotifyBlobReceived doesn't block
	// callers if callbacks are slow.
	go func() {
		for _, ch := range notify {
			ch <- blob
		}
	}()
}
Example #12
// ResolvePrefixHop takes a parent blobref and a few hex characters of the digest of a
// member blob, and returns the member's complete blobref if that prefix is a valid next hop.
func (sh *Handler) ResolvePrefixHop(parent *blobref.BlobRef, prefix string) (child *blobref.BlobRef, err error) {
	// TODO: this is a linear scan right now. this should be
	// optimized to use a new database table of members so this is
	// a quick lookup.  in the meantime it should be in memcached
	// at least.
	if len(prefix) < 8 {
		return nil, fmt.Errorf("Member prefix %q too small", prefix)
	}
	dr := sh.NewDescribeRequest()
	dr.Describe(parent, 1)
	res, err := dr.Result()
	if err != nil {
		return
	}
	des, ok := res[parent.String()]
	if !ok {
		return nil, fmt.Errorf("Failed to describe member %q in parent %q", prefix, parent)
	}
	if des.Permanode != nil {
		if cr, ok := des.ContentRef(); ok && strings.HasPrefix(cr.Digest(), prefix) {
			return cr, nil
		}
		for _, member := range des.Members() {
			if strings.HasPrefix(member.BlobRef.Digest(), prefix) {
				return member.BlobRef, nil
			}
		}
	}
	return nil, fmt.Errorf("Member prefix %q not found in %q", prefix, parent)
}
Example #13
func (fi *FakeIndex) AddClaim(owner, permanode *blobref.BlobRef, claimType, attr, value string) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	date := fi.nextDate()

	claim := &search.Claim{
		Permanode: permanode,
		Signer:    nil,
		BlobRef:   nil,
		Date:      date,
		Type:      claimType,
		Attr:      attr,
		Value:     value,
	}
	key := permanode.String() + "/" + owner.String()
	fi.ownerClaims[key] = append(fi.ownerClaims[key], claim)

	if claimType == "set-attribute" && strings.HasPrefix(attr, "camliPath:") {
		suffix := attr[len("camliPath:"):]
		path := &search.Path{
			Target: blobref.MustParse(value),
			Suffix: suffix,
		}
		fi.path[fmt.Sprintf("%s\x00%s\x00%s", owner, permanode, suffix)] = path
	}
}
Example #14
func newClaim(permaNode *blobref.BlobRef, t time.Time, claimType string) Map {
	m := newMap(1, "claim")
	m["permaNode"] = permaNode.String()
	m["claimType"] = claimType
	m.SetClaimDate(t)
	return m
}
Example #15
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) Map {
	m := newMap(1, "share")
	m["authType"] = authType
	m["target"] = target.String()
	m["transitive"] = transitive
	return m
}
Example #16
func (fr *FileReader) getSuperset(br *blobref.BlobRef) (*Superset, error) {
	if root := fr.rootReader(); root != fr {
		return root.getSuperset(br)
	}
	brStr := br.String()
	ssi, err := fr.sfg.Do(brStr, func() (interface{}, error) {
		fr.ssmmu.Lock()
		ss, ok := fr.ssm[brStr]
		fr.ssmmu.Unlock()
		if ok {
			return ss, nil
		}
		rsc, _, err := fr.fetcher.Fetch(br)
		if err != nil {
			return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
		}
		defer rsc.Close()
		ss, err = ParseSuperset(rsc)
		if err != nil {
			return nil, err
		}
		fr.ssmmu.Lock()
		defer fr.ssmmu.Unlock()
		fr.ssm[brStr] = ss
		return ss, nil
	})
	if err != nil {
		return nil, err
	}
	return ssi.(*Superset), nil
}
Example #17
func (c *Client) FetchVia(b *blobref.BlobRef, v []*blobref.BlobRef) (io.ReadCloser, int64, error) {
	pfx, err := c.prefix()
	if err != nil {
		return nil, 0, err
	}
	url := fmt.Sprintf("%s/camli/%s", pfx, b)

	if len(v) > 0 {
		buf := bytes.NewBufferString(url)
		buf.WriteString("?via=")
		for i, br := range v {
			if i != 0 {
				buf.WriteString(",")
			}
			buf.WriteString(br.String())
		}
		url = buf.String()
	}

	req := c.newRequest("GET", url)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, 0, err
	}

	if resp.StatusCode != 200 {
		return nil, 0, fmt.Errorf("Got status code %d from blobserver for %s", resp.StatusCode, b)
	}

	size := resp.ContentLength
	if size == -1 {
		return nil, 0, errors.New("blobserver didn't return a Content-Length for blob")
	}

	if c.via == nil {
		// Not in sharing mode, so return immediately.
		return resp.Body, size, nil
	}

	// Slurp 1 MB to find references to other blobrefs for the via path.
	const maxSlurp = 1 << 20
	var buf bytes.Buffer
	_, err = io.Copy(&buf, io.LimitReader(resp.Body, maxSlurp))
	if err != nil {
		return nil, 0, err
	}
	// If it looks like a JSON schema blob (starts with '{')
	if schema.LikelySchemaBlob(buf.Bytes()) {
		for _, blobstr := range blobsRx.FindAllString(buf.String(), -1) {
			c.via[blobstr] = b.String()
		}
	}
	// Read from the multireader, but close the HTTP response body.
	type rc struct {
		io.Reader
		io.Closer
	}
	return rc{io.MultiReader(&buf, resp.Body), resp.Body}, size, nil
}
Example #18
func (dr *DescribeRequest) DescribeSync(br *blobref.BlobRef) (*DescribedBlob, error) {
	dr.Describe(br, 1)
	res, err := dr.Result()
	if err != nil {
		return nil, err
	}
	return res[br.String()], nil
}
Example #19
func (tf *Fetcher) BlobContents(br *blobref.BlobRef) (contents string, ok bool) {
	tf.l.Lock()
	defer tf.l.Unlock()
	b, ok := tf.m[br.String()]
	if !ok {
		return
	}
	return b.Contents, true
}
Example #20
func (dr *DescribeRequest) addError(br *blobref.BlobRef, err error) {
	if err == nil {
		return
	}
	dr.mu.Lock()
	defer dr.mu.Unlock()
	// TODO: append? meh.
	dr.errs[br.String()] = err
}
Example #21
func (n *mutFile) setContent(br *blobref.BlobRef, size int64) error {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.content = br
	n.size = size
	claim := schema.NewSetAttributeClaim(n.permanode, "camliContent", br.String())
	_, err := n.fs.client.UploadAndSignBlob(claim)
	return err
}
Example #22
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) {
	if req.Method != "GET" && req.Method != "HEAD" {
		http.Error(rw, "Invalid download method", 400)
		return
	}
	if req.Header.Get("If-Modified-Since") != "" {
		// Immutable, so any copy's a good copy.
		rw.WriteHeader(http.StatusNotModified)
		return
	}

	fr, err := schema.NewFileReader(dh.storageSeekFetcher(), file)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.Error(), 500)
		return
	}
	defer fr.Close()

	schema := fr.FileSchema()
	h := rw.Header()
	h.Set("Content-Length", fmt.Sprintf("%d", schema.SumPartsSize()))
	h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat))

	mimeType := magic.MIMETypeFromReaderAt(fr)
	if dh.ForceMime != "" {
		mimeType = dh.ForceMime
	}
	if mimeType == "" {
		mimeType = "application/octet-stream"
	}
	h.Set("Content-Type", mimeType)
	if mimeType == "application/octet-stream" {
		// Chrome seems to silently do nothing on
		// application/octet-stream unless this is set.
		// Maybe it's confused by lack of URL it recognizes
		// along with lack of mime type?
		rw.Header().Set("Content-Disposition", "attachment; filename=file-"+file.String()+".dat")
	}

	if req.Method == "HEAD" && req.FormValue("verifycontents") != "" {
		vbr := blobref.Parse(req.FormValue("verifycontents"))
		if vbr == nil {
			return
		}
		hash := vbr.Hash()
		if hash == nil {
			return
		}
		io.Copy(hash, fr) // ignore errors, caught later
		if vbr.HashMatches(hash) {
			rw.Header().Set("X-Camli-Contents", vbr.String())
		}
		return
	}

	http.ServeContent(rw, req, "", time.Now(), fr)
}
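The verifycontents branch lets a client ask the server to hash the file and report the outcome in the X-Camli-Contents response header. A sketch of such a HEAD request; the download URL layout is assumed here, not taken from this handler:

func contentsMatch(c *http.Client, downloadURL, wantBlobRef string) (bool, error) {
	req, err := http.NewRequest("HEAD", downloadURL+"?verifycontents="+url.QueryEscape(wantBlobRef), nil)
	if err != nil {
		return false, err
	}
	resp, err := c.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	// The handler only sets X-Camli-Contents when the hash matched.
	return resp.Header.Get("X-Camli-Contents") == wantBlobRef, nil
}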
Example #23
func (fi *FakeIndex) GetBlobMimeType(blob *blobref.BlobRef) (mime string, size int64, err error) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	bs := blob.String()
	mime, ok := fi.mimeType[bs]
	if !ok {
		return "", 0, os.ErrNotExist
	}
	return mime, fi.size[bs], nil
}
Example #24
// fetchMeta returns os.ErrNotExist on a cache miss.
func (s *storage) fetchMeta(b *blobref.BlobRef) (*metaValue, error) {
	v, err := s.index.Get(b.String())
	if err == index.ErrNotFound {
		err = os.ErrNotExist
	}
	if err != nil {
		return nil, err
	}
	return parseMetaValue(v)
}
Example #25
func fetch(src blobref.StreamingFetcher, br *blobref.BlobRef) (r io.ReadCloser, err error) {
	if *flagVerbose {
		log.Printf("Fetching %s", br.String())
	}
	r, _, err = src.FetchStreaming(br)
	if err != nil {
		return nil, fmt.Errorf("Failed to fetch %s: %s", br, err)
	}
	return r, err
}
Example #26
// uploadFilePermanode creates and uploads the planned permanode (with sum as a
// fixed key) associated with the file blobref fileRef.
// It also sets the optional tags for this permanode.
func (up *Uploader) uploadFilePermanode(sum string, fileRef *blobref.BlobRef, claimTime time.Time) error {
	// Use a fixed time value for signing; not using modtime
	// so two identical files don't have different modtimes?
	// TODO(bradfitz): consider this more?
	permaNodeSigTime := time.Unix(0, 0)
	permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
	if err != nil {
		return fmt.Errorf("Error uploading planned permanode: %v", err)
	}
	handleResult("node-permanode", permaNode, nil)

	contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", fileRef.String())
	contentAttr.SetClaimDate(claimTime)
	signed, err := up.SignBlob(contentAttr, claimTime)
	if err != nil {
		return fmt.Errorf("Failed to sign content claim: %v", err)
	}
	put, err := up.uploadString(signed)
	if err != nil {
		return fmt.Errorf("Error uploading permanode's attribute: %v", err)
	}

	handleResult("node-permanode-contentattr", put, nil)
	if tags := up.fileOpts.tags(); len(tags) > 0 {
		errch := make(chan error)
		for _, tag := range tags {
			go func(tag string) {
				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
				m.SetClaimDate(claimTime)
				signed, err := up.SignBlob(m, claimTime)
				if err != nil {
					errch <- fmt.Errorf("Failed to sign tag claim: %v", err)
					return
				}
				put, err := up.uploadString(signed)
				if err != nil {
					errch <- fmt.Errorf("Error uploading permanode's tag attribute %v: %v", tag, err)
					return
				}
				handleResult("node-permanode-tag", put, nil)
				errch <- nil
			}(tag)
		}

		for range tags {
			if e := <-errch; e != nil && err == nil {
				err = e
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}
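The tag upload above fans work out to one goroutine per tag and keeps only the first error. The same shape, pulled out into a generic helper for clarity (a sketch, not code that exists in the package):

func forEachConcurrently(items []string, fn func(string) error) error {
	errch := make(chan error)
	for _, it := range items {
		// Pass it as an argument so each goroutine sees its own copy.
		go func(it string) { errch <- fn(it) }(it)
	}
	var first error
	for range items {
		if err := <-errch; err != nil && first == nil {
			first = err
		}
	}
	return first
}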
Example #27
func (ih *ImageHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) {
	if req.Method != "GET" && req.Method != "HEAD" {
		http.Error(rw, "Invalid method", 400)
		return
	}
	mw, mh := ih.MaxWidth, ih.MaxHeight
	if mw == 0 || mh == 0 || mw > 2000 || mh > 2000 {
		http.Error(rw, "bogus dimensions", 400)
		return
	}

	var buf bytes.Buffer
	var err error
	format := ""
	cacheHit := false
	if ih.sc != nil {
		format, err = ih.scaledCached(&buf, file)
		if err != nil {
			log.Printf("image resize: %v", err)
		} else {
			cacheHit = true
		}
	}

	if !cacheHit {
		format, err = ih.scaleImage(&buf, file)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}
		if ih.sc != nil {
			name := cacheKey(file.String(), mw, mh)
			bufcopy := buf.Bytes()
			err = ih.cacheScaled(bytes.NewBuffer(bufcopy), name)
			if err != nil {
				log.Printf("image resize: %v", err)
			}
		}
	}

	rw.Header().Set("Content-Type", imageContentTypeOfFormat(format))
	size := buf.Len()
	rw.Header().Set("Content-Length", fmt.Sprintf("%d", size))
	n, err := io.Copy(rw, &buf)
	if err != nil {
		log.Printf("error serving thumbnail of file schema %s: %v", file, err)
		return
	}
	if n != int64(size) {
		log.Printf("error serving thumbnail of file schema %s: sent %d, expected size of %d",
			file, n, size)
		return
	}
}
Example #28
func (dr *DescribeRequest) describedBlob(b *blobref.BlobRef) *DescribedBlob {
	dr.mu.Lock()
	defer dr.mu.Unlock()
	bs := b.String()
	if des, ok := dr.m[bs]; ok {
		return des
	}
	des := &DescribedBlob{Request: dr, BlobRef: b}
	dr.m[bs] = des
	return des
}
Example #29
func (b *DescribedBlob) PeerBlob(br *blobref.BlobRef) *DescribedBlob {
	if b.Request == nil {
		return &DescribedBlob{BlobRef: br, Stub: true}
	}
	b.Request.lk.Lock()
	defer b.Request.lk.Unlock()
	if peer, ok := b.Request.m[br.String()]; ok {
		return peer
	}
	return &DescribedBlob{Request: b.Request, BlobRef: br, Stub: true}
}
Example #30
func (cf *CachingFetcher) faultIn(br *blobref.BlobRef) error {
	_, err := cf.g.Do(br.String(), func() (interface{}, error) {
		sblob, _, err := cf.sf.FetchStreaming(br)
		if err != nil {
			return nil, err
		}
		defer sblob.Close()
		_, err = cf.c.ReceiveBlob(br, sblob)
		return nil, err
	})
	return err
}
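faultIn leans on cf.g.Do to collapse concurrent faults for the same blobref into a single fetch, a singleflight-style group. A minimal sketch of that deduplication pattern, independent of whatever implementation the package actually uses:

type call struct {
	wg  sync.WaitGroup
	val interface{}
	err error
}

type Group struct {
	mu sync.Mutex
	m  map[string]*call
}

func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[string]*call)
	}
	if c, ok := g.m[key]; ok {
		// Another goroutine is already running fn for this key; wait and share its result.
		g.mu.Unlock()
		c.wg.Wait()
		return c.val, c.err
	}
	c := new(call)
	c.wg.Add(1)
	g.m[key] = c
	g.mu.Unlock()

	c.val, c.err = fn()
	c.wg.Done()

	g.mu.Lock()
	delete(g.m, key)
	g.mu.Unlock()
	return c.val, c.err
}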