func (c *SQLiteHaveCache) StatBlobCache(br blob.Ref) (size int64, ok bool) {
	if !br.Valid() {
		return
	}
	// TODO(mpl): is it enough that we know it's a valid blobref to avoid any injection risk?
	query := blobSizeQuery + fmt.Sprintf("'%v';\n", br.String())
	c.mu.Lock()
	defer c.mu.Unlock()
	err := c.startSQLiteChild()
	if err != nil {
		log.Fatalf("Could not start sqlite child process: %v", err)
	}
	_, err = c.w.Write([]byte(query))
	if err != nil {
		log.Fatalf("failed to query have cache: %v", err)
	}
	out, err := c.r.ReadString('\n')
	if err != nil {
		log.Fatalf("failed to read have cache query result: %v", err)
	}
	out = strings.TrimRight(out, "\n")
	if out == noResult {
		return
	}
	size, err = strconv.ParseInt(out, 10, 64)
	if err != nil {
		log.Fatalf("Bogus blob size in %v table: %v", haveTableName, err)
	}
	return size, true
}
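A have cache like this one is typically consulted before asking the server whether a blob exists. A minimal sketch of that lookup-then-record pattern, where statServer is a hypothetical callback standing in for the real server stat request:

func statWithCache(cache *SQLiteHaveCache, br blob.Ref, statServer func(blob.Ref) (int64, error)) (int64, error) {
	if size, ok := cache.StatBlobCache(br); ok {
		return size, nil // cache hit: skip the network round trip
	}
	size, err := statServer(br) // hypothetical server stat callback
	if err != nil {
		return 0, err
	}
	cache.NoteBlobExists(br, size) // record for next time (see Example #14)
	return size, nil
}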
Example #2
func (n *node) addEdge(dst blob.Ref) {
	if !dst.Valid() {
		return
	}
	n.g.startLoadNode(dst)
	n.edges = append(n.edges, dst)
}
Example #3
// NewFileReader returns a new FileReader reading the contents of fileBlobRef,
// fetching blobs from fetcher.  The fileBlobRef must be of a "bytes" or "file"
// schema blob.
//
// The caller should call Close on the FileReader when done reading.
func NewFileReader(fetcher blob.Fetcher, fileBlobRef blob.Ref) (*FileReader, error) {
	// TODO(bradfitz): rename this into bytes reader? but for now it's still
	//                 named FileReader, but can also read a "bytes" schema.
	if !fileBlobRef.Valid() {
		return nil, errors.New("schema/filereader: NewFileReader blobref invalid")
	}
	rc, _, err := fetcher.Fetch(fileBlobRef)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
	}
	defer rc.Close()
	ss, err := parseSuperset(rc)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err)
	}
	ss.BlobRef = fileBlobRef
	if ss.Type != "file" && ss.Type != "bytes" {
		return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type)
	}
	fr, err := ss.NewFileReader(fetcher)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err)
	}
	return fr, nil
}
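A hedged usage sketch for NewFileReader; fetcher and fileRef are assumed inputs, and *FileReader is assumed to satisfy io.Reader here:

fr, err := schema.NewFileReader(fetcher, fileRef)
if err != nil {
	log.Fatal(err)
}
defer fr.Close()
data, err := ioutil.ReadAll(fr) // read the whole file's bytes
if err != nil {
	log.Fatal(err)
}
log.Printf("read %d bytes", len(data))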
Example #4
// contToken is of the forms:
//    ""                : start from the beginning of the zip files
//    "sha1-xxxx:n"     : start at == (sha1-xxxx, file n), else next zip
func (st largeBlobStreamer) StreamBlobs(ctx context.Context, dest chan<- blobserver.BlobAndToken, contToken string) (err error) {
	defer close(dest)
	s := st.sto
	large := s.large

	var after string // for enumerateAll
	var skipFiles int
	var firstRef blob.Ref // first we care about

	if contToken != "" {
		f := strings.SplitN(contToken, ":", 2)
		if len(f) != 2 {
			return errContToken
		}
		firstRef, _ = blob.Parse(f[0])
		skipFiles, err = strconv.Atoi(f[1])
		if !firstRef.Valid() || err != nil {
			return errContToken
		}
		// EnumerateAllFrom takes a cursor that's greater, but
		// we want to start _at_ firstRef. So start
		// enumerating right before our target.
		after = firstRef.StringMinusOne()
	}
	return blobserver.EnumerateAllFrom(ctx, large, after, func(sb blob.SizedRef) error {
		if firstRef.Valid() {
			if sb.Ref.Less(firstRef) {
				// Skip.
				return nil
			}
			if firstRef.Less(sb.Ref) {
				skipFiles = 0 // reset it.
			}
		}
		fileN := 0
		return s.foreachZipBlob(sb.Ref, func(bap BlobAndPos) error {
			if skipFiles > 0 {
				skipFiles--
				fileN++
				return nil
			}
			select {
			case dest <- blobserver.BlobAndToken{
				Blob: blob.NewBlob(bap.Ref, bap.Size, func() types.ReadSeekCloser {
					return blob.NewLazyReadSeekCloser(s, bap.Ref)
				}),
				Token: fmt.Sprintf("%s:%d", sb.Ref, fileN),
			}:
				fileN++
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		})
	})
}
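A minimal consumer sketch for StreamBlobs: drain the stream and keep the last token, which a caller could pass back as contToken to resume after a failure. Blob.Ref() is assumed to return the blob's ref:

func streamAll(ctx context.Context, st largeBlobStreamer, handle func(blob.Ref)) (lastToken string, err error) {
	dest := make(chan blobserver.BlobAndToken, 16)
	errc := make(chan error, 1)
	go func() { errc <- st.StreamBlobs(ctx, dest, "") }()
	for bt := range dest { // dest is closed when StreamBlobs returns
		handle(bt.Blob.Ref())
		lastToken = bt.Token // resume point for the next call
	}
	return lastToken, <-errc
}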
Example #5
func (x *Index) AppendClaims(dst []camtypes.Claim, permaNode blob.Ref,
	signerFilter blob.Ref,
	attrFilter string) ([]camtypes.Claim, error) {
	if x.corpus != nil {
		return x.corpus.AppendClaims(dst, permaNode, signerFilter, attrFilter)
	}
	var (
		keyId string
		err   error
		it    sorted.Iterator
	)
	if signerFilter.Valid() {
		keyId, err = x.KeyId(signerFilter)
		if err == sorted.ErrNotFound {
			return nil, nil
		}
		if err != nil {
			return nil, err
		}
		it = x.queryPrefix(keyPermanodeClaim, permaNode, keyId)
	} else {
		it = x.queryPrefix(keyPermanodeClaim, permaNode)
	}
	defer closeIterator(it, &err)

	// In the common case, an attribute filter is just a plain
	// token ("camliContent") unescaped. If so, fast path that
	// check to skip the row before we even split it.
	var mustHave string
	if attrFilter != "" && urle(attrFilter) == attrFilter {
		mustHave = attrFilter
	}

	for it.Next() {
		val := it.Value()
		if mustHave != "" && !strings.Contains(val, mustHave) {
			continue
		}
		cl, ok := kvClaim(it.Key(), val, blob.Parse)
		if !ok {
			continue
		}
		if x.IsDeleted(cl.BlobRef) {
			continue
		}
		if attrFilter != "" && cl.Attr != attrFilter {
			continue
		}
		if signerFilter.Valid() && cl.Signer != signerFilter {
			continue
		}
		dst = append(dst, cl)
	}
	return dst, nil
}
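A hedged usage sketch: collect every "camliContent" claim on a permanode made by one signer (x, pn and signer are assumed inputs):

claims, err := x.AppendClaims(nil, pn, signer, "camliContent")
if err != nil {
	log.Fatal(err)
}
for _, cl := range claims {
	log.Printf("%v: %s = %q (signed by %v)", cl.Date, cl.Attr, cl.Value, cl.Signer)
}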
Example #6
// PermanodeAttrValue returns a single-valued attribute or "".
func (c *Corpus) PermanodeAttrValue(permaNode blob.Ref,
	attr string,
	at time.Time,
	signerFilter blob.Ref) string {
	pm, ok := c.permanodes[permaNode]
	if !ok {
		return ""
	}
	if !signerFilter.Valid() && pm.canUseAttrs(at) {
		v := pm.Attrs[attr]
		if len(v) != 0 {
			return v[0]
		}
		return ""
	}
	if at.IsZero() {
		at = time.Now()
	}
	var v []string
	for _, cl := range pm.Claims {
		if cl.Attr != attr || cl.Date.After(at) {
			continue
		}
		if signerFilter.Valid() && signerFilter != cl.Signer {
			continue
		}
		switch cl.Type {
		case string(schema.DelAttributeClaim):
			if cl.Value == "" {
				v = v[:0]
			} else {
				i := 0
				for _, w := range v {
					if w != cl.Value {
						v[i] = w
						i++
					}
				}
				v = v[:i]
			}
		case string(schema.SetAttributeClaim):
			v = append(v[:0], cl.Value)
		case string(schema.AddAttributeClaim):
			v = append(v, cl.Value)
		}
	}
	if len(v) != 0 {
		return v[0]
	}
	return ""
}
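The claim-replay rules above are easiest to see on a concrete, hypothetical claim history:

// t1 set-attribute title = "a"          -> v = ["a"]
// t2 add-attribute title = "b"          -> v = ["a", "b"]
// t3 del-attribute title = "a"          -> v = ["b"]
// t4 del-attribute title (empty Value)  -> v = []   (deletes all values)
//
// So PermanodeAttrValue(pn, "title", t3, signer) returns "b",
// and at t4 or later it returns "".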
Example #7
func (c *KvHaveCache) NoteBlobExists(br blob.Ref, size uint32) {
	if !br.Valid() {
		return
	}
	binBr, _ := br.MarshalBinary()
	binVal := []byte(strconv.Itoa(int(size)))
	cachelog.Printf("Adding to have cache %v: %q", br, binVal)
	if err := c.db.Put(binBr, binVal, nil); err != nil {
		log.Fatalf("Could not write %v in have cache: %v", br, err)
	}
}
Example #8
func (ss *superset) setFromBlobRef(fetcher blob.Fetcher, blobRef blob.Ref) error {
	if !blobRef.Valid() {
		return errors.New("schema/dirreader: blobref invalid")
	}
	ss.BlobRef = blobRef
	rc, _, err := fetcher.Fetch(blobRef)
	if err != nil {
		return fmt.Errorf("schema/dirreader: fetching schema blob %s: %v", blobRef, err)
	}
	defer rc.Close()
	if err := json.NewDecoder(rc).Decode(ss); err != nil {
		return fmt.Errorf("schema/dirreader: decoding schema blob %s: %v", blobRef, err)
	}
	return nil
}
Example #9
func (fi *FakeIndex) AppendClaims(ctx context.Context, dst []camtypes.Claim, permaNode blob.Ref,
	signerFilter blob.Ref,
	attrFilter string) ([]camtypes.Claim, error) {

	for _, cl := range fi.claims[permaNode] {
		if signerFilter.Valid() && cl.Signer != signerFilter {
			continue
		}
		if attrFilter != "" && cl.Attr != attrFilter {
			continue
		}
		dst = append(dst, cl)
	}
	return dst, nil
}
Example #10
// claimsIntfAttrValue returns the value of an attribute in a list of claims,
// or the empty string if it is not found. claims must be non-nil.
func claimsIntfAttrValue(claims claimsIntf, attr string, at time.Time, signerFilter blob.Ref) string {
	if claims == nil {
		panic("nil claims argument in claimsIntfAttrValue")
	}

	if at.IsZero() {
		at = time.Now()
	}

	// use a small static buffer as it speeds up
	// search.BenchmarkQueryPermanodeLocation by 6-7%
	// with go 1.7.1
	var buf [8]string
	v := buf[:][:0]

	for i := 0; i < claims.Len(); i++ {
		cl := claims.Claim(i)
		if cl.Attr != attr || cl.Date.After(at) {
			continue
		}
		if signerFilter.Valid() && signerFilter != cl.Signer {
			continue
		}
		switch cl.Type {
		case string(schema.DelAttributeClaim):
			if cl.Value == "" {
				v = v[:0]
			} else {
				i := 0
				for _, w := range v {
					if w != cl.Value {
						v[i] = w
						i++
					}
				}
				v = v[:i]
			}
		case string(schema.SetAttributeClaim):
			v = append(v[:0], cl.Value)
		case string(schema.AddAttributeClaim):
			v = append(v, cl.Value)
		}
	}
	if len(v) != 0 {
		return v[0]
	}
	return ""
}
Example #11
// AppendPermanodeAttrValues appends to dst all the values for the attribute
// attr set on permaNode.
// signerFilter is optional.
// dst must start with length 0 (laziness, mostly)
func (c *Corpus) AppendPermanodeAttrValues(dst []string,
	permaNode blob.Ref,
	attr string,
	at time.Time,
	signerFilter blob.Ref) []string {
	if len(dst) > 0 {
		panic("len(dst) must be 0")
	}
	pm, ok := c.permanodes[permaNode]
	if !ok {
		return dst
	}
	if values, ok := pm.valuesAtSigner(at, signerFilter); ok {
		return append(dst, values[attr]...)
	}
	if at.IsZero() {
		at = time.Now()
	}
	for _, cl := range pm.Claims {
		if cl.Attr != attr || cl.Date.After(at) {
			continue
		}
		if signerFilter.Valid() && signerFilter != cl.Signer {
			continue
		}
		switch cl.Type {
		case string(schema.DelAttributeClaim):
			if cl.Value == "" {
				dst = dst[:0] // delete all
			} else {
				for i := 0; i < len(dst); i++ {
					v := dst[i]
					if v == cl.Value {
						copy(dst[i:], dst[i+1:])
						dst = dst[:len(dst)-1]
						i--
					}
				}
			}
		case string(schema.SetAttributeClaim):
			dst = append(dst[:0], cl.Value)
		case string(schema.AddAttributeClaim):
			dst = append(dst, cl.Value)
		}
	}
	return dst
}
Example #12
func uploadString(bs blobserver.StatReceiver, br blob.Ref, s string) (blob.Ref, error) {
	if !br.Valid() {
		panic("invalid blobref")
	}
	hasIt, err := serverHasBlob(bs, br)
	if err != nil {
		return blob.Ref{}, err
	}
	if hasIt {
		return br, nil
	}
	_, err = blobserver.ReceiveNoHash(bs, br, strings.NewReader(s))
	if err != nil {
		return blob.Ref{}, err
	}
	return br, nil
}
Example #13
// HasSecureLinkTo returns true if there's a valid link from this blob
// to the other blob. This is used in access control (hence the
// somewhat redundant "Secure" in the name) and should be paranoid
// against e.g. random user/attacker-controlled attributes making links
// to other blobs.
//
// TODO: don't linear scan here.  rewrite this in terms of ResolvePrefixHop,
// passing down some policy perhaps?  or maybe that's enough.
func (b *DescribedBlob) HasSecureLinkTo(other blob.Ref) bool {
	if b == nil || !other.Valid() {
		return false
	}
	ostr := other.String()
	if b.Permanode != nil {
		if b.Permanode.Attr.Get("camliContent") == ostr {
			return true
		}
		for _, mstr := range b.Permanode.Attr["camliMember"] {
			if mstr == ostr {
				return true
			}
		}
	}
	return false
}
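A hedged sketch of how such a check might gate traversal in access-controlled describe/share code; parent is assumed to be a *DescribedBlob and canDescend is a hypothetical helper:

func canDescend(parent *DescribedBlob, child blob.Ref) error {
	if !parent.HasSecureLinkTo(child) {
		return fmt.Errorf("no secure link from %v to %v", parent.BlobRef, child)
	}
	return nil
}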
Example #14
func (c *SQLiteHaveCache) NoteBlobExists(br blob.Ref, size int64) {
	if size < 0 {
		log.Fatalf("Got a negative blob size to note in have cache")
	}
	if !br.Valid() {
		return
	}
	repl := strings.NewReplacer("?1", br.String(), "?2", fmt.Sprint(size))
	query := repl.Replace(noteHaveStmt)
	c.mu.Lock()
	defer c.mu.Unlock()
	err := c.startSQLiteChild()
	if err != nil {
		log.Fatalf("Could not start sqlite child process: %v", err)
	}
	_, err = c.w.Write([]byte(query))
	if err != nil {
		log.Fatalf("failed to write to have cache: %v", err)
	}
}
Example #15
func (c *KvHaveCache) StatBlobCache(br blob.Ref) (size int64, ok bool) {
	if !br.Valid() {
		return
	}
	binBr, _ := br.MarshalBinary()
	binVal, err := c.db.Get(nil, binBr)
	if err != nil {
		log.Fatalf("Could not query have cache %v for %v: %v", c.filename, br, err)
	}
	if binVal == nil {
		cachelog.Printf("have cache MISS on %v", br)
		return
	}
	val, err := strconv.Atoi(string(binVal))
	if err != nil {
		log.Fatalf("Could not decode have cache binary value for %v: %v", br, err)
	}
	cachelog.Printf("have cache HIT on %v", br)
	return int64(val), true
}
Example #16
func (c *Corpus) AppendClaims(ctx context.Context, dst []camtypes.Claim, permaNode blob.Ref,
	signerFilter blob.Ref,
	attrFilter string) ([]camtypes.Claim, error) {
	pm, ok := c.permanodes[permaNode]
	if !ok {
		return nil, nil
	}
	for _, cl := range pm.Claims {
		if c.IsDeleted(cl.BlobRef) {
			continue
		}
		if signerFilter.Valid() && cl.Signer != signerFilter {
			continue
		}
		if attrFilter != "" && cl.Attr != attrFilter {
			continue
		}
		dst = append(dst, *cl)
	}
	return dst, nil
}
Example #17
// serveNav serves some navigation links (prev, next, up) if the
// pr.subject is a member of a collection (its parent has members).
// It is meant to be called from serveFile.
func (pr *publishRequest) serveNav() error {
	// first get the parent path and blob
	parentPath, parentbr, err := pr.parent()
	if err != nil {
		return fmt.Errorf("Errors building nav links for %s: %v", pr.subject, err)
	}
	parentNav := fmt.Sprintf("[<a href='%s'>up</a>]", strings.TrimSuffix(parentPath, resSeparator))

	// describe the parent so we get the siblings (members of the parent)
	dr := pr.ph.Search.NewDescribeRequest()
	dr.Describe(parentbr, 3)
	parentRes, err := dr.Result()
	if err != nil {
		return fmt.Errorf("Errors loading %s, permanode %s: %v, %#v", pr.req.URL, pr.subject, err, err)
	}
	members := parentRes[parentbr.String()].Members()
	if len(members) == 0 {
		pr.pf("<div class='camlifile'>[<a href='%s'>up</a>]</div>", parentNav)
		return nil
	}

	pos := 0
	var prev, next blob.Ref
	for k, member := range members {
		if member.BlobRef.String() == pr.subject.String() {
			pos = k
			break
		}
	}
	if pos > 0 {
		prev = members[pos-1].BlobRef
	}
	if pos < len(members)-1 {
		next = members[pos+1].BlobRef
	}
	if prev.Valid() || next.Valid() {
		var prevNav, nextNav string
		if prev.Valid() {
			prevNav = fmt.Sprintf("[<a href='%s/h%s'>prev</a>]",
				parentPath, prev.DigestPrefix(10))
		}
		if next.Valid() {
			nextNav = fmt.Sprintf("[<a href='%s/h%s'>next</a>]",
				parentPath, next.DigestPrefix(10))
		}
		pr.pf("<div class='camlifile'>%s %s %s</div>", parentNav, prevNav, nextNav)
	}

	return nil
}
Example #18
func (c *KvHaveCache) NoteBlobExists(br blob.Ref, size int64) {
	if !br.Valid() {
		return
	}
	if size < 0 {
		log.Fatalf("Got a negative blob size to note in have cache for %v", br)
	}
	binBr, _ := br.MarshalBinary()
	binVal := []byte(strconv.Itoa(int(size)))
	cachelog.Printf("Adding to have cache %v: %q", br, binVal)
	_, _, err := c.db.Put(nil, binBr,
		func(binBr, old []byte) ([]byte, bool, error) {
			// We do not overwrite dups
			if old != nil {
				return nil, false, nil
			}
			return binVal, true, nil
		})
	if err != nil {
		log.Fatalf("Could not write %v in have cache: %v", br, err)
	}
}
Example #19
func (pr *publishRequest) fileNavigation() (*publish.Nav, error) {
	// first get the parent path and blob
	parentPath, parentbr, err := pr.parent()
	if err != nil {
		return nil, fmt.Errorf("Could not get subject %v's parent's info: %v", pr.subject, err)
	}
	parentNav := strings.TrimSuffix(parentPath, resSeparator)
	fileNav := &publish.Nav{
		ParentPath: parentNav,
	}

	// describe the parent so we get the siblings (members of the parent)
	dr := pr.ph.Search.NewDescribeRequest()
	dr.Describe(parentbr, 3)
	parentRes, err := dr.Result()
	if err != nil {
		return nil, fmt.Errorf("Could not \"deeply\" describe subject %v's parent %v: %v", pr.subject, parentbr, err)
	}
	members := parentRes[parentbr.String()].Members()
	if len(members) == 0 {
		return fileNav, nil
	}

	pos := 0
	var prev, next blob.Ref
	for k, member := range members {
		if member.BlobRef.String() == pr.subject.String() {
			pos = k
			break
		}
	}
	if pos > 0 {
		prev = members[pos-1].BlobRef
	}
	if pos < len(members)-1 {
		next = members[pos+1].BlobRef
	}
	if !prev.Valid() && !next.Valid() {
		return fileNav, nil
	}
	if prev.Valid() {
		fileNav.PrevPath = fmt.Sprintf("%s/%s%s", parentPath, digestPrefix, prev.DigestPrefix(10))
	}
	if next.Valid() {
		fileNav.NextPath = fmt.Sprintf("%s/%s%s", parentPath, digestPrefix, next.DigestPrefix(10))
	}
	return fileNav, nil
}
Example #20
func (c *KvHaveCache) StatBlobCache(br blob.Ref) (size uint32, ok bool) {
	if !br.Valid() {
		return
	}
	binBr, _ := br.MarshalBinary()
	binVal, err := c.db.Get(binBr, nil)
	if err != nil {
		if err == leveldb.ErrNotFound {
			cachelog.Printf("have cache MISS on %v", br)
			return
		}
		log.Fatalf("Could not query have cache %v for %v: %v", c.filename, br, err)
	}
	val, err := strconv.ParseUint(string(binVal), 10, 32)
	if err != nil {
		log.Fatalf("Could not decode have cache binary value for %v: %v", br, err)
	}
	cachelog.Printf("have cache HIT on %v", br)
	return uint32(val), true
}
Example #21
// valuesAtSigner returns an attrValues to query permanode attr values at the
// given time for the signerFilter.
// It returns ok == true if v represents attrValues valid for the specified
// parameters.
// It returns (nil, false) if neither pm.attr nor pm.signer should be used for
// the given time, because e.g. some claims are more recent than this time, in
// which case the caller should resort to another source, such as pm.Claims.
// If signerFilter is valid and pm has no attributes for it, (nil, true) is
// returned.
// The returned map must not be changed by the caller.
func (pm *PermanodeMeta) valuesAtSigner(at time.Time,
	signerFilter blob.Ref) (v attrValues, ok bool) {

	if pm.attr == nil {
		return nil, false
	}
	var m attrValues
	if signerFilter.Valid() {
		m = pm.signer[signerFilter]
		if m == nil {
			return nil, true
		}
	} else {
		m = pm.attr
	}
	if at.IsZero() {
		return m, true
	}
	if n := len(pm.Claims); n == 0 || !pm.Claims[n-1].Date.After(at) {
		return m, true
	}
	return nil, false
}
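The caller-side pattern this enables (mirroring AppendPermanodeAttrValues in Example #11): try the precomputed map first, and replay pm.Claims only when valuesAtSigner declines:

if values, ok := pm.valuesAtSigner(at, signerFilter); ok {
	return append(dst, values[attr]...) // fast path: map valid for (at, signer)
}
// slow path: replay pm.Claims claim by claim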
Example #22
// PermanodeAttrValueLocked returns a single-valued attribute or "".
func (c *Corpus) PermanodeAttrValueLocked(permaNode blob.Ref,
	attr string,
	at time.Time,
	signerFilter blob.Ref) string {
	pm, ok := c.permanodes[permaNode]
	if !ok {
		return ""
	}
	if at.IsZero() {
		at = time.Now()
	}
	var v string
	for _, cl := range pm.Claims {
		if cl.Attr != attr || cl.Date.After(at) {
			continue
		}
		if signerFilter.Valid() && signerFilter != cl.Signer {
			continue
		}
		switch cl.Type {
		case string(schema.DelAttributeClaim):
			if cl.Value == "" {
				v = ""
			} else if v == cl.Value {
				v = ""
			}
		case string(schema.SetAttributeClaim):
			v = cl.Value
		case string(schema.AddAttributeClaim):
			if v == "" {
				v = cl.Value
			}
		}
	}
	return v
}
Example #23
// BlobFromReader returns a new Blob from the provided Reader r,
// which should be the body of the provided blobref.
// Note: the hash checksum is not verified.
func BlobFromReader(ref blob.Ref, r io.Reader) (*Blob, error) {
	if !ref.Valid() {
		return nil, errors.New("schema.BlobFromReader: invalid blobref")
	}
	var buf bytes.Buffer
	tee := io.TeeReader(r, &buf)
	ss, err := parseSuperset(tee)
	if err != nil {
		return nil, err
	}
	var wb [16]byte
	afterObj := 0
	for {
		n, err := tee.Read(wb[:])
		afterObj += n
		for i := 0; i < n; i++ {
			if !isASCIIWhite(wb[i]) {
				return nil, fmt.Errorf("invalid bytes after JSON schema blob in %v", ref)
			}
		}
		if afterObj > MaxSchemaBlobSize {
			break
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
	}
	json := buf.String()
	if len(json) > MaxSchemaBlobSize {
		return nil, fmt.Errorf("schema: metadata blob %v is over expected limit; size=%d", ref, len(json))
	}
	return &Blob{ref, json, ss}, nil
}
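A hedged usage sketch: fetch the blob body and parse it without hash verification. fetcher and ref are assumed inputs, and Blob.JSON() is assumed to expose the raw JSON:

rc, _, err := fetcher.Fetch(ref)
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
b, err := BlobFromReader(ref, rc)
if err != nil {
	log.Fatal(err)
}
log.Printf("parsed schema blob %v (%d bytes of JSON)", ref, len(b.JSON()))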
Example #24
func NewBlobSniffer(ref blob.Ref) *BlobSniffer {
	if !ref.Valid() {
		panic("invalid ref")
	}
	return &BlobSniffer{br: ref}
}
Example #25
func main() {
	var conn *fuse.Conn

	// Scans the arg list and sets up flags
	client.AddFlags()
	flag.Parse()

	narg := flag.NArg()
	if narg > 2 {
		usage()
	}

	var mountPoint string
	var err error
	if narg > 0 {
		mountPoint = flag.Arg(0)
	} else {
		mountPoint, err = ioutil.TempDir("", "cammount")
		if err != nil {
			log.Fatal(err)
		}
		defer os.Remove(mountPoint)
	}

	errorf := func(msg string, args ...interface{}) {
		fmt.Fprintf(os.Stderr, msg, args...)
		fmt.Fprint(os.Stderr, "\n")
		usage()
	}

	var (
		cl    *client.Client
		root  blob.Ref // zero if only one arg
		camfs *fs.CamliFileSystem
	)
	if narg == 2 {
		rootArg := flag.Arg(1)
		// not trying very hard since NewFromShareRoot will do it better with a regex
		if strings.HasPrefix(rootArg, "http://") ||
			strings.HasPrefix(rootArg, "https://") {
			if client.ExplicitServer() != "" {
				errorf("Can't use an explicit blobserver with a share URL; the blobserver is implicit from the share URL.")
			}
			var err error
			cl, root, err = client.NewFromShareRoot(rootArg)
			if err != nil {
				log.Fatal(err)
			}
		} else {
			cl = client.NewOrFail() // automatic from flags
			var ok bool
			root, ok = blob.Parse(rootArg)
			if !ok {
				log.Fatalf("Error parsing root blobref: %q\n", rootArg)
			}
			cl.SetHTTPClient(&http.Client{Transport: cl.TransportForConfig(nil)})
		}
	} else {
		cl = client.NewOrFail() // automatic from flags
		cl.SetHTTPClient(&http.Client{Transport: cl.TransportForConfig(nil)})
	}

	diskCacheFetcher, err := cacher.NewDiskCache(cl)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	defer diskCacheFetcher.Clean()
	if root.Valid() {
		var err error
		camfs, err = fs.NewRootedCamliFileSystem(diskCacheFetcher, root)
		if err != nil {
			log.Fatalf("Error creating root with %v: %v", root, err)
		}
	} else {
		camfs = fs.NewCamliFileSystem(cl, diskCacheFetcher)
	}

	if *debug {
		fuse.Debugf = log.Printf
		// TODO: set fs's logger
	}

	// This doesn't appear to work on OS X:
	sigc := make(chan os.Signal, 1)

	conn, err = fuse.Mount(mountPoint)
	if err != nil {
		if err.Error() == "cannot find load_fusefs" && runtime.GOOS == "darwin" {
			log.Fatal("FUSE not available; install from http://osxfuse.github.io/")
		}
		log.Fatalf("Mount: %v", err)
	}

	xtermDone := make(chan bool, 1)
	if *xterm {
		cmd := exec.Command("xterm")
		cmd.Dir = mountPoint
		if err := cmd.Start(); err != nil {
			log.Printf("Error starting xterm: %v", err)
		} else {
			go func() {
				cmd.Wait()
				xtermDone <- true
			}()
			defer cmd.Process.Kill()
		}
	}
	if *open {
		if runtime.GOOS == "darwin" {
			cmd := exec.Command("open", mountPoint)
			go cmd.Run()
		}
	}

	signal.Notify(sigc, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)

	doneServe := make(chan error, 1)
	go func() {
		doneServe <- conn.Serve(camfs)
	}()

	quitKey := make(chan bool, 1)
	go awaitQuitKey(quitKey)

	select {
	case err := <-doneServe:
		log.Printf("conn.Serve returned %v", err)
	case sig := <-sigc:
		log.Printf("Signal %s received, shutting down.", sig)
	case <-quitKey:
		log.Printf("Quit key pressed. Shutting down.")
	case <-xtermDone:
		log.Printf("xterm done")
	}

	time.AfterFunc(2*time.Second, func() {
		os.Exit(1)
	})
	log.Printf("Unmounting...")
	err = fs.Unmount(mountPoint)
	log.Printf("Unmount = %v", err)

	log.Printf("cammount FUSE process ending.")
}
Example #26
// match implements the PermanodeConstraint matching for RelationConstraint.
func (rc *RelationConstraint) match(s *search, pn blob.Ref, at time.Time) (ok bool, err error) {
	corpus := s.h.corpus
	if corpus == nil {
		// TODO: care?
		return false, errors.New("RelationConstraint requires an in-memory corpus")
	}

	if rc.Relation != "parent" {
		panic("bogus")
	}

	var matcher matchFn
	if rc.Any != nil {
		matcher = rc.Any.matcher()
	} else {
		matcher = rc.All.matcher()
	}

	var anyGood bool
	var anyBad bool
	var lastChecked blob.Ref
	var permanodesChecked map[blob.Ref]bool // lazily created to optimize for common case of 1 match
	corpus.ForeachClaimBackLocked(pn, at, func(cl *camtypes.Claim) bool {
		if !rc.matchesAttr(cl.Attr) {
			return true // skip claim
		}
		if lastChecked.Valid() {
			if permanodesChecked == nil {
				permanodesChecked = make(map[blob.Ref]bool)
			}
			permanodesChecked[lastChecked] = true
			lastChecked = blob.Ref{} // back to zero
		}
		if permanodesChecked[cl.Permanode] {
			return true // skip checking
		}
		if !corpus.PermanodeHasAttrValueLocked(cl.Permanode, at, cl.Attr, cl.Value) {
			return true // claim once matched permanode, but no longer
		}

		var bm camtypes.BlobMeta
		bm, err = s.blobMeta(cl.Permanode)
		if err != nil {
			return false
		}
		var ok bool
		ok, err = matcher(s, cl.Permanode, bm)
		if err != nil {
			return false
		}
		if ok {
			anyGood = true
			if rc.Any != nil {
				return false // done. stop searching.
			}
		} else {
			anyBad = true
			if rc.All != nil {
				return false // fail fast
			}
		}
		lastChecked = cl.Permanode
		return true
	})
	if err != nil {
		return false, err
	}
	if rc.All != nil {
		return anyGood && !anyBad, nil
	}
	return anyGood, nil
}
Example #27
// trunc is a hint about which blob to truncate after. It may be zero.
// If the returned error is of type 'needsTruncatedAfterError', then
// the zip should be attempted to be written again, but truncating the
// data after the listed blob.
func (pk *packer) writeAZip(trunc blob.Ref) (err error) {
	defer func() {
		if e := recover(); e != nil {
			if v, ok := e.(error); ok && err == nil {
				err = v
			} else {
				panic(e)
			}
		}
	}()
	mf := Manifest{
		WholeRef:       pk.wholeRef,
		WholeSize:      pk.wholeSize,
		WholePartIndex: len(pk.zips),
	}
	var zbuf bytes.Buffer
	cw := &countWriter{w: &zbuf}
	zw := zip.NewWriter(cw)

	var approxSize = zipFixedOverhead // can't use zbuf.Len because zw buffers
	var dataRefsWritten []blob.Ref
	var dataBytesWritten int64
	var schemaBlobSeen = map[blob.Ref]bool{}
	var schemaBlobs []blob.Ref // to add after the main file

	baseFileName := pk.fr.FileName()
	if strings.Contains(baseFileName, "/") || strings.Contains(baseFileName, "\\") {
		return fmt.Errorf("File schema blob %v filename had a slash in it: %q", pk.fr.SchemaBlobRef(), baseFileName)
	}
	fh := &zip.FileHeader{
		Name:   baseFileName,
		Method: zip.Store, // uncompressed
	}
	fh.SetModTime(pk.fr.ModTime())
	fh.SetMode(0644)
	fw, err := zw.CreateHeader(fh)
	check(err)
	check(zw.Flush())
	dataStart := cw.n
	approxSize += zipPerEntryOverhead // for the first FileHeader w/ the data

	zipMax := pk.s.maxZipBlobSize()
	chunks := pk.chunksRemain
	chunkWholeHash := blob.NewHash()
	for len(chunks) > 0 {
		dr := chunks[0] // the next chunk to maybe write

		if trunc.Valid() && trunc == dr {
			if approxSize == 0 {
				return errors.New("first blob is too large to pack, once you add the zip overhead")
			}
			break
		}

		schemaBlobsSave := schemaBlobs
		for _, parent := range pk.schemaParent[dr] {
			if !schemaBlobSeen[parent] {
				schemaBlobSeen[parent] = true
				schemaBlobs = append(schemaBlobs, parent)
				approxSize += int(pk.schemaBlob[parent].Size()) + zipPerEntryOverhead
			}
		}

		thisSize := pk.dataSize[dr]
		approxSize += int(thisSize)
		if approxSize+mf.approxSerializedSize() > zipMax {
			if fn := testHookStopBeforeOverflowing; fn != nil {
				fn()
			}
			schemaBlobs = schemaBlobsSave // restore it
			break
		}

		// Copy the data to the zip.
		rc, size, err := pk.s.Fetch(dr)
		check(err)
		if size != thisSize {
			rc.Close()
			return errors.New("unexpected size")
		}
		if n, err := io.Copy(io.MultiWriter(fw, chunkWholeHash), rc); err != nil || n != int64(size) {
			rc.Close()
			return fmt.Errorf("copy to zip = %v, %v; want %v bytes", n, err, size)
		}
		rc.Close()

		dataRefsWritten = append(dataRefsWritten, dr)
		dataBytesWritten += int64(size)
		chunks = chunks[1:]
	}
	mf.DataBlobsOrigin = blob.RefFromHash(chunkWholeHash)

	// zipBlobs is where a schema or data blob is relative to the beginning
	// of the zip file.
	var zipBlobs []BlobAndPos

	var dataOffset int64
	for _, br := range dataRefsWritten {
		size := pk.dataSize[br]
		mf.DataBlobs = append(mf.DataBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: size}, dataOffset})

		zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: size}, dataStart + dataOffset})
		dataOffset += int64(size)
	}

	for _, br := range schemaBlobs {
		fw, err := zw.CreateHeader(&zip.FileHeader{
			Name:   "camlistore/" + br.String() + ".json",
			Method: zip.Store, // uncompressed
		})
		check(err)
		check(zw.Flush())
		b := pk.schemaBlob[br]
		zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: b.Size()}, cw.n})
		rc := b.Open()
		n, err := io.Copy(fw, rc)
		rc.Close()
		check(err)
		if n != int64(b.Size()) {
			return fmt.Errorf("failed to write all of schema blob %v: %d bytes, not wanted %d", br, n, b.Size())
		}
	}

	// Manifest file
	fw, err = zw.Create(zipManifestPath)
	check(err)
	enc, err := json.MarshalIndent(mf, "", "  ")
	check(err)
	_, err = fw.Write(enc)
	check(err)
	err = zw.Close()
	check(err)

	if zbuf.Len() > zipMax {
		// We guessed wrong. Back up. Find out how many blobs we went over.
		overage := zbuf.Len() - zipMax
		for i := len(dataRefsWritten) - 1; i >= 0; i-- {
			dr := dataRefsWritten[i]
			if overage <= 0 {
				return needsTruncatedAfterError{dr}
			}
			overage -= int(pk.dataSize[dr])
		}
		return errors.New("file is unpackable; first blob is too big to fit")
	}

	zipRef := blob.SHA1FromBytes(zbuf.Bytes())
	zipSB, err := blobserver.ReceiveNoHash(pk.s.large, zipRef, bytes.NewReader(zbuf.Bytes()))
	if err != nil {
		return err
	}

	bm := pk.s.meta.BeginBatch()
	bm.Set(fmt.Sprintf("%s%s:%d", wholeMetaPrefix, pk.wholeRef, len(pk.zips)),
		fmt.Sprintf("%s %d %d %d",
			zipRef,
			dataStart,
			pk.wholeBytesWritten,
			dataBytesWritten))

	pk.wholeBytesWritten += dataBytesWritten
	pk.zips = append(pk.zips, writtenZip{
		SizedRef: zipSB,
		dataRefs: dataRefsWritten,
	})

	for _, zb := range zipBlobs {
		bm.Set(blobMetaPrefix+zb.Ref.String(), fmt.Sprintf("%d %v %d", zb.Size, zipRef, zb.Offset))
	}
	if err := pk.s.meta.CommitBatch(bm); err != nil {
		return err
	}

	// Delete from small
	if !pk.s.skipDelete {
		toDelete := make([]blob.Ref, 0, len(dataRefsWritten)+len(schemaBlobs))
		toDelete = append(toDelete, dataRefsWritten...)
		toDelete = append(toDelete, schemaBlobs...)
		if err := pk.s.small.RemoveBlobs(toDelete); err != nil {
			// Can't really do anything about it and doesn't really matter, so
			// just log for now.
			pk.s.Logf("Error removing blobs from %s: %v", pk.s.small, err)
		}
	}

	// On success, consume the chunks we wrote from pk.chunksRemain.
	pk.chunksRemain = pk.chunksRemain[len(dataRefsWritten):]
	return nil
}
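A sketch of the retry loop implied by the doc comment on writeAZip; the Ref field on needsTruncatedAfterError is an assumption (the error is constructed from a blob.Ref above):

func (pk *packer) writeAZipRetrying() error {
	var trunc blob.Ref // zero value: no truncation hint yet
	for {
		err := pk.writeAZip(trunc)
		te, ok := err.(needsTruncatedAfterError)
		if !ok {
			return err // nil on success, or a non-retryable error
		}
		trunc = te.Ref // retry, truncating after this blob; a real caller would bound retries
	}
}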
Example #28
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	var filebb *schema.Builder
	if up.fileOpts.contentsOnly {
		filebb = schema.NewFileMap("")
	} else {
		filebb = schema.NewCommonFileMap(n.fullPath, n.fi)
	}
	filebb.SetType("file")

	up.fdGate.Start()
	defer up.fdGate.Done()

	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if !up.fileOpts.contentsOnly {
		if up.fileOpts.exifTime {
			ra, ok := file.(io.ReaderAt)
			if !ok {
				return nil, errors.New("Error asserting local file to io.ReaderAt")
			}
			modtime, err := schema.FileTime(ra)
			if err != nil {
				log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
			} else {
				filebb.SetModTime(modtime)
			}
		}
		if up.fileOpts.wantCapCtime() {
			filebb.CapCreationTime()
		}
	}

	var (
		size                           = n.fi.Size()
		fileContents io.Reader         = io.LimitReader(file, size)
		br           blob.Ref          // of file schemaref
		sum          string            // sha1 hashsum of the file to upload
		pr           *client.PutResult // of the final "file" schema blob
	)

	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				android.NoteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}

	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blob.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     uint32(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		android.NoteFileUploaded(n.fullPath, true)
		return pr, nil
	}

	if !br.Valid() {
		// br still zero means fileMapFromDuplicate did not find the file on the server,
		// and the file was not just uploaded by a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}

	// The work for those planned permanodes (and the claims) is redone
	// every time we get here (i.e. past the stat cache). However, they're
	// caught by the have cache, so at least they won't be re-uploaded
	// for nothing.
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime, ok := filebb.ModTime()
		if !ok {
			return nil, fmt.Errorf("couldn't get modtime for file %v", n.fullPath)
		}
		err = up.uploadFilePermanode(sum, br, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
	}

	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross.  should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this?  or make
	// schemaWriteFileMap return it?
	json, _ := filebb.JSON()
	pr = &client.PutResult{BlobRef: br, Size: uint32(len(json)), Skipped: false}
	return pr, nil
}
Example #29
func CheckEnumerate(sto blobserver.Storage, wantUnsorted []blob.SizedRef, opts ...interface{}) error {
	var after string
	var n = 1000
	for _, opt := range opts {
		switch v := opt.(type) {
		case string:
			after = v
		case int:
			n = v
		default:
			panic("bad option of type " + fmt.Sprintf("%T", v))
		}
	}

	want := append([]blob.SizedRef(nil), wantUnsorted...)
	sort.Sort(blob.SizedByRef(want))

	sbc := make(chan blob.SizedRef, 10)

	var got []blob.SizedRef
	var grp syncutil.Group
	sawEnd := make(chan bool, 1)
	grp.Go(func() error {
		ctx := context.New()
		defer ctx.Cancel()
		if err := sto.EnumerateBlobs(ctx, sbc, after, n); err != nil {
			return fmt.Errorf("EnumerateBlobs(%q, %d): %v", after, n, err)
		}
		return nil
	})
	grp.Go(func() error {
		var lastRef blob.Ref
		for sb := range sbc {
			if !sb.Valid() {
				return fmt.Errorf("invalid blobref %#v received in enumerate", sb)
			}
			got = append(got, sb)
			if lastRef.Valid() && sb.Ref.Less(lastRef) {
				return fmt.Errorf("blobs appearing out of order")
			}
			lastRef = sb.Ref
		}
		sawEnd <- true
		return nil

	})
	grp.Go(func() error {
		select {
		case <-sawEnd:
			return nil
		case <-time.After(10 * time.Second):
			return errors.New("timeout waiting for EnumerateBlobs to close its channel")
		}

	})
	if err := grp.Err(); err != nil {
		return fmt.Errorf("Enumerate error: %v", err)
	}
	if len(got) == 0 && len(want) == 0 {
		return nil
	}
	var gotSet = map[blob.SizedRef]bool{}
	for _, sb := range got {
		if gotSet[sb] {
			return fmt.Errorf("duplicate blob %v returned in enumerate", sb)
		}
		gotSet[sb] = true
	}

	if !reflect.DeepEqual(got, want) {
		return fmt.Errorf("Enumerate mismatch. Got %d; want %d.\n Got: %v\nWant: %v\n",
			len(got), len(want), got, want)
	}
	return nil
}
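A hedged test-side sketch; newTestStorage and uploadTestBlobs are hypothetical helpers returning a storage and the []blob.SizedRef it holds:

func TestEnumerate(t *testing.T) {
	sto := newTestStorage(t)
	uploaded := uploadTestBlobs(t, sto)
	if err := CheckEnumerate(sto, uploaded); err != nil {
		t.Fatal(err)
	}
}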
Example #30
func (m MetaMap) Get(br blob.Ref) *DescribedBlob {
	if !br.Valid() {
		return nil
	}
	return m[br.String()]
}