Example #1
func (mi *Indexer) GetOwnerClaims(permanode, owner *blobref.BlobRef) (claims search.ClaimList, err os.Error) {
	claims = make(search.ClaimList, 0)

	// TODO: ignore rows where unverified = 'N'
	rs, err := mi.db.Query("SELECT blobref, date, claim, attr, value FROM claims WHERE permanode = ? AND signer = ?",
		permanode.String(), owner.String())
	if err != nil {
		return
	}
	defer rs.Close()

	var row claimsRow
	for rs.Next() {
		err = rs.Scan(&row.blobref, &row.date, &row.claim, &row.attr, &row.value)
		if err != nil {
			return
		}
		t, err := time.Parse(time.RFC3339, trimRFC3339Subseconds(row.date))
		if err != nil {
			log.Printf("Skipping; error parsing time %q: %v", row.date, err)
			continue
		}
		claims = append(claims, &search.Claim{
			BlobRef:   blobref.Parse(row.blobref),
			Signer:    owner,
			Permanode: permanode,
			Type:      row.claim,
			Date:      t,
			Attr:      row.attr,
			Value:     row.value,
		})
	}
	return
}
Example #2
func (c *FlatHaveCache) NoteBlobExists(br *blobref.BlobRef) {
	c.mu.Lock()
	defer c.mu.Unlock()
	k := br.String()
	c.m[k] = true
	c.dirty[k] = true
}
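A minimal caller sketch for the cache above, assuming only NoteBlobExists from Example #2; the helper name and the idea of recording blobs after a successful upload are illustrative.
// Hypothetical helper: record blobs the server is known to have so a
// later run can skip re-uploading them. Only NoteBlobExists comes from
// the example above; the caller and its upload step are assumed.
func noteUploaded(cache *FlatHaveCache, uploaded []*blobref.BlobRef) {
	for _, br := range uploaded {
		cache.NoteBlobExists(br)
	}
}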
Example #3
func (fi *FakeIndex) AddClaim(owner, permanode *blobref.BlobRef, claimType, attr, value string) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	date := fi.nextDate()

	claim := &search.Claim{
		Permanode: permanode,
		Signer:    nil,
		BlobRef:   nil,
		Date:      date,
		Type:      claimType,
		Attr:      attr,
		Value:     value,
	}
	key := permanode.String() + "/" + owner.String()
	fi.ownerClaims[key] = append(fi.ownerClaims[key], claim)

	if claimType == "set-attribute" && strings.HasPrefix(attr, "camliPath:") {
		suffix := attr[len("camliPath:"):]
		path := &search.Path{
			Target: blobref.MustParse(value),
			Suffix: suffix,
		}
		fi.path[fmt.Sprintf("%s\x00%s\x00%s", owner, permanode, suffix)] = path
	}
}
Example #4
func (fs *CamliFileSystem) fetchSchemaSuperset(br *blobref.BlobRef) (*schema.Superset, os.Error) {
	blobStr := br.String()
	if ss, ok := fs.blobToSchema.Get(blobStr); ok {
		return ss.(*schema.Superset), nil
	}
	log.Printf("schema cache MISS on %q", blobStr)

	rsc, _, err := fs.fetcher.Fetch(br)
	if err != nil {
		return nil, err
	}
	defer rsc.Close()
	jd := json.NewDecoder(rsc)
	ss := new(schema.Superset)
	err = jd.Decode(ss)
	if err != nil {
		log.Printf("Error parsing %s as schema blob: %v", br, err)
		return nil, os.EINVAL
	}
	if ss.Type == "" {
		log.Printf("blob %s is JSON but lacks camliType", br)
		return nil, os.EINVAL
	}
	ss.BlobRef = br
	fs.blobToSchema.Add(blobStr, ss)
	return ss, nil
}
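A sketch of a caller for the schema fetch above, using only what is visible in this listing (the returned *schema.Superset and its Type field); the helper itself is hypothetical.
// Illustrative caller: fetch a schema blob and report its camliType.
// Only fetchSchemaSuperset and ss.Type are taken from Example #4.
func (fs *CamliFileSystem) schemaType(br *blobref.BlobRef) (string, os.Error) {
	ss, err := fs.fetchSchemaSuperset(br)
	if err != nil {
		return "", err
	}
	return ss.Type, nil
}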
Example #5
func (mi *Indexer) PathsLookup(signer, base *blobref.BlobRef, suffix string) (paths []*search.Path, err os.Error) {
	keyId, err := mi.keyIdOfSigner(signer)
	if err != nil {
		return
	}
	rs, err := mi.db.Query("SELECT claimref, claimdate, targetref FROM path "+
		"WHERE keyid=? AND baseref=? AND suffix=?",
		keyId, base.String(), suffix)
	if err != nil {
		return
	}
	defer rs.Close()

	var claimref, claimdate, targetref string
	for rs.Next() {
		if err = rs.Scan(&claimref, &claimdate, &targetref); err != nil {
			return
		}
		t, err := time.Parse(time.RFC3339, trimRFC3339Subseconds(claimdate))
		if err != nil {
			log.Printf("Skipping bogus path row with bad time: %q", claimref)
			continue
		}
		_ = t // TODO: use this?
		paths = append(paths, &search.Path{
			Claim:     blobref.Parse(claimref),
			ClaimDate: claimdate,
			Base:      base,
			Target:    blobref.Parse(targetref),
			Suffix:    suffix,
		})
	}
	return
}
Example #6
// Given a blobref and a few hex characters of the digest of the next hop, return the complete
// blobref that the prefix abbreviates, if it is a valid next hop.
func (sh *Handler) ResolvePrefixHop(parent *blobref.BlobRef, prefix string) (child *blobref.BlobRef, err os.Error) {
	// TODO: this is a linear scan right now. this should be
	// optimized to use a new database table of members so this is
	// a quick lookup.  in the meantime it should be in memcached
	// at least.
	if len(prefix) < 8 {
		return nil, fmt.Errorf("Member prefix %q too small", prefix)
	}
	dr := sh.NewDescribeRequest()
	dr.Describe(parent, 1)
	res, err := dr.Result()
	if err != nil {
		return
	}
	des, ok := res[parent.String()]
	if !ok {
		return nil, fmt.Errorf("Failed to describe member %q in parent %q", prefix, parent)
	}
	if des.Permanode != nil {
		if cr, ok := des.ContentRef(); ok && strings.HasPrefix(cr.Digest(), prefix) {
			return cr, nil
		}
		for _, member := range des.Members() {
			if strings.HasPrefix(member.BlobRef.Digest(), prefix) {
				return member.BlobRef, nil
			}
		}
	}
	return nil, fmt.Errorf("Member prefix %q not found in %q", prefix, parent)
}
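ResolvePrefixHop resolves a single hop, so a caller walking several short digest prefixes would chain it; the helper below is a sketch under that assumption, with only ResolvePrefixHop taken from Example #6.
// Hypothetical helper: follow a chain of short digest prefixes starting
// at root, resolving each hop with ResolvePrefixHop above.
func (sh *Handler) resolveHops(root *blobref.BlobRef, prefixes []string) (*blobref.BlobRef, os.Error) {
	cur := root
	for _, p := range prefixes {
		next, err := sh.ResolvePrefixHop(cur, p)
		if err != nil {
			return nil, err
		}
		cur = next
	}
	return cur, nil
}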
Example #7
func (ds *DiskStorage) blobDirectory(partition string, b *blobref.BlobRef) string {
	d := b.Digest()
	if len(d) < 6 {
		d = d + "______"
	}
	return filepath.Join(ds.PartitionRoot(partition), b.HashName(), d[0:3], d[3:6])
}
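The function above shards blobs two levels deep by digest. Assuming Digest() returns the plain hex digest, a blob such as sha1-c3ab8ff1... in partition "p" lands under PartitionRoot("p")/sha1/c3a/b8f. A standalone sketch of the same fan-out:
// Sketch of the two-level fan-out used by blobDirectory: the first six
// characters of the digest pick the directories. The underscore padding
// mirrors the example and keeps the slices in range for short digests.
func shardDirs(digest string) (level1, level2 string) {
	if len(digest) < 6 {
		digest = digest + "______"
	}
	return digest[0:3], digest[3:6]
}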
Example #8
func NewClaim(permaNode *blobref.BlobRef, claimType string) map[string]interface{} {
	m := newCamliMap(1, "claim")
	m["permaNode"] = permaNode.String()
	m["claimType"] = claimType
	m["claimDate"] = RFC3339FromNanos(time.Nanoseconds())
	return m
}
Example #9
func (mi *Indexer) ExistingFileSchemas(bytesRef *blobref.BlobRef) (files []*blobref.BlobRef, err os.Error) {
	client, err := mi.getConnection()
	if err != nil {
		return
	}
	defer func() {
		if err == nil {
			mi.releaseConnection(client)
		} else {
			client.Close()
		}
	}()

	err = client.Query(fmt.Sprintf("SELECT fileschemaref FROM files WHERE bytesref=%q", bytesRef.String()))
	if err != nil {
		return
	}

	result, err := client.StoreResult()
	if err != nil {
		return
	}
	defer client.FreeResult()

	for {
		row := result.FetchRow()
		if row == nil {
			break
		}
		files = append(files, blobref.Parse(row[0].(string)))
	}
	return
}
Example #10
func (mi *Indexer) GetBlobMimeType(blob *blobref.BlobRef) (mime string, size int64, err os.Error) {
	client, err := mi.getConnection()
	if err != nil {
		return
	}
	defer func() {
		if err == nil || err == os.ENOENT {
			mi.releaseConnection(client)
		} else {
			client.Close()
		}
	}()

	err = client.Query(fmt.Sprintf("SELECT type, size FROM blobs WHERE blobref=%q", blob.String()))
	if err != nil {
		return
	}

	result, err := client.StoreResult()
	if err != nil {
		return
	}
	defer client.FreeResult()

	row := result.FetchRow()
	if row == nil {
		err = os.ENOENT
		return
	}

	//log.Printf("got row: %#v (2 is %T)", row, row[1])
	mime, _ = row[0].(string)
	size, _ = row[1].(int64)
	return
}
Example #11
func (mi *Indexer) populatePermanode(client *mysql.Client, blobRef *blobref.BlobRef, camli *schema.Superset) (err os.Error) {
	err = execSQL(client,
		"INSERT IGNORE INTO permanodes (blobref, unverified, signer, lastmod) "+
			"VALUES (?, 'Y', ?, '')",
		blobRef.String(), camli.Signer)
	return
}
Example #12
func (h *SimpleBlobHub) NotifyBlobReceived(blob *blobref.BlobRef) {
	h.l.Lock()
	defer h.l.Unlock()

	// Callback channels to notify, nil until non-empty
	var notify []chan *blobref.BlobRef

	// Append global listeners
	for ch, _ := range h.listeners {
		notify = append(notify, ch)
	}

	// Append blob-specific listeners
	if h.blobListeners != nil {
		blobstr := blob.String()
		if set, ok := h.blobListeners[blobstr]; ok {
			for ch, _ := range set {
				notify = append(notify, ch)
			}
		}
	}

	// Run in a separate Goroutine so NotifyBlobReceived doesn't block
	// callers if callbacks are slow.
	go func() {
		for _, ch := range notify {
			ch <- blob
		}
	}()
}
Example #13
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) map[string]interface{} {
	m := newCamliMap(1, "share")
	m["authType"] = authType
	m["target"] = target.String()
	m["transitive"] = transitive
	return m
}
Example #14
func (mi *Indexer) populatePermanode(blobRef *blobref.BlobRef, camli *schema.Superset) (err os.Error) {
	err = mi.db.Execute(
		"INSERT IGNORE INTO permanodes (blobref, unverified, signer, lastmod) "+
			"VALUES (?, 'Y', ?, '') "+
			"ON DUPLICATE KEY UPDATE unverified = 'Y', signer = ?",
		blobRef.String(), camli.Signer, camli.Signer)
	return
}
Example #15
func (dr *DescribeRequest) DescribeSync(br *blobref.BlobRef) (*DescribedBlob, os.Error) {
	dr.Describe(br, 1)
	res, err := dr.Result()
	if err != nil {
		return nil, err
	}
	return res[br.String()], nil
}
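Combined with NewDescribeRequest from Example #6, a one-shot description of a single blob might look like the sketch below; the nil check and the helper are assumptions, since DescribeSync simply returns whatever the result map holds.
// Sketch: describe one blob, using NewDescribeRequest (Example #6) and
// DescribeSync above. The nil check is an assumption.
func describeOne(sh *Handler, br *blobref.BlobRef) (*DescribedBlob, os.Error) {
	dr := sh.NewDescribeRequest()
	des, err := dr.DescribeSync(br)
	if err != nil {
		return nil, err
	}
	if des == nil {
		return nil, fmt.Errorf("no description for %s", br)
	}
	return des, nil
}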
Example #16
func (mi *Indexer) populateClaim(client *mysql.Client, blobRef *blobref.BlobRef, camli *schema.Superset, sniffer *blobSniffer) (err os.Error) {
	pnBlobref := blobref.Parse(camli.Permanode)
	if pnBlobref == nil {
		// Skip bogus claim with malformed permanode.
		return
	}

	verifiedKeyId := ""
	// Use a distinct error variable here so the execSQL errors assigned
	// below reach the function's named return instead of a shadowed copy.
	if rawJson, jerr := sniffer.Body(); jerr == nil {
		vr := jsonsign.NewVerificationRequest(rawJson, mi.KeyFetcher)
		if vr.Verify() {
			verifiedKeyId = vr.SignerKeyId
			log.Printf("mysqlindex: verified claim %s from %s", blobRef, verifiedKeyId)

			if err = execSQL(client, "INSERT IGNORE INTO signerkeyid (blobref, keyid) "+
				"VALUES (?, ?)", vr.CamliSigner.String(), verifiedKeyId); err != nil {
				return
			}
		} else {
			log.Printf("mysqlindex: verification failure on claim %s: %v", blobRef, vr.Err)
		}
	}

	if err = execSQL(client,
		"INSERT IGNORE INTO claims (blobref, signer, verifiedkeyid, date, unverified, claim, permanode, attr, value) "+
			"VALUES (?, ?, ?, ?, 'Y', ?, ?, ?, ?)",
		blobRef.String(), camli.Signer, verifiedKeyId, camli.ClaimDate,
		camli.ClaimType, camli.Permanode,
		camli.Attribute, camli.Value); err != nil {
		return
	}

	if verifiedKeyId != "" {
		// TODO: limit this to only certain attributes (for now, just "camliRoot") once search handler
		// is working and the UI permits setting camliRoot.
		if err = execSQL(client, "INSERT IGNORE INTO signerattrvalue (keyid, attr, value, claimdate, blobref, permanode) "+
			"VALUES (?, ?, ?, ?, ?, ?)",
			verifiedKeyId, camli.Attribute, camli.Value,
			camli.ClaimDate, blobRef.String(), camli.Permanode); err != nil {
			return
		}
	}

	// And update the lastmod on the permanode row.
	if err = execSQL(client,
		"INSERT IGNORE INTO permanodes (blobref) VALUES (?)",
		pnBlobref.String()); err != nil {
		return
	}
	if err = execSQL(client,
		"UPDATE permanodes SET lastmod=? WHERE blobref=? AND ? > lastmod",
		camli.ClaimDate, pnBlobref.String(), camli.ClaimDate); err != nil {
		return
	}

	return nil
}
Example #17
func (dr *DescribeRequest) addError(br *blobref.BlobRef, err os.Error) {
	if err == nil {
		return
	}
	dr.lk.Lock()
	defer dr.lk.Unlock()
	// TODO: append? meh.
	dr.errs[br.String()] = err
}
Example #18
func (fi *FakeIndex) GetBlobMimeType(blob *blobref.BlobRef) (mime string, size int64, err os.Error) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	bs := blob.String()
	mime, ok := fi.mimeType[bs]
	if !ok {
		return "", 0, os.ENOENT
	}
	return mime, fi.size[bs], nil
}
Example #19
func (sr *statsStatReceiver) ReceiveBlob(blob *blobref.BlobRef, source io.Reader) (sb blobref.SizedBlobRef, err os.Error) {
	n, err := io.Copy(ioutil.Discard, source)
	if err != nil {
		return
	}
	sr.mu.Lock()
	defer sr.mu.Unlock()
	sr.have[blob.String()] = n
	return blobref.SizedBlobRef{blob, n}, nil
}
Example #20
func (mi *Indexer) GetOwnerClaims(permanode, owner *blobref.BlobRef) (claims search.ClaimList, reterr os.Error) {
	claims = make(search.ClaimList, 0)
	client, err := mi.getConnection()
	if err != nil {
		reterr = err
		return
	}
	defer mi.releaseConnection(client)

	// TODO: ignore rows where unverified = 'N'
	stmt, err := client.Prepare("SELECT blobref, date, claim, attr, value FROM claims WHERE permanode = ? AND signer = ?")
	if err != nil {
		reterr = err
		return
	}
	err = stmt.BindParams(permanode.String(), owner.String())
	if err != nil {
		reterr = err
		return
	}
	err = stmt.Execute()
	if err != nil {
		reterr = err
		return
	}

	var row claimsRow
	stmt.BindResult(&row.blobref, &row.date, &row.claim, &row.attr, &row.value)
	defer stmt.Close()
	for {
		done, err := stmt.Fetch()
		if err != nil {
			reterr = err
			return
		}
		if done {
			break
		}
		t, err := time.Parse(time.RFC3339, trimRFC3339Subseconds(row.date))
		if err != nil {
			log.Printf("Skipping; error parsing time %q: %v", row.date, err)
			continue
		}
		claims = append(claims, &search.Claim{
			BlobRef:   blobref.Parse(row.blobref),
			Signer:    owner,
			Permanode: permanode,
			Type:      row.claim,
			Date:      t,
			Attr:      row.attr,
			Value:     row.value,
		})
	}
	return
}
Example #21
func (dr *DescribeRequest) describedBlob(b *blobref.BlobRef) *DescribedBlob {
	dr.lk.Lock()
	defer dr.lk.Unlock()
	bs := b.String()
	if des, ok := dr.m[bs]; ok {
		return des
	}
	des := &DescribedBlob{Request: dr, BlobRef: b}
	dr.m[bs] = des
	return des
}
Example #22
func (b *DescribedBlob) PeerBlob(br *blobref.BlobRef) *DescribedBlob {
	if b.Request == nil {
		return &DescribedBlob{BlobRef: br, Stub: true}
	}
	b.Request.lk.Lock()
	defer b.Request.lk.Unlock()
	if peer, ok := b.Request.m[br.String()]; ok {
		return peer
	}
	return &DescribedBlob{Request: b.Request, BlobRef: br, Stub: true}
}
Example #23
func (h *SimpleBlobHub) RegisterBlobListener(blob *blobref.BlobRef, ch chan *blobref.BlobRef) {
	h.l.Lock()
	defer h.l.Unlock()
	if h.blobListeners == nil {
		h.blobListeners = make(map[string]map[chan *blobref.BlobRef]bool)
	}
	bstr := blob.String()
	_, ok := h.blobListeners[bstr]
	if !ok {
		h.blobListeners[bstr] = make(map[chan *blobref.BlobRef]bool)
	}
	h.blobListeners[bstr][ch] = true
}
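Together with NotifyBlobReceived (Example #12) and UnregisterBlobListener (Example #27), registration can be used to wait for a particular blob; the helper and the buffered channel are assumptions.
// Hypothetical helper: block until the hub announces the given blob.
// A buffered channel keeps NotifyBlobReceived's delivery goroutine from
// blocking if this caller is slow to receive.
func waitForBlob(h *SimpleBlobHub, br *blobref.BlobRef) *blobref.BlobRef {
	ch := make(chan *blobref.BlobRef, 1)
	h.RegisterBlobListener(br, ch)
	defer h.UnregisterBlobListener(br, ch)
	return <-ch
}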
Example #24
func (mi *Indexer) GetBlobMimeType(blob *blobref.BlobRef) (mime string, size int64, err os.Error) {
	rs, err := mi.db.Query("SELECT type, size FROM blobs WHERE blobref=?", blob.String())
	if err != nil {
		return
	}
	defer rs.Close()
	if !rs.Next() {
		err = os.ENOENT
		return
	}
	err = rs.Scan(&mime, &size)
	return
}
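Both this and the FakeIndex version in Example #18 report a missing blob as os.ENOENT, so callers can separate "not indexed" from real failures; the caller below is a sketch.
// Sketch of a caller relying on the os.ENOENT convention above.
func logBlobType(mi *Indexer, br *blobref.BlobRef) {
	mime, size, err := mi.GetBlobMimeType(br)
	switch {
	case err == os.ENOENT:
		log.Printf("%s not indexed yet", br)
	case err != nil:
		log.Printf("index error for %s: %v", br, err)
	default:
		log.Printf("%s: %s, %d bytes", br, mime, size)
	}
}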
Example #25
func (mi *Indexer) keyIdOfSigner(signer *blobref.BlobRef) (keyid string, err os.Error) {
	rs, err := mi.db.Query("SELECT keyid FROM signerkeyid WHERE blobref=?", signer.String())
	if err != nil {
		return
	}
	defer rs.Close()

	if !rs.Next() {
		return "", fmt.Errorf("mysqlindexer: failed to find keyid of signer %q", signer.String())
	}
	err = rs.Scan(&keyid)
	return
}
Example #26
func (mi *Indexer) GetFileInfo(fileRef *blobref.BlobRef) (*search.FileInfo, os.Error) {
	rs, err := mi.db.Query("SELECT size, filename, mime FROM bytesfiles WHERE schemaref=?",
		fileRef.String())
	if err != nil {
		return nil, err
	}
	defer rs.Close()
	if !rs.Next() {
		return nil, os.ENOENT
	}
	var fi search.FileInfo
	err = rs.Scan(&fi.Size, &fi.FileName, &fi.MimeType)
	return &fi, err
}
Example #27
func (h *SimpleBlobHub) UnregisterBlobListener(blob *blobref.BlobRef, ch chan *blobref.BlobRef) {
	h.l.Lock()
	defer h.l.Unlock()
	if h.blobListeners == nil {
		panic("blobhub: UnregisterBlobListener called without RegisterBlobListener")
	}
	bstr := blob.String()
	set, ok := h.blobListeners[bstr]
	if !ok {
		panic("blobhub: UnregisterBlobListener called without RegisterBlobListener for " + bstr)
	}
	set[ch] = false, false
	if len(set) == 0 {
		h.blobListeners[bstr] = nil, false
	}
}
Example #28
func (tf *Fetcher) Fetch(ref *blobref.BlobRef) (file blobref.ReadSeekCloser, size int64, err os.Error) {
	tf.l.Lock()
	defer tf.l.Unlock()
	if tf.m == nil {
		err = os.ENOENT
		return
	}
	tb, ok := tf.m[ref.String()]
	if !ok {
		err = os.ENOENT
		return
	}
	file = &strReader{tb.Contents, 0}
	size = int64(len(tb.Contents))
	return
}
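A test-style sketch of using the Fetcher above: fetch a ref, read it fully, and check the reported size. ioutil.ReadAll is standard library, the rest follows the signature shown, and the helper itself is hypothetical.
// Hypothetical test helper: read a blob's contents back through Fetch
// above and verify the reported size matches what was read.
func readBlob(tf *Fetcher, ref *blobref.BlobRef) (string, os.Error) {
	file, size, err := tf.Fetch(ref)
	if err != nil {
		return "", err
	}
	defer file.Close()
	data, err := ioutil.ReadAll(file)
	if err != nil {
		return "", err
	}
	if int64(len(data)) != size {
		return "", fmt.Errorf("read %d bytes, want %d", len(data), size)
	}
	return string(data), nil
}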
Example #29
func (mi *Indexer) ExistingFileSchemas(wholeDigest *blobref.BlobRef) (files []*blobref.BlobRef, err os.Error) {
	rs, err := mi.db.Query("SELECT schemaref FROM bytesfiles WHERE wholedigest=?", wholeDigest.String())
	if err != nil {
		return
	}
	defer rs.Close()

	ref := ""
	for rs.Next() {
		if err := rs.Scan(&ref); err != nil {
			return nil, err
		}
		files = append(files, blobref.Parse(ref))
	}
	return
}
Example #30
func (mi *Indexer) populateFile(client *mysql.Client, blobRef *blobref.BlobRef, ss *schema.Superset) (err os.Error) {
	if ss.Fragment {
		return nil
	}
	seekFetcher, err := blobref.SeekerFromStreamingFetcher(mi.BlobSource)
	if err != nil {
		return err
	}

	sha1 := sha1.New()
	fr := ss.NewFileReader(seekFetcher)
	mime, reader := magic.MimeTypeFromReader(fr)
	n, err := io.Copy(sha1, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times.  Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing).  Both options suck.  For now just
		// log and act like all's okay.
		log.Printf("mysqlindex: error indexing file %s: %v", blobRef, err)
		return nil
	}

	attrs := []string{}
	if ss.UnixPermission != "" {
		attrs = append(attrs, "perm")
	}
	if ss.UnixOwnerId != 0 || ss.UnixOwner != "" || ss.UnixGroupId != 0 || ss.UnixGroup != "" {
		attrs = append(attrs, "owner")
	}
	if ss.UnixMtime != "" || ss.UnixCtime != "" || ss.UnixAtime != "" {
		attrs = append(attrs, "time")
	}

	log.Printf("file %s blobref is %s, size %d", blobRef, blobref.FromHash("sha1", sha1), n)
	err = execSQL(client,
		"INSERT IGNORE INTO files (fileschemaref, bytesref, size, filename, mime, setattrs) VALUES (?, ?, ?, ?, ?, ?)",
		blobRef.String(),
		blobref.FromHash("sha1", sha1).String(),
		n,
		ss.FileNameString(),
		mime,
		strings.Join(attrs, ","))
	return
}