Example #1
func (sh *Handler) serveSignerPaths(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJSON(rw, ret)
	defer setPanicError(ret)

	signer := blobref.MustParse(mustGet(req, "signer"))
	target := blobref.MustParse(mustGet(req, "target"))
	paths, err := sh.index.PathsOfSignerTarget(signer, target)
	if err != nil {
		ret["error"] = err.Error()
	} else {
		jpaths := []map[string]interface{}{}
		for _, path := range paths {
			jpaths = append(jpaths, map[string]interface{}{
				"claimRef": path.Claim.String(),
				"baseRef":  path.Base.String(),
				"suffix":   path.Suffix,
			})
		}
		ret["paths"] = jpaths
		dr := sh.NewDescribeRequest()
		for _, path := range paths {
			dr.Describe(path.Base, 2)
		}
		dr.PopulateJSON(ret)
	}
}
Example #2
func (fi *FakeIndex) AddClaim(owner, permanode *blobref.BlobRef, claimType, attr, value string) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	date := fi.nextDate()

	claim := &search.Claim{
		Permanode: permanode,
		Signer:    nil,
		BlobRef:   nil,
		Date:      date,
		Type:      claimType,
		Attr:      attr,
		Value:     value,
	}
	key := permanode.String() + "/" + owner.String()
	fi.ownerClaims[key] = append(fi.ownerClaims[key], claim)

	if claimType == "set-attribute" && strings.HasPrefix(attr, "camliPath:") {
		suffix := attr[len("camliPath:"):]
		path := &search.Path{
			Target: blobref.MustParse(value),
			Suffix: suffix,
		}
		fi.path[fmt.Sprintf("%s\x00%s\x00%s", owner, permanode, suffix)] = path
	}
}
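The fi.path map above builds its key by joining owner, permanode, and suffix with NUL bytes, a cheap way to form a composite key that cannot collide with legitimate string values. A minimal standalone sketch (hypothetical pathKey type, not FakeIndex code) of the more idiomatic alternative, a comparable struct key:

package main

import "fmt"

// pathKey is a comparable struct, so it can be used directly as a map key;
// no separator byte or string concatenation is needed.
type pathKey struct {
	owner, permanode, suffix string
}

func main() {
	paths := map[pathKey]string{}
	paths[pathKey{"owner-1", "perma-1", "photos"}] = "target-1"
	fmt.Println(paths[pathKey{"owner-1", "perma-1", "photos"}]) // target-1
}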
Example #3
// fileMapFromDuplicate queries the server's search interface for an
// existing file whose entire contents have the digest sum (a blobref string).
// If the server has it, it's validated, and then fileMap (which must
// already be partially populated) has its "parts" field populated,
// and then fileMap is uploaded (if necessary) and its blobref is
// returned.  If there's any problem, or a dup doesn't exist, ok is
// false.
func (up *Uploader) fileMapFromDuplicate(bs blobserver.StatReceiver, fileMap schema.Map, sum string) (fileSchema *blobref.BlobRef, ok bool) {
	_, err := up.Client.SearchRoot()
	if err != nil {
		return
	}
	dupFileRef, err := up.Client.SearchExistingFileSchema(blobref.MustParse(sum))
	if err != nil {
		log.Printf("Warning: error searching for already-uploaded copy of %s: %v", sum, err)
		return nil, false
	}
	if dupFileRef == nil {
		return nil, false
	}
	if *flagVerbose {
		log.Printf("Found dup of contents %s in file schema %s", sum, dupFileRef)
	}
	dupMap, err := up.Client.FetchMap(dupFileRef)
	if err != nil {
		log.Printf("Warning: error fetching %v: %v", dupFileRef, err)
		return nil, false
	}
	parts, ok := dupMap["parts"].([]interface{})
	if !ok {
		return nil, false
	}

	fileMap["parts"] = parts // safe, since dupMap never escapes, so sharing parts is okay

	// Hack: convert all the parts' float64 to int64, so they encode as e.g. "1000035"
	// and not "1.000035e+06".  Perhaps we should work in *schema.SuperSets here, and not
	// JSON maps.
	// TODO(bradfitz): clean up?
	for _, p := range parts {
		pm := p.(map[string]interface{})
		pm["size"] = int64(pm["size"].(float64))
	}

	json, err := fileMap.JSON()
	if err != nil {
		return nil, false
	}
	uh := client.NewUploadHandleFromString(json)
	if uh.BlobRef.Equal(dupFileRef) {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return dupFileRef, true
	}
	pr, err := up.uploadHandle(uh)
	if err != nil {
		log.Printf("Warning: error uploading file map after finding server dup of %v: %v", sum, err)
		return nil, false
	}
	return pr.BlobRef, true
}
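The float64-to-int64 hack above exists because encoding/json decodes every JSON number into a float64 when the destination is interface{}. A standalone sketch, using only the standard library (this is not what schema.Map does), of the alternative: json.Decoder.UseNumber keeps each number as a json.Number, which preserves its literal text and converts losslessly when an integer is expected.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	const in = `{"size": 1000035}`

	dec := json.NewDecoder(strings.NewReader(in))
	dec.UseNumber() // decode numbers as json.Number rather than float64

	var m map[string]interface{}
	if err := dec.Decode(&m); err != nil {
		panic(err)
	}

	n := m["size"].(json.Number)
	fmt.Println(n.String()) // 1000035, exactly as written in the input

	i, err := n.Int64() // lossless integer conversion
	if err != nil {
		panic(err)
	}
	fmt.Println(i) // 1000035
}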
Example #4
// wholeFileDigest returns the sha1 digest of the contents of the regular
// file at the absolute path fullPath.
func (up *Uploader) wholeFileDigest(fullPath string) (*blobref.BlobRef, error) {
	// TODO(bradfitz): cache this.
	file, err := up.open(fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	td := &trackDigestReader{r: file}
	_, err = io.Copy(ioutil.Discard, td)
	atomic.AddInt64(&atomicDigestOps, 1)
	if err != nil {
		return nil, err
	}
	return blobref.MustParse(td.Sum()), nil
}
Example #5
func TestDescribe(t *testing.T) {
	for testn, tt := range describeTests {
		idx := test.NewFakeIndex()
		tt.setup(idx)

		h := NewHandler(idx, owner)
		js := make(map[string]interface{})
		dr := h.NewDescribeRequest()
		dr.Describe(blobref.MustParse(tt.blob), tt.depth)
		dr.PopulateJSON(js)
		got, _ := json.MarshalIndent(js, "", "  ")
		want, _ := json.MarshalIndent(tt.expect, "", "  ")
		if !bytes.Equal(got, want) {
			t.Errorf("test %d:\nwant: %s\n got: %s", testn, string(want), string(got))
		}
	}
}
Example #6
// fileMapFromDuplicate queries the server's search interface for an
// existing file whose entire contents have the digest sum (a blobref string).
// If the server has it, it's validated, and then fileMap (which must
// already be partially populated) has its "parts" field populated,
// and then fileMap is uploaded (if necessary) and a PutResult with
// its blobref is returned. If there's any problem, or a dup doesn't
// exist, ok is false.
// If required, Vivify is also done here.
func (up *Uploader) fileMapFromDuplicate(bs blobserver.StatReceiver, fileMap *schema.Builder, sum string) (pr *client.PutResult, ok bool) {
	if noDupSearch {
		return
	}
	_, err := up.Client.SearchRoot()
	if err != nil {
		return
	}
	dupFileRef, err := up.Client.SearchExistingFileSchema(blobref.MustParse(sum))
	if err != nil {
		log.Printf("Warning: error searching for already-uploaded copy of %s: %v", sum, err)
		return nil, false
	}
	if dupFileRef == nil {
		return nil, false
	}
	if *cmdmain.FlagVerbose {
		log.Printf("Found dup of contents %s in file schema %s", sum, dupFileRef)
	}
	dupMap, err := up.Client.FetchSchemaBlob(dupFileRef)
	if err != nil {
		log.Printf("Warning: error fetching %v: %v", dupFileRef, err)
		return nil, false
	}

	fileMap.PopulateParts(dupMap.PartsSize(), dupMap.ByteParts())

	json, err := fileMap.JSON()
	if err != nil {
		return nil, false
	}
	uh := client.NewUploadHandleFromString(json)
	if up.fileOpts.wantVivify() {
		uh.Vivify = true
	}
	if !uh.Vivify && uh.BlobRef.Equal(dupFileRef) {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return &client.PutResult{BlobRef: dupFileRef, Size: int64(len(json)), Skipped: true}, true
	}
	pr, err = up.Upload(uh)
	if err != nil {
		log.Printf("Warning: error uploading file map after finding server dup of %v: %v", sum, err)
		return nil, false
	}
	return pr, true
}
Example #7
func TestBlobFromReader(t *testing.T) {
	br := blobref.MustParse("sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
	blob, err := BlobFromReader(br, strings.NewReader(`{"camliVersion": 1, "camliType": "foo"}  `))
	if err != nil {
		t.Error(err)
	} else if blob.Type() != "foo" {
		t.Errorf("got type %q; want foo", blob.Type())
	}

	blob, err = BlobFromReader(br, strings.NewReader(`{"camliVersion": 1, "camliType": "foo"}  X  `))
	if err == nil {
		// TODO(bradfitz): fix this somehow. Currently encoding/json's
		// decoder over-reads.
		// See: https://code.google.com/p/go/issues/detail?id=1955 ,
		// which was "fixed", but not really.
		t.Logf("TODO(bradfitz): make sure bogus non-whitespace after the JSON object causes an error.")
	}
}
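One way to make the trailing non-whitespace above fail, sketched against only the standard library (this is not what BlobFromReader does, and decodeSingle is a hypothetical helper): decode one value, then ask the decoder for one more token and insist on io.EOF.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// decodeSingle decodes exactly one JSON value from r and reports an error
// if anything other than whitespace follows it.
func decodeSingle(r io.Reader, v interface{}) error {
	dec := json.NewDecoder(r)
	if err := dec.Decode(v); err != nil {
		return err
	}
	// After the first value, the only acceptable outcome is io.EOF.
	if _, err := dec.Token(); err != io.EOF {
		return fmt.Errorf("unexpected data after top-level JSON value")
	}
	return nil
}

func main() {
	var v map[string]interface{}
	fmt.Println(decodeSingle(strings.NewReader(`{"camliVersion": 1}  `), &v)) // <nil>
	fmt.Println(decodeSingle(strings.NewReader(`{"camliVersion": 1} X`), &v)) // error
}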
Example #8
// wholeFileDigest returns the sha1 digest of the contents of the regular
// file at the absolute path fullPath.
func (up *Uploader) wholeFileDigest(fullPath string) (*blobref.BlobRef, error) {
	// TODO(bradfitz): cache this.
	file, err := up.open(fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	td := &trackDigestReader{r: file}
	// Warning: the struct in the following line exists to hide
	// ioutil.Discard's ReadFrom from io.Copy, since ReadFrom uses
	// an intentionally-racy buffer that's passed to the reader,
	// which was causing SHA-1 calculation corruption.
	_, err = io.Copy(struct{ io.Writer }{ioutil.Discard}, td)
	atomic.AddInt64(&atomicDigestOps, 1)
	if err != nil {
		return nil, err
	}
	return blobref.MustParse(td.Sum()), nil
}
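The struct{ io.Writer } wrapper above is a general Go technique: embedding an interface in an anonymous struct narrows the method set to that interface alone, so io.Copy's internal type assertion for io.ReaderFrom fails and it falls back to a plain Read/Write loop. A standalone sketch of the mechanism (not part of the Uploader):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// ioutil.Discard's concrete type implements io.ReaderFrom, so io.Copy
	// would normally hand the source reader straight to its ReadFrom.
	_, ok := ioutil.Discard.(io.ReaderFrom)
	fmt.Println(ok) // true

	// Wrapping it in an anonymous struct exposes only the embedded
	// io.Writer, hiding ReadFrom from io.Copy's type assertion.
	var w io.Writer = struct{ io.Writer }{ioutil.Discard}
	_, ok = w.(io.ReaderFrom)
	fmt.Println(ok) // false
}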
Example #9
func (sh *Handler) serveSignerAttrValue(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJSON(rw, ret)
	defer setPanicError(ret)

	signer := blobref.MustParse(mustGet(req, "signer"))
	attr := mustGet(req, "attr")
	value := mustGet(req, "value")
	pn, err := sh.index.PermanodeOfSignerAttrValue(signer, attr, value)
	if err != nil {
		ret["error"] = err.Error()
	} else {
		ret["permanode"] = pn.String()

		dr := sh.NewDescribeRequest()
		dr.Describe(pn, 2)
		dr.PopulateJSON(ret)
	}
}
Example #10
func (c *Client) viaPathTo(b *blobref.BlobRef) (path []*blobref.BlobRef) {
	if c.via == nil {
		return nil
	}
	it := b.String()
	// Append path backwards first,
	for {
		v := c.via[it]
		if v == "" {
			break
		}
		path = append(path, blobref.MustParse(v))
		it = v
	}
	// Then reverse it
	for i := 0; i < len(path)/2; i++ {
		path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
	}
	return
}
Example #11
	"testing"

	"camlistore.org/pkg/blobref"
	"camlistore.org/pkg/test"
)

type describeTest struct {
	setup func(fi *test.FakeIndex)

	blob  string // blobref to describe
	depth int

	expect map[string]interface{}
}

var owner = blobref.MustParse("abcown-123")

var describeTests = []describeTest{
	{
		func(fi *test.FakeIndex) {},
		"abc-555",
		1,
		map[string]interface{}{},
	},

	{
		func(fi *test.FakeIndex) {
			fi.AddMeta(blobref.MustParse("abc-555"), "image/jpeg", 999)
		},
		"abc-555",
		1,
Example #12
// servePermanodesWithAttr uses the indexer to search for the permanodes matching
// the request.
// The valid values for the "attr" key in the request (i.e. the only attributes
// for a permanode which are actually indexed as such) are "tag" and "title".
func (sh *Handler) servePermanodesWithAttr(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJSON(rw, ret)
	defer setPanicError(ret)

	signer := blobref.MustParse(mustGet(req, "signer"))
	value := req.FormValue("value")
	fuzzy := req.FormValue("fuzzy") // exact match if empty
	fuzzyMatch := false
	if fuzzy != "" {
		lowered := strings.ToLower(fuzzy)
		if lowered == "true" || lowered == "t" {
			fuzzyMatch = true
		}
	}
	attr := req.FormValue("attr") // all attributes if empty
	if attr == "" {               // and force fuzzy in that case.
		fuzzyMatch = true
	}
	maxResults := maxPermanodes
	max := req.FormValue("max")
	if max != "" {
		maxR, err := strconv.Atoi(max)
		if err != nil {
			log.Printf("Invalid specified max results 'max': " + err.Error())
			return
		}
		if maxR < maxResults {
			maxResults = maxR
		}
	}

	ch := make(chan *blobref.BlobRef, buffered)
	errch := make(chan error)
	go func() {
		errch <- sh.index.SearchPermanodesWithAttr(ch,
			&PermanodeByAttrRequest{Attribute: attr,
				Query:      value,
				Signer:     signer,
				FuzzyMatch: fuzzyMatch,
				MaxResults: maxResults})
	}()

	dr := sh.NewDescribeRequest()

	withAttr := jsonMapList()
	for res := range ch {
		dr.Describe(res, 2)
		jm := jsonMap()
		jm["permanode"] = res.String()
		withAttr = append(withAttr, jm)
	}

	err := <-errch
	if err != nil {
		ret["error"] = err.Error()
		ret["errorType"] = "server"
		return
	}

	ret["withAttr"] = withAttr
	dr.PopulateJSON(ret)
}
Example #13
// Unlike the index interface's EdgesTo method, the "edgesto" Handler
// here additionally filters out since-deleted permanode edges.
func (sh *Handler) serveEdgesTo(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJSON(rw, ret)
	defer setPanicError(ret)

	toRef := blobref.MustParse(mustGet(req, "blobref"))
	toRefStr := toRef.String()
	blobInfo := jsonMap()
	ret[toRefStr] = blobInfo

	jsonEdges := jsonMapList()

	edges, err := sh.index.EdgesTo(toRef, nil)
	if err != nil {
		panic(err)
	}

	type mapOrError struct {
		m   map[string]interface{} // nil if no map
		err error
	}
	resc := make(chan mapOrError)
	verify := func(edge *Edge) {
		fromStr := edge.From.String()
		db, err := sh.NewDescribeRequest().DescribeSync(edge.From)
		if err != nil {
			resc <- mapOrError{err: err}
			return
		}
		found := false
		if db.Permanode != nil {
			for attr, vv := range db.Permanode.Attr {
				if IsBlobReferenceAttribute(attr) {
					for _, v := range vv {
						if v == toRefStr {
							found = true
						}
					}
				}
			}
		}
		var em map[string]interface{}
		if found {
			em = jsonMap()
			em["from"] = fromStr
			em["fromType"] = "permanode"
		}
		resc <- mapOrError{m: em}
	}
	verifying := 0
	for _, edge := range edges {
		if edge.FromType == "permanode" {
			verifying++
			go verify(edge)
			continue
		}
		em := jsonMap()
		em["from"] = edge.From.String()
		em["fromType"] = edge.FromType
		jsonEdges = append(jsonEdges, em)
	}
	for i := 0; i < verifying; i++ {
		res := <-resc
		if res.err != nil {
			panic(res.err) // caught and put in JSON response
		}
		if res.m != nil {
			jsonEdges = append(jsonEdges, res.m)
		}
	}
	blobInfo["edgesTo"] = jsonEdges
}
Example #14
func TestAttribute(t *testing.T) {
	tm := time.Unix(123, 456)
	br := blobref.MustParse("xxx-123")
	tests := []struct {
		bb   *Builder
		want string
	}{
		{
			bb: NewSetAttributeClaim(br, "attr1", "val1"),
			want: `{"camliVersion": 1,
  "attribute": "attr1",
  "camliType": "claim",
  "claimDate": "1970-01-01T00:02:03.000000456Z",
  "claimType": "set-attribute",
  "value": "val1"
}`,
		},
		{
			bb: NewAddAttributeClaim(br, "tag", "funny"),
			want: `{"camliVersion": 1,
  "attribute": "tag",
  "camliType": "claim",
  "claimDate": "1970-01-01T00:02:03.000000456Z",
  "claimType": "add-attribute",
  "value": "funny"
}`,
		},
		{
			bb: NewDelAttributeClaim(br, "attr1"),
			want: `{"camliVersion": 1,
  "attribute": "attr1",
  "camliType": "claim",
  "claimDate": "1970-01-01T00:02:03.000000456Z",
  "claimType": "del-attribute"
}`,
		},
		{
			bb: NewClaim(&ClaimParam{
				Permanode: br,
				Type:      SetAttribute,
				Attribute: "foo",
				Value:     "bar",
			}, &ClaimParam{
				Permanode: br,
				Type:      DelAttribute,
				Attribute: "foo",
				Value:     "specific-del",
			}, &ClaimParam{
				Permanode: br,
				Type:      DelAttribute,
				Attribute: "foo",
			}),
			want: `{"camliVersion": 1,
  "camliType": "claim",
  "claimDate": "1970-01-01T00:02:03.000000456Z",
  "claimType": "multi",
  "claims": [
    {
      "attribute": "foo",
      "claimType": "set-attribute",
      "value": "bar"
    },
    {
      "attribute": "foo",
      "claimType": "del-attribute",
      "value": "specific-del"
    },
    {
      "attribute": "foo",
      "claimType": "del-attribute"
    }
  ]
}`,
		},
	}
	for i, tt := range tests {
		tt.bb.SetClaimDate(tm)
		got, err := tt.bb.JSON()
		if err != nil {
			t.Errorf("%d. JSON error = %v", i, err)
			continue
		}
		if got != tt.want {
			t.Errorf("%d.\t got:\n%s\n\twant:q\n%s", i, got, tt.want)
		}
	}
}
Example #15
		return nil, err
	}
	elseSto, err := ld.GetStorage(elseTarget)
	if err != nil {
		return nil, err
	}

	switch ifStr {
	case "isSchema":
		return isSchemaPicker(thenSto, elseSto), nil
	}
	return nil, fmt.Errorf("cond: unsupported 'if' type of %q", ifStr)
}

// dummyRef is just a dummy reference to give to BlobFromReader.
var dummyRef = blobref.MustParse("sha1-829c3804401b0727f70f73d4415e162400cbe57b")

func isSchemaPicker(thenSto, elseSto blobserver.Storage) storageFunc {
	return func(src io.Reader) (dest blobserver.Storage, overRead []byte, err error) {
		var buf bytes.Buffer
		tee := io.TeeReader(src, &buf)
		blob, err := schema.BlobFromReader(dummyRef, tee)
		if err != nil || blob.Type() == "" {
			return elseSto, buf.Bytes(), nil
		}
		return thenSto, buf.Bytes(), nil
	}
}

func (sto *condStorage) ReceiveBlob(b *blobref.BlobRef, source io.Reader) (sb blobref.SizedBlobRef, err error) {
	destSto, overRead, err := sto.storageForReceive(source)
Example #16
func TestPublishURLs(t *testing.T) {
	owner := blobref.MustParse("owner-123")
	picNode := blobref.MustParse("picpn-123")
	galRef := blobref.MustParse("gal-123")
	rootRef := blobref.MustParse("root-abc")
	camp0 := blobref.MustParse("picpn-98765432100")
	camp1 := blobref.MustParse("picpn-98765432111")
	camp0f := blobref.MustParse("picfile-f00f00f00a5")
	camp1f := blobref.MustParse("picfile-f00f00f00b6")

	rootName := "foo"

	for ti, tt := range publishURLTests {
		idx := test.NewFakeIndex()
		idx.AddSignerAttrValue(owner, "camliRoot", rootName, rootRef)
		sh := search.NewHandler(idx, owner)
		ph := &PublishHandler{
			RootName: rootName,
			Search:   sh,
		}

		idx.AddMeta(owner, "text/x-openpgp-public-key", 100)
		for _, br := range []*blobref.BlobRef{picNode, galRef, rootRef, camp0, camp1} {
			idx.AddMeta(br, "application/json; camliType=permanode", 100)
		}
		for _, br := range []*blobref.BlobRef{camp0f, camp1f} {
			idx.AddMeta(br, "application/json; camliType=file", 100)
		}

		idx.AddClaim(owner, rootRef, "set-attribute", "camliPath:singlepic", picNode.String())
		idx.AddClaim(owner, rootRef, "set-attribute", "camliPath:camping", galRef.String())
		idx.AddClaim(owner, galRef, "add-attribute", "camliMember", camp0.String())
		idx.AddClaim(owner, galRef, "add-attribute", "camliMember", camp1.String())
		idx.AddClaim(owner, camp0, "set-attribute", "camliContent", camp0f.String())
		idx.AddClaim(owner, camp1, "set-attribute", "camliContent", camp1f.String())

		rw := httptest.NewRecorder()
		if !strings.HasPrefix(tt.path, "/pics/") {
			panic("expected /pics/ prefix on " + tt.path)
		}
		req, _ := http.NewRequest("GET", "http://foo.com"+tt.path, nil)

		pfxh := &httputil.PrefixHandler{
			Prefix: "/pics/",
			Handler: http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) {
				pr := ph.NewRequest(rw, req)

				err := pr.findSubject()
				if tt.subject != "" {
					if err != nil {
						t.Errorf("test #%d, findSubject: %v", ti, err)
						return
					}
					if pr.subject.String() != tt.subject {
						t.Errorf("test #%d, got subject %q, want %q", ti, pr.subject, tt.subject)
					}
				}
				if pr.subres != tt.subres {
					t.Errorf("test #%d, got subres %q, want %q", ti, pr.subres, tt.subres)
				}
			}),
		}
		pfxh.ServeHTTP(rw, req)
	}
}
Example #17
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
	sf, ok := blobReceiver.(blobref.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blobref.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: json decoding error: %v", err)
	}

	// The file schema must have a modtime to vivify, as the modtime is used for all three of:
	// 1) the permanode's signature
	// 2) the camliContent attribute claim's "claimDate"
	// 3) the signature time of 2)
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}

	permanodeBB := schema.NewHashPlannedPermanode(h)
	permanodeBB.SetSigner(blobref.MustParse(publicKeyBlobRef))
	permanodeBB.SetClaimDate(claimDate)
	permanodeSigned, err := sigHelper.Sign(permanodeBB)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
	}
	permanodeRef := blobref.SHA1FromString(permanodeSigned)
	_, err = blobReceiver.ReceiveBlob(permanodeRef, strings.NewReader(permanodeSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
	}

	contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.BlobRef.String())
	contentClaimBB.SetSigner(blobref.MustParse(publicKeyBlobRef))
	contentClaimBB.SetClaimDate(claimDate)
	contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	contentClaimRef := blobref.SHA1FromString(contentClaimSigned)
	_, err = blobReceiver.ReceiveBlob(contentClaimRef, strings.NewReader(contentClaimSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
	}
	return nil
}