Example #1
// Issue 228: only follow transitive blobref links in known trusted schema fields.
func TestSharingTransitiveSafety(t *testing.T) {
	st := newShareTester(t)
	defer st.done()

	content := "the secret"
	contentRef := blob.SHA1FromString(content)

	// User-injected blob, somehow.
	evilClaim := fmt.Sprintf("Some payload containing the ref: %v", contentRef)
	evilClaimRef := blob.SHA1FromString(evilClaim)

	share := schema.NewShareRef(schema.ShareHaveRef, false).
		SetShareTarget(evilClaimRef).
		SetShareIsTransitive(true).
		SetSigner(blob.SHA1FromString("irrelevant")).
		SetRawStringField("camliSig", "alsounused")
	shareRef := func() blob.Ref { return share.Blob().BlobRef() }

	st.put(share.Blob())
	st.putRaw(contentRef, content)
	st.putRaw(evilClaimRef, evilClaim)

	st.testGet(shareRef().String(), noError)
	st.testGet(fmt.Sprintf("%s?via=%s", evilClaimRef, shareRef()), noError)

	st.testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, shareRef(), evilClaimRef), viaChainInvalidLink)
	if !st.slept() {
		t.Error("expected sleep after miss")
	}
}
Example #2
func TestHandleGetViaSharing(t *testing.T) {
	st := newShareTester(t)
	defer st.done()

	content := "monkey" // the secret
	contentRef := blob.SHA1FromString(content)

	link := fmt.Sprintf(`{"camliVersion": 1,
"camliType": "file",
"parts": [
   {"blobRef": "%v", "size": %d}
]}`, contentRef, len(content))
	linkRef := blob.SHA1FromString(link)

	share := schema.NewShareRef(schema.ShareHaveRef, false).
		SetShareTarget(linkRef).
		SetSigner(blob.SHA1FromString("irrelevant")).
		SetRawStringField("camliSig", "alsounused")
	shareRef := func() blob.Ref { return share.Blob().BlobRef() }

	t.Logf("Checking share blob doesn't yet exist...")
	st.testGet(shareRef().String(), shareFetchFailed)
	if !st.slept() {
		t.Error("expected sleep after miss")
	}
	st.put(share.Blob())
	t.Logf("Checking share blob now exists...")
	st.testGet(shareRef().String(), noError)

	t.Logf("Checking we can't get the content directly via the share...")
	st.testGet(fmt.Sprintf("%s?via=%s", contentRef, shareRef()), shareTargetInvalid)

	t.Logf("Checking we can't get the link (file) blob directly...")
	st.putRaw(linkRef, link)
	st.testGet(linkRef.String(), shareBlobInvalid)

	t.Logf("Checking we can get the link (file) blob via the share...")
	st.testGet(fmt.Sprintf("%s?via=%s", linkRef, shareRef()), noError)

	t.Logf("Checking we can't get the content via the non-transitive share...")
	st.testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, shareRef(), linkRef), shareNotTransitive)

	// TODO: new test?
	share.SetShareIsTransitive(true)
	st.put(share.Blob())
	st.testGet(fmt.Sprintf("%s?via=%s,%s", linkRef, shareRef(), linkRef), viaChainInvalidLink)

	st.putRaw(contentRef, content)
	st.testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, shareRef(), linkRef), noError)

	// new test?
	share.SetShareExpiration(time.Now().Add(-time.Duration(10) * time.Minute))
	st.put(share.Blob())
	st.testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, shareRef(), linkRef), shareExpired)

	share.SetShareExpiration(time.Now().Add(time.Duration(10) * time.Minute))
	st.put(share.Blob())
	st.testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, shareRef(), linkRef), noError)
}
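Grounded only in the URL shapes the test above exercises, here is a hedged client-side sketch of fetching the shared content over plain HTTP. The host and share-handler prefix are hypothetical; contentRef, shareRef, and linkRef are the values from the test, and the via parameter lists, in order, the blobs that authorize reaching the target.

// Hypothetical share-handler URL; the ?via= chain mirrors the requests
// st.testGet issues above: share blob -> link (file) blob -> content.
url := fmt.Sprintf("http://example.com/share/%s?via=%s,%s", contentRef, shareRef(), linkRef)
resp, err := http.Get(url)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
log.Printf("shared content: %s", body) // "monkey", if the share is transitive and unexpired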
Example #3
func TestQueryFileConstraint_WholeRef(t *testing.T) {
	testQueryTypes(t, memIndexTypes, func(qt *queryTest) {
		id := qt.id
		fileRef, _ := id.UploadFile("some-stuff.txt", "hello", time.Unix(123, 0))
		qt.t.Logf("fileRef = %q", fileRef)
		p1 := id.NewPlannedPermanode("1")
		id.SetAttribute(p1, "camliContent", fileRef.String())

		fileRef2, _ := id.UploadFile("other-file", "hellooooo", time.Unix(456, 0))
		qt.t.Logf("fileRef2 = %q", fileRef2)
		p2 := id.NewPlannedPermanode("2")
		id.SetAttribute(p2, "camliContent", fileRef2.String())

		sq := &SearchQuery{
			Constraint: &Constraint{
				Permanode: &PermanodeConstraint{
					Attr: "camliContent",
					ValueInSet: &Constraint{
						File: &FileConstraint{
							WholeRef: blob.SHA1FromString("hello"),
						},
					},
				},
			},
		}
		qt.wantRes(sq, p1)
	})
}
Example #4
// fileMapFromDuplicate queries the server's search interface for an
// existing file blob for the file contents of wholeRef.
// If the server has it, it's validated, and then fileMap (which must
// already be partially populated) has its "parts" field populated,
// and then fileMap is uploaded (if necessary).
// If no file blob is found, a zero blob.Ref (and no error) is returned.
func (cl *Client) fileMapFromDuplicate(fileMap *schema.Builder, wholeRef blob.Ref) (blob.Ref, error) {
	dupFileRef, err := cl.SearchExistingFileSchema(wholeRef)
	if err != nil {
		return blob.Ref{}, err
	}
	if !dupFileRef.Valid() {
		// because SearchExistingFileSchema returns blob.Ref{}, nil when file is not found.
		return blob.Ref{}, nil
	}
	dupMap, err := cl.FetchSchemaBlob(dupFileRef)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("could not find existing file blob for wholeRef %q: %v", wholeRef, err)
	}
	fileMap.PopulateParts(dupMap.PartsSize(), dupMap.ByteParts())
	json, err := fileMap.JSON()
	if err != nil {
		return blob.Ref{}, fmt.Errorf("could not write file map for wholeRef %q: %v", wholeRef, err)
	}
	bref := blob.SHA1FromString(json)
	if bref == dupFileRef {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return dupFileRef, nil
	}
	sbr, err := cl.ReceiveBlob(bref, strings.NewReader(json))
	if err != nil {
		return blob.Ref{}, err
	}
	return sbr.Ref, nil
}
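The early return above works because blobrefs are content addresses: if the rebuilt JSON hashes to the ref the server already holds, there is nothing new to upload. A minimal standalone sketch of that check (the name needsUpload is illustrative, not part of the client package):

// needsUpload reports whether a serialized schema blob differs from a blob
// the server is already known to hold (identical bytes yield the same SHA-1 ref).
func needsUpload(json string, knownRef blob.Ref) bool {
	return blob.SHA1FromString(json) != knownRef
}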
Example #5
func (c *Client) initSignerPublicKeyBlobref() {
	if c.paramsOnly {
		log.Print("client: paramsOnly set; cannot get public key from config or env vars.")
		return
	}
	keyId := os.Getenv("CAMLI_KEYID")
	if keyId == "" {
		configOnce.Do(parseConfig)
		keyId = config.Identity
		if keyId == "" {
			log.Fatalf("No 'identity' key in JSON configuration file %q; have you run \"camput init\"?", osutil.UserClientConfigPath())
		}
	}
	keyRing := c.SecretRingFile()
	if !fileExists(keyRing) {
		log.Fatalf("Could not find keyId %q, because secret ring file %q does not exist.", keyId, keyRing)
	}
	entity, err := jsonsign.EntityFromSecring(keyId, keyRing)
	if err != nil {
		log.Fatalf("Couldn't find keyId %q in secret ring %v: %v", keyId, keyRing, err)
	}
	armored, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		log.Fatalf("Error serializing public key: %v", err)
	}

	c.signerPublicKeyRef = blob.SHA1FromString(armored)
	c.publicKeyArmored = armored
}
Example #6
func TestSigner(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	ent, err := jsonsign.NewEntity()
	if err != nil {
		t.Fatal(err)
	}
	armorPub, err := jsonsign.ArmoredPublicKey(ent)
	if err != nil {
		t.Fatal(err)
	}
	pubRef := blob.SHA1FromString(armorPub)
	sig, err := NewSigner(pubRef, strings.NewReader(armorPub), ent)
	if err != nil {
		t.Fatalf("NewSigner: %v", err)
	}
	pn, err := NewUnsignedPermanode().Sign(sig)
	if err != nil {
		t.Fatalf("NewPermanode: %v", err)
	}
	if !strings.Contains(pn, `,"camliSig":"`) {
		t.Errorf("Permanode doesn't look signed: %v", pn)
	}
}
Example #7
// uploadBytes populates bb (a builder of either type "bytes" or
// "file", which is a superset of "bytes"), sets it to the provided
// size, and populates it with the provided spans.  The bytes or file schema
// blob is uploaded and its blobref is returned.
func uploadBytes(bs blobserver.StatReceiver, bb *Builder, size int64, s []span) *uploadBytesFuture {
	future := newUploadBytesFuture()
	parts := []BytesPart{}
	addBytesParts(bs, &parts, s, future)

	if err := bb.PopulateParts(size, parts); err != nil {
		future.errc <- err
		return future
	}

	// Hack until camlistore.org/issue/102 is fixed. If we happen to upload
	// the "file" schema before any of its parts arrive, then the indexer
	// can get confused.  So wait on the parts before, and then upload
	// the "file" blob afterwards.
	if bb.Type() == "file" {
		future.errc <- nil
		_, err := future.Get() // may not be nil, if children parts failed
		future = newUploadBytesFuture()
		if err != nil {
			future.errc <- err
			return future
		}
	}

	json := bb.Blob().JSON()
	br := blob.SHA1FromString(json)
	future.br = br
	go func() {
		_, err := uploadString(bs, br, json)
		future.errc <- err
	}()
	return future
}
Example #8
File: env.go Project: rn2dy/camlistore
func (e *Env) SetCamdevVars(altkey bool) {
	e.Set("CAMLI_CONFIG_DIR", filepath.Join("config", "dev-client-dir"))
	e.Set("CAMLI_AUTH", "userpass:camlistore:pass3179")
	e.Set("CAMLI_DEV_KEYBLOBS", filepath.FromSlash("config/dev-client-dir/keyblobs"))

	secring := defaultSecring
	identity := defaultIdentity

	if altkey {
		secring = filepath.FromSlash("pkg/jsonsign/testdata/password-foo-secring.gpg")
		identity = "C7C3E176"
		println("**\n** Note: password is \"foo\"\n**\n")
	} else {
		if *flagSecretRing != "" {
			secring = *flagSecretRing
		}
		if *flagIdentity != "" {
			identity = *flagIdentity
		}
	}

	entity, err := jsonsign.EntityFromSecring(identity, secring)
	if err != nil {
		panic(err)
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		panic(err)
	}
	pubKeyRef := blob.SHA1FromString(armoredPublicKey)

	e.Set("CAMLI_SECRET_RING", secring)
	e.Set("CAMLI_KEYID", identity)
	e.Set("CAMLI_PUBKEY_BLOBREF", pubKeyRef.String())
}
Example #9
func doKeyStuff(b *testing.B) keyStuff {
	camliRootPath, err := osutil.GoPackagePath("camlistore.org")
	if err != nil {
		b.Fatal("Package camlistore.org no found in $GOPATH or $GOPATH not defined")
	}
	secretRingFile := filepath.Join(camliRootPath, "pkg", "jsonsign", "testdata", "test-secring.gpg")
	pubKey := `-----BEGIN PGP PUBLIC KEY BLOCK-----

xsBNBEzgoVsBCAC/56aEJ9BNIGV9FVP+WzenTAkg12k86YqlwJVAB/VwdMlyXxvi
bCT1RVRfnYxscs14LLfcMWF3zMucw16mLlJCBSLvbZ0jn4h+/8vK5WuAdjw2YzLs
WtBcjWn3lV6tb4RJz5gtD/o1w8VWxwAnAVIWZntKAWmkcChCRgdUeWso76+plxE5
aRYBJqdT1mctGqNEISd/WYPMgwnWXQsVi3x4z1dYu2tD9uO1dkAff12z1kyZQIBQ
rexKYRRRh9IKAayD4kgS0wdlULjBU98aeEaMz1ckuB46DX3lAYqmmTEL/Rl9cOI0
Enpn/oOOfYFa5h0AFndZd1blMvruXfdAobjVABEBAAE=
=28/7
-----END PGP PUBLIC KEY BLOCK-----`
	return keyStuff{
		secretRingFile: secretRingFile,
		pubKey:         pubKey,
		pubKeyRef:      blob.SHA1FromString(pubKey),
		entityFetcher: &jsonsign.CachingEntityFetcher{
			Fetcher: &jsonsign.FileEntityFetcher{File: secretRingFile},
		},
	}
}
Example #10
// This is the simple 1MB chunk version. The rolling checksum version is below.
func writeFileMapOld(bs blobserver.StatReceiver, file *Builder, r io.Reader) (blob.Ref, error) {
	parts, size := []BytesPart{}, int64(0)

	var buf bytes.Buffer
	for {
		buf.Reset()
		n, err := io.Copy(&buf, io.LimitReader(r, maxBlobSize))
		if err != nil {
			return blob.Ref{}, err
		}
		if n == 0 {
			break
		}

		hash := blob.NewHash()
		io.Copy(hash, bytes.NewReader(buf.Bytes()))
		br := blob.RefFromHash(hash)
		hasBlob, err := serverHasBlob(bs, br)
		if err != nil {
			return blob.Ref{}, err
		}
		if !hasBlob {
			sb, err := bs.ReceiveBlob(br, &buf)
			if err != nil {
				return blob.Ref{}, err
			}
			if want := (blob.SizedRef{br, uint32(n)}); sb != want {
				return blob.Ref{}, fmt.Errorf("schema/filewriter: wrote %s, expect", sb, want)
			}
		}

		size += n
		parts = append(parts, BytesPart{
			BlobRef: br,
			Size:    uint64(n),
			Offset:  0, // into BlobRef to read from (not of dest)
		})
	}

	err := file.PopulateParts(size, parts)
	if err != nil {
		return blob.Ref{}, err
	}

	json := file.Blob().JSON()
	br := blob.SHA1FromString(json)
	sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
	if err != nil {
		return blob.Ref{}, err
	}
	if expect := (blob.SizedRef{br, uint32(len(json))}); expect != sb {
		return blob.Ref{}, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
	}

	return br, nil
}
Example #11
func (c *initCmd) RunCommand(args []string) error {
	if len(args) > 0 {
		return cmdmain.ErrUsage
	}

	if c.newKey && c.keyId != "" {
		log.Fatal("--newkey and --gpgkey are mutually exclusive")
	}

	if c.userPass != "" {
		cc, err := c.clientConfigFromServer()
		if err != nil {
			return err
		}
		return c.writeConfig(cc)
	}

	var err error
	if c.newKey {
		c.secretRing = osutil.DefaultSecretRingFile()
		c.keyId, err = jsonsign.GenerateNewSecRing(c.secretRing)
		if err != nil {
			return err
		}
	} else {
		if err := c.initSecretRing(); err != nil {
			return err
		}
		if err := c.initKeyId(); err != nil {
			return err
		}
	}

	pubArmor, err := c.getPublicKeyArmored()
	if err != nil {
		return err
	}

	bref := blob.SHA1FromString(string(pubArmor))

	log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())

	if c.noconfig {
		return nil
	}

	return c.writeConfig(&clientconfig.Config{
		Servers: map[string]*clientconfig.Server{
			"localhost": {
				Server:    "http://localhost:3179",
				IsDefault: true,
				Auth:      "localhost",
			},
		},
		Identity:     c.keyId,
		IgnoredFiles: []string{".DS_Store"},
	})
}
Example #12
func getSignerPublicKeyBlobref() (signerRef blob.Ref, armored string, ok bool) {
	configOnce.Do(parseConfig)
	key := "keyId"
	keyId, ok := config[key].(string)
	if !ok {
		log.Printf("No key %q in JSON configuration file %q; have you run \"camput init\"?", key, osutil.UserClientConfigPath())
		return
	}
	keyRing, hasKeyRing := config["secretRing"].(string)
	if !hasKeyRing {
		if fn := osutil.IdentitySecretRing(); fileExists(fn) {
			keyRing = fn
		} else if fn := jsonsign.DefaultSecRingPath(); fileExists(fn) {
			keyRing = fn
		} else {
			log.Printf("Couldn't find keyId %q; no 'secretRing' specified in config file, and no standard secret ring files exist.")
			return
		}
	}
	entity, err := jsonsign.EntityFromSecring(keyId, keyRing)
	if err != nil {
		log.Printf("Couldn't find keyId %q in secret ring: %v", keyId, err)
		return
	}
	armored, err = jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		log.Printf("Error serializing public key: %v", err)
		return
	}

	// TODO(mpl): integrate with getSelfPubKeyDir if possible.
	selfPubKeyDir, ok := config["selfPubKeyDir"].(string)
	if !ok {
		selfPubKeyDir = osutil.KeyBlobsDir()
		log.Printf("No 'selfPubKeyDir' defined in %q, defaulting to %v", osutil.UserClientConfigPath(), selfPubKeyDir)
	}
	fi, err := os.Stat(selfPubKeyDir)
	if err != nil || !fi.IsDir() {
		log.Printf("selfPubKeyDir of %q doesn't exist or not a directory", selfPubKeyDir)
		return
	}

	br := blob.SHA1FromString(armored)

	pubFile := filepath.Join(selfPubKeyDir, br.String()+".camli")
	fi, err = os.Stat(pubFile)
	if err != nil {
		err = ioutil.WriteFile(pubFile, []byte(armored), 0644)
		if err != nil {
			log.Printf("Error writing public key to %q: %v", pubFile, err)
			return
		}
	}

	return br, armored, true
}
Example #13
// Error is errNoOwner if no identity configured
func (b *lowBuilder) searchOwner() (br blob.Ref, err error) {
	if b.high.Identity == "" {
		return br, errNoOwner
	}
	entity, err := jsonsign.EntityFromSecring(b.high.Identity, b.high.IdentitySecretRing)
	if err != nil {
		return br, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return br, err
	}
	return blob.SHA1FromString(armoredPublicKey), nil
}
Example #14
func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	var (
		// either a short form ("26F5ABDA") or one of the longer forms.
		keyId = conf.RequiredString("keyId")

		pubKeyDestPrefix = conf.OptionalString("publicKeyDest", "")
		secretRing       = conf.OptionalString("secretRing", "")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	h := &Handler{
		secretRing: secretRing,
	}

	var err error
	h.entity, err = jsonsign.EntityFromSecring(keyId, h.secretRingPath())
	if err != nil {
		return nil, err
	}

	h.pubKey, err = jsonsign.ArmoredPublicKey(h.entity)
	if err != nil {
		return nil, err
	}

	ms := &memory.Storage{}
	h.pubKeyBlobRef = blob.SHA1FromString(h.pubKey)
	if _, err := ms.ReceiveBlob(h.pubKeyBlobRef, strings.NewReader(h.pubKey)); err != nil {
		return nil, fmt.Errorf("could not store pub key blob: %v", err)
	}
	h.pubKeyFetcher = ms

	if pubKeyDestPrefix != "" {
		sto, err := ld.GetStorage(pubKeyDestPrefix)
		if err != nil {
			return nil, err
		}
		h.pubKeyDest = sto
	}
	h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String()
	h.pubKeyHandler = &gethandler.Handler{
		Fetcher: ms,
	}

	h.signer, err = schema.NewSigner(h.pubKeyBlobRef, strings.NewReader(h.pubKey), h.entity)
	if err != nil {
		return nil, err
	}

	return h, nil
}
Example #15
func setCamdevVarsFor(e *Env, altkey bool) {
	var setenv func(string, string) error
	if e != nil {
		setenv = func(k, v string) error { e.Set(k, v); return nil }
	} else {
		setenv = os.Setenv
	}

	setenv("CAMLI_AUTH", "userpass:camlistore:pass3179")
	// env values for clients. server will overwrite them anyway in its setEnvVars.
	root, err := rootInTmpDir()
	if err != nil {
		log.Fatal(err)
	}
	setenv("CAMLI_CACHE_DIR", filepath.Join(root, "client", "cache"))
	setenv("CAMLI_CONFIG_DIR", filepath.Join("config", "dev-client-dir"))

	secring := defaultSecring
	identity := defaultIdentity

	if altkey {
		secring = filepath.FromSlash("pkg/jsonsign/testdata/password-foo-secring.gpg")
		identity = "C7C3E176"
		println("**\n** Note: password is \"foo\"\n**\n")
	} else {
		if *flagSecretRing != "" {
			secring = *flagSecretRing
		}
		if *flagIdentity != "" {
			identity = *flagIdentity
		}
	}

	entity, err := jsonsign.EntityFromSecring(identity, secring)
	if err != nil {
		panic(err)
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		panic(err)
	}
	pubKeyRef := blob.SHA1FromString(armoredPublicKey)

	setenv("CAMLI_SECRET_RING", secring)
	setenv("CAMLI_KEYID", identity)
	setenv("CAMLI_PUBKEY_BLOBREF", pubKeyRef.String())
	setenv("CAMLI_KV_VERIFY", "true")
}
Example #16
func TestMultiStat(t *testing.T) {
	ds := NewStorage(t)
	defer cleanUp(ds)

	blobfoo := &test.Blob{"foo"}
	blobbar := &test.Blob{"bar!"}
	blobfoo.MustUpload(t, ds)
	blobbar.MustUpload(t, ds)

	need := make(map[blob.Ref]bool)
	need[blobfoo.BlobRef()] = true
	need[blobbar.BlobRef()] = true

	blobs := []blob.Ref{blobfoo.BlobRef(), blobbar.BlobRef()}

	// In addition to the two "foo" and "bar" blobs, add
	// maxParallelStats other dummy blobs, to exercise the stat
	// rate-limiting (which had a deadlock once after a cleanup)
	for i := 0; i < maxParallelStats; i++ {
		blobs = append(blobs, blob.SHA1FromString(strconv.Itoa(i)))
	}

	ch := make(chan blob.SizedRef, 0)
	errch := make(chan error, 1)
	go func() {
		errch <- ds.StatBlobs(ch, blobs)
		close(ch)
	}()
	got := 0
	for sb := range ch {
		got++
		if !need[sb.Ref] {
			t.Errorf("didn't need %s", sb.Ref)
		}
		delete(need, sb.Ref)
	}
	if want := 2; got != want {
		t.Errorf("number stats = %d; want %d", got, want)
	}
	if err := <-errch; err != nil {
		t.Errorf("StatBlobs: %v", err)
	}
	if len(need) != 0 {
		t.Errorf("Not all stat results returned; still need %d", len(need))
	}
}
Example #17
func TestQueryFileCandidateSource(t *testing.T) {
	testQueryTypes(t, memIndexTypes, func(qt *queryTest) {
		id := qt.id
		fileRef, _ := id.UploadFile("some-stuff.txt", "hello", time.Unix(123, 0))
		qt.t.Logf("fileRef = %q", fileRef)
		p1 := id.NewPlannedPermanode("1")
		id.SetAttribute(p1, "camliContent", fileRef.String())

		sq := &SearchQuery{
			Constraint: &Constraint{
				File: &FileConstraint{
					WholeRef: blob.SHA1FromString("hello"),
				},
			},
		}
		qt.candidateSource = "corpus_file_meta"
		qt.wantRes(sq, fileRef)
	})
}
Example #18
func (c *Client) initSignerPublicKeyBlobref() {
	configOnce.Do(parseConfig)
	keyId := config.identity
	if keyId == "" {
		log.Fatalf("No 'identity' key in JSON configuration file %q; have you run \"camput init\"?", osutil.UserClientConfigPath())
	}
	keyRing := c.SecretRingFile()
	if !fileExists(keyRing) {
		log.Fatalf("Could not find keyId %q, because secret ring file %q does not exist.", keyId, keyRing)
	}
	entity, err := jsonsign.EntityFromSecring(keyId, keyRing)
	if err != nil {
		log.Fatalf("Couldn't find keyId %q in secret ring %v: %v", keyId, keyRing, err)
	}
	armored, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		log.Fatalf("Error serializing public key: %v", err)
	}

	// TODO(mpl): completely get rid of it if possible
	// http://camlistore.org/issue/377
	selfPubKeyDir := osutil.KeyBlobsDir()
	fi, err := os.Stat(selfPubKeyDir)
	if err != nil || !fi.IsDir() {
		log.Fatalf("selfPubKeyDir as %q doesn't exist or not a directory", selfPubKeyDir)
	}

	br := blob.SHA1FromString(armored)
	pubFile := filepath.Join(selfPubKeyDir, br.String()+".camli")
	fi, err = os.Stat(pubFile)
	if err != nil {
		if !os.IsNotExist(err) {
			log.Fatalf("Could not stat %q: %v", pubFile, err)
		}
		err = ioutil.WriteFile(pubFile, []byte(armored), 0644)
		if err != nil {
			log.Fatalf("Error writing public key to %q: %v", pubFile, err)
		}
	}
	c.signerPublicKeyRef = br
	c.publicKeyArmored = armored
}
Example #19
func TestMultiStat(t *testing.T) {
	ds := NewStorage(t)
	defer cleanUp(ds)

	blobfoo := &test.Blob{"foo"}
	blobbar := &test.Blob{"bar!"}
	blobfoo.MustUpload(t, ds)
	blobbar.MustUpload(t, ds)

	need := make(map[string]bool)
	need[blobfoo.BlobRef().String()] = true
	need[blobbar.BlobRef().String()] = true

	blobs := []blob.Ref{blobfoo.BlobRef(), blobbar.BlobRef()}

	// In addition to the two "foo" and "bar" blobs, add
	// maxParallelStats other dummy blobs, to exercise the stat
	// rate-limiting (which had a deadlock once after a cleanup)
	for i := 0; i < maxParallelStats; i++ {
		blobs = append(blobs, blob.SHA1FromString(strconv.Itoa(i)))
	}

	ch := make(chan blob.SizedRef, 0)
	errch := make(chan error, 1)
	go func() {
		errch <- ds.StatBlobs(ch, blobs)
		close(ch)
	}()
	got := 0
	for sb := range ch {
		got++
		br := sb.Ref
		brstr := br.String()
		Expect(t, need[brstr], "need stat of blobref "+brstr)
		delete(need, brstr)
	}
	ExpectInt(t, 2, got, "number stat results")
	ExpectNil(t, <-errch, "result from stat")
	ExpectInt(t, 0, len(need), "all stat results needed returned")
}
Example #20
// camlistore.org/issue/305
func TestIssue305(t *testing.T) {
	var in = `{"camliVersion": 1,
  "camliType": "file",
  "fileName": "2012-03-10 15.03.18.m4v",
  "parts": [
    {
      "bytesRef": "sha1-c76d8b17b887c207875e61a77b7eccc60289e61c",
      "size": 20032564
    }
  ]
}`
	var ss superset
	if err := json.NewDecoder(strings.NewReader(in)).Decode(&ss); err != nil {
		t.Fatal(err)
	}
	inref := blob.SHA1FromString(in)
	blob, err := BlobFromReader(inref, strings.NewReader(in))
	if err != nil {
		t.Fatal(err)
	}
	if blob.BlobRef() != inref {
		t.Errorf("original ref = %s; want %s", blob.BlobRef(), inref)
	}
	bb := blob.Builder()
	jback, err := bb.JSON()
	if err != nil {
		t.Fatal(err)
	}
	if jback != in {
		t.Errorf("JSON doesn't match:\n got: %q\nwant: %q\n", jback, in)
	}
	out := bb.Blob()
	if got := out.BlobRef(); got != inref {
		t.Errorf("cloned ref = %v; want %v", got, inref)
	}
}
Example #21
File: init.go Project: rn2dy/camlistore
func (c *initCmd) RunCommand(args []string) error {
	if len(args) > 0 {
		return cmdmain.ErrUsage
	}

	if c.newKey && c.gpgkey != "" {
		log.Fatal("--newkey and --gpgkey are mutually exclusive")
	}

	blobDir := osutil.KeyBlobsDir()
	if err := os.MkdirAll(blobDir, 0700); err != nil {
		return err
	}

	var keyId string
	var err error
	secRing := osutil.IdentitySecretRing()
	if c.newKey {
		keyId, err = jsonsign.GenerateNewSecRing(secRing)
		if err != nil {
			return err
		}
	} else {
		keyId, err = c.keyId(secRing)
		if err != nil {
			return err
		}
	}

	pubArmor, err := c.getPublicKeyArmored(keyId)
	if err != nil {
		return err
	}

	bref := blob.SHA1FromString(string(pubArmor))

	keyBlobPath := path.Join(blobDir, bref.String()+".camli")
	if err = ioutil.WriteFile(keyBlobPath, pubArmor, 0644); err != nil {
		log.Fatalf("Error writing public key blob to %q: %v", keyBlobPath, err)
	}

	if ok, err := jsonsign.VerifyPublicKeyFile(keyBlobPath, keyId); !ok {
		log.Fatalf("Error verifying public key at %q: %v", keyBlobPath, err)
	}

	log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())

	if c.noconfig {
		return nil
	}

	configFilePath := osutil.UserClientConfigPath()
	_, err = os.Stat(configFilePath)
	if err == nil {
		log.Fatalf("Config file %q already exists; quitting without touching it.", configFilePath)
	}

	if f, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
		defer f.Close()
		m := &clientconfig.Config{
			Servers: map[string]*clientconfig.Server{
				"localhost": {
					Server:    "http://localhost:3179",
					IsDefault: true,
					Auth:      "localhost",
				},
			},
			Identity:     keyId,
			IgnoredFiles: []string{".DS_Store"},
		}

		jsonBytes, err := json.MarshalIndent(m, "", "  ")
		if err != nil {
			log.Fatalf("JSON serialization error: %v", err)
		}
		_, err = f.Write(jsonBytes)
		if err != nil {
			log.Fatalf("Error writing to %q: %v", configFilePath, err)
		}
		log.Printf("Wrote %q; modify as necessary.", configFilePath)
	}
	return nil
}
Example #22
func writeFileChunks(bs blobserver.StatReceiver, file *Builder, r io.Reader) (n int64, spans []span, outerr error) {
	src := &noteEOFReader{r: r}
	bufr := bufio.NewReaderSize(src, bufioReaderSize)
	spans = []span{} // the tree of spans, cut on interesting rollsum boundaries
	rs := rollsum.New()
	var last int64
	var buf bytes.Buffer
	blobSize := 0 // of the next blob being built, should be same as buf.Len()

	const chunksInFlight = 32 // at ~64 KB chunks, this is ~2MB memory per file
	gatec := syncutil.NewGate(chunksInFlight)
	firsterrc := make(chan error, 1)

	// uploadLastSpan runs in the same goroutine as the loop below and is responsible for
	// starting the upload of the contents of buf.  It returns false if there's been
	// an error and the loop below should be stopped.
	uploadLastSpan := func() bool {
		chunk := buf.String()
		buf.Reset()
		br := blob.SHA1FromString(chunk)
		spans[len(spans)-1].br = br
		select {
		case outerr = <-firsterrc:
			return false
		default:
			// No error seen so far, continue.
		}
		gatec.Start()
		go func() {
			defer gatec.Done()
			if _, err := uploadString(bs, br, chunk); err != nil {
				select {
				case firsterrc <- err:
				default:
				}
			}
		}()
		return true
	}

	for {
		c, err := bufr.ReadByte()
		if err == io.EOF {
			if n != last {
				spans = append(spans, span{from: last, to: n})
				if !uploadLastSpan() {
					return
				}
			}
			break
		}
		if err != nil {
			return 0, nil, err
		}

		buf.WriteByte(c)
		n++
		blobSize++
		rs.Roll(c)

		var bits int
		onRollSplit := rs.OnSplit()
		switch {
		case blobSize == maxBlobSize:
			bits = 20 // arbitrary node weight; 1<<20 == 1MB
		case src.sawEOF:
			// Don't split. End is coming soon enough.
			continue
		case onRollSplit && n > firstChunkSize && blobSize > tooSmallThreshold:
			bits = rs.Bits()
		case n == firstChunkSize:
			bits = 18 // 1 << 18 == 256KB
		default:
			// Don't split.
			continue
		}
		blobSize = 0

		// Take any spans from the end of the spans slice that
		// have a smaller 'bits' score and make them children
		// of this node.
		var children []span
		childrenFrom := len(spans)
		for childrenFrom > 0 && spans[childrenFrom-1].bits < bits {
			childrenFrom--
		}
		if nCopy := len(spans) - childrenFrom; nCopy > 0 {
			children = make([]span, nCopy)
			copy(children, spans[childrenFrom:])
			spans = spans[:childrenFrom]
		}

		spans = append(spans, span{from: last, to: n, bits: bits, children: children})
		last = n
		if !uploadLastSpan() {
			return
		}
	}

	// Loop was already hit earlier.
	if outerr != nil {
		return 0, nil, outerr
	}

	// Wait for all uploads to finish, one way or another, and then
	// see if any generated errors.
	// Once this loop is done, we own all the tokens in gatec, so nobody
	// else can have one outstanding.
	for i := 0; i < chunksInFlight; i++ {
		gatec.Start()
	}
	select {
	case err := <-firsterrc:
		return 0, nil, err
	default:
	}

	return n, spans, nil

}
Example #23
func (c *initCmd) RunCommand(args []string) error {
	if len(args) > 0 {
		return cmdmain.ErrUsage
	}

	var err error

	if c.dumpJSON {
		type jsonConfig struct {
			Identity_secring *jsonsign.IdentitySecring
			Client_config    *clientconfig.Config
			Server_config    *serverconfig.Config
		}
		var config jsonConfig
		// generate a new secring struct
		config.Identity_secring, err = jsonsign.GenerateNewSecRingStruct()
		if err != nil {
			return err
		}
		c.keyId = config.Identity_secring.KeyId

		// generate a new server config struct
		config.Server_config = GenerateServerConfig(c.keyId)
		// generate a new client config struct
		config.Client_config = GenerateClientConfig(c.keyId)
		jsonBytes, err := json.MarshalIndent(config, "", "  ")
		if err != nil {
			log.Fatalf("JSON serialization error: %v", err)
		}

		//log.Printf("%+#v\n", string(jsonBytes))

		_, err = os.Stdout.Write(jsonBytes)

		return err
	}

	if c.newKey && c.keyId != "" {
		log.Fatal("--newkey and --gpgkey are mutually exclusive")
	}

	if c.newKey {
		c.secretRing = osutil.DefaultSecretRingFile()
		c.keyId, err = jsonsign.GenerateNewSecRing(c.secretRing)
		if err != nil {
			return err
		}
	} else {
		if err := c.initSecretRing(); err != nil {
			return err
		}
		if err := c.initKeyId(); err != nil {
			return err
		}
	}

	pubArmor, err := c.getPublicKeyArmored()
	if err != nil {
		return err
	}

	bref := blob.SHA1FromString(string(pubArmor))

	log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())

	if c.noconfig {
		return nil
	}

	configFilePath := osutil.UserClientConfigPath()
	_, err = os.Stat(configFilePath)
	if err == nil {
		log.Fatalf("Config file %q already exists; quitting without touching it.", configFilePath)
	}
	if err := os.MkdirAll(filepath.Dir(configFilePath), 0700); err != nil {
		return err
	}
	if f, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
		defer f.Close()

		// refactored to a service routine
		m := GenerateClientConfig(c.keyId)
		jsonBytes, err := json.MarshalIndent(m, "", "  ")
		if err != nil {
			log.Fatalf("JSON serialization error: %v", err)
		}

		_, err = f.Write(jsonBytes)
		if err != nil {
			log.Fatalf("Error writing to %q: %v", configFilePath, err)
		}
		log.Printf("Wrote %q; modify as necessary.", configFilePath)
	} else {
		return fmt.Errorf("could not write client config file %v: %v", configFilePath, err)
	}
	return nil
}
Example #24
// populate fills the blobserver and, through the sync handler, the index at the same time.
func populate(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("populating %v", dbfile)
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bsRoot := filepath.Join(filepath.Dir(dbfile), "bs")
	if err := os.MkdirAll(bsRoot, 0700); err != nil {
		b.Fatal(err)
	}
	dataDir, err := os.Open("testdata")
	if err != nil {
		b.Fatal(err)
	}
	fis, err := dataDir.Readdir(-1)
	if err != nil {
		b.Fatal(err)
	}
	if len(fis) == 0 {
		b.Fatalf("no files in %s dir", "testdata")
	}

	ks := doKeyStuff(b)

	bs, err := localdisk.New(bsRoot)
	if err != nil {
		b.Fatal(err)
	}
	if _, err := blobserver.Receive(bs, ks.pubKeyRef, strings.NewReader(ks.pubKey)); err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	sh := server.NewSyncHandler("/bs/", "/index/", bs, idx, sorted.NewMemoryKeyValue())

	b.ResetTimer()
	for _, v := range fis {
		f, err := os.Open(filepath.Join(dataDir.Name(), v.Name()))
		if err != nil {
			b.Fatal(err)
		}
		td := &trackDigestReader{r: f}
		fm := schema.NewFileMap(v.Name())
		fm.SetModTime(v.ModTime())
		fileRef, err := schema.WriteFileMap(bs, fm, td)
		if err != nil {
			b.Fatal(err)
		}
		f.Close()

		unsigned := schema.NewPlannedPermanode(td.Sum())
		unsigned.SetSigner(ks.pubKeyRef)
		sr := &jsonsign.SignRequest{
			UnsignedJSON: unsigned.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: time.Unix(0, 0),
		}
		signed, err := sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		pn := blob.SHA1FromString(signed)
		// N.B: use blobserver.Receive so that the blob hub gets notified, and the blob gets enqueued into the index
		if _, err := blobserver.Receive(bs, pn, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}

		contentAttr := schema.NewSetAttributeClaim(pn, "camliContent", fileRef.String())
		claimTime, ok := fm.ModTime()
		if !ok {
			b.Fatal(err)
		}
		contentAttr.SetClaimDate(claimTime)
		contentAttr.SetSigner(ks.pubKeyRef)
		sr = &jsonsign.SignRequest{
			UnsignedJSON: contentAttr.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: claimTime,
		}
		signed, err = sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		cl := blob.SHA1FromString(signed)
		if _, err := blobserver.Receive(bs, cl, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}
	}
	sh.IdleWait()

	return idx
}
Example #25
func (ih *ImageHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file blob.Ref) {
	if !httputil.IsGet(req) {
		http.Error(rw, "Invalid method", 400)
		return
	}
	mw, mh := ih.MaxWidth, ih.MaxHeight
	if mw == 0 || mh == 0 || mw > search.MaxImageSize || mh > search.MaxImageSize {
		http.Error(rw, "bogus dimensions", 400)
		return
	}

	key := cacheKey(file.String(), mw, mh)
	etag := blob.SHA1FromString(key).String()[5:]
	inm := req.Header.Get("If-None-Match")
	if inm != "" {
		if strings.Trim(inm, `"`) == etag {
			rw.WriteHeader(http.StatusNotModified)
			return
		}
	} else {
		if !disableThumbCache && req.Header.Get("If-Modified-Since") != "" {
			rw.WriteHeader(http.StatusNotModified)
			return
		}
	}

	var imageData []byte
	format := ""
	cacheHit := false
	if ih.thumbMeta != nil && !disableThumbCache {
		var buf bytes.Buffer
		format = ih.scaledCached(&buf, file)
		if format != "" {
			cacheHit = true
			imageData = buf.Bytes()
		}
	}

	if !cacheHit {
		imi, err := singleResize.Do(key, func() (interface{}, error) {
			return ih.scaleImage(file)
		})
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}
		im := imi.(*formatAndImage)
		imageData = im.image
		if ih.thumbMeta != nil {
			err := ih.cacheScaled(bytes.NewReader(imageData), key)
			if err != nil {
				log.Printf("image resize: %v", err)
			}
		}
	}

	h := rw.Header()
	if !disableThumbCache {
		h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat))
		h.Set("Last-Modified", time.Now().Format(http.TimeFormat))
		h.Set("Etag", strconv.Quote(etag))
	}
	h.Set("Content-Type", imageContentTypeOfFormat(format))
	size := len(imageData)
	h.Set("Content-Length", fmt.Sprint(size))
	imageBytesServedVar.Add(int64(size))

	if req.Method == "GET" {
		n, err := rw.Write(imageData)
		if err != nil {
			if strings.Contains(err.Error(), "broken pipe") {
				// boring.
				return
			}
			// TODO: vlog this:
			log.Printf("error serving thumbnail of file schema %s: %v", file, err)
			return
		}
		if n != size {
			log.Printf("error serving thumbnail of file schema %s: sent %d, expected size of %d",
				file, n, size)
			return
		}
	}
}
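A hedged client-side sketch of the conditional-request path this handler supports. The thumbnail URL is hypothetical, and etag is assumed to hold the quoted value from a previous response's Etag header (which the handler derives from blob.SHA1FromString(key)):

// Revalidate a cached thumbnail; the handler answers 304 Not Modified when the
// If-None-Match value (quotes stripped) matches the etag it computes for the key.
req, err := http.NewRequest("GET", "http://example.com/thumbnail/sha1-xxx", nil) // hypothetical URL
if err != nil {
	log.Fatal(err)
}
req.Header.Set("If-None-Match", etag) // etag: quoted value saved from an earlier response
resp, err := http.DefaultClient.Do(req)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotModified {
	log.Print("cached thumbnail is still valid")
}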
Example #26
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blob.SizedRef) error {
	sf, ok := blobReceiver.(blob.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blob.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.Ref)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.Ref.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.Ref.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.Ref.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: json decoding error: %v", err)
	}

	// The file schema must have a modtime to vivify, as the modtime is used for all three of:
	// 1) the permanode's signature
	// 2) the camliContent attribute claim's "claimDate"
	// 3) the signature time of 2)
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}

	permanodeBB := schema.NewHashPlannedPermanode(h)
	permanodeBB.SetSigner(blob.MustParse(publicKeyBlobRef))
	permanodeBB.SetClaimDate(claimDate)
	permanodeSigned, err := sigHelper.Sign(permanodeBB)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
	}
	permanodeRef := blob.SHA1FromString(permanodeSigned)
	_, err = blobserver.ReceiveNoHash(blobReceiver, permanodeRef, strings.NewReader(permanodeSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
	}

	contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.Ref.String())
	contentClaimBB.SetSigner(blob.MustParse(publicKeyBlobRef))
	contentClaimBB.SetClaimDate(claimDate)
	contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	contentClaimRef := blob.SHA1FromString(contentClaimSigned)
	_, err = blobserver.ReceiveNoHash(blobReceiver, contentClaimRef, strings.NewReader(contentClaimSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
	}
	return nil
}
Example #27
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	var filebb *schema.Builder
	if up.fileOpts.contentsOnly {
		filebb = schema.NewFileMap("")
	} else {
		filebb = schema.NewCommonFileMap(n.fullPath, n.fi)
	}
	filebb.SetType("file")

	up.fdGate.Start()
	defer up.fdGate.Done()

	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if !up.fileOpts.contentsOnly {
		if up.fileOpts.exifTime {
			ra, ok := file.(io.ReaderAt)
			if !ok {
				return nil, errors.New("Error asserting local file to io.ReaderAt")
			}
			modtime, err := schema.FileTime(ra)
			if err != nil {
				log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
			} else {
				filebb.SetModTime(modtime)
			}
		}
		if up.fileOpts.wantCapCtime() {
			filebb.CapCreationTime()
		}
	}

	var (
		size                           = n.fi.Size()
		fileContents io.Reader         = io.LimitReader(file, size)
		br           blob.Ref          // of file schemaref
		sum          string            // sha1 hashsum of the file to upload
		pr           *client.PutResult // of the final "file" schema blob
	)

	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				android.NoteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}

	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blob.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     uint32(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		android.NoteFileUploaded(n.fullPath, true)
		return pr, nil
	}

	if !br.Valid() {
		// br still zero means fileMapFromDuplicate did not find the file on the server,
		// and the file has not just been uploaded subsequently to a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}

	// The work for those planned permanodes (and the claims) is redone
	// every time we get here (i.e. past the stat cache). However, they're
	// caught by the have cache, so they won't be reuploaded for nothing
	// at least.
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime, ok := filebb.ModTime()
		if !ok {
			return nil, fmt.Errorf("couldn't get modtime for file %v", n.fullPath)
		}
		err = up.uploadFilePermanode(sum, br, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
	}

	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross.  should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this?  or make
	// schemaWriteFileMap return it?
	json, _ := filebb.JSON()
	pr = &client.PutResult{BlobRef: br, Size: uint32(len(json)), Skipped: false}
	return pr, nil
}
Example #28
// genLowLevelConfig returns a low-level config from a high-level config.
func genLowLevelConfig(conf *serverconfig.Config) (lowLevelConf *Config, err error) {
	obj := jsonconfig.Obj{}
	if conf.HTTPS {
		if (conf.HTTPSCert != "") != (conf.HTTPSKey != "") {
			return nil, errors.New("Must set both httpsCert and httpsKey (or neither to generate a self-signed cert)")
		}
		if conf.HTTPSCert != "" {
			obj["httpsCert"] = conf.HTTPSCert
			obj["httpsKey"] = conf.HTTPSKey
		} else {
			obj["httpsCert"] = osutil.DefaultTLSCert()
			obj["httpsKey"] = osutil.DefaultTLSKey()
		}
	}

	if conf.BaseURL != "" {
		u, err := url.Parse(conf.BaseURL)
		if err != nil {
			return nil, fmt.Errorf("Error parsing baseURL %q as a URL: %v", conf.BaseURL, err)
		}
		if u.Path != "" && u.Path != "/" {
			return nil, fmt.Errorf("baseURL can't have a path, only a scheme, host, and optional port.")
		}
		u.Path = ""
		obj["baseURL"] = u.String()
	}
	if conf.Listen != "" {
		obj["listen"] = conf.Listen
	}
	obj["https"] = conf.HTTPS
	obj["auth"] = conf.Auth

	username := ""
	if conf.DBName == "" {
		username = osutil.Username()
		if username == "" {
			return nil, fmt.Errorf("USER (USERNAME on windows) env var not set; needed to define dbname")
		}
		conf.DBName = "camli" + username
	}

	var indexerPath string
	numIndexers := numSet(conf.Mongo, conf.MySQL, conf.PostgreSQL, conf.SQLite, conf.KVFile)
	runIndex := conf.RunIndex.Get()
	switch {
	case runIndex && numIndexers == 0:
		return nil, fmt.Errorf("Unless runIndex is set to false, you must specify an index option (kvIndexFile, mongo, mysql, postgres, sqlite).")
	case runIndex && numIndexers != 1:
		return nil, fmt.Errorf("With runIndex set true, you can only pick exactly one indexer (mongo, mysql, postgres, sqlite).")
	case !runIndex && numIndexers != 0:
		return nil, fmt.Errorf("With runIndex disabled, you can't specify any of mongo, mysql, postgres, sqlite.")
	case conf.MySQL != "":
		indexerPath = "/index-mysql/"
	case conf.PostgreSQL != "":
		indexerPath = "/index-postgres/"
	case conf.Mongo != "":
		indexerPath = "/index-mongo/"
	case conf.SQLite != "":
		indexerPath = "/index-sqlite/"
	case conf.KVFile != "":
		indexerPath = "/index-kv/"
	}

	entity, err := jsonsign.EntityFromSecring(conf.Identity, conf.IdentitySecretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}

	nolocaldisk := conf.BlobPath == ""
	if nolocaldisk {
		if conf.S3 == "" && conf.GoogleCloudStorage == "" {
			return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 or googlecloudstorage configured for a blobserver.")
		}
		if conf.S3 != "" && conf.GoogleCloudStorage != "" {
			return nil, errors.New("Using S3 as a primary storage and Google Cloud Storage as a mirror is not supported for now.")
		}
	}

	if conf.ShareHandler && conf.ShareHandlerPath == "" {
		conf.ShareHandlerPath = "/share/"
	}

	prefixesParams := &configPrefixesParams{
		secretRing:       conf.IdentitySecretRing,
		keyId:            conf.Identity,
		indexerPath:      indexerPath,
		blobPath:         conf.BlobPath,
		packBlobs:        conf.PackBlobs,
		searchOwner:      blob.SHA1FromString(armoredPublicKey),
		shareHandlerPath: conf.ShareHandlerPath,
		flickr:           conf.Flickr,
		memoryIndex:      conf.MemoryIndex.Get(),
	}

	prefixes := genLowLevelPrefixes(prefixesParams, conf.OwnerName)
	var cacheDir string
	if nolocaldisk {
		// Whether camlistored is run from EC2 or not, we use
		// a temp dir as the cache when primary storage is S3.
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		cacheDir = filepath.Join(tempDir(), "camli-cache")
	} else {
		cacheDir = filepath.Join(conf.BlobPath, "cache")
	}
	if !noMkdir {
		if err := os.MkdirAll(cacheDir, 0700); err != nil {
			return nil, fmt.Errorf("Could not create blobs cache dir %s: %v", cacheDir, err)
		}
	}

	published := []interface{}{}
	if len(conf.Publish) > 0 {
		if !runIndex {
			return nil, fmt.Errorf("publishing requires an index")
		}
		published, err = addPublishedConfig(prefixes, conf.Publish, conf.SourceRoot)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}

	if runIndex {
		addUIConfig(prefixesParams, prefixes, "/ui/", published, conf.SourceRoot)
	}

	if conf.MySQL != "" {
		addMySQLConfig(prefixes, conf.DBName, conf.MySQL)
	}
	if conf.PostgreSQL != "" {
		addPostgresConfig(prefixes, conf.DBName, conf.PostgreSQL)
	}
	if conf.Mongo != "" {
		addMongoConfig(prefixes, conf.DBName, conf.Mongo)
	}
	if conf.SQLite != "" {
		addSQLiteConfig(prefixes, conf.SQLite)
	}
	if conf.KVFile != "" {
		addKVConfig(prefixes, conf.KVFile)
	}
	if conf.S3 != "" {
		if err := addS3Config(prefixesParams, prefixes, conf.S3); err != nil {
			return nil, err
		}
	}
	if conf.GoogleDrive != "" {
		if err := addGoogleDriveConfig(prefixes, conf.GoogleDrive); err != nil {
			return nil, err
		}
	}
	if conf.GoogleCloudStorage != "" {
		if err := addGoogleCloudStorageConfig(prefixes, conf.GoogleCloudStorage); err != nil {
			return nil, err
		}
	}

	obj["prefixes"] = (map[string]interface{})(prefixes)

	lowLevelConf = &Config{
		Obj: obj,
	}
	return lowLevelConf, nil
}
Example #29
// genLowLevelConfig returns a low-level config from a high-level config.
func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
	var (
		baseURL    = conf.OptionalString("baseURL", "")
		listen     = conf.OptionalString("listen", "")
		auth       = conf.RequiredString("auth")
		keyId      = conf.RequiredString("identity")
		secretRing = conf.RequiredString("identitySecretRing")
		tlsOn      = conf.OptionalBool("https", false)
		tlsCert    = conf.OptionalString("HTTPSCertFile", "")
		tlsKey     = conf.OptionalString("HTTPSKeyFile", "")

		// Blob storage options
		blobPath           = conf.OptionalString("blobPath", "")
		packBlobs          = conf.OptionalBool("packBlobs", false)         // use diskpacked instead of the default filestorage
		s3                 = conf.OptionalString("s3", "")                 // "access_key_id:secret_access_key:bucket[:hostname]"
		googlecloudstorage = conf.OptionalString("googlecloudstorage", "") // "clientId:clientSecret:refreshToken:bucket"
		googledrive        = conf.OptionalString("googledrive", "")        // "clientId:clientSecret:refreshToken:parentId"
		swift              = conf.OptionalString("swift", "")              // "tenant:secret:container:auth_url"
		// Enable the share handler. If true, and shareHandlerPath is empty,
		// then shareHandlerPath defaults to "/share/".
		shareHandler = conf.OptionalBool("shareHandler", false)
		// URL prefix for the share handler. If set, overrides shareHandler.
		shareHandlerPath = conf.OptionalString("shareHandlerPath", "")

		// Index options
		memoryIndex = conf.OptionalBool("memoryIndex", true) // copy disk-based index to memory on start-up
		runIndex    = conf.OptionalBool("runIndex", true)    // if false: no search, no UI, etc.
		dbname      = conf.OptionalString("dbname", "")      // for mysql, postgres, mongo
		mysql       = conf.OptionalString("mysql", "")
		postgres    = conf.OptionalString("postgres", "")
		mongo       = conf.OptionalString("mongo", "")
		sqliteFile  = conf.OptionalString("sqlite", "")
		kvFile      = conf.OptionalString("kvIndexFile", "")

		// Importer options
		flickr = conf.OptionalString("flickr", "")

		_       = conf.OptionalList("replicateTo")
		publish = conf.OptionalObject("publish")
		// alternative source tree, to override the embedded ui and/or closure resources.
		// If non empty, the ui files will be expected at
		// sourceRoot + "/server/camlistored/ui" and the closure library at
		// sourceRoot + "/third_party/closure/lib"
		// Also used by the publish handler.
		sourceRoot = conf.OptionalString("sourceRoot", "")

		ownerName = conf.OptionalString("ownerName", "")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	obj := jsonconfig.Obj{}
	if tlsOn {
		if (tlsCert != "") != (tlsKey != "") {
			return nil, errors.New("Must set both TLSCertFile and TLSKeyFile (or neither to generate a self-signed cert)")
		}
		if tlsCert != "" {
			obj["TLSCertFile"] = tlsCert
			obj["TLSKeyFile"] = tlsKey
		} else {
			obj["TLSCertFile"] = osutil.DefaultTLSCert()
			obj["TLSKeyFile"] = osutil.DefaultTLSKey()
		}
	}

	if baseURL != "" {
		u, err := url.Parse(baseURL)
		if err != nil {
			return nil, fmt.Errorf("Error parsing baseURL %q as a URL: %v", baseURL, err)
		}
		if u.Path != "" && u.Path != "/" {
			return nil, fmt.Errorf("baseURL can't have a path, only a scheme, host, and optional port.")
		}
		u.Path = ""
		obj["baseURL"] = u.String()
	}
	if listen != "" {
		obj["listen"] = listen
	}
	obj["https"] = tlsOn
	obj["auth"] = auth

	username := ""
	if dbname == "" {
		username = osutil.Username()
		if username == "" {
			return nil, fmt.Errorf("USER (USERNAME on windows) env var not set; needed to define dbname")
		}
		dbname = "camli" + username
	}

	var indexerPath string
	numIndexers := numSet(mongo, mysql, postgres, sqliteFile, kvFile)
	switch {
	case runIndex && numIndexers == 0:
		return nil, fmt.Errorf("Unless runIndex is set to false, you must specify an index option (kvIndexFile, mongo, mysql, postgres, sqlite).")
	case runIndex && numIndexers != 1:
		return nil, fmt.Errorf("With runIndex set true, you can only pick exactly one indexer (mongo, mysql, postgres, sqlite).")
	case !runIndex && numIndexers != 0:
		return nil, fmt.Errorf("With runIndex disabled, you can't specify any of mongo, mysql, postgres, sqlite.")
	case mysql != "":
		indexerPath = "/index-mysql/"
	case postgres != "":
		indexerPath = "/index-postgres/"
	case mongo != "":
		indexerPath = "/index-mongo/"
	case sqliteFile != "":
		indexerPath = "/index-sqlite/"
	case kvFile != "":
		indexerPath = "/index-kv/"
	}

	entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}

	nolocaldisk := blobPath == ""
	if nolocaldisk {
		if s3 == "" && googlecloudstorage == "" {
			return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 or googlecloudstorage configured for a blobserver.")
		}
		if s3 != "" && googlecloudstorage != "" {
			return nil, errors.New("Using S3 as a primary storage and Google Cloud Storage as a mirror is not supported for now.")
		}
	}

	if shareHandler && shareHandlerPath == "" {
		shareHandlerPath = "/share/"
	}

	prefixesParams := &configPrefixesParams{
		secretRing:       secretRing,
		keyId:            keyId,
		indexerPath:      indexerPath,
		blobPath:         blobPath,
		packBlobs:        packBlobs,
		searchOwner:      blob.SHA1FromString(armoredPublicKey),
		shareHandlerPath: shareHandlerPath,
		flickr:           flickr,
		memoryIndex:      memoryIndex,
	}

	prefixes := genLowLevelPrefixes(prefixesParams, ownerName)
	var cacheDir string
	if nolocaldisk {
		// Whether camlistored is run from EC2 or not, we use
		// a temp dir as the cache when primary storage is S3.
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		cacheDir = filepath.Join(tempDir(), "camli-cache")
	} else {
		cacheDir = filepath.Join(blobPath, "cache")
	}
	if !noMkdir {
		if err := os.MkdirAll(cacheDir, 0700); err != nil {
			return nil, fmt.Errorf("Could not create blobs cache dir %s: %v", cacheDir, err)
		}
	}

	published := []interface{}{}
	if len(publish) > 0 {
		if !runIndex {
			return nil, fmt.Errorf("publishing requires an index")
		}
		published, err = addPublishedConfig(prefixes, publish, sourceRoot)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}

	if runIndex {
		addUIConfig(prefixesParams, prefixes, "/ui/", published, sourceRoot)
	}

	if mysql != "" {
		addMySQLConfig(prefixes, dbname, mysql)
	}
	if postgres != "" {
		addPostgresConfig(prefixes, dbname, postgres)
	}
	if mongo != "" {
		addMongoConfig(prefixes, dbname, mongo)
	}
	if sqliteFile != "" {
		addSQLiteConfig(prefixes, sqliteFile)
	}
	if kvFile != "" {
		addKVConfig(prefixes, kvFile)
	}
	if s3 != "" {
		if err := addS3Config(prefixesParams, prefixes, s3); err != nil {
			return nil, err
		}
	}
	if googledrive != "" {
		if err := addGoogleDriveConfig(prefixes, googledrive); err != nil {
			return nil, err
		}
	}
	if googlecloudstorage != "" {
		if err := addGoogleCloudStorageConfig(prefixes, googlecloudstorage); err != nil {
			return nil, err
		}
	}
	if swift != "" {
		if err := addSwiftConfig(prefixesParams, prefixes, swift); err != nil {
			return nil, err
		}
	}

	obj["prefixes"] = (map[string]interface{})(prefixes)

	lowLevelConf = &Config{
		Obj:        obj,
		configPath: conf.configPath,
	}
	return lowLevelConf, nil
}
Example #30
File: upload.go Project: pgu/camlistore
// NewUploadHandleFromString returns an upload handle
func NewUploadHandleFromString(data string) *UploadHandle {
	bref := blob.SHA1FromString(data)
	r := strings.NewReader(data)
	return &UploadHandle{BlobRef: bref, Size: int64(len(data)), Contents: r}
}
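A minimal usage sketch, assuming an already-configured *client.Client named cl: the handle's BlobRef is precomputed with blob.SHA1FromString, so the upload is content-addressed before any bytes leave the client.

h := client.NewUploadHandleFromString("hello, world")
pr, err := cl.Upload(h) // cl is an assumed, configured *client.Client
if err != nil {
	log.Fatal(err)
}
log.Printf("uploaded %v (%d bytes), skipped=%v", pr.BlobRef, pr.Size, pr.Skipped)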