Example #1
// This is the simple 1MB chunk version. The rolling checksum version is below.
func WriteFileMap(bs blobserver.StatReceiver, fileMap map[string]interface{}, r io.Reader) (*blobref.BlobRef, error) {
	parts, size := []BytesPart{}, int64(0)

	buf := new(bytes.Buffer)
	for {
		buf.Reset()

		n, err := io.Copy(buf, io.LimitReader(r, 1<<20))
		if err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}

		hash := crypto.SHA1.New()
		io.Copy(hash, bytes.NewBuffer(buf.Bytes()))
		br := blobref.FromHash("sha1", hash)
		hasBlob, err := serverHasBlob(bs, br)
		if err != nil {
			return nil, err
		}
		if !hasBlob {
			sb, err := bs.ReceiveBlob(br, buf)
			if err != nil {
				return nil, err
			}
			if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
				return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
			}
		}

		size += n
		parts = append(parts, BytesPart{
			BlobRef: br,
			Size:    uint64(n),
			Offset:  0, // into BlobRef to read from (not of dest)
		})
	}

	err := PopulateParts(fileMap, size, parts)
	if err != nil {
		return nil, err
	}

	json, err := MapToCamliJSON(fileMap)
	if err != nil {
		return nil, err
	}
	br := blobref.SHA1FromString(json)
	sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
	if err != nil {
		return nil, err
	}
	if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
		return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
	}

	return br, nil
}
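
A minimal usage sketch for the function above. It assumes bs is any blobserver.StatReceiver implementation and that a NewFileMap constructor exists in this schema package (as in the map-based schema era); both are assumptions, not part of the example:

// Sketch: store a local file with the 1MB chunker.
func storeFileExample(bs blobserver.StatReceiver) error {
	f, err := os.Open("/tmp/example.dat")
	if err != nil {
		return err
	}
	defer f.Close()
	fileMap := NewFileMap("example.dat") // assumed constructor
	br, err := WriteFileMap(bs, fileMap, f)
	if err != nil {
		return err
	}
	log.Printf("file schema blob: %s", br)
	return nil
}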
Example #2
// uploadBytes populates bb (a builder of either type "bytes" or
// "file", which is a superset of "bytes"), sets it to the provided
// size, and populates with provided spans.  The bytes or file schema
// blob is uploaded and its blobref is returned.
func uploadBytes(bs blobserver.StatReceiver, bb *Builder, size int64, s []span) *uploadBytesFuture {
	future := newUploadBytesFuture()
	parts := []BytesPart{}
	addBytesParts(bs, &parts, s, future)

	if err := bb.PopulateParts(size, parts); err != nil {
		future.errc <- err
		return future
	}

	// Hack until camlistore.org/issue/102 is fixed. If we happen to upload
	// the "file" schema before any of its parts arrive, then the indexer
	// can get confused.  So wait on the parts before, and then upload
	// the "file" blob afterwards.
	if bb.Type() == "file" {
		future.errc <- nil
		_, err := future.Get() // may not be nil, if children parts failed
		future = newUploadBytesFuture()
		if err != nil {
			future.errc <- err
			return future
		}
	}

	json := bb.Blob().JSON()
	br := blobref.SHA1FromString(json)
	future.br = br
	go func() {
		_, err := uploadString(bs, br, json)
		future.errc <- err
	}()
	return future
}
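
The uploadBytesFuture type is not shown in this excerpt. Below is a minimal sketch reconstructed from its usage above (errc, br, Get); the real Camlistore type also tracks child futures, and this exact layout is an assumption:

// Sketch (assumed layout): a future for one bytes/file schema upload.
type uploadBytesFuture struct {
	errc     chan error       // receives this node's upload result exactly once
	br       *blobref.BlobRef // set before the upload goroutine starts
	children []*uploadBytesFuture
}

func newUploadBytesFuture() *uploadBytesFuture {
	// Buffered so the sending goroutine never blocks.
	return &uploadBytesFuture{errc: make(chan error, 1)}
}

// Get blocks until this upload and all of its children finish,
// returning the first error seen.
func (f *uploadBytesFuture) Get() (*blobref.BlobRef, error) {
	for _, child := range f.children {
		if _, err := child.Get(); err != nil {
			return nil, err
		}
	}
	return f.br, <-f.errc
}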
Example #3
// This is the simple 1MB chunk version. The rolling checksum version is below.
func writeFileMapOld(bs blobserver.StatReceiver, file *Builder, r io.Reader) (*blobref.BlobRef, error) {
	parts, size := []BytesPart{}, int64(0)

	var buf bytes.Buffer
	for {
		buf.Reset()
		n, err := io.Copy(&buf, io.LimitReader(r, maxBlobSize))
		if err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}

		hash := blobref.NewHash()
		io.Copy(hash, bytes.NewReader(buf.Bytes()))
		br := blobref.FromHash(hash)
		hasBlob, err := serverHasBlob(bs, br)
		if err != nil {
			return nil, err
		}
		if !hasBlob {
			sb, err := bs.ReceiveBlob(br, &buf)
			if err != nil {
				return nil, err
			}
			if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
				return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
			}
		}

		size += n
		parts = append(parts, BytesPart{
			BlobRef: br,
			Size:    uint64(n),
			Offset:  0, // into BlobRef to read from (not of dest)
		})
	}

	err := file.PopulateParts(size, parts)
	if err != nil {
		return nil, err
	}

	json := file.Blob().JSON()
	br := blobref.SHA1FromString(json)
	sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
	if err != nil {
		return nil, err
	}
	if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
		return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
	}

	return br, nil
}
Example #4
// TODO: move to config package?
func SignerPublicKeyBlobref() *blobref.BlobRef {
	configOnce.Do(parseConfig)
	key := "keyId"
	keyId, ok := config[key].(string)
	if !ok {
		log.Printf("No key %q in JSON configuration file %q; have you run \"camput init\"?", key, ConfigFilePath())
		return nil
	}
	keyRing, hasKeyRing := config["secretRing"].(string)
	if !hasKeyRing {
		if fn := osutil.IdentitySecretRing(); fileExists(fn) {
			keyRing = fn
		} else if fn := jsonsign.DefaultSecRingPath(); fileExists(fn) {
			keyRing = fn
		} else {
			log.Printf("Couldn't find keyId %q; no 'secretRing' specified in config file, and no standard secret ring files exist.")
			return nil
		}
	}
	entity, err := jsonsign.EntityFromSecring(keyId, keyRing)
	if err != nil {
		log.Printf("Couldn't find keyId %q in secret ring: %v", keyId, err)
		return nil
	}
	armored, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		log.Printf("Error serializing public key: %v", err)
		return nil
	}

	selfPubKeyDir, ok := config["selfPubKeyDir"].(string)
	if !ok {
		log.Printf("No 'selfPubKeyDir' defined in %q", ConfigFilePath())
		return nil
	}
	fi, err := os.Stat(selfPubKeyDir)
	if err != nil || !fi.IsDir() {
		log.Printf("selfPubKeyDir of %q doesn't exist or not a directory", selfPubKeyDir)
		return nil
	}

	br := blobref.SHA1FromString(armored)

	pubFile := filepath.Join(selfPubKeyDir, br.String()+".camli")
	log.Printf("key file: %q", pubFile)
	fi, err = os.Stat(pubFile)
	if err != nil {
		err = ioutil.WriteFile(pubFile, []byte(armored), 0644)
		if err != nil {
			log.Printf("Error writing public key to %q: %v", pubFile, err)
			return nil
		}
	}

	return br
}
Example #5
func uploadString(bs blobserver.StatReceiver, s string) (*blobref.BlobRef, error) {
	br := blobref.SHA1FromString(s)
	hasIt, err := serverHasBlob(bs, br)
	if err != nil {
		return nil, err
	}
	if hasIt {
		return br, nil
	}
	_, err = bs.ReceiveBlob(br, strings.NewReader(s))
	if err != nil {
		return nil, err
	}
	return br, nil
}
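
uploadString depends on a serverHasBlob helper that is not shown. Here is a sketch of how it could be written; the StatBlobs signature used (a channel destination plus a wait argument) is an assumption about the blobserver interface of this era, not a confirmed API:

// Sketch (assumed StatBlobs signature): ask the server which of the
// requested blobs it already has; receiving br back means "have it".
func serverHasBlob(bs blobserver.StatReceiver, br *blobref.BlobRef) (bool, error) {
	ch := make(chan blobref.SizedBlobRef, 1)
	errc := make(chan error, 1)
	go func() {
		errc <- bs.StatBlobs(ch, []*blobref.BlobRef{br}, 0)
		close(ch)
	}()
	have := false
	for _ = range ch {
		have = true
	}
	return have, <-errc
}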
Example #6
func writeFileChunks(bs blobserver.StatReceiver, file *Builder, r io.Reader) (n int64, spans []span, outerr error) {
	src := &noteEOFReader{r: r}
	bufr := bufio.NewReaderSize(src, bufioReaderSize)
	spans = []span{} // the tree of spans, cut on interesting rollsum boundaries
	rs := rollsum.New()
	var last int64
	var buf bytes.Buffer
	blobSize := 0 // of the next blob being built, should be same as buf.Len()

	const chunksInFlight = 32 // at ~64 KB chunks, this is ~2MB memory per file
	gatec := make(chan bool, chunksInFlight)
	firsterrc := make(chan error, 1)

	// uploadLastSpan runs in the same goroutine as the loop below and is responsible for
	// starting uploading the contents of the buf.  It returns false if there's been
	// an error and the loop below should be stopped.
	uploadLastSpan := func() bool {
		chunk := buf.String()
		buf.Reset()
		br := blobref.SHA1FromString(chunk)
		spans[len(spans)-1].br = br
		select {
		case outerr = <-firsterrc:
			return false
		default:
			// No error seen so far, continue.
		}
		gatec <- true
		go func() {
			if _, err := uploadString(bs, br, chunk); err != nil {
				select {
				case firsterrc <- err:
				default:
				}
			}
			<-gatec
		}()
		return true
	}

	for {
		c, err := bufr.ReadByte()
		if err == io.EOF {
			if n != last {
				spans = append(spans, span{from: last, to: n})
				if !uploadLastSpan() {
					return
				}
			}
			break
		}
		if err != nil {
			return 0, nil, err
		}

		buf.WriteByte(c)
		n++
		blobSize++
		rs.Roll(c)

		var bits int
		onRollSplit := rs.OnSplit()
		switch {
		case blobSize == maxBlobSize:
			bits = 20 // arbitrary node weight; 1<<20 == 1MB
		case src.sawEOF:
			// Don't split. End is coming soon enough.
			continue
		case onRollSplit && n > firstChunkSize && blobSize > tooSmallThreshold:
			bits = rs.Bits()
		case n == firstChunkSize:
			bits = 18 // 1 << 18 == 256KB
		default:
			// Don't split.
			continue
		}
		blobSize = 0

		// Take any spans from the end of the spans slice that
		// have a smaller 'bits' score and make them children
		// of this node.
		var children []span
		childrenFrom := len(spans)
		for childrenFrom > 0 && spans[childrenFrom-1].bits < bits {
			childrenFrom--
		}
		if nCopy := len(spans) - childrenFrom; nCopy > 0 {
			children = make([]span, nCopy)
			copy(children, spans[childrenFrom:])
			spans = spans[:childrenFrom]
		}

		spans = append(spans, span{from: last, to: n, bits: bits, children: children})
		last = n
		if !uploadLastSpan() {
			return
		}
	}

	// Any upload error in the loop above was already returned there;
	// this check is defensive.
	if outerr != nil {
		return 0, nil, outerr
	}

	// Wait for all uploads to finish, one way or another, and then
	// see if any generated errors.
	// Once this loop is done, we own all the tokens in gatec, so nobody
	// else can have one outstanding.
	for i := 0; i < chunksInFlight; i++ {
		gatec <- true
	}
	select {
	case err := <-firsterrc:
		return 0, nil, err
	default:
	}

	return n, spans, nil

}
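
The span type is only used, never defined, in these examples. Below is a sketch reconstructed from its usage here and in WriteFileMapRolling further down; the field layout is inferred, not taken from the real source:

// Sketch (inferred layout): one contiguous chunk of the file, plus
// any smaller-scored spans that were folded under it as children.
type span struct {
	from, to int64            // byte range [from, to) of the input
	bits     int              // rollsum score of the cut ending this span
	br       *blobref.BlobRef // blob holding bytes [from, to)
	children []span           // subtree cut on weaker boundaries
}

// size is the total number of bytes covered, children included.
func (s *span) size() int64 {
	size := s.to - s.from
	for _, cs := range s.children {
		size += cs.size()
	}
	return size
}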
Example #7
func (c *initCmd) RunCommand(args []string) error {
	if len(args) > 0 {
		return cmdmain.ErrUsage
	}

	if c.newKey && c.gpgkey != "" {
		log.Fatal("--newkey and --gpgkey are mutually exclusive")
	}

	blobDir := path.Join(osutil.CamliConfigDir(), "keyblobs")
	os.Mkdir(osutil.CamliConfigDir(), 0700)
	os.Mkdir(blobDir, 0700)

	var keyId string
	var err error
	secRing := osutil.IdentitySecretRing()
	if c.newKey {
		keyId, err = jsonsign.GenerateNewSecRing(secRing)
		if err != nil {
			return err
		}
	} else {
		keyId, err = c.keyId(secRing)
		if err != nil {
			return err
		}
	}

	if os.Getenv("GPG_AGENT_INFO") == "" {
		log.Printf("No GPG_AGENT_INFO found in environment; you should setup gnupg-agent.  camput might be annoying otherwise, if your private key is encrypted.")
	}

	pubArmor, err := c.getPublicKeyArmored(keyId)
	if err != nil {
		return err
	}

	bref := blobref.SHA1FromString(string(pubArmor))

	keyBlobPath := path.Join(blobDir, bref.String()+".camli")
	if err = ioutil.WriteFile(keyBlobPath, pubArmor, 0644); err != nil {
		log.Fatalf("Error writing public key blob to %q: %v", keyBlobPath, err)
	}

	if ok, err := jsonsign.VerifyPublicKeyFile(keyBlobPath, keyId); !ok {
		log.Fatalf("Error verifying public key at %q: %v", keyBlobPath, err)
	}

	log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())

	configFilePath := osutil.UserClientConfigPath()
	_, err = os.Stat(configFilePath)
	if err == nil {
		log.Fatalf("Config file %q already exists; quitting without touching it.", configFilePath)
	}

	if f, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
		defer f.Close()
		m := make(map[string]interface{})
		m["keyId"] = keyId                    // TODO(bradfitz): make this 'identity' to match server config?
		m["publicKeyBlobref"] = bref.String() // TODO(bradfitz): not used anymore?
		m["server"] = "http://localhost:3179/"
		m["selfPubKeyDir"] = blobDir
		m["auth"] = "localhost"

		jsonBytes, err := json.MarshalIndent(m, "", "  ")
		if err != nil {
			log.Fatalf("JSON serialization error: %v", err)
		}
		_, err = f.Write(jsonBytes)
		if err != nil {
			log.Fatalf("Error writing to %q: %v", configFilePath, err)
		}
		log.Printf("Wrote %q; modify as necessary.", configFilePath)
	}
	return nil
}
Example #8
func NewUploadHandleFromString(data string) *UploadHandle {
	bref := blobref.SHA1FromString(data)
	r := strings.NewReader(data)
	return &UploadHandle{BlobRef: bref, Size: int64(len(data)), Contents: r}
}
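
A short usage sketch for the constructor above, assuming it lives in the client package alongside a *Client whose Upload method accepts this handle (the uploadExample wrapper is hypothetical):

// Sketch: upload a small in-memory blob via an existing client.
func uploadExample(c *Client) error {
	h := NewUploadHandleFromString(`{"camliVersion": 1, "camliType": "bytes"}`)
	pr, err := c.Upload(h)
	if err != nil {
		return err
	}
	log.Printf("uploaded %s (%d bytes), skipped=%v", pr.BlobRef, pr.Size, pr.Skipped)
	return nil
}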
Example #9
func (c *initCmd) RunCommand(_ *Uploader, args []string) error {
	if len(args) > 0 {
		return ErrUsage
	}

	blobDir := path.Join(osutil.CamliConfigDir(), "keyblobs")
	os.Mkdir(osutil.CamliConfigDir(), 0700)
	os.Mkdir(blobDir, 0700)

	keyId, err := c.keyId()
	if err != nil {
		return err
	}

	if os.Getenv("GPG_AGENT_INFO") == "" {
		log.Printf("No GPG_AGENT_INFO found in environment; you should setup gnupg-agent.  camput might be annoying otherwise, if your private key is encrypted.")
	}

	pubArmor, err := c.getPublicKeyArmored(keyId)
	if err != nil {
		return err
	}

	bref := blobref.SHA1FromString(string(pubArmor))

	keyBlobPath := path.Join(blobDir, bref.String()+".camli")
	if err = ioutil.WriteFile(keyBlobPath, pubArmor, 0644); err != nil {
		log.Fatalf("Error writing public key blob to %q: %v", keyBlobPath, err)
	}

	if ok, err := jsonsign.VerifyPublicKeyFile(keyBlobPath, keyId); !ok {
		log.Fatalf("Error verifying public key at %q: %v", keyBlobPath, err)
	}

	log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String())

	_, err = os.Stat(client.ConfigFilePath())
	if err == nil {
		log.Fatalf("Config file %q already exists; quitting without touching it.", client.ConfigFilePath())
	}

	if f, err := os.OpenFile(client.ConfigFilePath(), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600); err == nil {
		defer f.Close()
		m := make(map[string]interface{})
		m["keyId"] = keyId                    // TODO(bradfitz): make this 'identity' to match server config?
		m["publicKeyBlobref"] = bref.String() // TODO(bradfitz): not used anymore?
		m["blobServer"] = "http://localhost:3179/"
		m["selfPubKeyDir"] = blobDir
		m["auth"] = "none"

		blobPut := make([]map[string]string, 1)
		blobPut[0] = map[string]string{
			"alias":    "local",
			"host":     "http://localhost:3179/",
			"password": "",
		}
		m["blobPut"] = blobPut

		blobGet := make([]map[string]string, 2)
		blobGet[0] = map[string]string{
			"alias": "keyblobs",
			"path":  blobDir,
		}
		blobGet[1] = map[string]string{
			"alias":    "local",
			"host":     "http://localhost:3179/",
			"password": "",
		}
		m["blobGet"] = blobGet

		jsonBytes, err := json.MarshalIndent(m, "", "  ")
		if err != nil {
			log.Fatalf("JSON serialization error: %v", err)
		}
		_, err = f.Write(jsonBytes)
		if err != nil {
			log.Fatalf("Error writing to %q: %v", client.ConfigFilePath(), err)
		}
		log.Printf("Wrote %q; modify as necessary.", client.ConfigFilePath())
	}
	return nil
}
Example #10
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	// TODO(mpl): maybe break this func into more maintainable pieces?
	filebb := schema.NewCommonFileMap(n.fullPath, n.fi)
	filebb.SetType("file")
	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if up.fileOpts.exifTime {
		ra, ok := file.(io.ReaderAt)
		if !ok {
			return nil, errors.New("Error asserting local file to io.ReaderAt")
		}
		modtime, err := schema.FileTime(ra)
		if err != nil {
			log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
		} else {
			filebb.SetModTime(modtime)
		}
	}

	var (
		size                           = n.fi.Size()
		fileContents io.Reader         = io.LimitReader(file, size)
		br           *blobref.BlobRef  // of file schemaref
		sum          string            // sha1 hashsum of the file to upload
		pr           *client.PutResult // of the final "file" schema blob
	)

	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				noteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}

	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.statReceiver(n), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blobref.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     int64(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		noteFileUploaded(n.fullPath, true)
		return pr, nil
	}

	if br == nil {
		// br still nil means fileMapFromDuplicate did not find the file on the server,
		// and the file has not just been uploaded subsequently to a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.statReceiver(n), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}

	// TODO(mpl): test that none of these claims get uploaded if they've already been done
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// Use a fixed time value for signing; not using modtime
		// so two identical files don't have different modtimes?
		// TODO(bradfitz): consider this more?
		permaNodeSigTime := time.Unix(0, 0)
		permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
		handleResult("node-permanode", permaNode, nil)

		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime, ok := filebb.ModTime()
		if !ok {
			return nil, fmt.Errorf("couldn't get modtime back for file %v", n.fullPath)
		}
		contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", br.String())
		contentAttr.SetClaimDate(claimTime)
		signed, err := up.SignBlob(contentAttr, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Failed to sign content claim for node %v: %v", n, err)
		}
		put, err := up.uploadString(signed)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode's attribute for node %v: %v", n, err)
		}
		handleResult("node-permanode-contentattr", put, nil)
		if tags := up.fileOpts.tags(); len(tags) > 0 {
			errch := make(chan error)
			for _, tag := range tags {
				go func(tag string) {
					m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
					m.SetClaimDate(claimTime)
					signed, err := up.SignBlob(m, claimTime)
					if err != nil {
						errch <- fmt.Errorf("Failed to sign tag claim for node %v: %v", n, err)
						return
					}
					put, err := up.uploadString(signed)
					if err != nil {
						errch <- fmt.Errorf("Error uploading permanode's tag attribute %v for node %v: %v", tag, n, err)
						return
					}
					handleResult("node-permanode-tag", put, nil)
					errch <- nil
				}(tag)
			}

			for _ = range tags {
				if e := <-errch; e != nil && err == nil {
					err = e
				}
			}
			if err != nil {
				return nil, err
			}
		}
	}

	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross.  should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this?  or make
	// schemaWriteFileMap return it?
	json, _ := filebb.JSON()
	pr = &client.PutResult{BlobRef: br, Size: int64(len(json)), Skipped: false}
	return pr, nil
}
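
The trackDigestReader wrapped around fileContents above is not shown. Here is a sketch reconstructed from its usage (a wrapped io.Reader plus a Sum method returning a "sha1-..." string); the real camput type may differ:

// Sketch (reconstructed from usage): tees all bytes read through a
// SHA-1 hash so the whole-file digest is available after the copy.
type trackDigestReader struct {
	r io.Reader
	h hash.Hash
}

func (t *trackDigestReader) Read(p []byte) (n int, err error) {
	// Lazily initialized, since the struct is built with only r set.
	if t.h == nil {
		t.h = sha1.New()
	}
	n, err = t.r.Read(p)
	t.h.Write(p[:n])
	return
}

// Sum returns the digest in blobref string form, e.g. "sha1-xxxx".
func (t *trackDigestReader) Sum() string {
	return fmt.Sprintf("sha1-%x", t.h.Sum(nil))
}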
Example #11
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	filebb := schema.NewCommonFileMap(n.fullPath, n.fi)
	filebb.SetType("file")
	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if up.fileOpts.exifTime {
		ra, ok := file.(io.ReaderAt)
		if !ok {
			return nil, errors.New("Error asserting local file to io.ReaderAt")
		}
		modtime, err := schema.FileTime(ra)
		if err != nil {
			log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
		} else {
			filebb.SetModTime(modtime)
		}
	}

	var (
		size                           = n.fi.Size()
		fileContents io.Reader         = io.LimitReader(file, size)
		br           *blobref.BlobRef  // of file schemaref
		sum          string            // sha1 hashsum of the file to upload
		pr           *client.PutResult // of the final "file" schema blob
	)

	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				client.NoteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}

	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.statReceiver(n), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blobref.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     int64(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		client.NoteFileUploaded(n.fullPath, true)
		return pr, nil
	}

	if br == nil {
		// br still nil means fileMapFromDuplicate did not find the file on the server,
		// and the file has not just been uploaded subsequently to a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.statReceiver(n), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}

	// The work for those planned permanodes (and the claims) is redone
	// every time we get here (i.e. past the stat cache). However, they're
	// caught by the have cache, so at least they won't be reuploaded
	// for nothing.
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime, ok := filebb.ModTime()
		if !ok {
			return nil, fmt.Errorf("couldn't get modtime for file %v", n.fullPath)
		}
		err = up.uploadFilePermanode(sum, br, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
	}

	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross.  should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this?  or make
	// schemaWriteFileMap return it?
	json, _ := filebb.JSON()
	pr = &client.PutResult{BlobRef: br, Size: int64(len(json)), Skipped: false}
	return pr, nil
}
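
This version has factored the permanode work out into an uploadFilePermanode method that is not shown. Below is a sketch reconstructed from the inline version in the previous example (tag handling omitted); the real camput method may differ:

// Sketch (reconstructed from the inline version in Example #10).
func (up *Uploader) uploadFilePermanode(sum string, fileRef *blobref.BlobRef, claimTime time.Time) error {
	// Fixed signing time so identical files yield the same planned
	// permanode.
	permaNode, err := up.UploadPlannedPermanode(sum, time.Unix(0, 0))
	if err != nil {
		return err
	}
	handleResult("node-permanode", permaNode, nil)

	contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", fileRef.String())
	contentAttr.SetClaimDate(claimTime)
	signed, err := up.SignBlob(contentAttr, claimTime)
	if err != nil {
		return err
	}
	put, err := up.uploadString(signed)
	if err != nil {
		return err
	}
	handleResult("node-permanode-contentattr", put, nil)
	return nil
}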
Example #12
// genLowLevelConfig returns a low-level config from a high-level config.
func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
	var (
		baseURL    = conf.OptionalString("baseURL", "")
		listen     = conf.OptionalString("listen", "")
		auth       = conf.RequiredString("auth")
		keyId      = conf.RequiredString("identity")
		secretRing = conf.RequiredString("identitySecretRing")
		blobPath   = conf.RequiredString("blobPath")
		tlsOn      = conf.OptionalBool("https", false)
		tlsCert    = conf.OptionalString("HTTPSCertFile", "")
		tlsKey     = conf.OptionalString("HTTPSKeyFile", "")
		dbname     = conf.OptionalString("dbname", "")
		mysql      = conf.OptionalString("mysql", "")
		postgres   = conf.OptionalString("postgres", "")
		mongo      = conf.OptionalString("mongo", "")
		_          = conf.OptionalList("replicateTo")
		s3         = conf.OptionalString("s3", "")
		publish    = conf.OptionalObject("publish")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	obj := jsonconfig.Obj{}
	if tlsOn {
		if (tlsCert != "") != (tlsKey != "") {
			return nil, errors.New("Must set both TLSCertFile and TLSKeyFile (or neither to generate a self-signed cert)")
		}
		if tlsCert != "" {
			obj["TLSCertFile"] = tlsCert
			obj["TLSKeyFile"] = tlsKey
		} else {
			obj["TLSCertFile"] = "config/selfgen_cert.pem"
			obj["TLSKeyFile"] = "config/selfgen_key.pem"
		}
	}

	if baseURL != "" {
		if strings.HasSuffix(baseURL, "/") {
			baseURL = baseURL[:len(baseURL)-1]
		}
		obj["baseURL"] = baseURL
	}
	if listen != "" {
		obj["listen"] = listen
	}
	obj["https"] = tlsOn
	obj["auth"] = auth

	if dbname == "" {
		username := os.Getenv("USER")
		if username == "" {
			return nil, fmt.Errorf("USER env var not set; needed to define dbname")
		}
		dbname = "camli" + username
	}

	var indexerPath string
	switch {
	case mongo != "" && mysql != "" || mongo != "" && postgres != "" || mysql != "" && postgres != "":
		return nil, fmt.Errorf("You can only pick one of the db engines (mongo, mysql, postgres).")
	case mysql != "":
		indexerPath = "/index-mysql/"
	case postgres != "":
		indexerPath = "/index-postgres/"
	case mongo != "":
		indexerPath = "/index-mongo/"
	default:
		indexerPath = "/index-mem/"
	}

	entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}

	prefixesParams := &configPrefixesParams{
		secretRing:  secretRing,
		keyId:       keyId,
		indexerPath: indexerPath,
		blobPath:    blobPath,
		searchOwner: blobref.SHA1FromString(armoredPublicKey),
	}

	prefixes := genLowLevelPrefixes(prefixesParams)
	cacheDir := filepath.Join(blobPath, "/cache")
	if err := os.MkdirAll(cacheDir, 0700); err != nil {
		return nil, fmt.Errorf("Could not create blobs dir %s: %v", cacheDir, err)
	}

	published := []interface{}{}
	if publish != nil {
		published, err = addPublishedConfig(prefixes, publish)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}

	addUIConfig(prefixes, "/ui/", published)

	if mysql != "" {
		addMySQLConfig(prefixes, dbname, mysql)
	}
	if postgres != "" {
		addPostgresConfig(prefixes, dbname, postgres)
	}
	if mongo != "" {
		addMongoConfig(prefixes, dbname, mongo)
	}
	if s3 != "" {
		if err := addS3Config(prefixes, s3); err != nil {
			return nil, err
		}
	}
	if indexerPath == "/index-mem/" {
		addMemindexConfig(prefixes)
	}

	obj["prefixes"] = (map[string]interface{})(prefixes)

	lowLevelConf = &Config{
		Obj:        obj,
		configPath: conf.configPath,
	}
	return lowLevelConf, nil
}
Example #13
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	m := schema.NewCommonFileMap(n.fullPath, n.fi)
	m["camliType"] = "file"
	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	size := n.fi.Size()

	var fileContents io.Reader = io.LimitReader(file, size)

	if up.fileOpts.wantVivify() {
		err := schema.WriteFileChunks(up.statReceiver(), m, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := m.JSON()
		if err != nil {
			return nil, err
		}
		bref := blobref.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  bref,
			Size:     int64(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		return up.Upload(h)
	}

	var (
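		// Note: this variable shadows the imported blobref package;
		// every later use of the name blobref in this function refers
		// to the variable, not the package.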
		blobref *blobref.BlobRef // of file schemaref
		sum     string           // "sha1-xxxxx"
	)

	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			if ref, ok := up.fileMapFromDuplicate(up.statReceiver(), m, sum); ok {
				blobref = ref
			}
		}
	}

	if blobref == nil {
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		blobref, err = schema.WriteFileMap(up.statReceiver(), m, fileContents)
		if err != nil {
			return nil, err
		}
	}

	// TODO(mpl): test that none of these claims get uploaded if they've already been done
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// Use a fixed time value for signing; not using modtime
		// so two identical files don't have different modtimes?
		// TODO(bradfitz): consider this more?
		permaNodeSigTime := time.Unix(0, 0)
		permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
		handleResult("node-permanode", permaNode, nil)

		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime := n.fi.ModTime()

		contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", blobref.String())
		contentAttr.SetClaimDate(claimTime)
		signed, err := up.SignMap(contentAttr, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Failed to sign content claim for node %v: %v", n, err)
		}
		put, err := up.uploadString(signed)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode's attribute for node %v: %v", n, err)
		}
		handleResult("node-permanode-contentattr", put, nil)
		if tags := up.fileOpts.tags(); len(tags) > 0 {
			// TODO(mpl): do these claims concurrently, not in series
			for _, tag := range tags {
				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
				m.SetClaimDate(claimTime)
				// TODO(mpl): verify that SetClaimDate does modify the GPG signature date of the claim
				signed, err := up.SignMap(m, claimTime)
				if err != nil {
					return nil, fmt.Errorf("Failed to sign tag claim for node %v: %v", n, err)
				}
				put, err := up.uploadString(signed)
				if err != nil {
					return nil, fmt.Errorf("Error uploading permanode's tag attribute %v for node %v: %v", tag, n, err)
				}
				handleResult("node-permanode-tag", put, nil)
			}
		}
	}

	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross.  should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this?  or make
	// schemaWriteFileMap return it?
	json, _ := m.JSON()
	pr := &client.PutResult{BlobRef: blobref, Size: int64(len(json)), Skipped: false}
	return pr, nil
}
Example #14
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
	sf, ok := blobReceiver.(blobref.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blobref.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	// TODO(mpl): second check should not be necessary, and yet it happens. Figure it out.
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: no publicKeyBlobRef string in discovery map %v", discoMap)
	}

	unsigned := schema.NewHashPlannedPermanode(h)
	unsigned["camliSigner"] = publicKeyBlobRef
	signed, err := sigHelper.SignMap(unsigned)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", signed, err)
	}
	signedPerm := blobref.SHA1FromString(signed)
	_, err = blobReceiver.ReceiveBlob(signedPerm, strings.NewReader(signed))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v: %v", signed, err)
	}

	contentAttr := schema.NewSetAttributeClaim(signedPerm, "camliContent", fileblob.BlobRef.String())
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}
	contentAttr.SetClaimDate(claimDate)
	contentAttr["camliSigner"] = publicKeyBlobRef
	signed, err = sigHelper.SignMap(contentAttr)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	signedClaim := blobref.SHA1FromString(signed)
	_, err = blobReceiver.ReceiveBlob(signedClaim, strings.NewReader(signed))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v: %v", signed, err)
	}
	return nil
}
Example #15
func GenLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
	var (
		baseUrl    = conf.RequiredString("listen")
		auth       = conf.RequiredString("auth")
		keyId      = conf.RequiredString("identity")
		secretRing = conf.RequiredString("identitySecretRing")
		blobPath   = conf.RequiredString("blobPath")
		tlsOn      = conf.OptionalBool("TLS", false)
		dbname     = conf.OptionalString("dbname", "")
		mysql      = conf.OptionalString("mysql", "")
		mongo      = conf.OptionalString("mongo", "")
		_          = conf.OptionalList("replicateTo")
		_          = conf.OptionalString("s3", "")
		publish    = conf.OptionalObject("publish")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	obj := jsonconfig.Obj{}
	scheme := "http"
	if tlsOn {
		scheme = "https"
		obj["TLSCertFile"] = "config/selfgen_cert.pem"
		obj["TLSKeyFile"] = "config/selfgen_key.pem"
	}
	obj["baseURL"] = scheme + "://" + baseUrl
	obj["https"] = tlsOn
	obj["auth"] = auth

	if dbname == "" {
		username := os.Getenv("USER")
		if username == "" {
			return nil, fmt.Errorf("USER env var not set; needed to define dbname")
		}
		dbname = "camli" + username
	}

	var indexerPath string
	switch {
	case mongo != "" && mysql != "":
		return nil, fmt.Errorf("Cannot have both mysql and mongo in config, pick one")
	case mysql != "":
		indexerPath = "/index-mysql/"
	case mongo != "":
		indexerPath = "/index-mongo/"
	default:
		indexerPath = "/index-mem/"
	}

	entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}

	prefixesParams := &configPrefixesParams{
		secretRing:  secretRing,
		keyId:       keyId,
		indexerPath: indexerPath,
		blobPath:    blobPath,
		searchOwner: blobref.SHA1FromString(armoredPublicKey),
	}

	prefixes := genLowLevelPrefixes(prefixesParams)
	cacheDir := filepath.Join(blobPath, "/cache")
	if err := os.MkdirAll(cacheDir, 0700); err != nil {
		return nil, fmt.Errorf("Could not create blobs dir %s: %v", cacheDir, err)
	}

	published := []interface{}{}
	if publish != nil {
		published, err = addPublishedConfig(&prefixes, publish)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}

	addUIConfig(&prefixes, "/ui/", published)

	if mysql != "" {
		addMysqlConfig(&prefixes, dbname, mysql)
	}
	if mongo != "" {
		addMongoConfig(&prefixes, dbname, mongo)
	}
	if indexerPath == "/index-mem/" {
		addMemindexConfig(&prefixes)
	}

	obj["prefixes"] = (map[string]interface{})(prefixes)

	lowLevelConf = &Config{
		Obj:        obj,
		configPath: conf.configPath,
	}
	return lowLevelConf, nil
}
Example #16
func WriteFileMapRolling(bs blobserver.StatReceiver, fileMap map[string]interface{}, r io.Reader) (outbr *blobref.BlobRef, outerr error) {
	blobSize := 0
	bufr := bufio.NewReader(r)
	spans := []span{} // the tree of spans, cut on interesting rollsum boundaries
	rs := rollsum.New()
	n := int64(0)
	last := n
	buf := new(bytes.Buffer)

	uploadString := func(s string) (*blobref.BlobRef, error) {
		br := blobref.SHA1FromString(s)
		hasIt, err := serverHasBlob(bs, br)
		if err != nil {
			return nil, err
		}
		if hasIt {
			return br, nil
		}
		_, err = bs.ReceiveBlob(br, strings.NewReader(s))
		if err != nil {
			return nil, err
		}
		return br, nil
	}

	// TODO: keep multiple of these in-flight at a time.
	uploadLastSpan := func() bool {
		defer buf.Reset()
		br, err := uploadString(buf.String())
		if err != nil {
			outerr = err
			return false
		}
		spans[len(spans)-1].br = br
		return true
	}

	for {
		c, err := bufr.ReadByte()
		if err == io.EOF {
			if n != last {
				spans = append(spans, span{from: last, to: n})
				if !uploadLastSpan() {
					return
				}
			}
			break
		}
		if err != nil {
			return nil, err
		}
		buf.WriteByte(c)

		n++
		blobSize++
		rs.Roll(c)
		if !rs.OnSplit() {
			if blobSize < MaxBlobSize {
				continue
			}
		}
		blobSize = 0
		bits := rs.Bits()

		// Take any spans from the end of the spans slice that
		// have a smaller 'bits' score and make them children
		// of this node.
		var children []span
		childrenFrom := len(spans)
		for childrenFrom > 0 && spans[childrenFrom-1].bits < bits {
			childrenFrom--
		}
		if nCopy := len(spans) - childrenFrom; nCopy > 0 {
			children = make([]span, nCopy)
			copy(children, spans[childrenFrom:])
			spans = spans[:childrenFrom]
		}

		spans = append(spans, span{from: last, to: n, bits: bits, children: children})
		last = n
		if !uploadLastSpan() {
			return
		}
	}

	var addBytesParts func(dst *[]BytesPart, s []span) error

	uploadFile := func(isFragment bool, fileSize int64, s []span) (*blobref.BlobRef, error) {
		parts := []BytesPart{}
		err := addBytesParts(&parts, s)
		if err != nil {
			return nil, err
		}
		m := fileMap
		if isFragment {
			m = NewBytes()
		}
		err = PopulateParts(m, fileSize, parts)
		if err != nil {
			return nil, err
		}
		json, err := MapToCamliJSON(m)
		if err != nil {
			return nil, err
		}
		return uploadString(json)
	}

	addBytesParts = func(dst *[]BytesPart, spansl []span) error {
		for _, sp := range spansl {
			if len(sp.children) > 0 {
				childrenSize := int64(0)
				for _, cs := range sp.children {
					childrenSize += cs.size()
				}
				br, err := uploadFile(true, childrenSize, sp.children)
				if err != nil {
					return err
				}
				*dst = append(*dst, BytesPart{
					BytesRef: br,
					Size:     uint64(childrenSize),
				})
			}
			if sp.from != sp.to {
				*dst = append(*dst, BytesPart{
					BlobRef: sp.br,
					Size:    uint64(sp.to - sp.from),
				})
			}
		}
		return nil
	}

	// The top-level content parts
	return uploadFile(false, n, spans)
}
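
A toy illustration of the cut rule in the loop above: each new cut adopts all trailing spans with strictly smaller bits scores as its children, so stronger boundaries end up higher in the span tree. The bits values here are made up:

// Toy sketch of the span-merging rule (self-contained).
func demoSpanMerge() {
	type span struct {
		bits     int
		children []span
	}
	spans := []span{}
	push := func(bits int) {
		var children []span
		from := len(spans)
		for from > 0 && spans[from-1].bits < bits {
			from--
		}
		if n := len(spans) - from; n > 0 {
			children = make([]span, n)
			copy(children, spans[from:])
			spans = spans[:from]
		}
		spans = append(spans, span{bits: bits, children: children})
	}
	for _, b := range []int{13, 14, 13, 20} {
		push(b)
	}
	// The final 20-bit cut adopted both earlier top-level spans
	// (the first 13 is already nested under the 14).
	fmt.Println(len(spans), len(spans[0].children)) // prints: 1 2
}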
Example #17
// genLowLevelConfig returns a low-level config from a high-level config.
func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
	var (
		baseURL    = conf.OptionalString("baseURL", "")
		listen     = conf.OptionalString("listen", "")
		auth       = conf.RequiredString("auth")
		keyId      = conf.RequiredString("identity")
		secretRing = conf.RequiredString("identitySecretRing")
		tlsOn      = conf.OptionalBool("https", false)
		tlsCert    = conf.OptionalString("HTTPSCertFile", "")
		tlsKey     = conf.OptionalString("HTTPSKeyFile", "")

		// Blob storage options
		blobPath     = conf.OptionalString("blobPath", "")
		s3           = conf.OptionalString("s3", "")           // "access_key_id:secret_access_key:bucket"
		shareHandler = conf.OptionalBool("shareHandler", true) // enable the share handler

		// Index options
		runIndex   = conf.OptionalBool("runIndex", true) // if false: no search, no UI, etc.
		dbname     = conf.OptionalString("dbname", "")   // for mysql, postgres, mongo
		mysql      = conf.OptionalString("mysql", "")
		postgres   = conf.OptionalString("postgres", "")
		memIndex   = conf.OptionalBool("memIndex", false)
		mongo      = conf.OptionalString("mongo", "")
		sqliteFile = conf.OptionalString("sqlite", "")

		_       = conf.OptionalList("replicateTo")
		publish = conf.OptionalObject("publish")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	obj := jsonconfig.Obj{}
	if tlsOn {
		if (tlsCert != "") != (tlsKey != "") {
			return nil, errors.New("Must set both TLSCertFile and TLSKeyFile (or neither to generate a self-signed cert)")
		}
		if tlsCert != "" {
			obj["TLSCertFile"] = tlsCert
			obj["TLSKeyFile"] = tlsKey
		} else {
			obj["TLSCertFile"] = DefaultTLSCert
			obj["TLSKeyFile"] = DefaultTLSKey
		}
	}

	if baseURL != "" {
		if strings.HasSuffix(baseURL, "/") {
			baseURL = baseURL[:len(baseURL)-1]
		}
		obj["baseURL"] = baseURL
	}
	if listen != "" {
		obj["listen"] = listen
	}
	obj["https"] = tlsOn
	obj["auth"] = auth

	if dbname == "" {
		username := os.Getenv("USER")
		if username == "" {
			return nil, fmt.Errorf("USER env var not set; needed to define dbname")
		}
		dbname = "camli" + username
	}

	var indexerPath string
	numIndexers := numSet(mongo, mysql, postgres, sqliteFile, memIndex)
	switch {
	case runIndex && numIndexers == 0:
		return nil, fmt.Errorf("Unless wantIndex is set to false, you must specify an index option (mongo, mysql, postgres, sqlite, memIndex).")
	case runIndex && numIndexers != 1:
		return nil, fmt.Errorf("With wantIndex set true, you can only pick exactly one indexer (mongo, mysql, postgres, sqlite, memIndex).")
	case !runIndex && numIndexers != 0:
		return nil, fmt.Errorf("With wantIndex disabled, you can't specify any of mongo, mysql, postgres, sqlite, memIndex.")
	case mysql != "":
		indexerPath = "/index-mysql/"
	case postgres != "":
		indexerPath = "/index-postgres/"
	case mongo != "":
		indexerPath = "/index-mongo/"
	case sqliteFile != "":
		indexerPath = "/index-sqlite/"
	case memIndex:
		indexerPath = "/index-mem/"
	}

	entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}

	nolocaldisk := blobPath == ""
	if nolocaldisk && s3 == "" {
		return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 configured for a blobserver.")
	}

	prefixesParams := &configPrefixesParams{
		secretRing:   secretRing,
		keyId:        keyId,
		indexerPath:  indexerPath,
		blobPath:     blobPath,
		searchOwner:  blobref.SHA1FromString(armoredPublicKey),
		shareHandler: shareHandler,
	}

	prefixes := genLowLevelPrefixes(prefixesParams)
	var cacheDir string
	if nolocaldisk {
		// Whether camlistored is run from EC2 or not, we use
		// a temp dir as the cache when primary storage is S3.
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		cacheDir = filepath.Join(tempDir(), "camli-cache")
	} else {
		cacheDir = filepath.Join(blobPath, "/cache")
	}
	if err := os.MkdirAll(cacheDir, 0700); err != nil {
		return nil, fmt.Errorf("Could not create blobs cache dir %s: %v", cacheDir, err)
	}

	published := []interface{}{}
	if len(publish) > 0 {
		if !runIndex {
			return nil, fmt.Errorf("publishing requires an index")
		}
		published, err = addPublishedConfig(prefixes, publish)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}

	if runIndex {
		addUIConfig(prefixes, "/ui/", published)
	}

	if mysql != "" {
		addMySQLConfig(prefixes, dbname, mysql)
	}
	if postgres != "" {
		addPostgresConfig(prefixes, dbname, postgres)
	}
	if mongo != "" {
		addMongoConfig(prefixes, dbname, mongo)
	}
	if sqliteFile != "" {
		addSQLiteConfig(prefixes, sqliteFile)
	}
	if s3 != "" {
		if err := addS3Config(prefixes, s3); err != nil {
			return nil, err
		}
	}
	if indexerPath == "/index-mem/" {
		addMemindexConfig(prefixes)
	}

	obj["prefixes"] = (map[string]interface{})(prefixes)

	lowLevelConf = &Config{
		Obj:        obj,
		configPath: conf.configPath,
	}
	return lowLevelConf, nil
}
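
numSet is used above but not defined in this excerpt. Here is a sketch matching its call site, which mixes string and bool arguments; the real helper may differ:

// Sketch: count how many of the given options are "set"
// (a non-empty string or a true bool).
func numSet(vv ...interface{}) (num int) {
	for _, vi := range vv {
		switch v := vi.(type) {
		case string:
			if v != "" {
				num++
			}
		case bool:
			if v {
				num++
			}
		default:
			panic("unknown type")
		}
	}
	return
}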
Example #18
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
	sf, ok := blobReceiver.(blobref.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blobref.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: no publicKeyBlobRef string in discovery map %v", discoMap)
	}

	// The file schema must have a modtime to vivify, as the modtime is used for all three of:
	// 1) the permanode's signature
	// 2) the camliContent attribute claim's "claimDate"
	// 3) the signature time of 2)
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}

	permanodeBB := schema.NewHashPlannedPermanode(h)
	permanodeBB.SetSigner(blobref.MustParse(publicKeyBlobRef))
	permanodeBB.SetClaimDate(claimDate)
	permanodeSigned, err := sigHelper.Sign(permanodeBB)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
	}
	permanodeRef := blobref.SHA1FromString(permanodeSigned)
	_, err = blobReceiver.ReceiveBlob(permanodeRef, strings.NewReader(permanodeSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
	}

	contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.BlobRef.String())
	contentClaimBB.SetSigner(blobref.MustParse(publicKeyBlobRef))
	contentClaimBB.SetClaimDate(claimDate)
	contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	contentClaimRef := blobref.SHA1FromString(contentClaimSigned)
	_, err = blobReceiver.ReceiveBlob(contentClaimRef, strings.NewReader(contentClaimSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
	}
	return nil
}