Example #1
// uploadAndSign signs the schema blob built by m with the test
// signer, then uploads the signed blob to both the blob source and
// the index, returning its blobref.
func (id *IndexDeps) uploadAndSign(m *schema.Builder) blob.Ref {
	m.SetSigner(id.SignerBlobRef)
	unsigned, err := m.JSON()
	if err != nil {
		id.Fatalf("uploadAndSignMap: " + err.Error())
	}
	sr := &jsonsign.SignRequest{
		UnsignedJSON:  unsigned,
		Fetcher:       id.PublicKeyFetcher,
		EntityFetcher: id.EntityFetcher,
		SignatureTime: id.now,
	}
	signed, err := sr.Sign()
	if err != nil {
		id.Fatalf("problem signing: " + err.Error())
	}
	tb := &test.Blob{Contents: signed}
	_, err = id.BlobSource.ReceiveBlob(tb.BlobRef(), tb.Reader())
	if err != nil {
		id.Fatalf("public uploading signed blob to blob source, pre-indexing: %v, %v", tb.BlobRef(), err)
	}
	_, err = id.Index.ReceiveBlob(tb.BlobRef(), tb.Reader())
	if err != nil {
		id.Fatalf("problem indexing blob: %v\nblob was:\n%s", err, signed)
	}
	return tb.BlobRef()
}
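A hedged usage sketch: helpers like this are typically driven by the schema package's builder constructors. Assuming an initialized *IndexDeps named id (the permanode key and attribute values below are illustrative, not from this snippet):

// Illustrative only: build, sign, and index a planned permanode,
// then attach an attribute claim to it.
pn := id.uploadAndSign(schema.NewPlannedPermanode("perma-1"))
id.uploadAndSign(schema.NewSetAttributeClaim(pn, "title", "hello"))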
Example #2
// fileMapFromDuplicate queries the server's search interface for an
// existing file schema blob whose contents match wholeRef.
// If the server has it, it's validated, and then fileMap (which must
// already be partially populated) has its "parts" field populated,
// and then fileMap is uploaded (if necessary).
// If no file blob is found, a zero blob.Ref (and no error) is returned.
func (cl *Client) fileMapFromDuplicate(fileMap *schema.Builder, wholeRef blob.Ref) (blob.Ref, error) {
	dupFileRef, err := cl.SearchExistingFileSchema(wholeRef)
	if err != nil {
		return blob.Ref{}, err
	}
	if !dupFileRef.Valid() {
		// SearchExistingFileSchema returns (blob.Ref{}, nil) when no file is found.
		return blob.Ref{}, nil
	}
	dupMap, err := cl.FetchSchemaBlob(dupFileRef)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("could not find existing file blob for wholeRef %q: %v", wholeRef, err)
	}
	fileMap.PopulateParts(dupMap.PartsSize(), dupMap.ByteParts())
	json, err := fileMap.JSON()
	if err != nil {
		return blob.Ref{}, fmt.Errorf("could not write file map for wholeRef %q: %v", wholeRef, err)
	}
	bref := blob.SHA1FromString(json)
	if bref == dupFileRef {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return dupFileRef, nil
	}
	sbr, err := cl.ReceiveBlob(bref, strings.NewReader(json))
	if err != nil {
		return blob.Ref{}, err
	}
	return sbr.Ref, nil
}
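The final comparison works because blobrefs are content addresses: if the re-serialized map hashes to the same ref the search returned, the map is byte-identical to what the server already stores, so the upload can be skipped. A self-contained sketch of just that comparison (the JSON literal is a stand-in):

package main

import (
	"fmt"

	"camlistore.org/pkg/blob"
)

func main() {
	json := `{"camliVersion": 1, "camliType": "file"}` // stand-in for fileMap.JSON()
	bref := blob.SHA1FromString(json)
	dupFileRef := blob.SHA1FromString(json) // stand-in for the ref found by search
	if bref == dupFileRef {                 // blob.Ref values are comparable
		fmt.Println("unchanged; skipping upload of", bref)
	}
}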
Example #3
// upload signs bb with the host's signer and uploads the signed blob
// to the host's target storage, returning the new blob's ref.
func (h *Host) upload(bb *schema.Builder) (blob.Ref, error) {
	signed, err := bb.Sign(h.signer)
	if err != nil {
		return blob.Ref{}, err
	}
	sb, err := blobserver.ReceiveString(h.target, signed)
	if err != nil {
		return blob.Ref{}, err
	}
	return sb.Ref, nil
}
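A hedged usage sketch, assuming a configured *Host named h and the claim constructors from Camlistore's schema package (permanodeRef and fileRef are placeholders, not names from this snippet):

// Illustrative only: sign and upload a camliContent claim via the host.
claim := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileRef.String())
br, err := h.upload(claim)
if err != nil {
	log.Fatalf("uploading claim: %v", err)
}
log.Printf("claim uploaded as %v", br)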
Example #4
// fileMapFromDuplicate queries the server's search interface for an
// existing file whose entire contents have the digest sum (a blobref
// string). If the server has one, it's validated, and then fileMap
// (which must already be partially populated) has its "parts" field
// populated, and then fileMap is uploaded (if necessary) and a
// PutResult with its blobref is returned. If there's any problem, or
// no duplicate exists, ok is false.
// If required, vivification is also done here.
func (up *Uploader) fileMapFromDuplicate(bs blobserver.StatReceiver, fileMap *schema.Builder, sum string) (pr *client.PutResult, ok bool) {
	if noDupSearch {
		return nil, false
	}
	_, err := up.Client.SearchRoot()
	if err != nil {
		return nil, false
	}
	dupFileRef, err := up.Client.SearchExistingFileSchema(blob.MustParse(sum))
	if err != nil {
		log.Printf("Warning: error searching for already-uploaded copy of %s: %v", sum, err)
		return nil, false
	}
	if !dupFileRef.Valid() {
		return nil, false
	}
	if *cmdmain.FlagVerbose {
		log.Printf("Found dup of contents %s in file schema %s", sum, dupFileRef)
	}
	dupMap, err := up.Client.FetchSchemaBlob(dupFileRef)
	if err != nil {
		log.Printf("Warning: error fetching %v: %v", dupFileRef, err)
		return nil, false
	}

	fileMap.PopulateParts(dupMap.PartsSize(), dupMap.ByteParts())

	json, err := fileMap.JSON()
	if err != nil {
		return nil, false
	}
	uh := client.NewUploadHandleFromString(json)
	if up.fileOpts.wantVivify() {
		uh.Vivify = true
	}
	if !uh.Vivify && uh.BlobRef == dupFileRef {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return &client.PutResult{BlobRef: dupFileRef, Size: uint32(len(json)), Skipped: true}, true
	}
	pr, err = up.Upload(uh)
	if err != nil {
		log.Printf("Warning: error uploading file map after finding server dup of %v: %v", sum, err)
		return nil, false
	}
	return pr, true
}
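Example 6 below is this helper's actual call site; a condensed view of it, for orientation (the surrounding names come from that example):

// Inside uploadNodeRegularFile, after hashing the whole file:
sum := sumRef.String()
if pr, ok := up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum); ok {
	// The server already had the contents; pr.Skipped reports whether
	// uploading the final schema blob was skipped as well.
	br := pr.BlobRef
	_ = br
}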
Example #5
// Sign signs the schema blob described by bb with the handler's
// configured key and returns the signed JSON.
func (h *Handler) Sign(bb *schema.Builder) (string, error) {
	bb.SetSigner(h.pubKeyBlobRef)
	unsigned, err := bb.JSON()
	if err != nil {
		return "", err
	}
	sreq := &jsonsign.SignRequest{
		UnsignedJSON:      unsigned,
		Fetcher:           h.pubKeyFetcher,
		ServerMode:        true,
		SecretKeyringPath: h.secretRing,
	}
	claimTime, err := bb.Blob().ClaimDate()
	if err != nil {
		if !schema.IsMissingField(err) {
			return "", err
		}
	} else {
		sreq.SignatureTime = claimTime
	}
	if err := h.uploadPublicKey(); err != nil {
		log.Printf("signing handler failed to upload public key: %v", err)
	}
	return sreq.Sign()
}
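The ClaimDate block above tolerates exactly one failure mode: a schema blob with no claimDate field. An equivalent, slightly more explicit rendering of that branch as a switch (same behavior, shown for clarity):

claimTime, err := bb.Blob().ClaimDate()
switch {
case err == nil:
	sreq.SignatureTime = claimTime // sign with the claim's own date
case schema.IsMissingField(err):
	// No claimDate field: leave SignatureTime zero and let the signer
	// choose the signature time.
default:
	return "", err
}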
Example #6
// uploadNodeRegularFile uploads a regular file's contents and its
// "file" schema blob, first checking the server for duplicate
// contents for large files, and optionally creating a permanode.
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	var filebb *schema.Builder
	if up.fileOpts.contentsOnly {
		filebb = schema.NewFileMap("")
	} else {
		filebb = schema.NewCommonFileMap(n.fullPath, n.fi)
	}
	filebb.SetType("file")

	up.fdGate.Start()
	defer up.fdGate.Done()

	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if !up.fileOpts.contentsOnly {
		if up.fileOpts.exifTime {
			ra, ok := file.(io.ReaderAt)
			if !ok {
				return nil, errors.New("error asserting local file to io.ReaderAt")
			}
			modtime, err := schema.FileTime(ra)
			if err != nil {
				log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err)
			} else {
				filebb.SetModTime(modtime)
			}
		}
		if up.fileOpts.wantCapCtime() {
			filebb.CapCreationTime()
		}
	}

	var (
		size                           = n.fi.Size()
		fileContents io.Reader         = io.LimitReader(file, size)
		br           blob.Ref          // blobref of the "file" schema blob
		sum          string            // SHA-1 digest of the file contents, as a blobref string
		pr           *client.PutResult // of the final "file" schema blob
	)

	const dupCheckThreshold = 256 << 10 // 256 KiB
	if size > dupCheckThreshold {
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			ok := false
			pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum)
			if ok {
				br = pr.BlobRef
				android.NoteFileUploaded(n.fullPath, !pr.Skipped)
				if up.fileOpts.wantVivify() {
					// we can return early in that case, because the other options
					// are disallowed in the vivify case.
					return pr, nil
				}
			}
		}
	}

	if up.fileOpts.wantVivify() {
		// If vivify wasn't already done in fileMapFromDuplicate.
		err := schema.WriteFileChunks(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := filebb.JSON()
		if err != nil {
			return nil, err
		}
		br = blob.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  br,
			Size:     uint32(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		pr, err = up.Upload(h)
		if err != nil {
			return nil, err
		}
		android.NoteFileUploaded(n.fullPath, true)
		return pr, nil
	}

	if !br.Valid() {
		// A still-zero br means fileMapFromDuplicate did not find the file on the server,
		// and the file was not just uploaded in response to a vivify request.
		// So we do the full file + file schema upload here.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		br, err = schema.WriteFileMap(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents)
		if err != nil {
			return nil, err
		}
	}

	// The work for those planned permanodes (and the claims) is redone
	// every time we get here (i.e. past the stat cache). However, they're
	// caught by the have cache, so at least they won't be needlessly
	// reuploaded.
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime, ok := filebb.ModTime()
		if !ok {
			return nil, fmt.Errorf("couldn't get modtime for file %v", n.fullPath)
		}
		err = up.uploadFilePermanode(sum, br, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
	}

	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross.  should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this?  or make
	// schema.WriteFileMap return it?
	json, _ := filebb.JSON()
	pr = &client.PutResult{BlobRef: br, Size: uint32(len(json)), Skipped: false}
	return pr, nil
}
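trackDigestReader is not defined in this snippet. A plausible self-contained sketch of such a wrapper, hashing bytes as they stream through and matching the literal &trackDigestReader{r: ...} construction above (the real camput type may differ in detail):

package main

import (
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"strings"
)

// trackDigestReader wraps an io.Reader and hashes everything read
// through it, so a digest is available once the reader is drained.
type trackDigestReader struct {
	r io.Reader
	h hash.Hash
}

func (t *trackDigestReader) Read(p []byte) (int, error) {
	if t.h == nil {
		t.h = sha1.New()
	}
	n, err := t.r.Read(p)
	t.h.Write(p[:n]) // hash.Hash.Write never returns an error
	return n, err
}

// Sum returns the digest in blobref form, e.g. "sha1-...".
func (t *trackDigestReader) Sum() string {
	if t.h == nil {
		t.h = sha1.New()
	}
	return fmt.Sprintf("sha1-%x", t.h.Sum(nil))
}

func main() {
	td := &trackDigestReader{r: strings.NewReader("hello world")}
	io.Copy(ioutil.Discard, td)
	fmt.Println(td.Sum())
}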