Example #1
func (h *Handler) uploadPublicKey() error {
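	// Fast path: under a read lock, check whether the key is already known to be uploaded.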
	h.pubKeyUploadMu.RLock()
	if h.pubKeyUploaded {
		h.pubKeyUploadMu.RUnlock()
		return nil
	}
	h.pubKeyUploadMu.RUnlock()

	sto := h.pubKeyDest

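	// Slow path: re-check under the write lock, then stat the destination before actually uploading.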
	h.pubKeyUploadMu.Lock()
	defer h.pubKeyUploadMu.Unlock()
	if h.pubKeyUploaded {
		return nil
	}
	_, err := blobserver.StatBlob(sto, h.pubKeyBlobRef)
	if err == nil {
		h.pubKeyUploaded = true
		return nil
	}
	_, err = blobserver.Receive(sto, h.pubKeyBlobRef, strings.NewReader(h.pubKey))
	log.Printf("uploadPublicKey(%T, %v) = %v", sto, h.pubKeyBlobRef, err)
	if err == nil {
		h.pubKeyUploaded = true
	}
	return err
}
Example #2
func (ix *Index) reindex(br blob.Ref) {
	// TODO: cap how many of these can be going at once, probably more than 1,
	// and be more efficient than just blocking goroutines. For now, this:
	reindexMu.Lock()
	defer reindexMu.Unlock()

	bs := ix.BlobSource
	if bs == nil {
		log.Printf("index: can't re-index %v: no BlobSource", br)
		return
	}
	log.Printf("index: starting re-index of %v", br)
	rc, _, err := bs.FetchStreaming(br)
	if err != nil {
		log.Printf("index: failed to fetch %v for reindexing: %v", br, err)
		return
	}
	defer rc.Close()
	sb, err := blobserver.Receive(ix, br, rc)
	if err != nil {
		log.Printf("index: reindex of %v failed: %v", br, err)
		return
	}
	log.Printf("index: successfully reindexed %v", sb)
}
Example #3
func (sto *sto) Fetch(b blob.Ref) (rc io.ReadCloser, size uint32, err error) {
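	// Serve from the local cache when possible; on a miss, read from the origin,
	// return the bytes to the caller right away, and populate the cache in the background.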
	rc, size, err = sto.cache.Fetch(b)
	if err == nil {
		sto.touchBlob(blob.SizedRef{Ref: b, Size: size})
		return
	}
	if err != os.ErrNotExist {
		log.Printf("warning: proxycache cache fetch error for %v: %v", b, err)
	}
	rc, size, err = sto.origin.Fetch(b) // cache miss: read from the origin
	if err != nil {
		return
	}
	all, err := ioutil.ReadAll(rc)
	if err != nil {
		return
	}
	go func() {
		if _, err := blobserver.Receive(sto.cache, b, bytes.NewReader(all)); err != nil {
			log.Printf("populating proxycache cache for %v: %v", b, err)
			return
		}
		sto.touchBlob(blob.SizedRef{Ref: b, Size: size})
	}()
	return ioutil.NopCloser(bytes.NewReader(all)), size, nil
}
Example #4
func (sto *condStorage) ReceiveBlob(br blob.Ref, src io.Reader) (sb blob.SizedRef, err error) {
	destSto, src, err := sto.storageForReceive(br, src)
	if err != nil {
		return
	}
	return blobserver.Receive(destSto, br, src)
}
Example #5
func TestReceive(t *testing.T) {
	sto := new(test.Fetcher)
	data := []byte("some blob")
	br := blob.SHA1FromBytes(data)

	hub := blobserver.GetHub(sto)
	ch := make(chan blob.Ref, 1)
	hub.RegisterListener(ch)

	sb, err := blobserver.Receive(sto, br, bytes.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	if sb.Size != int64(len(data)) {
		t.Errorf("received blob size = %d; want %d", sb.Size, len(data))
	}
	if sb.Ref != br {
		t.Errorf("received blob = %v; want %v", sb.Ref, br)
	}
	select {
	case got := <-ch:
		if got != br {
			t.Errorf("blobhub notified about %v; want %v", got, br)
		}
	case <-time.After(5 * time.Second):
		t.Error("timeout waiting on blobhub")
	}
}
Example #6
func (h *Handler) uploadPublicKey(sto blobserver.Storage, key string) error {
	_, err := blobserver.StatBlob(sto, h.pubKeyBlobRef)
	if err == nil {
		return nil
	}
	_, err = blobserver.Receive(sto, h.pubKeyBlobRef, strings.NewReader(key))
	return err
}
Example #7
func TestDoubleReceive(t *testing.T) {
	sto, cleanup := newTempDiskpacked(t)
	defer cleanup()
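
	// The test receives the blob, rolls over to a new pack file, and receives the
	// same blob again: the duplicate must not be rewritten. After the first pack
	// file is deleted, a re-receive must write the bytes again.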

	size := func(n int) int64 {
		path := sto.(*storage).filename(n)
		fi, err := os.Stat(path)
		if err != nil {
			t.Fatal(err)
		}
		return fi.Size()
	}

	const blobSize = 5 << 10
	b := &test.Blob{Contents: strings.Repeat("a", blobSize)}
	br := b.BlobRef()

	_, err := blobserver.Receive(sto, br, b.Reader())
	if err != nil {
		t.Fatal(err)
	}
	if size(0) < blobSize {
		t.Fatalf("size = %d; want at least %d", size(0), blobSize)
	}
	sto.(*storage).nextPack()

	_, err = blobserver.Receive(sto, br, b.Reader())
	if err != nil {
		t.Fatal(err)
	}
	sizePostDup := size(1)
	if sizePostDup >= blobSize {
		t.Fatalf("size(pack1) = %d; appeared to double-write.", sizePostDup)
	}

	os.Remove(sto.(*storage).filename(0))
	_, err = blobserver.Receive(sto, br, b.Reader())
	if err != nil {
		t.Fatal(err)
	}
	sizePostDelete := size(1)
	if sizePostDelete < blobSize {
		t.Fatalf("after packfile delete + reupload, not big enough. want size of a blob")
	}
}
Example #8
func (sto *condStorage) ReceiveBlob(br blob.Ref, source io.Reader) (sb blob.SizedRef, err error) {
	destSto, overRead, err := sto.storageForReceive(source)
	if err != nil {
		return
	}
	if len(overRead) > 0 {
		source = io.MultiReader(bytes.NewReader(overRead), source)
	}
	return blobserver.Receive(destSto, br, source)
}
Example #9
func (sto *replicaStorage) ReceiveBlob(b blob.Ref, source io.Reader) (_ blob.SizedRef, err error) {
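	// Fan the incoming blob out to every replica: one pipe per replica, all fed by
	// a single MultiWriter copy, with each upload running in its own goroutine.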
	nReplicas := len(sto.replicas)
	rpipe, wpipe, writer := make([]*io.PipeReader, nReplicas), make([]*io.PipeWriter, nReplicas), make([]io.Writer, nReplicas)
	for idx := range sto.replicas {
		rpipe[idx], wpipe[idx] = io.Pipe()
		writer[idx] = wpipe[idx]
		// TODO: deal with slow/hung clients. this scheme of pipes +
		// multiwriter (even with a bufio.Writer thrown in) isn't
		// sufficient to guarantee forward progress. perhaps something
		// like &MoveOrDieWriter{Writer: wpipe[idx], HeartbeatSec: 10}
	}
	upResult := make(chan sizedBlobAndError, nReplicas)
	uploadToReplica := func(source io.Reader, dst blobserver.BlobReceiver) {
		sb, err := blobserver.Receive(dst, b, source)
		if err != nil {
			io.Copy(ioutil.Discard, source)
		}
		upResult <- sizedBlobAndError{sb, err}
	}
	for idx, replica := range sto.replicas {
		go uploadToReplica(rpipe[idx], replica)
	}
	size, err := io.Copy(io.MultiWriter(writer...), source)
	if err != nil {
		for i := range wpipe {
			wpipe[i].CloseWithError(err)
		}
		return
	}
	for idx := range sto.replicas {
		wpipe[idx].Close()
	}
	nSuccess, nFailures := 0, 0
	for range sto.replicas {
		res := <-upResult
		switch {
		case res.err == nil && res.sb.Size == size:
			nSuccess++
			if nSuccess == sto.minWritesForSuccess {
				return res.sb, nil
			}
		case res.err == nil:
			nFailures++
			err = fmt.Errorf("replica: upload shard reported size %d, expected %d", res.sb.Size, size)
		default:
			nFailures++
			err = res.err
		}
	}
	if nFailures > 0 {
		log.Printf("replica: receiving blob, %d successes, %d failures; last error = %v",
			nSuccess, nFailures, err)
	}
	return
}
Example #10
func (cf *CachingFetcher) faultIn(br blob.Ref) error {
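	// Concurrent faults for the same ref are coalesced, so the blob is fetched from
	// the source fetcher and written into the cache only once.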
	_, err := cf.g.Do(br.String(), func() (interface{}, error) {
		sblob, _, err := cf.sf.Fetch(br)
		if err != nil {
			return nil, err
		}
		defer sblob.Close()
		_, err = blobserver.Receive(cf.c, br, sblob)
		return nil, err
	})
	return err
}
Example #11
func (ph *PublishHandler) signUpload(jsonSign *signhandler.Handler, name string, bb *schema.Builder) (blob.Ref, error) {
	signed, err := jsonSign.Sign(bb)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("error signing %s: %v", name, err)
	}
	uh := client.NewUploadHandleFromString(signed)
	_, err = blobserver.Receive(ph.Storage, uh.BlobRef, uh.Contents)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("error uploading %s: %v", name, err)
	}
	return uh.BlobRef, nil
}
Example #12
func TestReceiveCorrupt(t *testing.T) {
	sto := new(test.Fetcher)
	data := []byte("some blob")
	br := blob.SHA1FromBytes(data)
	data[0] = 'X' // corrupt it
	_, err := blobserver.Receive(sto, br, bytes.NewReader(data))
	if err != blobserver.ErrCorruptBlob {
		t.Errorf("Receive = %v; want ErrCorruptBlob", err)
	}
	if len(sto.BlobrefStrings()) > 0 {
		t.Errorf("nothing should be stored. Got %q", sto.BlobrefStrings())
	}
}
Example #13
func TestDoubleReceiveFailingIndex(t *testing.T) {
	sto, cleanup := newTempDiskpacked(t)
	defer cleanup()

	sto.(*storage).index = &failingIndex{KeyValue: sto.(*storage).index}
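	// failingIndex injects an index error: the first Receive may fail with dummyErr
	// and must leave nothing packed; a retry must then store the blob.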

	size := func(n int) int64 {
		path := sto.(*storage).filename(n)
		fi, err := os.Stat(path)
		if err != nil {
			t.Fatal(err)
		}
		return fi.Size()
	}

	const blobSize = 5 << 10
	b := &test.Blob{Contents: strings.Repeat("a", blobSize)}
	br := b.BlobRef()

	_, err := blobserver.Receive(sto, br, b.Reader())
	if err != nil {
		if err != dummyErr {
			t.Fatal(err)
		}
		t.Logf("dummy fail")
	}
	if size(0) >= blobSize {
		t.Fatalf("size = %d; want zero (at most %d)", size(0), blobSize-1)
	}

	_, err = blobserver.Receive(sto, br, b.Reader())
	if err != nil {
		t.Fatal(err)
	}
	if size(0) < blobSize {
		t.Fatalf("size = %d; want at least %d", size(0), blobSize)
	}
}
Example #14
func mustReceive(t *testing.T, dst blobserver.Storage, tb *test.Blob) blob.SizedRef {
	tbRef := tb.BlobRef()
	sb, err := blobserver.Receive(dst, tbRef, tb.Reader())
	if err != nil {
		t.Fatalf("Receive: %v", err)
	}
	if int(sb.Size) != len(tb.Contents) {
		t.Fatalf("size = %d; want %d", sb.Size, len(tb.Contents))
	}
	if sb.Ref != tbRef {
		t.Fatal("wrong blob received")
	}
	return sb
}
Example #15
func (ix *Index) reindex(br blob.Ref) error {
	bs := ix.BlobSource
	if bs == nil {
		return fmt.Errorf("index: can't re-index %v: no BlobSource", br)
	}
	rc, _, err := bs.Fetch(br)
	if err != nil {
		return fmt.Errorf("index: failed to fetch %v for reindexing: %v", br, err)
	}
	defer rc.Close()
	if _, err := blobserver.Receive(ix, br, rc); err != nil {
		return err
	}
	return nil
}
Example #16
func (h *DeployHandler) storeInstanceConf(conf *InstanceConf) (blob.Ref, error) {
	contents, err := json.Marshal(conf)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("could not json encode instance config: %v", err)
	}
	hash := blob.NewHash()
	_, err = io.Copy(hash, bytes.NewReader(contents))
	if err != nil {
		return blob.Ref{}, fmt.Errorf("could not hash blob contents: %v", err)
	}
	br := blob.RefFromHash(hash)
	if _, err := blobserver.Receive(h.instConf, br, bytes.NewReader(contents)); err != nil {
		return blob.Ref{}, fmt.Errorf("could not store instance config blob: %v", err)
	}
	return br, nil
}
Example #17
func uploadTestBlobs(t *testing.T, s blobserver.Storage, blobs []blobDetails) {
	for _, b := range blobs {
		ref, ok := blob.Parse(b.digest)
		if !ok {
			t.Fatalf("Invalid blob ref: %s", b.digest)
		}
		data, err := hex.DecodeString(b.data)
		if err != nil {
			t.Fatalf("hex.DecodeString(): %v", err)
		}

		_, err = blobserver.Receive(s, ref, bytes.NewBuffer(data))
		if err != nil {
			t.Fatalf("blobserver.Receive(): %v", err)
		}
	}
}
Example #18
func (ix *Index) indexBlob(br blob.Ref) error {
	ix.mu.RLock()
	bs := ix.blobSource
	ix.mu.RUnlock()
	if bs == nil {
		panic(fmt.Sprintf("index: can't re-index %v: no blobSource", br))
	}
	rc, _, err := bs.Fetch(br)
	if err != nil {
		return fmt.Errorf("index: failed to fetch %v for reindexing: %v", br, err)
	}
	defer rc.Close()
	if _, err := blobserver.Receive(ix, br, rc); err != nil {
		return err
	}
	return nil
}
Example #19
func writeToCache(cache blobserver.Storage, thumbBytes []byte, name string) (br blob.Ref, err error) {
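	// Thumbnails under the max blob size are stored as a single raw blob; larger
	// ones are written through the schema file writer.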
	tr := bytes.NewReader(thumbBytes)
	if len(thumbBytes) < constants.MaxBlobSize {
		br = blob.SHA1FromBytes(thumbBytes)
		_, err = blobserver.Receive(cache, br, tr)
	} else {
		// TODO: don't use rolling checksums when writing this. Tell
		// the filewriter to use 16 MB chunks instead.
		br, err = schema.WriteFileFromReader(cache, name, tr)
	}
	if err != nil {
		return br, errors.New("failed to cache " + name + ": " + err.Error())
	}
	if imageDebug {
		log.Printf("Image Cache: saved as %v\n", br)
	}
	return br, nil
}
Example #20
func (c *Client) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) {
	if c.sto != nil {
		return blobserver.Receive(c.sto, br, source)
	}
	size, ok := readerutil.ReaderSize(source)
	if !ok {
		size = 0
	}
	h := &UploadHandle{
		BlobRef:  br,
		Size:     uint32(size), // 0 if we don't know
		Contents: source,
		SkipStat: true,
	}
	pr, err := c.Upload(h)
	if err != nil {
		return blob.SizedRef{}, err
	}
	return pr.SizedBlobRef(), nil
}
Example #21
func TestClose(t *testing.T) {
	fds := func() (n int) {
		openFdsVar.Do(func(kv expvar.KeyValue) {
			if i, ok := kv.Value.(*expvar.Int); ok {
				inc, _ := strconv.Atoi(i.String())
				n += inc
			}
		})
		return
	}

	fd0 := fds()
	sto, cleanup := newTempDiskpackedMemory(t)
	defer cleanup()
	fd1 := fds()

	s := sto.(*storage)

	const blobSize = 5 << 10
	b := &test.Blob{Contents: strings.Repeat("a", blobSize)}
	br := b.BlobRef()

	fd2 := fds()
	_, err := blobserver.Receive(sto, br, b.Reader())
	if err != nil {
		t.Fatal(err)
	}
	fd3 := fds()

	if err := s.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	fd4 := fds()
	got := fmt.Sprintf("%v %v %v %v %v", fd0, fd1, fd2, fd3, fd4)
	want := "0 2 2 2 0"
	if got != want {
		t.Errorf("fd count over time = %q; want %q", got, want)
	}

}
Example #22
func (s *storage) ReceiveBlob(plainBR blob.Ref, source io.Reader) (sb blob.SizedRef, err error) {
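	// Encrypt with a CTR stream under a fresh IV while hashing the plaintext to
	// verify plainBR, store the IV-prefixed ciphertext under its own SHA-1 ref,
	// then write a meta blob and an index entry mapping the plaintext ref to the
	// encrypted ref.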
	iv := s.randIV()
	stream := cipher.NewCTR(s.block, iv)

	hash := plainBR.Hash()
	var buf bytes.Buffer
	// TODO: compress before encrypting?
	buf.Write(iv) // TODO: write more structured header w/ version & IV length? or does that weaken it?
	sw := cipher.StreamWriter{S: stream, W: &buf}
	plainSize, err := io.Copy(io.MultiWriter(sw, hash), source)
	if err != nil {
		return sb, err
	}
	if !plainBR.HashMatches(hash) {
		return sb, blobserver.ErrCorruptBlob
	}

	encBR := blob.SHA1FromBytes(buf.Bytes())
	_, err = blobserver.Receive(s.blobs, encBR, bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Printf("encrypt: error writing encrypted blob %v (plaintext %v): %v", encBR, plainBR, err)
		return sb, errors.New("encrypt: error writing encrypted blob")
	}

	meta := encodeMetaValue(uint32(plainSize), iv, encBR, buf.Len())
	metaBlob := s.makeSingleMetaBlob(plainBR, meta)
	_, err = blobserver.ReceiveNoHash(s.meta, blob.SHA1FromBytes(metaBlob), bytes.NewReader(metaBlob))
	if err != nil {
		log.Printf("encrypt: error writing encrypted meta for plaintext %v (encrypted blob %v): %v", plainBR, encBR, err)
		return sb, errors.New("encrypt: error writing encrypted meta")
	}

	err = s.index.Set(plainBR.String(), meta)
	if err != nil {
		return sb, fmt.Errorf("encrypt: error updating index for encrypted %v (plaintext %v): %v", encBR, plainBR, err)
	}

	return blob.SizedRef{Ref: plainBR, Size: uint32(plainSize)}, nil
}
Example #23
// CreatePutUploadHandler returns the handler that receives a single
// blob at the blob's final URL, via the PUT method.  See
// doc/protocol/blob-upload-protocol.txt.
func CreatePutUploadHandler(storage blobserver.BlobReceiver) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		if req.Method != "PUT" {
			log.Printf("Inconfigured upload handler.")
			httputil.BadRequestError(rw, "Inconfigured handler.")
			return
		}
		// For non-chunked uploads, we catch it here. For chunked uploads, it's caught
		// by blobserver.Receive's LimitReader.
		if req.ContentLength > blobserver.MaxBlobSize {
			httputil.BadRequestError(rw, "blob too big")
			return
		}
		blobrefStr := path.Base(req.URL.Path)
		br, ok := blob.Parse(blobrefStr)
		if !ok {
			log.Printf("Invalid PUT request to %q", req.URL.Path)
			httputil.BadRequestError(rw, "Bad path")
			return
		}
		if !br.IsSupported() {
			httputil.BadRequestError(rw, "unsupported object hash function")
			return
		}
		_, err := blobserver.Receive(storage, br, req.Body)
		if err == blobserver.ErrCorruptBlob {
			httputil.BadRequestError(rw, "data doesn't match declared digest")
			return
		}
		if err != nil {
			httputil.ServeError(rw, req, err)
			return
		}
		rw.WriteHeader(http.StatusNoContent)
	})
}
Example #24
// src: non-nil source
// dest: non-nil destination
// thirdLeg: optional third-leg client. if not nil, anything on src
//     but not on dest will instead be copied to thirdLeg, instead of
//     directly to dest. (sneakernet mode, copying to a portable drive
//     and transporting thirdLeg to dest)
func (c *syncCmd) doPass(src, dest, thirdLeg blobserver.Storage) (stats SyncStats, retErr error) {
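	// Enumerate src and dest concurrently, diff the two streams to find blobs the
	// destination lacks, and copy each one (to thirdLeg instead of dest when in
	// sneakernet mode).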
	srcBlobs := make(chan blob.SizedRef, 100)
	destBlobs := make(chan blob.SizedRef, 100)
	srcErr := make(chan error, 1)
	destErr := make(chan error, 1)

	ctx := context.TODO()
	defer ctx.Cancel()
	go func() {
		srcErr <- enumerateAllBlobs(ctx, src, srcBlobs)
	}()
	checkSourceError := func() {
		if err := <-srcErr; err != nil {
			retErr = fmt.Errorf("Enumerate error from source: %v", err)
		}
	}

	if c.dest == "stdout" {
		for sb := range srcBlobs {
			fmt.Printf("%s %d\n", sb.Ref, sb.Size)
		}
		checkSourceError()
		return
	}

	go func() {
		destErr <- enumerateAllBlobs(ctx, dest, destBlobs)
	}()
	checkDestError := func() {
		if err := <-destErr; err != nil {
			retErr = fmt.Errorf("Enumerate error from destination: %v", err)
		}
	}

	destNotHaveBlobs := make(chan blob.SizedRef)
	sizeMismatch := make(chan blob.Ref)
	readSrcBlobs := srcBlobs
	if c.verbose {
		readSrcBlobs = loggingBlobRefChannel(srcBlobs)
	}
	mismatches := []blob.Ref{}
	go client.ListMissingDestinationBlobs(destNotHaveBlobs, sizeMismatch, readSrcBlobs, destBlobs)

	// Handle three-legged mode if tc is provided.
	checkThirdError := func() {} // default nop
	syncBlobs := destNotHaveBlobs
	firstHopDest := dest
	if thirdLeg != nil {
		thirdBlobs := make(chan blob.SizedRef, 100)
		thirdErr := make(chan error, 1)
		go func() {
			thirdErr <- enumerateAllBlobs(ctx, thirdLeg, thirdBlobs)
		}()
		checkThirdError = func() {
			if err := <-thirdErr; err != nil {
				retErr = fmt.Errorf("Enumerate error from third leg: %v", err)
			}
		}
		thirdNeedBlobs := make(chan blob.SizedRef)
		go client.ListMissingDestinationBlobs(thirdNeedBlobs, sizeMismatch, destNotHaveBlobs, thirdBlobs)
		syncBlobs = thirdNeedBlobs
		firstHopDest = thirdLeg
	}
For:
	for {
		select {
		case br := <-sizeMismatch:
			// TODO(bradfitz): check both sides and repair, carefully.  For now, fail.
			log.Printf("WARNING: blobref %v has differing sizes on source and dest", br)
			stats.ErrorCount++
			mismatches = append(mismatches, br)
		case sb, ok := <-syncBlobs:
			if !ok {
				break For
			}
			fmt.Printf("Destination needs blob: %s\n", sb)

			blobReader, size, err := src.FetchStreaming(sb.Ref)
			if err != nil {
				stats.ErrorCount++
				log.Printf("Error fetching %s: %v", sb.Ref, err)
				continue
			}
			if size != sb.Size {
				stats.ErrorCount++
				log.Printf("Source blobserver's enumerate size of %d for blob %s doesn't match its Get size of %d",
					sb.Size, sb.Ref, size)
				continue
			}

			if _, err := blobserver.Receive(firstHopDest, sb.Ref, blobReader); err != nil {
				stats.ErrorCount++
				log.Printf("Upload of %s to destination blobserver failed: %v", sb.Ref, err)
				continue
			}
			stats.BlobsCopied++
			stats.BytesCopied += size

			if c.removeSrc {
				if err = src.RemoveBlobs([]blob.Ref{sb.Ref}); err != nil {
					stats.ErrorCount++
					log.Printf("Failed to delete %s from source: %v", sb.Ref, err)
				}
			}
		}
	}

	checkSourceError()
	checkDestError()
	checkThirdError()
	if retErr == nil && stats.ErrorCount > 0 {
		retErr = fmt.Errorf("%d errors during sync", stats.ErrorCount)
	}
	return stats, retErr
}
Example #25
func (st *shareTester) putRaw(ref blob.Ref, data string) {
	if _, err := blobserver.Receive(st.sto, ref, strings.NewReader(data)); err != nil {
		st.t.Fatal(err)
	}
}
Example #26
func handleMultiPartUpload(conn http.ResponseWriter, req *http.Request, blobReceiver blobserver.BlobReceiveConfiger) {
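	// Each multipart part whose form name parses as a blob ref is received into
	// storage, with a counting LimitReader enforcing the MaxBlobSize cap; results
	// are reported back as JSON.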
	if !(req.Method == "POST" && strings.Contains(req.URL.Path, "/camli/upload")) {
		log.Printf("Inconfigured handler upload handler")
		httputil.BadRequestError(conn, "Inconfigured handler.")
		return
	}

	receivedBlobs := make([]blob.SizedRef, 0, 10)

	multipart, err := req.MultipartReader()
	if multipart == nil {
		httputil.BadRequestError(conn, fmt.Sprintf(
			"Expected multipart/form-data POST request; %v", err))
		return
	}

	var errText string
	addError := func(s string) {
		log.Printf("Client error: %s", s)
		if errText == "" {
			errText = s
			return
		}
		errText = errText + "\n" + s
	}

	for {
		mimePart, err := multipart.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			addError(fmt.Sprintf("Error reading multipart section: %v", err))
			break
		}

		contentDisposition, params, err := mime.ParseMediaType(mimePart.Header.Get("Content-Disposition"))
		if err != nil {
			addError("invalid Content-Disposition")
			break
		}

		if contentDisposition != "form-data" {
			addError(fmt.Sprintf("Expected Content-Disposition of \"form-data\"; got %q", contentDisposition))
			break
		}

		formName := params["name"]
		ref, ok := blob.Parse(formName)
		if !ok {
			addError(fmt.Sprintf("Ignoring form key %q", formName))
			continue
		}

		if oldAppEngineHappySpec {
			_, hasContentType := mimePart.Header["Content-Type"]
			if !hasContentType {
				addError(fmt.Sprintf("Expected Content-Type header for blobref %s; see spec", ref))
				continue
			}

			_, hasFileName := params["filename"]
			if !hasFileName {
				addError(fmt.Sprintf("Expected 'filename' Content-Disposition parameter for blobref %s; see spec", ref))
				continue
			}
		}

		var tooBig int64 = blobserver.MaxBlobSize + 1
		var readBytes int64
		blobGot, err := blobserver.Receive(blobReceiver, ref, &readerutil.CountingReader{
			io.LimitReader(mimePart, tooBig),
			&readBytes,
		})
		if readBytes == tooBig {
			err = fmt.Errorf("blob over the limit of %d bytes", blobserver.MaxBlobSize)
		}
		if err != nil {
			addError(fmt.Sprintf("Error receiving blob %v: %v\n", ref, err))
			break
		}
		log.Printf("Received blob %v\n", blobGot)
		receivedBlobs = append(receivedBlobs, blobGot)
	}

	ret, err := commonUploadResponse(blobReceiver, req)
	if err != nil {
		httputil.ServeError(conn, req, err)
		return
	}

	received := make([]map[string]interface{}, 0)
	for _, got := range receivedBlobs {
		blob := make(map[string]interface{})
		blob["blobRef"] = got.Ref.String()
		blob["size"] = got.Size
		received = append(received, blob)
	}
	ret["received"] = received

	if req.Header.Get("X-Camlistore-Vivify") == "1" {
		for _, got := range receivedBlobs {
			err := vivify(blobReceiver, got)
			if err != nil {
				addError(fmt.Sprintf("Error vivifying blob %v: %v\n", got.Ref.String(), err))
			} else {
				conn.Header().Add("X-Camlistore-Vivified", got.Ref.String())
			}
		}
	}

	if errText != "" {
		ret["errorText"] = errText
	}

	httputil.ReturnJSON(conn, ret)
}
Example #27
// src: non-nil source
// dest: non-nil destination
// thirdLeg: optional third-leg client. if not nil, anything on src
//     but not on dest will instead be copied to thirdLeg, instead of
//     directly to dest. (sneakernet mode, copying to a portable drive
//     and transporting thirdLeg to dest)
func (c *syncCmd) doPass(src, dest, thirdLeg blobserver.Storage) (stats SyncStats, retErr error) {
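	// Enumerate src, dest, and optionally thirdLeg concurrently, diff the streams,
	// then copy each missing blob in its own goroutine bounded by a concurrency
	// gate; stats updates are guarded by statsMu.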
	var statsMu sync.Mutex // guards stats return value

	srcBlobs := make(chan blob.SizedRef, 100)
	destBlobs := make(chan blob.SizedRef, 100)
	srcErr := make(chan error, 1)
	destErr := make(chan error, 1)

	ctx := context.TODO()
	enumCtx, cancel := context.WithCancel(ctx) // used for all (2 or 3) enumerates
	defer cancel()
	enumerate := func(errc chan<- error, sto blobserver.Storage, blobc chan<- blob.SizedRef) {
		err := enumerateAllBlobs(enumCtx, sto, blobc)
		if err != nil {
			cancel()
		}
		errc <- err
	}

	go enumerate(srcErr, src, srcBlobs)
	checkSourceError := func() {
		if err := <-srcErr; err != nil && err != context.Canceled {
			retErr = fmt.Errorf("Enumerate error from source: %v", err)
		}
	}

	if c.dest == "stdout" {
		for sb := range srcBlobs {
			fmt.Fprintf(cmdmain.Stdout, "%s %d\n", sb.Ref, sb.Size)
		}
		checkSourceError()
		return
	}

	if c.wipe {
		// TODO(mpl): dest is a client. make it send a "wipe" request?
		// upon reception its server then wipes itself if it is a wiper.
		log.Print("Index wiping not yet supported.")
	}

	go enumerate(destErr, dest, destBlobs)
	checkDestError := func() {
		if err := <-destErr; err != nil && err != context.Canceled {
			retErr = fmt.Errorf("Enumerate error from destination: %v", err)
		}
	}

	destNotHaveBlobs := make(chan blob.SizedRef)

	readSrcBlobs := srcBlobs
	if c.verbose {
		readSrcBlobs = loggingBlobRefChannel(srcBlobs)
	}

	mismatches := []blob.Ref{}

	logErrorf := func(format string, args ...interface{}) {
		log.Printf(format, args...)
		statsMu.Lock()
		stats.ErrorCount++
		statsMu.Unlock()
	}

	onMismatch := func(br blob.Ref) {
		// TODO(bradfitz): check both sides and repair, carefully.  For now, fail.
		logErrorf("WARNING: blobref %v has differing sizes on source and dest", br)
		mismatches = append(mismatches, br)
	}

	go blobserver.ListMissingDestinationBlobs(destNotHaveBlobs, onMismatch, readSrcBlobs, destBlobs)

	// Handle three-legged mode if tc is provided.
	checkThirdError := func() {} // default nop
	syncBlobs := destNotHaveBlobs
	firstHopDest := dest
	if thirdLeg != nil {
		thirdBlobs := make(chan blob.SizedRef, 100)
		thirdErr := make(chan error, 1)
		go enumerate(thirdErr, thirdLeg, thirdBlobs)
		checkThirdError = func() {
			if err := <-thirdErr; err != nil && err != context.Canceled {
				retErr = fmt.Errorf("Enumerate error from third leg: %v", err)
			}
		}
		thirdNeedBlobs := make(chan blob.SizedRef)
		go blobserver.ListMissingDestinationBlobs(thirdNeedBlobs, onMismatch, destNotHaveBlobs, thirdBlobs)
		syncBlobs = thirdNeedBlobs
		firstHopDest = thirdLeg
	}

	var gate = syncutil.NewGate(c.concurrency)
	var wg sync.WaitGroup

	for sb := range syncBlobs {
		sb := sb
		gate.Start()
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer gate.Done()
			fmt.Fprintf(cmdmain.Stdout, "Destination needs blob: %s\n", sb)
			blobReader, size, err := src.Fetch(sb.Ref)

			if err != nil {
				logErrorf("Error fetching %s: %v", sb.Ref, err)
				return
			}
			if size != sb.Size {
				logErrorf("Source blobserver's enumerate size of %d for blob %s doesn't match its Get size of %d",
					sb.Size, sb.Ref, size)
				return
			}

			_, err = blobserver.Receive(firstHopDest, sb.Ref, blobReader)
			if err != nil {
				logErrorf("Upload of %s to destination blobserver failed: %v", sb.Ref, err)
				return
			}
			statsMu.Lock()
			stats.BlobsCopied++
			stats.BytesCopied += int64(size)
			statsMu.Unlock()

			if c.removeSrc {
				if err := src.RemoveBlobs([]blob.Ref{sb.Ref}); err != nil {
					logErrorf("Failed to delete %s from source: %v", sb.Ref, err)
				}
			}
		}()
	}
	wg.Wait()

	checkSourceError()
	checkDestError()
	checkThirdError()
	if retErr == nil && stats.ErrorCount > 0 {
		retErr = fmt.Errorf("%d errors during sync", stats.ErrorCount)
	}
	return stats, retErr
}
Example #28
func handleMultiPartUpload(rw http.ResponseWriter, req *http.Request, blobReceiver blobserver.BlobReceiveConfiger) {
	res := new(protocol.UploadResponse)

	if !(req.Method == "POST" && strings.Contains(req.URL.Path, "/camli/upload")) {
		log.Printf("Inconfigured handler upload handler")
		httputil.BadRequestError(rw, "Inconfigured handler.")
		return
	}

	receivedBlobs := make([]blob.SizedRef, 0, 10)

	multipart, err := req.MultipartReader()
	if multipart == nil {
		httputil.BadRequestError(rw, fmt.Sprintf(
			"Expected multipart/form-data POST request; %v", err))
		return
	}

	var errBuf bytes.Buffer
	addError := func(s string) {
		log.Printf("Client error: %s", s)
		if errBuf.Len() > 0 {
			errBuf.WriteByte('\n')
		}
		errBuf.WriteString(s)
	}

	for {
		mimePart, err := multipart.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			addError(fmt.Sprintf("Error reading multipart section: %v", err))
			break
		}

		contentDisposition, params, err := mime.ParseMediaType(mimePart.Header.Get("Content-Disposition"))
		if err != nil {
			addError("invalid Content-Disposition")
			break
		}

		if contentDisposition != "form-data" {
			addError(fmt.Sprintf("Expected Content-Disposition of \"form-data\"; got %q", contentDisposition))
			break
		}

		formName := params["name"]
		ref, ok := blob.Parse(formName)
		if !ok {
			addError(fmt.Sprintf("Ignoring form key %q", formName))
			continue
		}

		var tooBig int64 = blobserver.MaxBlobSize + 1
		var readBytes int64
		blobGot, err := blobserver.Receive(blobReceiver, ref, &readerutil.CountingReader{
			io.LimitReader(mimePart, tooBig),
			&readBytes,
		})
		if readBytes == tooBig {
			err = fmt.Errorf("blob over the limit of %d bytes", blobserver.MaxBlobSize)
		}
		if err != nil {
			addError(fmt.Sprintf("Error receiving blob %v: %v\n", ref, err))
			break
		}
		log.Printf("Received blob %v\n", blobGot)
		receivedBlobs = append(receivedBlobs, blobGot)
	}

	res.Received = receivedBlobs

	if req.Header.Get("X-Camlistore-Vivify") == "1" {
		for _, got := range receivedBlobs {
			err := vivify(blobReceiver, got)
			if err != nil {
				addError(fmt.Sprintf("Error vivifying blob %v: %v\n", got.Ref.String(), err))
			} else {
				rw.Header().Add("X-Camlistore-Vivified", got.Ref.String())
			}
		}
	}

	res.ErrorText = errBuf.String()

	httputil.ReturnJSON(rw, res)
}
Example #29
// Upload uploads a blob, as described by the provided UploadHandle parameters.
func (c *Client) Upload(h *UploadHandle) (*PutResult, error) {
	errorf := func(msg string, arg ...interface{}) (*PutResult, error) {
		err := fmt.Errorf(msg, arg...)
		c.log.Print(err.Error())
		return nil, err
	}

	bodyReader, bodySize, err := h.readerAndSize()
	if err != nil {
		return nil, fmt.Errorf("client: error slurping upload handle to find its length: %v", err)
	}
	if bodySize > constants.MaxBlobSize {
		return nil, errors.New("client: body is bigger than max blob size")
	}

	c.statsMutex.Lock()
	c.stats.UploadRequests.Blobs++
	c.stats.UploadRequests.Bytes += bodySize
	c.statsMutex.Unlock()

	pr := &PutResult{BlobRef: h.BlobRef, Size: uint32(bodySize)}

	if c.sto != nil {
		// TODO: stat first so we can show skipped?
		_, err := blobserver.Receive(c.sto, h.BlobRef, bodyReader)
		if err != nil {
			return nil, err
		}
		return pr, nil
	}

	if !h.Vivify {
		if _, ok := c.haveCache.StatBlobCache(h.BlobRef); ok {
			pr.Skipped = true
			return pr, nil
		}
	}

	blobrefStr := h.BlobRef.String()

	// Pre-upload. Check whether the blob already exists on the
	// server and if not, the URL to upload it to.
	pfx, err := c.prefix()
	if err != nil {
		return nil, err
	}

	if !h.SkipStat {
		url_ := fmt.Sprintf("%s/camli/stat", pfx)
		req := c.newRequest("POST", url_, strings.NewReader("camliversion=1&blob1="+blobrefStr))
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")

		resp, err := c.doReqGated(req)
		if err != nil {
			return errorf("stat http error: %v", err)
		}
		defer resp.Body.Close()

		if resp.StatusCode != 200 {
			return errorf("stat response had http status %d", resp.StatusCode)
		}

		stat, err := parseStatResponse(resp)
		if err != nil {
			return nil, err
		}
		for _, sbr := range stat.HaveMap {
			c.haveCache.NoteBlobExists(sbr.Ref, uint32(sbr.Size))
		}
		_, serverHasIt := stat.HaveMap[blobrefStr]
		if env.DebugUploads() {
			log.Printf("HTTP Stat(%s) = %v", blobrefStr, serverHasIt)
		}
		if !h.Vivify && serverHasIt {
			pr.Skipped = true
			if closer, ok := h.Contents.(io.Closer); ok {
				// TODO(bradfitz): I did this
				// Close-if-possible thing early on, before I
				// knew better.  Fix the callers instead, and
				// fix the docs.
				closer.Close()
			}
			c.haveCache.NoteBlobExists(h.BlobRef, uint32(bodySize))
			return pr, nil
		}
	}

	if env.DebugUploads() {
		log.Printf("Uploading: %s (%d bytes)", blobrefStr, bodySize)
	}

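	// Build the multipart/form-data body through an in-memory pipe while uploading;
	// the writer goroutine reports its result on copyResult.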
	pipeReader, pipeWriter := io.Pipe()
	multipartWriter := multipart.NewWriter(pipeWriter)

	copyResult := make(chan error, 1)
	go func() {
		defer pipeWriter.Close()
		part, err := multipartWriter.CreateFormFile(blobrefStr, blobrefStr)
		if err != nil {
			copyResult <- err
			return
		}
		_, err = io.Copy(part, bodyReader)
		if err == nil {
			err = multipartWriter.Close()
		}
		copyResult <- err
	}()

	// TODO(bradfitz): verbosity levels. make this VLOG(2) or something. it's noisy:
	// c.log.Printf("Uploading %s", br)

	uploadURL := fmt.Sprintf("%s/camli/upload", pfx)
	req := c.newRequest("POST", uploadURL)
	req.Header.Set("Content-Type", multipartWriter.FormDataContentType())
	if h.Vivify {
		req.Header.Add("X-Camlistore-Vivify", "1")
	}
	req.Body = ioutil.NopCloser(pipeReader)
	req.ContentLength = multipartOverhead + bodySize + int64(len(blobrefStr))*2
	resp, err := c.doReqGated(req)
	if err != nil {
		return errorf("upload http error: %v", err)
	}
	defer resp.Body.Close()

	// check error from earlier copy
	if err := <-copyResult; err != nil {
		return errorf("failed to copy contents into multipart writer: %v", err)
	}

	// The only valid HTTP responses are 200 and 303.
	if resp.StatusCode != 200 && resp.StatusCode != 303 {
		return errorf("invalid http response %d in upload response", resp.StatusCode)
	}

	if resp.StatusCode == 303 {
		otherLocation := resp.Header.Get("Location")
		if otherLocation == "" {
			return errorf("303 without a Location")
		}
		baseURL, _ := url.Parse(uploadURL)
		absURL, err := baseURL.Parse(otherLocation)
		if err != nil {
			return errorf("303 Location URL relative resolve error: %v", err)
		}
		otherLocation = absURL.String()
		resp, err = http.Get(otherLocation)
		if err != nil {
			return errorf("error following 303 redirect after upload: %v", err)
		}
	}

	var ures protocol.UploadResponse
	if err := httputil.DecodeJSON(resp, &ures); err != nil {
		return errorf("error in upload response: %v", err)
	}

	if ures.ErrorText != "" {
		c.log.Printf("Blob server reports error: %s", ures.ErrorText)
	}

	expectedSize := uint32(bodySize)

	for _, sb := range ures.Received {
		if sb.Ref != h.BlobRef {
			continue
		}
		if sb.Size != expectedSize {
			return errorf("Server got blob %v, but reports wrong length (%v; we sent %d)",
				sb.Ref, sb.Size, expectedSize)
		}
		c.statsMutex.Lock()
		c.stats.Uploads.Blobs++
		c.stats.Uploads.Bytes += bodySize
		c.statsMutex.Unlock()
		if pr.Size <= 0 {
			pr.Size = sb.Size
		}
		c.haveCache.NoteBlobExists(pr.BlobRef, pr.Size)
		return pr, nil
	}

	return nil, errors.New("Server didn't receive blob.")
}
Example #30
// populate fills the blobserver and, through the sync handler, the index at the same time.
func populate(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("populating %v", dbfile)
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bsRoot := filepath.Join(filepath.Dir(dbfile), "bs")
	if err := os.MkdirAll(bsRoot, 0700); err != nil {
		b.Fatal(err)
	}
	dataDir, err := os.Open("testdata")
	if err != nil {
		b.Fatal(err)
	}
	fis, err := dataDir.Readdir(-1)
	if err != nil {
		b.Fatal(err)
	}
	if len(fis) == 0 {
		b.Fatalf("no files in %s dir", "testdata")
	}

	ks := doKeyStuff(b)

	bs, err := localdisk.New(bsRoot)
	if err != nil {
		b.Fatal(err)
	}
	if _, err := blobserver.Receive(bs, ks.pubKeyRef, strings.NewReader(ks.pubKey)); err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	sh := server.NewSyncHandler("/bs/", "/index/", bs, idx, sorted.NewMemoryKeyValue())

	b.ResetTimer()
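	// For each file in testdata: write its file schema to bs, then sign and receive
	// a planned permanode and a camliContent claim so the sync handler carries them
	// into the index.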
	for _, v := range fis {
		f, err := os.Open(filepath.Join(dataDir.Name(), v.Name()))
		if err != nil {
			b.Fatal(err)
		}
		td := &trackDigestReader{r: f}
		fm := schema.NewFileMap(v.Name())
		fm.SetModTime(v.ModTime())
		fileRef, err := schema.WriteFileMap(bs, fm, td)
		if err != nil {
			b.Fatal(err)
		}
		f.Close()

		unsigned := schema.NewPlannedPermanode(td.Sum())
		unsigned.SetSigner(ks.pubKeyRef)
		sr := &jsonsign.SignRequest{
			UnsignedJSON: unsigned.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: time.Unix(0, 0),
		}
		signed, err := sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		pn := blob.SHA1FromString(signed)
		// N.B: use blobserver.Receive so that the blob hub gets notified, and the blob gets enqueued into the index
		if _, err := blobserver.Receive(bs, pn, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}

		contentAttr := schema.NewSetAttributeClaim(pn, "camliContent", fileRef.String())
		claimTime, ok := fm.ModTime()
		if !ok {
			b.Fatalf("no modtime in file map for %v", v.Name())
		}
		contentAttr.SetClaimDate(claimTime)
		contentAttr.SetSigner(ks.pubKeyRef)
		sr = &jsonsign.SignRequest{
			UnsignedJSON: contentAttr.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: claimTime,
		}
		signed, err = sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		cl := blob.SHA1FromString(signed)
		if _, err := blobserver.Receive(bs, cl, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}
	}
	sh.IdleWait()

	return idx
}