Example #1
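The indexer's populateFile: ix.BlobSource is wrapped in a seek-capable fetcher (note the two-value form of SeekerFromStreamingFetcher in this revision), the file is streamed through SHA-1 while its MIME type is sniffed, and whole-file-ref and file-info rows are written into the batch mutation.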
// blobref: of the file or schema blob
//      ss: the parsed file schema blob
//      bm: keys to populate
func (ix *Index) populateFile(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	seekFetcher, err := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	if err != nil {
		return err
	}

	sha1 := sha1.New()
	fr, err := ss.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	mime, reader := magic.MimeTypeFromReader(fr)
	size, err := io.Copy(sha1, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times.  Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing).  Both options suck.  For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}

	wholeRef := blobref.FromHash("sha1", sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, ss.FileName, mime))
	return nil
}
Example #2
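What looks like a later revision of populateFile: SeekerFromStreamingFetcher now returns a single value, and for image/* content the copy is teed through an io.Pipe into image.DecodeConfig so the image dimensions can be indexed alongside the hash.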
// blobref: of the file or schema blob
//      ss: the parsed file schema blob
//      bm: keys to populate
func (ix *Index) populateFile(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	seekFetcher := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	fr, err := ss.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	defer fr.Close()
	mime, reader := magic.MimeTypeFromReader(fr)

	sha1 := sha1.New()
	var copyDest io.Writer = sha1
	var withCopyErr func(error) // or nil
	if strings.HasPrefix(mime, "image/") {
		pr, pw := io.Pipe()
		copyDest = io.MultiWriter(copyDest, pw)
		confc := make(chan *image.Config, 1)
		go func() {
			conf, _, err := image.DecodeConfig(pr)
			defer io.Copy(ioutil.Discard, pr)
			if err == nil {
				confc <- &conf
			} else {
				confc <- nil
			}
		}()
		withCopyErr = func(err error) {
			pw.CloseWithError(err)
			if conf := <-confc; conf != nil {
				bm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
			}
		}
	}

	size, err := io.Copy(copyDest, reader)
	if f := withCopyErr; f != nil {
		f(err)
	}
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times.  Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing).  Both options suck.  For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}

	wholeRef := blobref.FromHash("sha1", sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, ss.FileName, mime))
	return nil
}
Example #3
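ImageHandler.cached opens a FileReader against the handler's cache; a successful NewFileReader counts as a cache hit.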
func (ih *ImageHandler) cached(br *blobref.BlobRef) (fr *schema.FileReader, err error) {
	fetchSeeker := blobref.SeekerFromStreamingFetcher(ih.Cache)
	fr, err = schema.NewFileReader(fetchSeeker, br)
	if err != nil {
		return nil, err
	}
	if imageDebug {
		log.Printf("Image Cache: hit: %v\n", br)
	}
	return fr, nil
}
Example #4
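populateDir is the directory analogue: it opens a DirReader over the seek fetcher, resolves the static set, and indexes the entry count as the directory's file info.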
// blobref: of the file or schema blob
//      ss: the parsed file schema blob
//      bm: keys to populate
func (ix *Index) populateDir(blobRef *blobref.BlobRef, ss *schema.Superset, bm BatchMutation) error {
	seekFetcher := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	dr, err := ss.NewDirReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		log.Printf("index: error indexing directory, creating NewDirReader %s: %v", blobRef, err)
		return nil
	}
	sts, err := dr.StaticSet()
	if err != nil {
		log.Printf("index: error indexing directory: can't get StaticSet: %v\n", err)
		return nil
	}

	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(len(sts), ss.FileName, ""))
	return nil
}
Example #5
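ServeBlobRef fetches a blob through a seek fetcher and serves it with http.ServeContent; small (at most 32 KB) all-UTF-8 blobs are rendered as text/plain for debuggability.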
// ServeBlobRef serves a blob.
func ServeBlobRef(rw http.ResponseWriter, req *http.Request, blobRef *blobref.BlobRef, fetcher blobref.StreamingFetcher) {
	if w, ok := fetcher.(blobserver.ContextWrapper); ok {
		fetcher = w.WrapContext(req)
	}
	seekFetcher := blobref.SeekerFromStreamingFetcher(fetcher)

	file, size, err := seekFetcher.Fetch(blobRef)
	switch err {
	case nil:
		break
	case os.ErrNotExist:
		rw.WriteHeader(http.StatusNotFound)
		fmt.Fprintf(rw, "Blob %q not found", blobRef)
		return
	default:
		httputil.ServeError(rw, req, err)
		return
	}
	defer file.Close()
	var content io.ReadSeeker = file

	rw.Header().Set("Content-Type", "application/octet-stream")
	if req.Header.Get("Range") == "" {
		// If it's small and all UTF-8, assume it's text and
		// just render it in the browser.  This is more for
		// demos/debuggability than anything else.  It isn't
		// part of the spec.
		if size <= 32<<10 {
			var buf bytes.Buffer
			_, err := io.Copy(&buf, file)
			if err != nil {
				httputil.ServeError(rw, req, err)
				return
			}
			if utf8.Valid(buf.Bytes()) {
				rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
			}
			content = bytes.NewReader(buf.Bytes())
		}
	}

	http.ServeContent(rw, req, "", dummyModTime, content)
}
Example #6
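camget's smartFetch sniffs what it fetched: opaque data goes straight to a file, directories recurse, static-set members are fetched through a pool of ten workers, and file schemas are materialized with schema.NewFileReader over a seek fetcher.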
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(src blobref.StreamingFetcher, targ string, br *blobref.BlobRef) error {
	rc, err := fetch(src, br)
	if err != nil {
		return err
	}
	defer rc.Close()

	sniffer := index.NewBlobSniffer(br)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	blob, ok := sniffer.SchemaBlob()

	if !ok {
		if *flagVerbose {
			log.Printf("Fetching opaque data %v into %q", br, targ)
		}

		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewReader(body), rc)
		_, err = io.Copy(f, r)
		return err
	}

	switch blob.Type() {
	case "directory":
		dir := filepath.Join(targ, blob.FileName())
		if *flagVerbose {
			log.Printf("Fetching directory %v into %s", br, dir)
		}
		if err := os.MkdirAll(dir, blob.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, blob); err != nil {
			log.Print(err)
		}
		entries := blob.DirectoryEntries()
		if entries == nil {
			return fmt.Errorf("bad entries blobref in dir %v", blob.BlobRef())
		}
		return smartFetch(src, dir, entries)
	case "static-set":
		if *flagVerbose {
			log.Printf("Fetching directory entries %v into %s", br, targ)
		}

		// directory entries
		const numWorkers = 10
		type work struct {
			br   *blobref.BlobRef
			errc chan<- error
		}
		members := blob.StaticSetMembers()
		workc := make(chan work, len(members))
		defer close(workc)
		for i := 0; i < numWorkers; i++ {
			go func() {
				for wi := range workc {
					wi.errc <- smartFetch(src, targ, wi.br)
				}
			}()
		}
		var errcs []<-chan error
		for _, mref := range members {
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			workc <- work{mref, errc}
		}
		for _, errc := range errcs {
			if err := <-errc; err != nil {
				return err
			}
		}
		return nil
	case "file":
		seekFetcher := blobref.SeekerFromStreamingFetcher(src)
		fr, err := schema.NewFileReader(seekFetcher, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		fr.LoadAllChunks()
		defer fr.Close()

		name := filepath.Join(targ, blob.FileName())

		if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() {
			if *flagVerbose {
				log.Printf("Skipping %s; already exists.", name)
			}
			return nil
		}

		if *flagVerbose {
			log.Printf("Writing %s to %s ...", br, name)
		}

		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}
		if err := setFileMeta(name, blob); err != nil {
			log.Print(err)
		}
		return nil
	default:
		return errors.New("unknown blob type: " + blob.Type())
	}
	panic("unreachable")
}
Example #7
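The image handler's one-line adapter from its streaming Fetcher to a SeekFetcher: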
func (ih *ImageHandler) storageSeekFetcher() blobref.SeekFetcher {
	return blobref.SeekerFromStreamingFetcher(ih.Fetcher) // TODO: pass ih.Cache?
}
Example #8
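The same adapter in the download handler: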
func (dh *DownloadHandler) storageSeekFetcher() blobref.SeekFetcher {
	return blobref.SeekerFromStreamingFetcher(dh.Fetcher) // TODO: pass dh.Cache?
}
Example #9
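And in the file-tree handler: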
func (fth *FileTreeHandler) storageSeekFetcher() blobref.SeekFetcher {
	return blobref.SeekerFromStreamingFetcher(fth.Fetcher) // TODO: pass fth.Cache?
}
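Examples #7 through #9 (and #14 below) repeat the same one-line adapter across handlers. For reference, here is a minimal sketch of the calling pattern they all feed into, using only APIs that appear in the snippets on this page; copyFileBlob and its parameters are hypothetical names, and the import paths are assumed from the Camlistore tree of this era:

import (
	"io"

	"camlistore.org/pkg/blobref" // assumed import path
	"camlistore.org/pkg/schema"  // assumed import path
)

// copyFileBlob is a hypothetical helper showing the recurring pattern:
// adapt a streaming fetcher so it can seek, open the file schema blob
// with a FileReader, and stream the file's bytes to dst.
func copyFileBlob(src blobref.StreamingFetcher, br *blobref.BlobRef, dst io.Writer) (int64, error) {
	// schema.NewFileReader needs random access, so wrap the
	// streaming fetcher in a SeekFetcher first.
	seekFetcher := blobref.SeekerFromStreamingFetcher(src)
	fr, err := schema.NewFileReader(seekFetcher, br)
	if err != nil {
		return 0, err
	}
	defer fr.Close()
	return io.Copy(dst, fr)
}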
Example #10
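Apparently the newest populateFile shown here: instead of a pipe, the first 256 KB of image data are kept in a buffer (keepFirstN) for dimension decoding and EXIF time extraction, and the observed creation/modification times are indexed as an RFC 3339 value or range.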
// blob: the parsed file schema blob
//   bm: keys to populate
func (ix *Index) populateFile(blob *schema.Blob, bm BatchMutation) error {
	var times []time.Time // all creation or mod times seen; may be zero
	times = append(times, blob.ModTime())

	blobRef := blob.BlobRef()
	seekFetcher := blobref.SeekerFromStreamingFetcher(ix.BlobSource)
	fr, err := blob.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	defer fr.Close()
	mime, reader := magic.MIMETypeFromReader(fr)

	sha1 := sha1.New()
	var copyDest io.Writer = sha1
	var imageBuf *keepFirstN // or nil
	if strings.HasPrefix(mime, "image/") {
		imageBuf = &keepFirstN{N: 256 << 10}
		copyDest = io.MultiWriter(copyDest, imageBuf)
	}
	size, err := io.Copy(copyDest, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times.  Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing).  Both options suck.  For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}

	if imageBuf != nil {
		if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
			bm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
			times = append(times, ft)
		} else {
			log.Printf("filename %q exif = %v, %v", blob.FileName(), ft, err)
		}
	}

	var sortTimes []time.Time
	for _, t := range times {
		if !t.IsZero() {
			sortTimes = append(sortTimes, t)
		}
	}
	sort.Sort(types.ByTime(sortTimes))
	var time3339s string
	switch {
	case len(sortTimes) == 1:
		time3339s = types.Time3339(sortTimes[0]).String()
	case len(sortTimes) >= 2:
		oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
		time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
	}

	wholeRef := blobref.FromHash(sha1)
	bm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	bm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, blob.FileName(), mime))
	bm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))
	return nil
}
Example #11
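camget's main: it parses any -via blobrefs, then for each argument either prints a graph, streams file contents (through a FileReader over a seek fetcher) or raw blob bytes to stdout, or falls through to smartFetch.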
func main() {
	client.AddFlags()
	flag.Parse()

	if len(*flagVia) > 0 {
		vs := strings.Split(*flagVia, ",")
		viaRefs = make([]*blobref.BlobRef, len(vs))
		for i, sbr := range vs {
			viaRefs[i] = blobref.Parse(sbr)
			if viaRefs[i] == nil {
				log.Fatalf("Invalid -via blobref: %q", sbr)
			}
			if *flagVerbose {
				log.Printf("via: %s", sbr)
			}
		}
	}

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	cl := client.NewOrFail()

	for n := 0; n < flag.NArg(); n++ {
		arg := flag.Arg(n)
		br := blobref.Parse(arg)
		if br == nil {
			log.Fatalf("Failed to parse argument %q as a blobref.", arg)
		}
		if *flagGraph {
			printGraph(cl, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				seekFetcher := blobref.SeekerFromStreamingFetcher(cl)
				rc, err = schema.NewFileReader(seekFetcher, br)
			} else {
				rc, err = fetch(cl, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
			return
		}
		if err := smartFetch(cl, *flagOutput, br); err != nil {
			log.Fatal(err)
		}
	}
}
Example #12
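An earlier smartFetch, built directly on *client.Client and the Superset API; here static-set members are fetched sequentially.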
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(cl *client.Client, targ string, br *blobref.BlobRef) error {
	if *flagVerbose {
		log.Printf("Fetching %v into %q", br, targ)
	}

	rc, err := fetch(cl, br)
	if err != nil {
		return err
	}
	defer rc.Close()

	sniffer := new(index.BlobSniffer)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	sc, ok := sniffer.Superset()

	if !ok {
		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewBuffer(body), rc)
		_, err = io.Copy(f, r)
		return err
	}

	sc.BlobRef = br

	switch sc.Type {
	case "directory":
		dir := filepath.Join(targ, sc.FileName)
		if err := os.MkdirAll(dir, sc.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, sc); err != nil {
			log.Print(err)
		}
		entries := blobref.Parse(sc.Entries)
		if entries == nil {
			return fmt.Errorf("bad entries blobref: %v", sc.Entries)
		}
		return smartFetch(cl, dir, entries)
	case "static-set":
		// directory entries
		for _, m := range sc.Members {
			dref := blobref.Parse(m)
			if dref == nil {
				return fmt.Errorf("bad member blobref: %v", m)
			}
			if err := smartFetch(cl, targ, dref); err != nil {
				return err
			}
		}
		return nil
	case "file":
		name := filepath.Join(targ, sc.FileName)
		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		seekFetcher := blobref.SeekerFromStreamingFetcher(cl)
		fr, err := schema.NewFileReader(seekFetcher, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		defer fr.Close()
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}

		if err := setFileMeta(name, sc); err != nil {
			log.Print(err)
		}
		return nil
	default:
		return errors.New("unknown blob type: " + sc.Type)
	}
	panic("unreachable")
}
Example #13
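vivify reads the whole file through a FileReader, both to verify that every chunk is on the blobserver and to hash its contents, then signs and uploads a planned permanode and a camliContent claim pointing at the file.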
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
	sf, ok := blobReceiver.(blobref.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blobref.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	// TODO(mpl): second check should not be necessary, and yet it happens. Figure it out.
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: json decoding error: %v", err)
	}

	unsigned := schema.NewHashPlannedPermanode(h)
	unsigned["camliSigner"] = publicKeyBlobRef
	signed, err := sigHelper.SignMap(unsigned)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", signed, err)
	}
	signedPerm := blobref.SHA1FromString(signed)
	_, err = blobReceiver.ReceiveBlob(signedPerm, strings.NewReader(signed))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v: %v", signed, err)
	}

	contentAttr := schema.NewSetAttributeClaim(signedPerm, "camliContent", fileblob.BlobRef.String())
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}
	contentAttr.SetClaimDate(claimDate)
	contentAttr["camliSigner"] = publicKeyBlobRef
	signed, err = sigHelper.SignMap(contentAttr)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	signedClaim := blobref.SHA1FromString(signed)
	_, err = blobReceiver.ReceiveBlob(signedClaim, strings.NewReader(signed))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v: %v", signed, err)
	}
	return nil
}
Example #14
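The zip handler's copy of the same adapter: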
func (zh *zipHandler) storageSeekFetcher() blobref.SeekFetcher {
	return blobref.SeekerFromStreamingFetcher(zh.fetcher)
}
Example #15
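A later revision of vivify: the schema builder API (SetSigner, SetClaimDate, Sign) replaces the raw map manipulation, and the file's modtime is parsed up front because the permanode signature, the claim date, and the claim signature all depend on it.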
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
	sf, ok := blobReceiver.(blobref.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blobref.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return fmt.Errorf("Discovery: json decoding error: %v", err)
	}

	// The file schema must have a modtime to vivify, as the modtime is used for all three of:
	// 1) the permanode's signature
	// 2) the camliContent attribute claim's "claimDate"
	// 3) the signature time of 2)
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}

	permanodeBB := schema.NewHashPlannedPermanode(h)
	permanodeBB.SetSigner(blobref.MustParse(publicKeyBlobRef))
	permanodeBB.SetClaimDate(claimDate)
	permanodeSigned, err := sigHelper.Sign(permanodeBB)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
	}
	permanodeRef := blobref.SHA1FromString(permanodeSigned)
	_, err = blobReceiver.ReceiveBlob(permanodeRef, strings.NewReader(permanodeSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
	}

	contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.BlobRef.String())
	contentClaimBB.SetSigner(blobref.MustParse(publicKeyBlobRef))
	contentClaimBB.SetClaimDate(claimDate)
	contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	contentClaimRef := blobref.SHA1FromString(contentClaimSigned)
	_, err = blobReceiver.ReceiveBlob(contentClaimRef, strings.NewReader(contentClaimSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
	}
	return nil
}
Example #16
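What appears to be a later camget main: it adds --shared support, wraps the client's transport in httputil.StatsTransport, and puts a local-disk caching fetcher in front of all fetches.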
func main() {
	client.AddFlags()
	flag.Parse()

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	var cl *client.Client
	var items []*blobref.BlobRef

	if *flagShared != "" {
		if client.ExplicitServer() != "" {
			log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
		}
		if flag.NArg() != 0 {
			log.Fatal("No arguments permitted when using --shared")
		}
		cl1, target, err := client.NewFromShareRoot(*flagShared)
		if err != nil {
			log.Fatal(err)
		}
		cl = cl1
		items = append(items, target)
	} else {
		cl = client.NewOrFail()
		for n := 0; n < flag.NArg(); n++ {
			arg := flag.Arg(n)
			br := blobref.Parse(arg)
			if br == nil {
				log.Fatalf("Failed to parse argument %q as a blobref.", arg)
			}
			items = append(items, br)
		}
	}

	httpStats := &httputil.StatsTransport{
		VerboseLog: *flagHTTP,
	}
	if *flagHTTP {
		httpStats.Transport = &http.Transport{
			Dial: func(net_, addr string) (net.Conn, error) {
				log.Printf("Dialing %s", addr)
				return net.Dial(net_, addr)
			},
		}
	}
	cl.SetHTTPClient(&http.Client{Transport: httpStats})

	// Put a local disk cache in front of the HTTP client.
	// TODO: this could be better about proactively cleaning things.
	// Fetching 2 TB shouldn't write 2 TB to /tmp before it's done.
	// Maybe the cache needs an LRU/size cap.
	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		log.Fatalf("Error creating temp cache directory: %v\n", err)
	}
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	if *flagVerbose {
		log.Printf("Using temp blob cache directory %s", cacheDir)
	}
	fetcher := cacher.NewCachingFetcher(diskcache, cl)

	for _, br := range items {
		if *flagGraph {
			printGraph(fetcher, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				seekFetcher := blobref.SeekerFromStreamingFetcher(fetcher)
				rc, err = schema.NewFileReader(seekFetcher, br)
				if err == nil {
					rc.(*schema.FileReader).LoadAllChunks()
				}
			} else {
				rc, err = fetch(fetcher, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
		} else {
			if err := smartFetch(fetcher, *flagOutput, br); err != nil {
				log.Fatal(err)
			}
		}
	}

	if *flagVerbose {
		log.Printf("HTTP requests: %d\n", httpStats.Requests())
	}
}