Example No. 1
// which is one of "src", "dest", or "thirdleg"
func (c *syncCmd) storageFromParam(which storageType, val string) (blobserver.Storage, error) {
	if val == "" {
		switch which {
		case storageThird:
			return nil, nil
		case storageSource:
			discl := c.discoClient()
			discl.SetLogger(c.logger)
			src, err := discl.BlobRoot()
			if err != nil {
				return nil, fmt.Errorf("Failed to discover source server's blob path: %v", err)
			}
			val = src
		}
		if val == "" {
			return nil, cmdmain.UsageError("No --" + string(which) + " flag value specified")
		}
	}
	if which == storageDest && val == "stdout" {
		return nil, nil
	}
	if looksLikePath(val) {
		disk, err := localdisk.New(val)
		if err != nil {
			return nil, fmt.Errorf("Interpreted --%v=%q as a local disk path, but got error: %v", val, err)
		}
		return disk, nil
	}
	cl := client.New(val)
	// TODO(mpl): probably needs the transport setup for trusted certs here.
	cl.SetupAuth()
	cl.SetLogger(c.logger)
	return noHub{cl}, nil
}
Example No. 2
// killReindex starts a reindexing in a new process and kills that process
// after killTime. It then (naively, for now) verifies that the kv store file is
// not corrupted by reinitializing a (possibly incomplete) index (with a corpus)
// from it. It returns true if the indexing completed before the process was
// killed, false otherwise.
func killReindex(b *testing.B, dbfile string, killTime time.Duration,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) bool {
	cmd := exec.Command(os.Args[0], "-test.run=TestChildIndexer")
	cmd.Env = append(cmd.Env, "TEST_BE_CHILD=1", "TEST_BE_CHILD_DBFILE="+dbfile)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		b.Fatal(err)
	}

	waitc := make(chan error)
	go func() {
		waitc <- cmd.Wait()
	}()
	fullIndex := false
	select {
	case err := <-waitc:
		if err == nil {
			// indexer finished before we killed it
			fullIndex = true
			b.Logf("Finished indexing before being killed at %v", killTime)
			break
		}
		// TODO(mpl): do better
		if err.Error() != "signal: killed" {
			b.Fatalf("unexpected (not killed) error from indexer process: %v %v %v", err, stdout.String(), stderr.String())
		}
	case <-time.After(killTime):
		if err := cmd.Process.Kill(); err != nil {
			b.Fatal(err)
		}
		err := <-waitc
		// TODO(mpl): do better
		if err != nil && err.Error() != "signal: killed" {
			b.Fatalf("unexpected (not killed) error from indexer process: %v %v %v", err, stdout.String(), stderr.String())
		}
	}

	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	if _, err := idx.KeepInMemory(); err != nil {
		b.Fatal(err)
	}
	if err := idx.Close(); err != nil {
		b.Fatal(err)
	}
	return fullIndex
}
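The boolean result lends itself to a retry loop in a benchmark. Below is a minimal, hypothetical sketch (not from the original source) that assumes a dbfile ending in "kvfile.db" and the sorted/kvfile packages used in the surrounding examples:

func benchmarkInterruptedReindex(b *testing.B, dbfile string) {
	// Hypothetical sketch: a kvfile-backed sorted.KeyValue provider, as in TestChildIndexer further down.
	provider := func(dbfile string) (sorted.KeyValue, error) {
		return kvfile.NewStorage(dbfile)
	}
	// Double the kill delay until one reindexing run completes before being killed.
	killTime := 100 * time.Millisecond
	for !killReindex(b, dbfile, killTime, provider) {
		killTime *= 2
	}
}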
Example No. 3
// TODO(rh): tame copy/paste code from cammount
func main() {
	client.AddFlags()
	flag.Parse()
	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		log.Fatalf("Error creating temp cache directory: %v", err)
	}
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	if flag.NArg() != 1 {
		log.Fatal("usage: camwebdav <blobref>")
	}
	br := blobref.Parse(flag.Arg(0))
	if br == nil {
		log.Fatalf("%s was not a valid blobref.", flag.Arg(0))
	}
	client := client.NewOrFail()
	fetcher := cacher.NewCachingFetcher(diskcache, client)

	f = fs.NewCamliFileSystem(fetcher, br)
	http.HandleFunc("/", webdav)
	err = http.ListenAndServe(*davaddr, nil)
	if err != nil {
		log.Fatalf("Error starting WebDAV server: %v", err)
	}
}
Example No. 4
func reindex(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("reindexing")
	if err := os.RemoveAll(dbfile); err != nil {
		b.Fatal(err)
	}
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)

	b.ResetTimer()
	if err := idx.Reindex(); err != nil {
		b.Fatal(err)
	}
	return idx
}
Example No. 5
func enumerateMeta(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) int {
	b.Logf("enumerating meta blobs")
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	defer idx.Close()

	ch := make(chan camtypes.BlobMeta, 100)
	go func() {
		if err := idx.EnumerateBlobMeta(nil, ch); err != nil {
			b.Fatal(err)
		}
	}()
	n := 0
	for range ch {
		n++
	}
	b.Logf("Enumerated %d meta blobs", n)
	return n
}
Example No. 6
func main() {
	// Scans the arg list and sets up flags
	debug := flag.Bool("debug", false, "print debugging messages.")
	client.AddFlags()
	flag.Parse()

	errorf := func(msg string, args ...interface{}) {
		fmt.Fprintf(os.Stderr, msg, args...)
		os.Exit(2)
	}

	if n := flag.NArg(); n < 1 || n > 2 {
		errorf("usage: cammount <mountpoint> [<root-blobref>]\n")
	}

	mountPoint := flag.Arg(0)

	client := client.NewOrFail() // automatic from flags

	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		errorf("Error creating temp cache directory: %v\n", err)
	}
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		errorf("Error setting up local disk cache: %v", err)
	}
	fetcher := cacher.NewCachingFetcher(diskcache, client)

	var camfs *fs.CamliFileSystem
	if flag.NArg() == 2 {
		root := blobref.Parse(flag.Arg(1))
		if root == nil {
			errorf("Error parsing root blobref: %q\n", root)
		}
		var err error
		camfs, err = fs.NewRootedCamliFileSystem(fetcher, root)
		if err != nil {
			errorf("Error creating root with %v: %v", root, err)
		}
	} else {
		camfs = fs.NewCamliFileSystem(fetcher)
		log.Printf("starting with fs %#v", camfs)
	}

	if *debug {
		// TODO: set fs's logger
	}

	conn, err := fuse.Mount(mountPoint)
	if err != nil {
		log.Fatalf("Mount: %v", err)
	}
	err = conn.Serve(camfs)
	if err != nil {
		log.Fatalf("Serve: %v", err)
	}
	log.Printf("fuse process ending.")
}
Example No. 7
// which is one of "src", "dest", or "thirdleg"
func (c *syncCmd) storageFromParam(which storageType, val string) (blobserver.Storage, error) {
	var httpClient *http.Client

	if val == "" {
		switch which {
		case storageThird:
			return nil, nil
		case storageSource:
			discl := c.discoClient()
			discl.SetLogger(c.logger)
			src, err := discl.BlobRoot()
			if err != nil {
				return nil, fmt.Errorf("Failed to discover source server's blob path: %v", err)
			}
			val = src
			httpClient = discl.HTTPClient()
		}
		if val == "" {
			return nil, cmdmain.UsageError("No --" + string(which) + " flag value specified")
		}
	}
	if which == storageDest && val == "stdout" {
		return nil, nil
	}
	if looksLikePath(val) {
		disk, err := localdisk.New(val)
		if err != nil {
			return nil, fmt.Errorf("Interpreted --%v=%q as a local disk path, but got error: %v", which, val, err)
		}
		c.oneIsDisk = true
		return disk, nil
	}
	cl := client.New(val)
	cl.InsecureTLS = c.insecureTLS
	if httpClient == nil {
		httpClient = &http.Client{
			Transport: cl.TransportForConfig(nil),
		}
	}
	cl.SetHTTPClient(httpClient)
	if err := cl.SetupAuth(); err != nil {
		return nil, fmt.Errorf("could not setup auth for connecting to %v: %v", val, err)
	}
	cl.SetLogger(c.logger)
	serverKeyID, err := cl.ServerKeyID()
	if err != nil && err != client.ErrNoSigning {
		fmt.Fprintf(cmdmain.Stderr, "Failed to discover keyId for server %v: %v", val, err)
	} else {
		if which == storageSource {
			c.srcKeyID = serverKeyID
		} else if which == storageDest {
			c.destKeyID = serverKeyID
		}
	}
	return cl, nil
}
Example No. 8
// New returns a new blobserver Storage implementation, storing blobs in the provided dir.
// If dir has an index.kv file, a diskpacked implementation is returned.
func New(dir string) (blobserver.Storage, error) {
	if v, err := diskpacked.IsDir(dir); err != nil {
		return nil, err
	} else if v {
		return diskpacked.New(dir)
	}
	if v, err := localdisk.IsDir(dir); err != nil {
		return nil, err
	} else if v {
		return localdisk.New(dir)
	}
	return diskpacked.New(dir)
}
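A hedged sketch of a possible caller (hypothetical helper, not from the original source; it reuses blobserver.Receive and blob.SHA1FromString exactly as they appear in the populate example further down):

// storeString is a hypothetical helper: it opens (or creates) storage under dir and writes a single blob to it.
func storeString(dir, data string) (blob.Ref, error) {
	bs, err := New(dir) // diskpacked or localdisk, depending on what dir already contains
	if err != nil {
		return blob.Ref{}, err
	}
	ref := blob.SHA1FromString(data)
	if _, err := blobserver.Receive(bs, ref, strings.NewReader(data)); err != nil {
		return blob.Ref{}, err
	}
	return ref, nil
}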
Example No. 9
// NewDiskCache returns a new DiskCache from a StreamingFetcher, which
// is usually the pkg/client HTTP client (which typically has much
// higher latency and lower bandwidth than local disk).
func NewDiskCache(fetcher blob.StreamingFetcher) (*DiskCache, error) {
	// TODO: max disk size, keep LRU of access, smarter cleaning,
	// persistent directory per-user, etc.

	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		return nil, err
	}
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		return nil, err
	}
	dc := &DiskCache{
		CachingFetcher: NewCachingFetcher(diskcache, fetcher),
		Root:           cacheDir,
	}
	return dc, nil
}
Example No. 10
func TestChildIndexer(t *testing.T) {
	if os.Getenv("TEST_BE_CHILD") != "1" {
		t.Skip("not a real test; used as a child process by the benchmarks")
	}
	dbfile := os.Getenv("TEST_BE_CHILD_DBFILE")
	if dbfile == "" {
		log.Fatal("empty TEST_BE_CHILD_DBFILE")
	}
	if err := os.RemoveAll(dbfile); err != nil {
		log.Fatal(err)
	}
	var kv sorted.KeyValue
	var err error
	switch {
	case strings.HasSuffix(dbfile, "leveldb.db"):
		kv, err = leveldb.NewStorage(dbfile)
	case strings.HasSuffix(dbfile, "kvfile.db"):
		kv, err = kvfile.NewStorage(dbfile)
	case strings.HasSuffix(dbfile, "sqlite.db"):
		kv, err = sqlite.NewStorage(dbfile)
	default:
		log.Fatalf("unknown sorted provider for %v", dbfile)
	}
	if err != nil {
		log.Fatal(err)
	}
	bs, err := localdisk.New(filepath.Join(filepath.Dir(dbfile), "bs"))
	if err != nil {
		log.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		log.Fatal(err)
	}
	idx.InitBlobSource(bs)
	defer func() {
		if err := idx.Close(); err != nil {
			log.Fatal(err)
		}
	}()
	if err := idx.Reindex(); err != nil {
		log.Fatal(err)
	}
}
Example No. 11
// which is one of "src", "dest", or "thirdleg"
func (c *syncCmd) storageFromParam(which storageType, val string) (blobserver.Storage, error) {
	var httpClient *http.Client

	if val == "" {
		switch which {
		case storageThird:
			return nil, nil
		case storageSource:
			discl := c.discoClient()
			discl.SetLogger(c.logger)
			src, err := discl.BlobRoot()
			if err != nil {
				return nil, fmt.Errorf("Failed to discover source server's blob path: %v", err)
			}
			val = src
			httpClient = discl.HTTPClient()
		}
		if val == "" {
			return nil, cmdmain.UsageError("No --" + string(which) + " flag value specified")
		}
	}
	if which == storageDest && val == "stdout" {
		return nil, nil
	}
	if looksLikePath(val) {
		disk, err := localdisk.New(val)
		if err != nil {
			return nil, fmt.Errorf("Interpreted --%v=%q as a local disk path, but got error: %v", which, val, err)
		}
		return disk, nil
	}
	cl := client.New(val)
	cl.InsecureTLS = c.insecureTLS
	if httpClient == nil {
		httpClient = &http.Client{
			Transport: cl.TransportForConfig(nil),
		}
	}
	cl.SetHTTPClient(httpClient)
	cl.SetupAuth()
	cl.SetLogger(c.logger)
	return cl, nil
}
Example No. 12
// dataStores returns the blobserver that stores the instance configurations, and the kv
// store that holds the instance states.
func dataStores() (blobserver.Storage, sorted.KeyValue, error) {
	dataDir := os.Getenv("CAMLI_GCE_DATA")
	if dataDir == "" {
		dataDir = "camli-gce-data"
		log.Printf("data dir not provided as env var CAMLI_GCE_DATA, so defaulting to %v", dataDir)
	}
	blobsDir := filepath.Join(dataDir, "instance-conf")
	if err := os.MkdirAll(blobsDir, 0700); err != nil {
		return nil, nil, err
	}
	instConf, err := localdisk.New(blobsDir)
	if err != nil {
		return nil, nil, err
	}
	instState, err := leveldb.NewStorage(filepath.Join(dataDir, "instance-state"))
	if err != nil {
		return nil, nil, err
	}
	return instConf, instState, nil
}
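A possible call site, shown only to illustrate the expected error handling (hypothetical helper, not from the original source):

// mustDataStores is a hypothetical helper that opens both stores at startup and fails fast if either cannot be created.
func mustDataStores() (blobserver.Storage, sorted.KeyValue) {
	instConf, instState, err := dataStores()
	if err != nil {
		log.Fatalf("could not open GCE data stores: %v", err)
	}
	return instConf, instState
}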
Example No. 13
// NewDiskCache returns a new DiskCache from a Fetcher, which
// is usually the pkg/client HTTP client (which typically has much
// higher latency and lower bandwidth than local disk).
func NewDiskCache(fetcher blob.Fetcher) (*DiskCache, error) {
	cacheDir := filepath.Join(osutil.CacheDir(), "blobs")
	if !osutil.DirExists(cacheDir) {
		if err := os.Mkdir(cacheDir, 0700); err != nil {
			log.Printf("Warning: failed to make %s: %v; using tempdir instead", cacheDir, err)
			cacheDir, err = ioutil.TempDir("", "camlicache")
			if err != nil {
				return nil, err
			}
		}
	}
	// TODO: max disk size, keep LRU of access, smarter cleaning, etc
	// TODO: use diskpacked instead? harder to clean, though.
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		return nil, err
	}
	dc := &DiskCache{
		CachingFetcher: NewCachingFetcher(diskcache, fetcher),
		Root:           cacheDir,
	}
	return dc, nil
}
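A sketch of how a command-line tool might wire this up (hypothetical; it assumes, as the other examples here suggest, that the client returned by client.NewOrFail satisfies blob.Fetcher):

// newCachedFetcher is a hypothetical helper sketch, not part of the original package.
func newCachedFetcher() (*DiskCache, error) {
	// Front the (higher-latency) pkg/client HTTP client with the local disk cache.
	cl := client.NewOrFail()
	dc, err := NewDiskCache(cl)
	if err != nil {
		return nil, err
	}
	log.Printf("caching blobs under %s", dc.Root)
	return dc, nil
}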
Example No. 14
// populate fills the blobserver and, through the sync handler, the index at the same time.
func populate(b *testing.B, dbfile string,
	sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
	b.Logf("populating %v", dbfile)
	kv, err := sortedProvider(dbfile)
	if err != nil {
		b.Fatal(err)
	}
	bsRoot := filepath.Join(filepath.Dir(dbfile), "bs")
	if err := os.MkdirAll(bsRoot, 0700); err != nil {
		b.Fatal(err)
	}
	dataDir, err := os.Open("testdata")
	if err != nil {
		b.Fatal(err)
	}
	fis, err := dataDir.Readdir(-1)
	if err != nil {
		b.Fatal(err)
	}
	if len(fis) == 0 {
		b.Fatalf("no files in %s dir", "testdata")
	}

	ks := doKeyStuff(b)

	bs, err := localdisk.New(bsRoot)
	if err != nil {
		b.Fatal(err)
	}
	if _, err := blobserver.Receive(bs, ks.pubKeyRef, strings.NewReader(ks.pubKey)); err != nil {
		b.Fatal(err)
	}
	idx, err := index.New(kv)
	if err != nil {
		b.Fatal(err)
	}
	idx.InitBlobSource(bs)
	sh := server.NewSyncHandler("/bs/", "/index/", bs, idx, sorted.NewMemoryKeyValue())

	b.ResetTimer()
	for _, v := range fis {
		f, err := os.Open(filepath.Join(dataDir.Name(), v.Name()))
		if err != nil {
			b.Fatal(err)
		}
		td := &trackDigestReader{r: f}
		fm := schema.NewFileMap(v.Name())
		fm.SetModTime(v.ModTime())
		fileRef, err := schema.WriteFileMap(bs, fm, td)
		if err != nil {
			b.Fatal(err)
		}
		f.Close()

		unsigned := schema.NewPlannedPermanode(td.Sum())
		unsigned.SetSigner(ks.pubKeyRef)
		sr := &jsonsign.SignRequest{
			UnsignedJSON: unsigned.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: time.Unix(0, 0),
		}
		signed, err := sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		pn := blob.SHA1FromString(signed)
		// N.B: use blobserver.Receive so that the blob hub gets notified, and the blob gets enqueued into the index
		if _, err := blobserver.Receive(bs, pn, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}

		contentAttr := schema.NewSetAttributeClaim(pn, "camliContent", fileRef.String())
		claimTime, ok := fm.ModTime()
		if !ok {
			b.Fatalf("no modtime in file map for %v", v.Name())
		}
		contentAttr.SetClaimDate(claimTime)
		contentAttr.SetSigner(ks.pubKeyRef)
		sr = &jsonsign.SignRequest{
			UnsignedJSON: contentAttr.Blob().JSON(),
			// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
			Fetcher:       bs,
			EntityFetcher: ks.entityFetcher,
			SignatureTime: claimTime,
		}
		signed, err = sr.Sign()
		if err != nil {
			b.Fatal("problem signing: " + err.Error())
		}
		cl := blob.SHA1FromString(signed)
		if _, err := blobserver.Receive(bs, cl, strings.NewReader(signed)); err != nil {
			b.Fatal(err)
		}
	}
	sh.IdleWait()

	return idx
}
Example No. 15
func newPublishHandler(conf *config) *publishHandler {
	cl, err := app.Client()
	if err != nil {
		logger.Fatalf("could not get a client for the publish handler %v", err)
	}
	if conf.RootName == "" {
		logger.Fatal("camliRoot not found in the app configuration")
	}
	maxResizeBytes := conf.MaxResizeBytes
	if maxResizeBytes == 0 {
		maxResizeBytes = constants.DefaultMaxResizeMem
	}
	var CSSFiles, JSDeps []string
	if conf.SourceRoot != "" {
		appRoot := filepath.Join(conf.SourceRoot, "app", "publisher")
		Files = &fileembed.Files{
			DirFallback: appRoot,
		}
		// TODO(mpl): Can I readdir by listing with "/" on Files, even with DirFallback?
		// Apparently not, but retry later.
		dir, err := os.Open(appRoot)
		if err != nil {
			logger.Fatal(err)
		}
		defer dir.Close()
		names, err := dir.Readdirnames(-1)
		if err != nil {
			logger.Fatal(err)
		}
		for _, v := range names {
			if strings.HasSuffix(v, ".css") {
				CSSFiles = append(CSSFiles, v)
				continue
			}
			// TODO(mpl): document or fix (use a map?) the ordering
			// problem: i.e. jquery.js must be sourced before
			// publisher.js. For now, just cheat by sorting the
			// slice.
			if strings.HasSuffix(v, ".js") {
				JSDeps = append(JSDeps, v)
			}
		}
		sort.Strings(JSDeps)
	} else {
		Files.Listable = true
		dir, err := Files.Open("/")
		if err != nil {
			logger.Fatal(err)
		}
		defer dir.Close()
		fis, err := dir.Readdir(-1)
		if err != nil {
			logger.Fatal(err)
		}
		for _, v := range fis {
			name := v.Name()
			if strings.HasSuffix(name, ".css") {
				CSSFiles = append(CSSFiles, name)
				continue
			}
			if strings.HasSuffix(name, ".js") {
				JSDeps = append(JSDeps, name)
			}
		}
		sort.Strings(JSDeps)
	}
	// TODO(mpl): add all htmls found in Files to the template if none specified?
	if conf.GoTemplate == "" {
		logger.Fatal("a go template is required in the app configuration")
	}
	goTemplate, err := goTemplate(Files, conf.GoTemplate)
	if err != nil {
		logger.Fatal(err)
	}

	var cache blobserver.Storage
	var thumbMeta *server.ThumbMeta
	if conf.CacheRoot != "" {
		cache, err = localdisk.New(conf.CacheRoot)
		if err != nil {
			logger.Fatalf("Could not create localdisk cache: %v", err)
		}
		thumbsCacheDir := filepath.Join(os.TempDir(), "camli-publisher-cache")
		if err := os.MkdirAll(thumbsCacheDir, 0700); err != nil {
			logger.Fatalf("Could not create cache dir %s for %v publisher: %v", thumbsCacheDir, conf.RootName, err)
		}
		kv, err := sorted.NewKeyValue(map[string]interface{}{
			"type": "kv",
			"file": filepath.Join(thumbsCacheDir, conf.RootName+"-thumbnails.kv"),
		})
		if err != nil {
			logger.Fatalf("Could not create kv for %v's thumbs cache: %v", conf.RootName, err)
		}
		thumbMeta = server.NewThumbMeta(kv)
	}

	return &publishHandler{
		rootName:       conf.RootName,
		cl:             cl,
		resizeSem:      syncutil.NewSem(maxResizeBytes),
		staticFiles:    Files,
		goTemplate:     goTemplate,
		CSSFiles:       CSSFiles,
		JSDeps:         JSDeps,
		describedCache: make(map[string]*search.DescribedBlob),
		cache:          cache,
		thumbMeta:      thumbMeta,
	}
}
Example No. 16
func main() {
	client.AddFlags()
	flag.Parse()

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	var cl *client.Client
	var items []*blobref.BlobRef

	if *flagShared != "" {
		if client.ExplicitServer() != "" {
			log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
		}
		if flag.NArg() != 0 {
			log.Fatal("No arguments permitted when using --shared")
		}
		cl1, target, err := client.NewFromShareRoot(*flagShared)
		if err != nil {
			log.Fatal(err)
		}
		cl = cl1
		items = append(items, target)
	} else {
		cl = client.NewOrFail()
		for n := 0; n < flag.NArg(); n++ {
			arg := flag.Arg(n)
			br := blobref.Parse(arg)
			if br == nil {
				log.Fatalf("Failed to parse argument %q as a blobref.", arg)
			}
			items = append(items, br)
		}
	}

	httpStats := &httputil.StatsTransport{
		VerboseLog: *flagHTTP,
	}
	if *flagHTTP {
		httpStats.Transport = &http.Transport{
			Dial: func(net_, addr string) (net.Conn, error) {
				log.Printf("Dialing %s", addr)
				return net.Dial(net_, addr)
			},
		}
	}
	cl.SetHTTPClient(&http.Client{Transport: httpStats})

	// Put a local disk cache in front of the HTTP client.
	// TODO: this could be better about proactively cleaning things.
	// Fetching 2 TB shouldn't write 2 TB to /tmp before it's done.
	// Maybe the cache needs an LRU/size cap.
	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		log.Fatalf("Error creating temp cache directory: %v\n", err)
	}
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	if *flagVerbose {
		log.Printf("Using temp blob cache directory %s", cacheDir)
	}
	fetcher := cacher.NewCachingFetcher(diskcache, cl)

	for _, br := range items {
		if *flagGraph {
			printGraph(fetcher, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exist.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				seekFetcher := blobref.SeekerFromStreamingFetcher(fetcher)
				rc, err = schema.NewFileReader(seekFetcher, br)
				if err == nil {
					rc.(*schema.FileReader).LoadAllChunks()
				}
			} else {
				rc, err = fetch(fetcher, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
		} else {
			if err := smartFetch(fetcher, *flagOutput, br); err != nil {
				log.Fatal(err)
			}
		}
	}

	if *flagVerbose {
		log.Printf("HTTP requests: %d\n", httpStats.Requests())
	}
}