func init() {
	cmdmain.RegisterCommand("file", func(flags *flag.FlagSet) cmdmain.CommandRunner {
		cmd := new(fileCmd)
		flags.BoolVar(&cmd.makePermanode, "permanode", false, "Create and associate a new permanode for the uploaded file or directory.")
		flags.BoolVar(&cmd.filePermanodes, "filenodes", false, "Create (if necessary) content-based permanodes for each uploaded file.")
		flags.BoolVar(&cmd.vivify, "vivify", false,
			"If true, ask the server to create and sign permanode(s) associated with each uploaded"+
				" file. This permits the server to have your signing key. Used mostly with untrusted"+
				" or at-risk clients, such as phones.")
		flags.BoolVar(&cmd.exifTime, "exiftime", false, "Try to use metadata (such as EXIF) to get a stable creation time. If found, used as the replacement for the modtime. Mainly useful with vivify or filenodes.")
		flags.StringVar(&cmd.title, "title", "", "Optional title attribute to set on permanode when using -permanode.")
		flags.StringVar(&cmd.tag, "tag", "", "Optional tag(s) to set on permanode when using -permanode or -filenodes. Single value or comma separated.")
		flags.BoolVar(&cmd.diskUsage, "du", false, "Dry run mode: only show disk usage information, without upload or statting dest. Used for testing skipDirs configs, mostly.")
		if debug, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG")); debug {
			flags.BoolVar(&cmd.statcache, "statcache", true, "Use the stat cache, assuming unchanged files already uploaded in the past are still there. Fast, but potentially dangerous.")
			flags.BoolVar(&cmd.havecache, "havecache", true, "Use the 'have cache', a cache keeping track of what blobs the remote server should already have from previous uploads.")
			flags.BoolVar(&cmd.memstats, "debug-memstats", false, "Enter debug in-memory mode; collecting stats only. Doesn't upload anything.")
			flags.StringVar(&cmd.histo, "debug-histogram-file", "", "Optional file to create and write the blob size for each file uploaded. For use with GNU R and hist(read.table(\"filename\")$V1). Requires debug-memstats.")
			flags.BoolVar(&flagUseSQLiteChildCache, "sqlitecache", false, "Use sqlite for the statcache and havecache instead of a flat cache.")
		} else {
			cmd.havecache = true
			cmd.statcache = true
		}
		if client.AndroidOutput() {
			flags.BoolVar(&cmd.argsFromInput, "stdinargs", false, "If true, filenames to upload are sent one-per-line on stdin. EOF means to quit the process with exit status 0.")
		}
		flagCacheLog = flags.Bool("logcache", false, "log caching details")
		return cmd
	})
}
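// Illustrative invocations of the "file" subcommand registered above (a
// sketch, assuming the binary is built as camput; the available flags
// depend on CAMLI_DEBUG and on client.AndroidOutput()):
//
//	camput file backup.tar.gz
//	camput file -permanode -title="Vacation" -tag=photos ~/Photos
//	camput file -filenodes -exiftime ~/DCIM
//	camput file -du ~/src    (dry run: disk usage only, nothing uploaded)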
// statReceiver returns the StatReceiver used for checking for and uploading blobs.
//
// The optional provided node is only used for conditionally printing out status info to stdout.
func (up *Uploader) statReceiver(n *node) blobserver.StatReceiver {
	statReceiver := up.altStatReceiver
	if statReceiver == nil {
		// TODO(mpl): simplify the altStatReceiver situation as well,
		// see TODO in cmd/camput/uploader.go
		statReceiver = up.Client
	}
	if client.AndroidOutput() && n != nil && n.fi.Mode()&os.ModeType == 0 {
		return client.AndroidStatusReceiver{Sr: statReceiver, Path: n.fullPath}
	}
	return statReceiver
}
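// newUploader constructs the camput Uploader: it creates the client, wires an
// http.Transport (with TLS config and, under -proxy_local, a local proxy
// function) through a stats-collecting httputil.StatsTransport, wraps it in
// the Android stats transport when running under the Android app, and sets up
// a caching entity fetcher backed by the client's secret ring file for signing.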
func newUploader() *Uploader {
	cc := client.NewOrFail()
	if !*cmdmain.FlagVerbose {
		cc.SetLogger(nil)
	}

	var transport http.RoundTripper
	proxy := http.ProxyFromEnvironment
	if flagProxyLocal {
		proxy = proxyFromEnvironment
	}
	tlsConfig, err := cc.TLSConfig()
	if err != nil {
		log.Fatalf("Error while configuring TLS for client: %v", err)
	}
	transport = &http.Transport{
		Dial:            cc.DialFunc(),
		TLSClientConfig: tlsConfig,
		Proxy:           proxy,
	}
	httpStats := &httputil.StatsTransport{
		VerboseLog: *flagHTTP,
		Transport:  transport,
	}
	transport = httpStats
	if client.AndroidOutput() {
		transport = client.AndroidStatsTransport{transport}
	}
	cc.SetHTTPClient(&http.Client{Transport: transport})

	pwd, err := os.Getwd()
	if err != nil {
		log.Fatalf("os.Getwd: %v", err)
	}

	return &Uploader{
		Client:    cc,
		transport: httpStats,
		pwd:       pwd,
		entityFetcher: &jsonsign.CachingEntityFetcher{
			Fetcher: &jsonsign.FileEntityFetcher{File: cc.SecretRingFile()},
		},
	}
}
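// run drives the tree-upload pipeline defined below: a goroutine stats the
// tree rooted at t.base and feeds t.stattedc; a 10-worker stage consults the
// stat cache and forwards misses; an upload stage (one worker in -du mode,
// uploadWorkers otherwise) uploads nodes and records their put results; and
// the select loop tallies totals, dumping stats every 500ms and once at the
// end, before waiting on the root node's final put result.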
func (t *TreeUpload) run() {
	defer close(t.donec)

	// Kick off scanning all files, eventually learning the root
	// node (which references all its children).
	var root *node // nil until received and set in loop below.
	rootc := make(chan *node, 1)
	if !t.rootless {
		go func() {
			n, err := t.statPath(t.base, nil)
			if err != nil {
				log.Fatalf("Error scanning files under %s: %v", t.base, err)
			}
			close(t.stattedc)
			rootc <- n
		}()
	}

	var lastStat, lastUpload string
	dumpStats := func() {
		if client.AndroidOutput() {
			printAndroidCamputStatus(t)
			return
		}
		statStatus := ""
		if root == nil {
			statStatus = fmt.Sprintf("last stat: %s", lastStat)
		}
		blobStats := t.up.Stats()
		log.Printf("FILES: Total: %+v Skipped: %+v Uploaded: %+v %s BLOBS: %s Digested: %d last upload: %s",
			t.total, t.skipped, t.uploaded,
			statStatus,
			blobStats.String(),
			atomic.LoadInt64(&atomicDigestOps),
			lastUpload)
	}

	// Channels for stats & progress bars. These are never closed:
	uploadedc := make(chan *node) // at least tried to upload; server might have had blob
	skippedc := make(chan *node)  // didn't even hit blobserver; trusted our stat cache

	uploadsdonec := make(chan bool)
	var upload chan<- *node
	withPermanode := t.up.fileOpts.wantFilePermanode()
	if t.DiskUsageMode {
		upload = NewNodeWorker(1, func(n *node, ok bool) {
			if !ok {
				uploadsdonec <- true
				return
			}
			if n.fi.IsDir() {
				fmt.Printf("%d\t%s\n", n.SumBytes()>>10, n.fullPath)
			}
		})
	} else {
		upload = NewNodeWorker(uploadWorkers, func(n *node, ok bool) {
			if !ok {
				log.Printf("done with all uploads.")
				uploadsdonec <- true
				return
			}
			put, err := t.up.uploadNode(n)
			if err != nil {
				log.Fatalf("Error uploading %s: %v", n.fullPath, err)
			}
			n.SetPutResult(put, nil)
			if c := t.up.statCache; c != nil && !n.fi.IsDir() {
				c.AddCachedPutResult(
					t.up.pwd, n.fullPath, n.fi, put, withPermanode)
			}
			uploadedc <- n
		})
	}

	checkStatCache := NewNodeWorker(10, func(n *node, ok bool) {
		if !ok {
			if t.up.statCache != nil {
				log.Printf("done checking stat cache")
			}
			close(upload)
			return
		}
		if t.DiskUsageMode || t.up.statCache == nil {
			upload <- n
			return
		}
		if !n.fi.IsDir() {
			cachedRes, err := t.up.statCache.CachedPutResult(
				t.up.pwd, n.fullPath, n.fi, withPermanode)
			if err == nil {
				n.SetPutResult(cachedRes, nil)
				cachelog.Printf("Cache HIT on %q -> %v", n.fullPath, cachedRes)
				client.NoteFileUploaded(n.fullPath, false)
				skippedc <- n
				return
			}
		}
		upload <- n
	})

	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()

	stattedc := t.stattedc
Loop:
	for {
		select {
		case <-uploadsdonec:
			break Loop
		case n := <-rootc:
			root = n
		case n := <-uploadedc:
			t.uploaded.incr(n)
			lastUpload = n.fullPath
		case n := <-skippedc:
			t.skipped.incr(n)
		case n, ok := <-stattedc:
			if !ok {
				log.Printf("done statting:")
				dumpStats()
				close(checkStatCache)
				stattedc = nil
				continue
			}
			lastStat = n.fullPath
			t.total.incr(n)
			checkStatCache <- n
		case <-ticker.C:
			dumpStats()
		}
	}

	log.Printf("tree upload finished. final stats:")
	dumpStats()

	if root == nil {
		panic("unexpected nil root node")
	}
	var err error
	log.Printf("Waiting on root node %q", root.fullPath)
	t.finalPutRes, err = root.PutResult()
	log.Printf("Waited on root node %q: %v", root.fullPath, t.finalPutRes)
	if err != nil {
		t.err = err
	}
}