func (c *archiveRun) main(a subcommands.Application, args []string) error { out := os.Stdout prefix := "\n" if c.defaultFlags.Quiet { out = nil prefix = "" } start := time.Now() arch := archiver.New(isolatedclient.New(c.isolatedFlags.ServerURL, c.isolatedFlags.Namespace), out) common.CancelOnCtrlC(arch) future := isolate.Archive(arch, &c.ArchiveOptions) future.WaitForHashed() var err error if err = future.Error(); err != nil { fmt.Printf("%s%s %s\n", prefix, filepath.Base(c.Isolate), err) } else { fmt.Printf("%s%s %s\n", prefix, future.Digest(), filepath.Base(c.Isolate)) } if err2 := arch.Close(); err == nil { err = err2 } if !c.defaultFlags.Quiet { duration := time.Since(start) stats := arch.Stats() fmt.Fprintf(os.Stderr, "Hits : %5d (%s)\n", stats.TotalHits(), stats.TotalBytesHits()) fmt.Fprintf(os.Stderr, "Misses : %5d (%s)\n", stats.TotalMisses(), stats.TotalBytesPushed()) fmt.Fprintf(os.Stderr, "Duration: %s\n", common.Round(duration, time.Millisecond)) } return err }
func (p *progress) printStep() (io.Writer, string) { if p.out == nil || !p.valueChanged { return p.out, "" } p.valueChanged = false // Zap resolution at .1s level. We're slow anyway. duration := common.Round(time.Since(p.start), 100*time.Millisecond) return p.out, fmt.Sprintf("%s %s%s", renderValues(p.columns), duration, p.EOL) }
// main archives a batch of isolate trees, one per *.isolated.gen.json path
// in args, uploading them concurrently through a single archiver. Each
// successful tree prints "<digest> <name>" to stdout; per-item failures go
// to stderr. When c.dumpJSON is set and all uploads succeeded, a
// name->digest JSON map is written there. Returns the first error seen.
func (c *batchArchiveRun) main(a subcommands.Application, args []string) error {
	out := os.Stdout
	prefix := "\n"
	if c.defaultFlags.Quiet {
		// Quiet mode: no progress writer and no leading blank line.
		out = nil
		prefix = ""
	}
	start := time.Now()
	arch := archiver.New(isolatedclient.New(c.isolatedFlags.ServerURL, c.isolatedFlags.Namespace), out)
	common.CancelOnCtrlC(arch)
	// tmp pairs a display name with its in-flight archive operation.
	type tmp struct {
		name   string
		future archiver.Future
	}
	// Buffered to len(args) so producer goroutines never block on sends,
	// even if the consumer loop below is still draining earlier items.
	items := make(chan *tmp, len(args))
	var wg sync.WaitGroup
	for _, arg := range args {
		wg.Add(1)
		// arg is passed as an explicit parameter to avoid loop-variable
		// capture across goroutines.
		go func(genJsonPath string) {
			defer wg.Done()
			data := &struct {
				Args    []string
				Dir     string
				Version int
			}{}
			// Any validation failure cancels the whole archiver; the item
			// is simply never sent on the channel.
			if err := common.ReadJSONFile(genJsonPath, data); err != nil {
				arch.Cancel(err)
				return
			}
			if data.Version != isolate.IsolatedGenJSONVersion {
				arch.Cancel(fmt.Errorf("invalid version %d in %s", data.Version, genJsonPath))
				return
			}
			if !common.IsDirectory(data.Dir) {
				arch.Cancel(fmt.Errorf("invalid dir %s in %s", data.Dir, genJsonPath))
				return
			}
			opts, err := parseArchiveCMD(data.Args, data.Dir)
			if err != nil {
				arch.Cancel(fmt.Errorf("invalid archive command in %s: %s", genJsonPath, err))
				return
			}
			name := filepath.Base(opts.Isolated)
			// Strip the extension if there is one.
			if dotIndex := strings.LastIndex(name, "."); dotIndex != -1 {
				name = name[0:dotIndex]
			}
			items <- &tmp{name, isolate.Archive(arch, opts)}
		}(arg)
	}
	// Close items only after every producer has finished, so the range
	// loop below terminates.
	go func() {
		wg.Wait()
		close(items)
	}()
	data := map[string]isolated.HexDigest{}
	for item := range items {
		item.future.WaitForHashed()
		if item.future.Error() == nil {
			data[item.name] = item.future.Digest()
			fmt.Printf("%s%s %s\n", prefix, item.future.Digest(), item.name)
		} else {
			// Failures go to stderr so stdout carries only digests.
			fmt.Fprintf(os.Stderr, "%s%s %s\n", prefix, item.name, item.future.Error())
		}
	}
	// Close flushes/finishes uploads; its error takes precedence over
	// writing the dump file.
	err := arch.Close()
	duration := time.Since(start)
	// Only write the file once upload is confirmed.
	if err == nil && c.dumpJSON != "" {
		err = common.WriteJSONFile(c.dumpJSON, data)
	}
	if !c.defaultFlags.Quiet {
		stats := arch.Stats()
		fmt.Fprintf(os.Stderr, "Hits : %5d (%s)\n", stats.TotalHits(), stats.TotalBytesHits())
		fmt.Fprintf(os.Stderr, "Misses : %5d (%s)\n", stats.TotalMisses(), stats.TotalBytesPushed())
		fmt.Fprintf(os.Stderr, "Duration: %s\n", common.Round(duration, time.Millisecond))
	}
	return err
}