Code example #1
func main() {
	app := cli.NewApp()
	app.Email = "*****@*****.**"
	app.Name = "ipfs-bench"
	app.Action = func(context *cli.Context) error {
		verbose = context.Bool("verbose")
		noclean = context.Bool("noclean")
		params := &MultinodeParams{
			NumNodes: context.Int("numnodes"),
			FileSize: context.Int("filesize"),
			Net: &cn.LinkSettings{
				Latency: context.Int("latency"),
			},
		}
		res, err := RunMultinode(params)
		if err != nil {
			perr("error running tests: %s", err)
			return nil
		}

		for _, f := range res.FetchStats {
			hb := hum.IBytes(uint64(f.BW))
			fmt.Printf("Took: %s, Average BW: %s, TX Size: %d, DupBlocks: %d\n", f.Duration, hb, f.Total, f.DupBlocks)
		}
		fmt.Println(hum.IBytes(uint64(res.AverageBandwidth())))
		return nil
	}

	app.Flags = []cli.Flag{
		cli.IntFlag{
			Name:  "filesize",
			Usage: "specify size of file to transfer",
			Value: 1000000,
		},
		cli.IntFlag{
			Name:  "numnodes",
			Usage: "number of nodes to run test with",
			Value: 5,
		},
		cli.IntFlag{
			Name:  "latency",
			Usage: "set per-link latency",
			Value: 0,
		},
		cli.BoolFlag{
			Name:  "verbose",
			Usage: "print verbose logging info",
		},
		cli.BoolFlag{
			Name:  "noclean",
			Usage: "do not clean up docker nodes after test",
		},
	}
	app.Run(os.Args)
}
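
Every snippet on this page funnels a byte count through humanize.IBytes from github.com/dustin/go-humanize, which renders it with IEC (base-1024) units; humanize.Bytes is the SI (base-1000) counterpart. A minimal sketch of the difference (the sample value and outputs follow the go-humanize documentation):

package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// IBytes uses IEC units (KiB, MiB, GiB - powers of 1024).
	fmt.Println(humanize.IBytes(82854982)) // 79 MiB
	// Bytes uses SI units (kB, MB, GB - powers of 1000).
	fmt.Println(humanize.Bytes(82854982)) // 83 MB
}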
Code example #2
// Get formatted disk/storage info message.
func getStorageInfoMsg(storageInfo StorageInfo) string {
	msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Drive Capacity:"),
		humanize.IBytes(uint64(storageInfo.Free)),
		humanize.IBytes(uint64(storageInfo.Total)))
	if storageInfo.Backend.Type == XL {
		diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
		if maxDiskFailures := storageInfo.Backend.ReadQuorum - storageInfo.Backend.OfflineDisks; maxDiskFailures >= 0 {
			diskInfo += fmt.Sprintf("We can withstand [%d] more drive failure(s).", maxDiskFailures)
		}
		msg += colorBlue("\nStatus:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
	}
	return msg
}
Code example #3
File: collect.go  Project: codeskyblue/gopsutil
func collectMem() (*Data, error) {
	v, err := mem.VirtualMemory()
	return &Data{
		Name: "mem",
		Data: map[string]interface{}{
			"total": v.Total,
			"free":  v.Free,
			"used":  v.Used,
			"summary": fmt.Sprintf("Total: %v, Free %v, Used %v",
				humanize.IBytes(v.Total), humanize.IBytes(v.Free), humanize.IBytes(v.Used)),
		},
	}, err
}
Code example #4
// Constructs a formatted heal message when the cluster is found to be in a state that requires healing.
// Healing is optional; the server continues to initialize the object layer after printing this message.
// It is up to the end user to perform a heal if needed.
func getHealMsg(endpoints []*url.URL, storageDisks []StorageAPI) string {
	msg := fmt.Sprintln("\nData volume requires HEALING. Healing is not implemented yet, stay tuned:")
	// FIXME:  Enable this after we bring in healing.
	//	msg += "MINIO_ACCESS_KEY=%s "
	//	msg += "MINIO_SECRET_KEY=%s "
	//	msg += "minio control heal %s"
	//	creds := serverConfig.GetCredential()
	//	msg = fmt.Sprintf(msg, creds.AccessKeyID, creds.SecretAccessKey, getHealEndpoint(isSSL(), endpoints[0]))
	disksInfo, _, _ := getDisksInfo(storageDisks)
	for i, info := range disksInfo {
		if storageDisks[i] == nil {
			continue
		}
		msg += fmt.Sprintf(
			"\n[%s] %s - %s %s",
			formatInts(i+1, len(storageDisks)),
			endpoints[i],
			humanize.IBytes(uint64(info.Total)),
			func() string {
				if info.Total > 0 {
					return "online"
				}
				return "offline"
			}(),
		)
	}
	return msg
}
Code example #5
File: main.go  Project: whyrusleeping/repobench
func benchAddSize(n *core.IpfsNode, cfg *BenchCfg, size int64) error {
	f := func(b *testing.B) {
		b.SetBytes(size)
		for i := 0; i < b.N; i++ {
			r := io.LimitReader(randbo.New(), size)
			spl := chunk.NewSizeSplitter(r, cfg.Blocksize)
			_, err := importer.BuildDagFromReader(n.DAG, spl, nil)
			if err != nil {
				fmt.Printf("ERRROR: ", err)
				b.Fatal(err)
			}
		}
	}

	br := testing.Benchmark(f)
	bs := humanize.IBytes(uint64(size))
	fmt.Printf("Add File (%s):\t%s\n", bs, br)

	err := cr.GarbageCollect(n, context.Background())
	if err != nil {
		return err
	}

	return nil
}
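
Examples #5 and #9 wrap their workload in testing.Benchmark instead of a regular Benchmark* test function; calling b.SetBytes makes the returned testing.BenchmarkResult report throughput when printed. A minimal, self-contained sketch of that pattern (the 1 MiB payload is a stand-in):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"testing"
)

func main() {
	size := int64(1 << 20) // hypothetical payload size: 1 MiB per iteration
	payload := bytes.Repeat([]byte{0xAB}, int(size))

	br := testing.Benchmark(func(b *testing.B) {
		b.SetBytes(size) // enables the MB/s column in the result
		for i := 0; i < b.N; i++ {
			io.Copy(ioutil.Discard, bytes.NewReader(payload))
		}
	})

	// BenchmarkResult prints iterations and ns/op, plus MB/s because SetBytes was called.
	fmt.Printf("Copy (%d bytes):\t%s\n", size, br)
}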
Code example #6
File: wounds.go  Project: itchio/wharf
// PrettyString returns a human-readable English string for a given wound
func (w *Wound) PrettyString(container *tlc.Container) string {
	switch w.Kind {
	case WoundKind_DIR:
		dir := container.Dirs[w.Index]
		return fmt.Sprintf("directory wound (%s should exist)", dir.Path)
	case WoundKind_SYMLINK:
		symlink := container.Symlinks[w.Index]
		return fmt.Sprintf("symlink wound (%s should point to %s)", symlink.Path, symlink.Dest)
	case WoundKind_FILE:
		file := container.Files[w.Index]
		woundSize := humanize.IBytes(uint64(w.End - w.Start))
		offset := humanize.IBytes(uint64(w.Start))
		return fmt.Sprintf("~%s wound %s into %s", woundSize, offset, file.Path)
	default:
		return fmt.Sprintf("unknown wound (%d)", w.Kind)
	}
}
Code example #7
File: watch-main.go  Project: balamurugana/mc
func (u watchMessage) String() string {
	msg := console.Colorize("Time", fmt.Sprintf("[%s] ", u.Event.Time))
	if u.Event.Type == EventCreate {
		msg += console.Colorize("Size", fmt.Sprintf("%6s ", humanize.IBytes(uint64(u.Event.Size))))
	} else {
		msg += fmt.Sprintf("%6s ", "")
	}
	msg += console.Colorize("EventType", fmt.Sprintf("%s ", u.Event.Type))
	msg += console.Colorize("ObjectName", fmt.Sprintf("%s", u.Event.Path))
	return msg
}
Code example #8
File: fs.go  Project: itchio/butler
func sizeof(path string) {
	totalSize := int64(0)

	inc := func(_ string, f os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		totalSize += f.Size()
		return nil
	}

	filepath.Walk(path, inc)
	comm.Logf("Total size of %s: %s", path, humanize.IBytes(uint64(totalSize)))
	comm.Result(totalSize)
}
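
Like most examples here, sizeof converts a signed int64 into uint64 before calling humanize.IBytes. The cast is only safe for non-negative values; a negative size wraps around to a huge unsigned number and prints as several exbibytes. A small sketch of a guard one might add (prettySize is a hypothetical helper, not part of butler):

package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

// prettySize is a hypothetical helper: it formats non-negative sizes with
// humanize.IBytes and reports anything negative as-is, avoiding the
// wrap-around that uint64(n) would produce for n < 0.
func prettySize(n int64) string {
	if n < 0 {
		return fmt.Sprintf("%d B (invalid size)", n)
	}
	return humanize.IBytes(uint64(n))
}

func main() {
	fmt.Println(prettySize(1536)) // 1.5 KiB
	fmt.Println(prettySize(-1))   // -1 B (invalid size)
}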
Code example #9
File: main.go  Project: whyrusleeping/repobench
func benchDiskWriteSize(dir string, size int64) error {
	benchdir := path.Join(dir, fmt.Sprintf("benchfiles-%d", size))
	err := os.Mkdir(benchdir, 0777)
	if err != nil {
		return err
	}

	n := 0
	f := func(b *testing.B) {
		b.SetBytes(size)
		r := randbo.New()
		for i := 0; i < b.N; i++ {
			n++
			// Write into benchdir so the RemoveAll below actually cleans these files up.
			fi, err := os.Create(path.Join(benchdir, fmt.Sprint(n)))
			if err != nil {
				fmt.Println(err)
				b.Fatal(err)
			}

			_, err = io.CopyN(fi, r, size)
			if err != nil {
				fi.Close()
				fmt.Println(err)
				b.Fatal(err)
			}
			fi.Close()
		}
	}

	br := testing.Benchmark(f)
	bs := humanize.IBytes(uint64(size))
	fmt.Printf("DiskWrite (%s):\t%s\n", bs, br)

	err = os.RemoveAll(benchdir)
	if err != nil {
		return err
	}
	return nil
}
Code example #10
// Constructs a formatted regular message when we have sufficient disks to start the cluster.
func getStorageInitMsg(titleMsg string, endpoints []*url.URL, storageDisks []StorageAPI) string {
	msg := colorBlue(titleMsg)
	disksInfo, _, _ := getDisksInfo(storageDisks)
	for i, info := range disksInfo {
		if storageDisks[i] == nil {
			continue
		}
		msg += fmt.Sprintf(
			"\n[%s] %s - %s %s",
			formatInts(i+1, len(storageDisks)),
			endpoints[i],
			humanize.IBytes(uint64(info.Total)),
			func() string {
				if info.Total > 0 {
					return "online"
				}
				return "offline"
			}(),
		)
	}
	return msg
}
Code example #11
File: diff.go  Project: itchio/wharf
// Do computes the difference between old and new, according to the bsdiff
// algorithm, and writes the result to patch.
func (ctx *DiffContext) Do(old, new io.Reader, writeMessage WriteMessageFunc, consumer *state.Consumer) error {
	var memstats *runtime.MemStats

	if ctx.MeasureMem {
		memstats = &runtime.MemStats{}
		runtime.ReadMemStats(memstats)
		consumer.Debugf("Allocated bytes at start of bsdiff: %s (%s total)", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))
	}

	if ctx.db == nil {
		ctx.db = make([]byte, MaxMessageSize)
	}
	if ctx.eb == nil {
		ctx.eb = make([]byte, MaxMessageSize)
	}

	obuf, err := ioutil.ReadAll(old)
	if err != nil {
		return err
	}
	if int64(len(obuf)) > MaxFileSize {
		return fmt.Errorf("bsdiff: old file too large (%s > %s)", humanize.IBytes(uint64(len(obuf))), humanize.IBytes(uint64(MaxFileSize)))
	}
	obuflen := int32(len(obuf))

	nbuf, err := ioutil.ReadAll(new)
	if err != nil {
		return err
	}
	if int64(len(nbuf)) > MaxFileSize {
		// TODO: provide a different (int64) codepath for >=2GB files
		return fmt.Errorf("bsdiff: new file too large (%s > %s)", humanize.IBytes(uint64(len(nbuf))), humanize.IBytes(uint64(MaxFileSize)))
	}
	nbuflen := int32(len(nbuf))

	if ctx.MeasureMem {
		runtime.ReadMemStats(memstats)
		consumer.Debugf("Allocated bytes after ReadAll: %s (%s total)", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))
	}

	var lenf int32
	startTime := time.Now()

	I := qsufsort(obuf, ctx, consumer)

	duration := time.Since(startTime)
	consumer.Debugf("Suffix sorting done in %s", duration)

	if ctx.MeasureMem {
		runtime.ReadMemStats(memstats)
		consumer.Debugf("Allocated bytes after qsufsort: %s (%s total)", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))
	}

	// FIXME: the streaming format allows us to allocate less than that
	db := make([]byte, len(nbuf))
	eb := make([]byte, len(nbuf))

	bsdc := &Control{}

	consumer.ProgressLabel("Scanning...")

	// Compute the differences, writing ctrl as we go
	var scan, pos, length int32
	var lastscan, lastpos, lastoffset int32
	for scan < nbuflen {
		var oldscore int32
		scan += length

		progress := float64(scan) / float64(nbuflen)
		consumer.Progress(progress)

		for scsc := scan; scan < nbuflen; scan++ {
			pos, length = search(I, obuf, nbuf[scan:], 0, obuflen)

			for ; scsc < scan+length; scsc++ {
				if scsc+lastoffset < obuflen &&
					obuf[scsc+lastoffset] == nbuf[scsc] {
					oldscore++
				}
			}

			if (length == oldscore && length != 0) || length > oldscore+8 {
				break
			}

			if scan+lastoffset < obuflen && obuf[scan+lastoffset] == nbuf[scan] {
				oldscore--
			}
		}

		if length != oldscore || scan == nbuflen {
			var s, Sf int32
			lenf = 0
			for i := int32(0); lastscan+i < scan && lastpos+i < obuflen; {
				if obuf[lastpos+i] == nbuf[lastscan+i] {
					s++
				}
				i++
				if s*2-i > Sf*2-lenf {
					Sf = s
					lenf = i
				}
			}

			lenb := int32(0)
			if scan < nbuflen {
				var s, Sb int32
				for i := int32(1); (scan >= lastscan+i) && (pos >= i); i++ {
					if obuf[pos-i] == nbuf[scan-i] {
						s++
					}
					if s*2-i > Sb*2-lenb {
						Sb = s
						lenb = i
					}
				}
			}

			if lastscan+lenf > scan-lenb {
				overlap := (lastscan + lenf) - (scan - lenb)
				s := int32(0)
				Ss := int32(0)
				lens := int32(0)
				for i := int32(0); i < overlap; i++ {
					if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
						s++
					}
					if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
						s--
					}
					if s > Ss {
						Ss = s
						lens = i + 1
					}
				}

				lenf += lens - overlap
				lenb -= lens
			}

			for i := int32(0); i < lenf; i++ {
				db[i] = nbuf[lastscan+i] - obuf[lastpos+i]
			}
			for i := int32(0); i < (scan-lenb)-(lastscan+lenf); i++ {
				eb[i] = nbuf[lastscan+lenf+i]
			}

			bsdc.Add = db[:lenf]
			bsdc.Copy = eb[:(scan-lenb)-(lastscan+lenf)]
			bsdc.Seek = int64((pos - lenb) - (lastpos + lenf))

			err := writeMessage(bsdc)
			if err != nil {
				return err
			}

			lastscan = scan - lenb
			lastpos = pos - lenb
			lastoffset = pos - scan
		}
	}

	if ctx.MeasureMem {
		runtime.ReadMemStats(memstats)
		consumer.Debugf("Allocated bytes after scan: %s (%s total)", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))
	}

	bsdc.Reset()
	bsdc.Eof = true
	err = writeMessage(bsdc)
	if err != nil {
		return err
	}

	return nil
}
Code example #12
File: probe.go  Project: itchio/butler
func doProbe(patch string) error {
	patchReader, err := eos.Open(patch)
	if err != nil {
		return err
	}

	defer patchReader.Close()

	stats, err := patchReader.Stat()
	if err != nil {
		return err
	}

	comm.Statf("patch:  %s", humanize.IBytes(uint64(stats.Size())))

	rctx := wire.NewReadContext(patchReader)
	err = rctx.ExpectMagic(pwr.PatchMagic)
	if err != nil {
		return err
	}

	header := &pwr.PatchHeader{}
	err = rctx.ReadMessage(header)
	if err != nil {
		return err
	}

	rctx, err = pwr.DecompressWire(rctx, header.Compression)
	if err != nil {
		return err
	}

	target := &tlc.Container{}
	err = rctx.ReadMessage(target)
	if err != nil {
		return err
	}

	source := &tlc.Container{}
	err = rctx.ReadMessage(source)
	if err != nil {
		return err
	}

	comm.Statf("target: %s in %s", humanize.IBytes(uint64(target.Size)), target.Stats())
	comm.Statf("source: %s in %s", humanize.IBytes(uint64(target.Size)), source.Stats())

	var patchStats []patchStat

	sh := &pwr.SyncHeader{}
	rop := &pwr.SyncOp{}

	for fileIndex, f := range source.Files {
		stat := patchStat{
			fileIndex: int64(fileIndex),
			freshData: f.Size,
		}

		sh.Reset()
		err = rctx.ReadMessage(sh)
		if err != nil {
			return err
		}

		if sh.FileIndex != int64(fileIndex) {
			return fmt.Errorf("malformed patch: expected file %d, got %d", fileIndex, sh.FileIndex)
		}

		readingOps := true

		var pos int64

		for readingOps {
			rop.Reset()

			err = rctx.ReadMessage(rop)
			if err != nil {
				return err
			}

			switch rop.Type {
			case pwr.SyncOp_BLOCK_RANGE:
				fixedSize := (rop.BlockSpan - 1) * pwr.BlockSize
				lastIndex := rop.BlockIndex + (rop.BlockSpan - 1)
				lastSize := pwr.ComputeBlockSize(f.Size, lastIndex)
				totalSize := (fixedSize + lastSize)
				stat.freshData -= totalSize
				pos += totalSize
			case pwr.SyncOp_DATA:
				totalSize := int64(len(rop.Data))
				if *appArgs.verbose {
					comm.Debugf("%s fresh data at %s (%d-%d)", humanize.IBytes(uint64(totalSize)), humanize.IBytes(uint64(pos)),
						pos, pos+totalSize)
				}
				pos += totalSize
			case pwr.SyncOp_HEY_YOU_DID_IT:
				readingOps = false
			}
		}

		patchStats = append(patchStats, stat)
	}

	sort.Sort(byDecreasingFreshData(patchStats))

	var totalFresh int64
	for _, stat := range patchStats {
		totalFresh += stat.freshData
	}

	var eightyFresh = int64(0.8 * float64(totalFresh))
	var printedFresh int64

	comm.Opf("80%% of fresh data is in the following files:")

	for _, stat := range patchStats {
		f := source.Files[stat.fileIndex]
		comm.Logf("%s in %s (%.2f%% changed)",
			humanize.IBytes(uint64(stat.freshData)),
			f.Path,
			float64(stat.freshData)/float64(f.Size)*100.0)

		printedFresh += stat.freshData
		if printedFresh >= eightyFresh {
			break
		}
	}

	return nil
}
Code example #13
File: cp.go  Project: itchio/butler
func doCp(srcPath string, destPath string, resume bool) error {
	src, err := eos.Open(srcPath)
	if err != nil {
		return err
	}

	defer src.Close()

	dir := filepath.Dir(destPath)
	err = os.MkdirAll(dir, 0755)
	if err != nil {
		return err
	}

	flags := os.O_CREATE | os.O_WRONLY

	dest, err := os.OpenFile(destPath, flags, 0644)
	if err != nil {
		return err
	}

	defer dest.Close()

	stats, err := src.Stat()
	if err != nil {
		return err
	}

	totalBytes := int64(stats.Size())
	startOffset := int64(0)

	if resume {
		startOffset, err = dest.Seek(0, os.SEEK_END)
		if err != nil {
			return err
		}

		if startOffset == 0 {
			comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes)))
		} else if startOffset > totalBytes {
			comm.Logf("Existing data too big (%s > %s), starting over", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes)))
		} else if startOffset == totalBytes {
			comm.Logf("All %s already there", humanize.IBytes(uint64(totalBytes)))
			return nil
		}

		comm.Logf("Resuming at %s / %s", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes)))

		_, err = src.Seek(startOffset, os.SEEK_SET)
		if err != nil {
			return err
		}
	} else {
		comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes)))
	}

	start := time.Now()

	comm.Progress(float64(startOffset) / float64(totalBytes))
	comm.StartProgressWithTotalBytes(totalBytes)

	cw := counter.NewWriterCallback(func(count int64) {
		alpha := float64(startOffset+count) / float64(totalBytes)
		comm.Progress(alpha)
	}, dest)

	copiedBytes, err := io.Copy(cw, src)
	if err != nil {
		return err
	}
	comm.EndProgress()

	totalDuration := time.Since(start)
	prettyStartOffset := humanize.IBytes(uint64(startOffset))
	prettySize := humanize.IBytes(uint64(copiedBytes))
	perSecond := humanize.IBytes(uint64(float64(totalBytes-startOffset) / totalDuration.Seconds()))
	comm.Statf("%s + %s copied @ %s/s\n", prettyStartOffset, prettySize, perSecond)

	return nil
}
Code example #14
File: fs.go  Project: PagerDuty/nomad
func (f *FSCommand) Run(args []string) int {
	var verbose, machine, job, stat, tail, follow bool
	var numLines, numBytes int64

	flags := f.Meta.FlagSet("fs", FlagSetClient)
	flags.Usage = func() { f.Ui.Output(f.Help()) }
	flags.BoolVar(&verbose, "verbose", false, "")
	flags.BoolVar(&machine, "H", false, "")
	flags.BoolVar(&job, "job", false, "")
	flags.BoolVar(&stat, "stat", false, "")
	flags.BoolVar(&follow, "f", false, "")
	flags.BoolVar(&tail, "tail", false, "")
	flags.Int64Var(&numLines, "n", -1, "")
	flags.Int64Var(&numBytes, "c", -1, "")

	if err := flags.Parse(args); err != nil {
		return 1
	}
	args = flags.Args()

	if len(args) < 1 {
		if job {
			f.Ui.Error("job ID is required")
		} else {
			f.Ui.Error("allocation ID is required")
		}
		return 1
	}

	if len(args) > 2 {
		f.Ui.Error(f.Help())
		return 1
	}

	path := "/"
	if len(args) == 2 {
		path = args[1]
	}

	client, err := f.Meta.Client()
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
		return 1
	}

	// If -job is specified, use random allocation, otherwise use provided allocation
	allocID := args[0]
	if job {
		allocID, err = getRandomJobAlloc(client, args[0])
		if err != nil {
			f.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
			return 1
		}
	}

	// Truncate the id unless full length is requested
	length := shortId
	if verbose {
		length = fullId
	}
	// Query the allocation info
	if len(allocID) == 1 {
		f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
		return 1
	}
	if len(allocID)%2 == 1 {
		// Identifiers must be of even length, so we strip off the last byte
		// to provide a consistent user experience.
		allocID = allocID[:len(allocID)-1]
	}

	allocs, _, err := client.Allocations().PrefixList(allocID)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
		return 1
	}
	if len(allocs) == 0 {
		f.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
		return 1
	}
	if len(allocs) > 1 {
		// Format the allocs
		out := make([]string, len(allocs)+1)
		out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
		for i, alloc := range allocs {
			out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
				limit(alloc.ID, length),
				limit(alloc.EvalID, length),
				alloc.JobID,
				alloc.TaskGroup,
				alloc.DesiredStatus,
				alloc.ClientStatus,
			)
		}
		f.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
		return 0
	}
	// Prefix lookup matched a single allocation
	alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
		return 1
	}

	// Get file stat info
	file, _, err := client.AllocFS().Stat(alloc, path, nil)
	if err != nil {
		f.Ui.Error(err.Error())
		return 1
	}

	// If we want file stats, print those and exit.
	if stat {
		// Display the file information
		out := make([]string, 2)
		out[0] = "Mode|Size|Modified Time|Name"
		if file != nil {
			fn := file.Name
			if file.IsDir {
				fn = fmt.Sprintf("%s/", fn)
			}
			var size string
			if machine {
				size = fmt.Sprintf("%d", file.Size)
			} else {
				size = humanize.IBytes(uint64(file.Size))
			}
			out[1] = fmt.Sprintf("%s|%s|%s|%s", file.FileMode, size,
				formatTime(file.ModTime), fn)
		}
		f.Ui.Output(formatList(out))
		return 0
	}

	// Determine if the path is a file or a directory.
	if file.IsDir {
		// We have a directory, list it.
		files, _, err := client.AllocFS().List(alloc, path, nil)
		if err != nil {
			f.Ui.Error(fmt.Sprintf("Error listing alloc dir: %s", err))
			return 1
		}
		// Display the file information in a tabular format
		out := make([]string, len(files)+1)
		out[0] = "Mode|Size|Modified Time|Name"
		for i, file := range files {
			fn := file.Name
			if file.IsDir {
				fn = fmt.Sprintf("%s/", fn)
			}
			var size string
			if machine {
				size = fmt.Sprintf("%d", file.Size)
			} else {
				size = humanize.IBytes(uint64(file.Size))
			}
			out[i+1] = fmt.Sprintf("%s|%s|%s|%s",
				file.FileMode,
				size,
				formatTime(file.ModTime),
				fn,
			)
		}
		f.Ui.Output(formatList(out))
		return 0
	}

	// We have a file, output it.
	var r io.ReadCloser
	var readErr error
	if !tail {
		if follow {
			r, readErr = f.followFile(client, alloc, path, api.OriginStart, 0, -1)
		} else {
			r, readErr = client.AllocFS().Cat(alloc, path, nil)
		}

		if readErr != nil {
			readErr = fmt.Errorf("Error reading file: %v", readErr)
		}
	} else {
		// Parse the offset
		var offset int64 = defaultTailLines * bytesToLines

		if nLines, nBytes := numLines != -1, numBytes != -1; nLines && nBytes {
			f.Ui.Error("Both -n and -c are not allowed")
			return 1
		} else if numLines < -1 || numBytes < -1 {
			f.Ui.Error("Invalid size is specified")
			return 1
		} else if nLines {
			offset = numLines * bytesToLines
		} else if nBytes {
			offset = numBytes
		} else {
			numLines = defaultTailLines
		}

		if offset > file.Size {
			offset = file.Size
		}

		if follow {
			r, readErr = f.followFile(client, alloc, path, api.OriginEnd, offset, numLines)
		} else {
			// This offset needs to be relative from the front versus the follow
			// is relative to the end
			offset = file.Size - offset
			r, readErr = client.AllocFS().ReadAt(alloc, path, offset, -1, nil)

			// If numLines is set, wrap the reader
			if numLines != -1 {
				r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second)
			}
		}

		if readErr != nil {
			readErr = fmt.Errorf("Error tailing file: %v", readErr)
		}
	}

	// Check the read error before deferring Close: r is nil when readErr is set.
	if readErr != nil {
		f.Ui.Error(readErr.Error())
		return 1
	}
	defer r.Close()

	io.Copy(os.Stdout, r)
	return 0
}
Code example #15
File: set.go  Project: huin/artemis
// NOTE: the opening lines of this snippet are missing from the source; the
// declaration below and the "fmttime" key are assumed reconstructions so that
// the fragment parses as a text/template FuncMap.
var templateFuncs = template.FuncMap{
	"fmttime": func(t time.Time) string {
		return t.Format("2006/01/02 15:04:05 MST")
	},
	"fmtip": func(addr net.Addr) string {
		switch addr := addr.(type) {
		case *net.TCPAddr:
			return addr.IP.String()
		case *net.UDPAddr:
			return addr.IP.String()
		default:
			return addr.String()
		}
	},
	"fmtbytecount": func(v interface{}) (string, error) {
		var buint64 uint64
		switch v := v.(type) {
		case uint64:
			buint64 = v
		case int64:
			buint64 = uint64(v)
		case float64:
			buint64 = uint64(v)
		default:
			return "", fmt.Errorf("unknown type for byte value %T", v)
		}
		return humanize.IBytes(buint64), nil
	},
	"fmtNStoMS": func(nanos float64) float64 {
		return nanos / 1e6
	},
}
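
Helper functions like fmtbytecount above are exposed to templates through Funcs. A hedged sketch of how such a map is typically wired into text/template (the template name and body are made up for illustration):

package main

import (
	"os"
	"text/template"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// Minimal stand-in for the helper map from the snippet above.
	funcs := template.FuncMap{
		"fmtbytecount": func(v uint64) string { return humanize.IBytes(v) },
	}

	// Funcs must be registered before Parse so the parser can resolve the helper.
	tmpl := template.Must(template.New("status").Funcs(funcs).Parse(
		"cache size: {{fmtbytecount .}}\n"))

	tmpl.Execute(os.Stdout, uint64(82854982)) // cache size: 79 MiB
}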
Code example #16
File: memory.go  Project: andir/go3status
func convert(value uint64) string {
	return humanize.IBytes(value)
}
Code example #17
File: patch.go  Project: itchio/wharf
// Patch applies patch to old, according to the bspatch algorithm,
// and writes the result to new.
func Patch(old io.Reader, new io.Writer, newSize int64, readMessage ReadMessageFunc) error {
	// TODO: still debating whether we should take an io.ReadSeeker instead... probably?
	// the consumer can still do `ReadAll` themselves and pass a bytes.NewBuffer()
	obuf, err := ioutil.ReadAll(old)
	if err != nil {
		return err
	}

	// TODO: write directly to new instead of using a buffer
	nbuf := make([]byte, newSize)

	var oldpos, newpos int64

	ctrl := &Control{}

	for {
		ctrl.Reset()

		err = readMessage(ctrl)
		if err != nil {
			return err
		}

		if ctrl.Eof {
			break
		}

		// Sanity-check
		if newpos+int64(len(ctrl.Add)) > newSize {
			return ErrCorrupt
		}

		// Add old data to diff string
		for i := int64(0); i < int64(len(ctrl.Add)); i++ {
			nbuf[newpos+i] = ctrl.Add[i] + obuf[oldpos+i]
		}

		// Adjust pointers
		newpos += int64(len(ctrl.Add))
		oldpos += int64(len(ctrl.Add))

		// Sanity-check
		if newpos+int64(len(ctrl.Copy)) > newSize {
			return ErrCorrupt
		}

		// Read extra string
		copy(nbuf[newpos:], ctrl.Copy)

		// Adjust pointers
		newpos += int64(len(ctrl.Copy))
		oldpos += ctrl.Seek
	}

	if newpos != newSize {
		return fmt.Errorf("bsdiff: expected new file to be %d, was %d (%s difference)", newSize, newpos, humanize.IBytes(uint64(newSize-newpos)))
	}

	// Write the new file
	_, err = new.Write(nbuf)
	if err != nil {
		return err
	}

	return nil
}
Code example #18
File: bsdiff.go  Project: itchio/butler
func doCmdBsdiff(target string, source string, patch string, concurrency int, measureOverhead bool) error {
	targetReader, err := os.Open(target)
	if err != nil {
		return err
	}

	defer targetReader.Close()

	targetStats, err := targetReader.Stat()
	if err != nil {
		return err
	}

	sourceReader, err := os.Open(source)
	if err != nil {
		return err
	}

	defer sourceReader.Close()

	sourceStats, err := sourceReader.Stat()
	if err != nil {
		return err
	}

	comm.Opf("Diffing %s (%s) and %s (%s)...", target, humanize.IBytes(uint64(targetStats.Size())), source,
		humanize.IBytes(uint64(sourceStats.Size())))

	patchWriter, err := os.Create(patch)
	if err != nil {
		return err
	}

	wctx := wire.NewWriteContext(patchWriter)

	err = wctx.WriteMagic(pwr.PatchMagic)
	if err != nil {
		return err
	}

	compression := butlerCompressionSettings()

	err = wctx.WriteMessage(&pwr.PatchHeader{
		Compression: &compression,
	})
	if err != nil {
		return err
	}

	wctx, err = pwr.CompressWire(wctx, &compression)
	if err != nil {
		return err
	}

	targetContainer := &tlc.Container{}
	targetContainer.Files = []*tlc.File{
		&tlc.File{
			Path: target,
			Size: targetStats.Size(),
		},
	}

	err = wctx.WriteMessage(targetContainer)
	if err != nil {
		return err
	}

	sourceContainer := &tlc.Container{}
	sourceContainer.Files = []*tlc.File{
		&tlc.File{
			Path: source,
			Size: sourceStats.Size(),
		},
	}

	err = wctx.WriteMessage(sourceContainer)
	if err != nil {
		return err
	}

	err = wctx.WriteMessage(&pwr.SyncHeader{
		FileIndex: 0,
	})
	if err != nil {
		return err
	}

	err = wctx.WriteMessage(&pwr.SyncOp{
		Type:      pwr.SyncOp_BSDIFF,
		FileIndex: 0,
	})
	if err != nil {
		return err
	}

	startTime := time.Now()

	comm.StartProgress()

	dc := bsdiff.DiffContext{
		MeasureMem:              *appArgs.memstats,
		MeasureParallelOverhead: measureOverhead,
		SuffixSortConcurrency:   concurrency,
	}

	err = dc.Do(targetReader, sourceReader, wctx.WriteMessage, comm.NewStateConsumer())
	if err != nil {
		return err
	}

	comm.EndProgress()

	err = wctx.WriteMessage(&pwr.SyncOp{
		Type: pwr.SyncOp_HEY_YOU_DID_IT,
	})
	if err != nil {
		return err
	}

	err = wctx.Close()
	if err != nil {
		return err
	}

	patchStats, err := os.Lstat(patch)
	if err != nil {
		return err
	}

	duration := time.Since(startTime)
	perSec := float64(sourceStats.Size()) / duration.Seconds()

	relToNew := 100.0 * float64(patchStats.Size()) / float64(sourceStats.Size())

	comm.Statf("Processed %s @ %s / s, total %s", humanize.IBytes(uint64(sourceStats.Size())), humanize.IBytes(uint64(perSec)), duration)
	comm.Statf("Wrote %s patch (%.2f%% of total size) to %s", humanize.IBytes(uint64(patchStats.Size())), relToNew, patch)

	return nil
}
Code example #19
File: fs.go  Project: cursesun/nomad
func (f *FSCommand) Run(args []string) int {
	var verbose, machine, job, stat bool
	flags := f.Meta.FlagSet("fs-list", FlagSetClient)
	flags.Usage = func() { f.Ui.Output(f.Help()) }
	flags.BoolVar(&verbose, "verbose", false, "")
	flags.BoolVar(&machine, "H", false, "")
	flags.BoolVar(&job, "job", false, "")
	flags.BoolVar(&stat, "stat", false, "")

	if err := flags.Parse(args); err != nil {
		return 1
	}
	args = flags.Args()

	if len(args) < 1 {
		if job {
			f.Ui.Error("job ID is required")
		} else {
			f.Ui.Error("allocation ID is required")
		}

		return 1
	}

	path := "/"
	if len(args) == 2 {
		path = args[1]
	}

	client, err := f.Meta.Client()
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
		return 1
	}

	// If -job is specified, use random allocation, otherwise use provided allocation
	allocID := args[0]
	if job {
		allocID, err = getRandomJobAlloc(client, args[0])
		if err != nil {
			f.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
			return 1
		}
	}

	// Truncate the id unless full length is requested
	length := shortId
	if verbose {
		length = fullId
	}
	// Query the allocation info
	if len(allocID) == 1 {
		f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
		return 1
	}
	if len(allocID)%2 == 1 {
		// Identifiers must be of even length, so we strip off the last byte
		// to provide a consistent user experience.
		allocID = allocID[:len(allocID)-1]
	}

	allocs, _, err := client.Allocations().PrefixList(allocID)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
		return 1
	}
	if len(allocs) == 0 {
		f.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
		return 1
	}
	if len(allocs) > 1 {
		// Format the allocs
		out := make([]string, len(allocs)+1)
		out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
		for i, alloc := range allocs {
			out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
				limit(alloc.ID, length),
				limit(alloc.EvalID, length),
				alloc.JobID,
				alloc.TaskGroup,
				alloc.DesiredStatus,
				alloc.ClientStatus,
			)
		}
		f.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
		return 0
	}
	// Prefix lookup matched a single allocation
	alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
		return 1
	}

	if alloc.DesiredStatus == "failed" {
		allocID := limit(alloc.ID, length)
		msg := fmt.Sprintf(`The allocation %q failed to be placed. To see the cause, run:
nomad alloc-status %s`, allocID, allocID)
		f.Ui.Error(msg)
		return 0
	}

	// Get file stat info
	file, _, err := client.AllocFS().Stat(alloc, path, nil)
	if err != nil {
		f.Ui.Error(err.Error())
		return 1
	}

	// If we want file stats, print those and exit.
	if stat {
		// Display the file information
		out := make([]string, 2)
		out[0] = "Mode|Size|Modified Time|Name"
		if file != nil {
			fn := file.Name
			if file.IsDir {
				fn = fmt.Sprintf("%s/", fn)
			}
			var size string
			if machine {
				size = fmt.Sprintf("%d", file.Size)
			} else {
				size = humanize.IBytes(uint64(file.Size))
			}
			out[1] = fmt.Sprintf("%s|%s|%s|%s", file.FileMode, size,
				formatTime(file.ModTime), fn)
		}
		f.Ui.Output(formatList(out))
		return 0
	}

	// Determine if the path is a file or a directory.
	if file.IsDir {
		// We have a directory, list it.
		files, _, err := client.AllocFS().List(alloc, path, nil)
		if err != nil {
			f.Ui.Error(fmt.Sprintf("Error listing alloc dir: %s", err))
			return 1
		}
		// Display the file information in a tabular format
		out := make([]string, len(files)+1)
		out[0] = "Mode|Size|Modified Time|Name"
		for i, file := range files {
			fn := file.Name
			if file.IsDir {
				fn = fmt.Sprintf("%s/", fn)
			}
			var size string
			if machine {
				size = fmt.Sprintf("%d", file.Size)
			} else {
				size = humanize.IBytes(uint64(file.Size))
			}
			out[i+1] = fmt.Sprintf("%s|%s|%s|%s",
				file.FileMode,
				size,
				formatTime(file.ModTime),
				fn,
			)
		}
		f.Ui.Output(formatList(out))
	} else {
		// We have a file, cat it.
		r, _, err := client.AllocFS().Cat(alloc, path, nil)
		if err != nil {
			f.Ui.Error(fmt.Sprintf("Error reading file: %s", err))
			return 1
		}
		io.Copy(os.Stdout, r)
	}

	return 0
}