Code example #1
File: reindex.go Project: dichro/cameloff
func (f *FetcherEnumerator) PrintStats() {
	var (
		start     = time.Now()
		then      = time.Now()
		lastStats stats
	)
	for _ = range time.Tick(10 * time.Second) {
		f.mu.Lock()
		now := time.Now()
		stats := f.stats
		f.mu.Unlock()

		elapsed := now.Sub(start)
		delta := uint64(now.Sub(then).Seconds())

		bytes := uint64(stats.bytes - lastStats.bytes)
		blobs := float64(stats.blobs - lastStats.blobs)
		hit := float64(stats.hit - lastStats.hit)
		miss := float64(stats.miss - lastStats.miss)

		fmt.Printf("%s: %s blobs (%s/sec); %s bytes (%s/sec); cache %s@%.0f%% (cum %.0f%%)\n",
			elapsed,
			humanize.SI(float64(stats.blobs), ""), humanize.SI(blobs/float64(delta), ""),
			humanize.Bytes(stats.bytes), humanize.Bytes(bytes/uint64(delta)),
			humanize.SI(hit+miss, ""), 100*hit/(hit+miss),
			float64(100*stats.hit)/float64(stats.hit+stats.miss),
		)

		lastStats = stats
		then = now
	}
}
Code example #2
File: main.go Project: willhite/noms-old
func main() {
	useSHA := flag.String("use-sha", "", "<default>=no hashing, 1=sha1, 256=sha256, 512=sha512, blake=blake2b")
	useBH := flag.Bool("use-bh", false, "whether we buzhash the bytes")
	flag.Parse()

	flag.Usage = func() {
		fmt.Printf("%s <big-file>\n", os.Args[0])
		flag.PrintDefaults()
		return
	}

	if len(flag.Args()) < 1 {
		flag.Usage()
		return
	}

	p := flag.Args()[0]
	bh := buzhash.NewBuzHash(64 * 8)
	f, _ := os.Open(p)
	defer f.Close()
	t0 := time.Now()
	buf := make([]byte, 4*1024)
	l := uint64(0)

	var h hash.Hash
	if *useSHA == "1" {
		h = sha1.New()
	} else if *useSHA == "256" {
		h = sha256.New()
	} else if *useSHA == "512" {
		h = sha512.New()
	} else if *useSHA == "blake" {
		h = blake2.NewBlake2B()
	}

	for {
		n, err := f.Read(buf)
		l += uint64(n)
		if err == io.EOF {
			break
		}
		s := buf[:n]
		if h != nil {
			h.Write(s)
		}
		if *useBH {
			bh.Write(s)
		}
	}

	t1 := time.Now()
	d := t1.Sub(t0)
	fmt.Printf("Read  %s in %s (%s/s)\n", humanize.Bytes(l), d, humanize.Bytes(uint64(float64(l)/d.Seconds())))
	digest := []byte{}
	if h != nil {
		fmt.Printf("%x\n", h.Sum(digest))
	}
}
Code example #3
File: model.go Project: burkemw3/syncthingfuse
func (m *Model) GetPinsStatusByFolder() map[string]string {
	result := make(map[string]string)

	m.fmut.RLock()
	defer m.fmut.RUnlock()

	for fldr, files := range m.pinnedFiles {
		pendingBytes := uint64(0)
		pendingFileCount := 0
		pinnedBytes := uint64(0)
		pinnedFileCount := 0
		fbc := m.blockCaches[fldr]
		tc := m.treeCaches[fldr]

		for _, file := range files {
			pending := false

			fileEntry, _ := tc.GetEntry(file)
			for _, block := range fileEntry.Blocks {
				if false == fbc.HasPinnedBlock(block.Hash) {
					pending = true
					pendingBytes += uint64(block.Size)
				} else {
					pinnedBytes += uint64(block.Size)
				}
			}

			if pending {
				pendingFileCount += 1
			} else {
				pinnedFileCount += 1
			}
		}

		if pendingFileCount > 0 {
			pendingByteComment := human.Bytes(pendingBytes)
			fileLabel := "files"
			if pendingFileCount == 1 {
				fileLabel = "file"
			}
			result[fldr] = fmt.Sprintf("%d %s (%s) pending", pendingFileCount, fileLabel, pendingByteComment)
		} else {
			if pinnedFileCount > 0 {
				pinnedByteComment := human.Bytes(pinnedBytes)
				fileLabel := "files"
				if pinnedFileCount == 1 {
					fileLabel = "file"
				}
				result[fldr] = fmt.Sprintf("%d %s (%s) pinned", pinnedFileCount, fileLabel, pinnedByteComment)
			}
		}
	}

	return result
}
Code example #4
File: importer.go Project: willhite/noms-old
func getStatusPrinter(expected uint64) progressreader.Callback {
	startTime := time.Now()
	return func(seen uint64) {
		percent := float64(seen) / float64(expected) * 100
		elapsed := time.Now().Sub(startTime)
		rate := float64(seen) / elapsed.Seconds()

		status.Printf("%.2f%% of %s (%s/s)...",
			percent,
			humanize.Bytes(expected),
			humanize.Bytes(uint64(rate)))
	}
}
Code example #5
File: blob_get.go Project: Richardphp/noms
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s <dataset> <file>\n", os.Args[0])
		flag.PrintDefaults()
	}

	spec.RegisterDatabaseFlags(flag.CommandLine)
	flag.Parse(true)

	if len(flag.Args()) != 2 {
		d.CheckError(errors.New("expected dataset and file flags"))
	}

	var blob types.Blob
	path := flag.Arg(0)
	if db, val, err := spec.GetPath(path); err != nil {
		d.CheckErrorNoUsage(err)
	} else if val == nil {
		d.CheckErrorNoUsage(fmt.Errorf("No value at %s", path))
	} else if b, ok := val.(types.Blob); !ok {
		d.CheckErrorNoUsage(fmt.Errorf("Value at %s is not a blob", path))
	} else {
		defer db.Close()
		blob = b
	}

	filePath := flag.Arg(1)
	if filePath == "" {
		d.CheckErrorNoUsage(errors.New("Empty file path"))
	}

	// Note: overwrites any existing file.
	file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0644)
	d.CheckErrorNoUsage(err)
	defer file.Close()

	expected := humanize.Bytes(blob.Len())
	start := time.Now()

	progReader := progressreader.New(blob.Reader(), func(seen uint64) {
		elapsed := time.Since(start).Seconds()
		rate := uint64(float64(seen) / elapsed)
		status.Printf("%s of %s written in %ds (%s/s)...", humanize.Bytes(seen), expected, int(elapsed), humanize.Bytes(rate))
	})

	io.Copy(file, progReader)
	status.Done()
}
Code example #6
File: gen_gdl90.go Project: jpoirier/stratux
func printStats() {
	statTimer := time.NewTicker(30 * time.Second)
	diskUsageWarning := false
	for {
		<-statTimer.C
		var memstats runtime.MemStats
		runtime.ReadMemStats(&memstats)
		log.Printf("stats [started: %s]\n", humanize.RelTime(time.Time{}, stratuxClock.Time, "ago", "from now"))
		log.Printf(" - Disk bytes used = %s (%.1f %%), Disk bytes free = %s (%.1f %%)\n", humanize.Bytes(usage.Used()), 100*usage.Usage(), humanize.Bytes(usage.Free()), 100*(1-usage.Usage()))
		log.Printf(" - CPUTemp=%.02f deg C, MemStats.Alloc=%s, MemStats.Sys=%s, totalNetworkMessagesSent=%s\n", globalStatus.CPUTemp, humanize.Bytes(uint64(memstats.Alloc)), humanize.Bytes(uint64(memstats.Sys)), humanize.Comma(int64(totalNetworkMessagesSent)))
		log.Printf(" - UAT/min %s/%s [maxSS=%.02f%%], ES/min %s/%s, Total traffic targets tracked=%s", humanize.Comma(int64(globalStatus.UAT_messages_last_minute)), humanize.Comma(int64(globalStatus.UAT_messages_max)), float64(maxSignalStrength)/10.0, humanize.Comma(int64(globalStatus.ES_messages_last_minute)), humanize.Comma(int64(globalStatus.ES_messages_max)), humanize.Comma(int64(len(seenTraffic))))
		log.Printf(" - Network data messages sent: %d total, %d nonqueueable.  Network data bytes sent: %d total, %d nonqueueable.\n", globalStatus.NetworkDataMessagesSent, globalStatus.NetworkDataMessagesSentNonqueueable, globalStatus.NetworkDataBytesSent, globalStatus.NetworkDataBytesSentNonqueueable)
		if globalSettings.GPS_Enabled {
			log.Printf(" - Last GPS fix: %s, GPS solution type: %d using %d satellites (%d/%d seen/tracked), NACp: %d, est accuracy %.02f m\n", stratuxClock.HumanizeTime(mySituation.LastFixLocalTime), mySituation.Quality, mySituation.Satellites, mySituation.SatellitesSeen, mySituation.SatellitesTracked, mySituation.NACp, mySituation.Accuracy)
			log.Printf(" - GPS vertical velocity: %.02f ft/sec; GPS vertical accuracy: %v m\n", mySituation.GPSVertVel, mySituation.AccuracyVert)
		}
		// Check if we're using more than 95% of the free space. If so, throw a warning (only once).
		if !diskUsageWarning && usage.Usage() > 95.0 {
			err_p := fmt.Errorf("Disk bytes used = %s (%.1f %%), Disk bytes free = %s (%.1f %%)", humanize.Bytes(usage.Used()), 100*usage.Usage(), humanize.Bytes(usage.Free()), 100*(1-usage.Usage()))
			addSystemError(err_p)
			diskUsageWarning = true
		}
		logStatus()
	}
}
Code example #7
func runSave(cmd *cobra.Command, args []string) (reterr error) {
	if len(args) == 0 {
		return errors.New("image reference missing")
	}

	output, err := cmd.Flags().GetString("output")
	if err != nil {
		return err
	}

	if output == "-" && terminal.IsTerminal(int(os.Stdout.Fd())) {
		return errors.New("refusing to output to terminal, specify output file")
	}

	client, err := engineapi.NewEnvClient()
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(context.Background())
	callOnSignal(ctx, cancel, syscall.SIGINT)
	defer cancel()

	graphdir, err := cmd.Flags().GetString("graph")
	if err != nil {
		return err
	}

	c, err := buildcache.New(client).Get(ctx, graphdir, args[0])
	if err != nil {
		return err
	}

	if output == "-" {
		_, err := io.Copy(os.Stdout, c)
		return err
	}

	f, err := ioutil.TempFile(filepath.Dir(output), ".buildcache-")
	if err != nil {
		return err
	}
	defer func() {
		if reterr != nil {
			os.RemoveAll(f.Name())
		}
	}()
	if n, err := io.Copy(f, c); err != nil {
		return err
	} else {
		logrus.Debugf("saving: %v", humanize.Bytes(uint64(n)))
	}
	if err := f.Sync(); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(f.Name(), output)
}
Code example #8
func (w *hexWriter) Write(p []byte) (n int, err error) {
	for _, v := range p {
		if w.count == 16 {
			if !w.sizeWritten {
				w.hrs.write("  // ")
				w.hrs.write(humanize.Bytes(w.size))
				w.sizeWritten = true
			}
			w.hrs.newLine()
			w.count = 0
		} else if w.count != 0 {
			w.hrs.write(" ")
		}
		if v < 0x10 {
			w.hrs.write("0")
		}
		w.hrs.write(strconv.FormatUint(uint64(v), 16))
		if w.hrs.err != nil {
			err = w.hrs.err
			return
		}
		n++
		w.count++
	}
	return
}
Code example #9
File: restore_point_details.go Project: grrtrr/clcv2
// pretty-print #files/#bytes transfer statistics
func fmtTransfer(numFiles, numBytes uint64, duration time.Duration) string {
	var s string

	if numFiles == 0 && numBytes == 0 {
		return "none"
	}
	s = fmt.Sprintf("%d file", numFiles)
	if numFiles != 1 {
		s += "s"
	}
	s = fmt.Sprintf("%s, %s", s, humanize.Bytes(numBytes))

	if duration > 0 {
		s = fmt.Sprintf("%s (%s/s)", s, humanize.Bytes(numBytes/uint64(duration.Seconds())))
	}
	return s
}
Code example #10
File: diff.go Project: Richardphp/noms
func writeEncodedValue(w io.Writer, v types.Value) error {
	if v.Type().Kind() != types.BlobKind {
		return types.WriteEncodedValue(w, v)
	}
	write(w, []byte("Blob ("))
	write(w, []byte(humanize.Bytes(v.(types.Blob).Len())))
	return write(w, []byte(")"))
}
Code example #11
File: metainfo.go Project: ranveerkunal/bt
func (mi *MetaInfo) String() string {
	s := ""
	if mi.Info != nil {
		s += fmt.Sprintf("info: %x(sha1)\n", mi.infoHash)
		s += fmt.Sprintf("\tpiece length: %d\n", mi.Info.PieceLength)
		s += fmt.Sprintf("\tpieces: suppressed\n")
		s += fmt.Sprintf("\tprivate: %d\n", mi.Info.Private)
		if mi.Info.Name != "" {
			s += fmt.Sprintf("\tname: %q", mi.Info.Name)
		}
		if mi.Info.Length > 0 {
			s += fmt.Sprintf(" (%s)\n", humanize.Bytes(mi.Info.Length))
		} else {
			s += "\n"
		}
		if len(mi.Info.Files) > 0 {
			s += fmt.Sprintf("\tfiles:\n")
		}
		for _, f := range mi.Info.Files {
			s += fmt.Sprintf("\t\tpath: %q (%s)\n", filepath.Join(f.Path...), humanize.Bytes(f.Length))
		}
	}
	if len(mi.Announce) > 0 {
		s += fmt.Sprintf("announce: %q\n", mi.Announce)
	}
	if len(mi.AnnounceList) > 0 {
		s += fmt.Sprintf("announce-list:\n")
	}
	for _, a := range mi.AnnounceList {
		s += fmt.Sprintf("\t%q\n", strings.Join(a, ","))
	}
	if mi.CreationDate > 0 {
		s += fmt.Sprintf("creation date: %s\n", time.Unix(mi.CreationDate, 0))
	}
	if mi.Comment != "" {
		s += fmt.Sprintf("comment: %q\n", mi.Comment)
	}
	if mi.CreatedBy != "" {
		s += fmt.Sprintf("created by: %q\n", mi.CreatedBy)
	}
	if mi.Encoding != "" {
		s += fmt.Sprintf("encoding: %s\n", mi.Encoding)
	}
	return s
}
Code example #12
func main() {
	var policies []clcv2.SBSServerPolicy
	var day = time.Now()
	var date = flag.String("start", day.Format("2006-01-02"), "Day to query storage usage for")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options]  <Server-Policy-ID | Server-Name>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}

	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(0)
	}

	day, err := time.Parse("2006-01-02", *date)
	if err != nil {
		exit.Error("invalid backup query date %s (expected format: YYYY-MM-DD)", *date)
	}

	client, err := clcv2.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}

	// First look up the corresponding Server Policy or Policies, since the query needs the Account Policy ID.
	if utils.LooksLikeServerName(flag.Arg(0)) {
		policies, err = client.SBSgetServerPolicyDetails(flag.Arg(0))
		if err != nil {
			exit.Fatalf("failed to list SBS policies for server %s: %s", flag.Arg(0), err)
		}
	} else {
		p, err := client.SBSgetServerPolicy(flag.Arg(0))
		if err != nil {
			exit.Fatalf("failed to list SBS Server Policy %s: %s", flag.Arg(0), err)
		}
		policies = []clcv2.SBSServerPolicy{*p}
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoFormatHeaders(false)
	table.SetAlignment(tablewriter.ALIGN_CENTRE)
	table.SetAutoWrapText(false)
	table.SetHeader([]string{"Server",
		fmt.Sprintf("Usage on %s", day.Format("Mon, Jan 2 2006")),
		"Server Policy ID", "Account Policy ID",
	})

	for _, p := range policies {
		usage, err := client.SBSgetServerStorageUsage(p.AccountPolicyID, p.ID, day)
		if err != nil {
			exit.Fatalf("failed to obtain server %s storage use on %s: %s", p.ServerID, day.Format("2006-01-02"), err)
		}
		table.Append([]string{p.ServerID, humanize.Bytes(usage), p.ID, p.AccountPolicyID})
	}
	table.Render()
}
Code example #13
File: cluster.go Project: ModelRocket/openstorage
func (c *clusterClient) status(context *cli.Context) {
	c.clusterOptions(context)
	jsonOut := context.GlobalBool("json")
	outFd := os.Stdout
	fn := "status"

	cluster, err := c.manager.Enumerate()
	if err != nil {
		cmdError(context, fn, err)
		return
	}

	if jsonOut {
		fmtOutput(context, &Format{Cluster: &cluster})
	} else {
		fmt.Fprintf(outFd, "Cluster Information:\nCluster ID %s: Status: %v\n\n",
			cluster.Id, cluster.Status)

		fmt.Fprintf(outFd, "Load Information:\n")
		w := new(tabwriter.Writer)
		w.Init(outFd, 12, 12, 1, ' ', 0)

		fmt.Fprintln(w, "ID\t MGMT IP\t STATUS\t CPU\t MEM TOTAL\t MEM FREE")
		for _, n := range cluster.Nodes {
			status := ""
			if n.Status == api.Status_STATUS_INIT {
				status = "Initializing"
			} else if n.Status == api.Status_STATUS_OK {
				status = "OK"
			} else if n.Status == api.Status_STATUS_OFFLINE {
				status = "Off Line"
			} else {
				status = "Error"
			}

			fmt.Fprintln(w, n.Id, "\t", n.MgmtIp, "\t", status, "\t",
				n.Cpu, "\t", humanize.Bytes(n.MemTotal), "\t",
				humanize.Bytes(n.MemFree))
		}

		fmt.Fprintln(w)
		w.Flush()
	}
}
Code example #14
File: fetch.go Project: kalman/noms-pre-release
func getStatusPrinter(expectedLen int64) progressreader.Callback {
	return func(seenLen uint64) {
		var expected string
		if expectedLen < 0 {
			expected = "(unknown)"
		} else {
			expected = human.Bytes(uint64(expectedLen))
		}

		elapsed := time.Now().Sub(start)
		rate := uint64(float64(seenLen) / elapsed.Seconds())

		status.Printf("%s of %s written in %ds (%s/s)...",
			human.Bytes(seenLen),
			expected,
			uint64(elapsed.Seconds()),
			human.Bytes(rate))
	}
}
Code example #15
File: main.go Project: vrecan/LimitWindowsDiskCache
func GetSizes() string {
	kernel32, err := syscall.LoadDLL("kernel32.dll")

	if nil != err {
		abort("loadLibrary", err)
	}
	defer kernel32.Release()
	get, err := kernel32.FindProc("GetSystemFileCacheSize")
	if nil != err {
		abort("GetProcAddress", err)
	}
	var minFileCache uint64
	var maxFileCache uint64
	var lpFlags uint32
	res, _, err := get.Call(uintptr(unsafe.Pointer(&minFileCache)), uintptr(unsafe.Pointer(&maxFileCache)), uintptr(unsafe.Pointer(&lpFlags)))
	if res == 0 {
		abort("getSystemFileCacheSize", err)
	}
	return fmt.Sprintf("Min: %v Max: %v Flags: %v", human.Bytes(minFileCache), human.Bytes(maxFileCache), lpFlags)

}
Code example #16
File: client.go Project: radoondas/apachebeat
// PublishEvents sends all events to elasticsearch. On error, a slice with all
// events not published or not confirmed to be processed by elasticsearch is
// returned. The returned slice reuses the input slice's backing memory.
func (client *Client) PublishEvents(
	events []common.MapStr,
) ([]common.MapStr, error) {

	begin := time.Now()
	publishEventsCallCount.Add(1)

	if !client.connected {
		return events, ErrNotConnected
	}

	// new request to store all events into
	request, err := client.startBulkRequest("", "", client.params)
	if err != nil {
		logp.Err("Failed to perform any bulk index operations: %s", err)
		return events, err
	}

	// encode events into bulk request buffer, dropping failed elements from
	// events slice
	events = bulkEncodePublishRequest(request, client.index, events)
	if len(events) == 0 {
		return nil, nil
	}

	// send bulk request
	bufferSize := request.buf.Len()
	_, res, err := request.Flush()
	if err != nil {
		logp.Err("Failed to perform any bulk index operations: %s", err)
		return events, err
	}

	logp.Debug("elasticsearch", "PublishEvents: %d metrics have been packed into a buffer of %s and published to elasticsearch in %v.",
		len(events),
		humanize.Bytes(uint64(bufferSize)),
		time.Now().Sub(begin))

	// check response for transient errors
	client.json.init(res.raw)
	failed_events := bulkCollectPublishFails(&client.json, events)
	ackedEvents.Add(int64(len(events) - len(failed_events)))
	eventsNotAcked.Add(int64(len(failed_events)))
	if len(failed_events) > 0 {
		return failed_events, mode.ErrTempBulkFailure
	}

	return nil, nil
}
Code example #17
File: main.go Project: Richardphp/noms
func main() {
	// use [from/to/by] or [from/iterations]
	nFrom := flag.Uint64("from", 1e2, "start iterations from this number")
	nTo := flag.Uint64("to", 1e4, "run iterations until arriving at this number")
	nBy := flag.Uint64("by", 1, "increment each iteration by this number")
	nIncrements := flag.Uint64("iterations", 0, "number of iterations to execute")
	encodingType := flag.String("encoding", "string", "encode/decode as 'string', 'binary', 'binary-int', 'binary-varint'")
	flag.Parse(true)

	flag.Usage = func() {
		fmt.Printf("%s\n", os.Args[0])
		flag.PrintDefaults()
		return
	}

	t0 := time.Now()
	nBytes := uint64(0)
	nIterations := uint64(0)

	encoderDecoder := getEncoder(*encodingType)
	startingLoop := newBigFloat(*nFrom)

	var endLoop *big.Float
	var incrementer *big.Float

	if *nIncrements > 0 {
		// using from/iterations flags
		fmt.Printf("encoding: %v from: %v iterations: %v\n", *encodingType, *nFrom, *nIncrements)

		incrementer = newBigFloat(1)
		n := newBigFloat(*nIncrements)
		endLoop = n.Add(n, startingLoop)
	} else {
		// using from/to/by flags
		fmt.Printf("encoding: %v from: %v to: %v by: %v\n", *encodingType, *nFrom, *nTo, *nBy)
		incrementer = newBigFloat(*nBy)
		endLoop = newBigFloat(*nTo)
	}

	for i := startingLoop; i.Cmp(endLoop) < 0; i = i.Add(i, incrementer) {
		nIterations++
		nBytes += runTest(encoderDecoder, i)
	}

	t1 := time.Now()
	d := t1.Sub(t0)
	fmt.Printf("IO  %s (%v nums) in %s (%s/s)\n", humanize.Bytes(nBytes), humanize.Comma(int64(nIterations)), d, humanize.Bytes(uint64(float64(nBytes)/d.Seconds())))
}
Code example #18
File: releases.go Project: pulcy/pulsar
func (r *Release) String() string {
	str := make([]string, len(r.Assets)+1)
	str[0] = fmt.Sprintf(
		"%s, name: '%s', description: '%s', id: %d, tagged: %s, published: %s, draft: %v, prerelease: %v",
		r.TagName, r.Name, r.Description, r.Id,
		timeFmtOr(r.Created, RELEASE_DATE_FORMAT, ""),
		timeFmtOr(r.Published, RELEASE_DATE_FORMAT, ""),
		Mark(r.Draft), Mark(r.Prerelease))

	for idx, asset := range r.Assets {
		str[idx+1] = fmt.Sprintf("  - artifact: %s, downloads: %d, state: %s, type: %s, size: %s, id: %d",
			asset.Name, asset.Downloads, asset.State, asset.ContentType,
			humanize.Bytes(asset.Size), asset.Id)
	}

	return strings.Join(str, "\n")
}
Code example #19
File: main.go Project: influxdata/kapacitor
func doList(args []string) error {
	if len(args) == 0 {
		fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', or 'replays'")
		listUsage()
		os.Exit(2)
	}

	var patterns []string
	if len(args) >= 2 {
		patterns = args[1:]
	} else {
		patterns = []string{""}
	}

	limit := 100

	switch kind := args[0]; kind {
	case "tasks":
		maxID := 2 // len("ID")
		var allTasks TaskList
		for _, pattern := range patterns {
			offset := 0
			for {
				tasks, err := cli.ListTasks(&client.ListTasksOptions{
					Pattern: pattern,
					Fields:  []string{"type", "status", "executing", "dbrps"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}
				allTasks = append(allTasks, tasks...)
				for _, t := range tasks {
					if l := len(t.ID); l > maxID {
						maxID = l
					}
				}
				if len(tasks) != limit {
					break
				}
				offset += limit
			}
		}
		outFmt := fmt.Sprintf("%%-%ds%%-10v%%-10v%%-10v%%s\n", maxID+1)
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Executing", "Databases and Retention Policies")
		sort.Sort(allTasks)
		for _, t := range allTasks {
			fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs)
		}
	case "templates":
		maxID := 2 // len("ID")
		var allTemplates TemplateList
		for _, pattern := range patterns {
			offset := 0
			for {
				templates, err := cli.ListTemplates(&client.ListTemplatesOptions{
					Pattern: pattern,
					Fields:  []string{"type", "vars"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}
				allTemplates = append(allTemplates, templates...)

				for _, t := range templates {
					if l := len(t.ID); l > maxID {
						maxID = l
					}
				}
				if len(templates) != limit {
					break
				}
				offset += limit
			}
		}
		outFmt := fmt.Sprintf("%%-%ds%%-10v%%-40v\n", maxID+1)
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Vars")
		sort.Sort(allTemplates)
		for _, t := range allTemplates {
			vars := make([]string, 0, len(t.Vars))
			for name := range t.Vars {
				vars = append(vars, name)
			}
			sort.Strings(vars)
			fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, strings.Join(vars, ","))
		}
	case "recordings":
		maxID := 2 // len("ID")
		// The recordings are returned in sorted order already, no need to sort them here.
		var allRecordings []client.Recording
		for _, pattern := range patterns {
			offset := 0
			for {
				recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{
					Pattern: pattern,
					Fields:  []string{"type", "size", "date", "status"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}
				allRecordings = append(allRecordings, recordings...)
				for _, r := range recordings {
					if l := len(r.ID); l > maxID {
						maxID = l
					}
				}

				if len(recordings) != limit {
					break
				}
				offset += limit
			}
		}
		outFmt := fmt.Sprintf("%%-%ds%%-8v%%-10s%%-10s%%-23s\n", maxID+1)
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Size", "Date")
		for _, r := range allRecordings {
			fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822))
		}
	case "replays":
		maxID := 2        // len("ID")
		maxTask := 4      // len("Task")
		maxRecording := 9 // len("Recording")
		// The replays are returned in sorted order already, no need to sort them here.
		var allReplays []client.Replay
		for _, pattern := range patterns {
			offset := 0
			for {
				replays, err := cli.ListReplays(&client.ListReplaysOptions{
					Pattern: pattern,
					Fields:  []string{"task", "recording", "status", "clock", "date"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}
				allReplays = append(allReplays, replays...)

				for _, r := range replays {
					if l := len(r.ID); l > maxID {
						maxID = l
					}
					if l := len(r.Task); l > maxTask {
						maxTask = l
					}
					if l := len(r.Recording); l > maxRecording {
						maxRecording = l
					}
				}
				if len(replays) != limit {
					break
				}
				offset += limit
			}
		}
		outFmt := fmt.Sprintf("%%-%dv%%-%dv%%-%dv%%-9v%%-8v%%-23v\n", maxID+1, maxTask+1, maxRecording+1)
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Task", "Recording", "Status", "Clock", "Date")
		for _, r := range allReplays {
			fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822))
		}
	case "service-tests":
		outFmt := "%s\n"
		fmt.Fprintf(os.Stdout, outFmt, "Service Name")
		for _, pattern := range patterns {
			serviceTests, err := cli.ListServiceTests(&client.ListServiceTestsOptions{
				Pattern: pattern,
			})
			if err != nil {
				return err
			}

			for _, s := range serviceTests.Services {
				fmt.Fprintf(os.Stdout, outFmt, s.Name)
			}
		}
	default:
		return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings', 'replays' or 'service-tests'?", kind)
	}
	return nil

}
Code example #20
File: main.go Project: wutaizeng/kapacitor
func doList(args []string) error {
	if len(args) == 0 {
		fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', or 'replays'")
		listUsage()
		os.Exit(2)
	}

	var patterns []string
	if len(args) >= 2 {
		patterns = args[1:]
	} else {
		patterns = []string{""}
	}

	limit := 100

	switch kind := args[0]; kind {
	case "tasks":
		outFmt := "%-30s%-10v%-10v%-10v%s\n"
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Executing", "Databases and Retention Policies")
		for _, pattern := range patterns {
			offset := 0
			for {
				tasks, err := cli.ListTasks(&client.ListTasksOptions{
					Pattern: pattern,
					Fields:  []string{"type", "status", "executing", "dbrps"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}

				for _, t := range tasks {
					fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs)
				}
				if len(tasks) != limit {
					break
				}
				offset += limit
			}
		}
	case "templates":
		outFmt := "%-30s%-10v%-40v\n"
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Vars")
		for _, pattern := range patterns {
			offset := 0
			for {
				templates, err := cli.ListTemplates(&client.ListTemplatesOptions{
					Pattern: pattern,
					Fields:  []string{"type", "vars"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}

				for _, t := range templates {
					vars := make([]string, 0, len(t.Vars))
					for name := range t.Vars {
						vars = append(vars, name)
					}
					sort.Strings(vars)
					fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, strings.Join(vars, ","))
				}
				if len(templates) != limit {
					break
				}
				offset += limit
			}
		}
	case "recordings":
		outFmt := "%-40s%-8v%-10s%-10s%-23s\n"
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Size", "Date")
		for _, pattern := range patterns {
			offset := 0
			for {
				recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{
					Pattern: pattern,
					Fields:  []string{"type", "size", "date", "status"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}

				for _, r := range recordings {
					fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822))
				}
				if len(recordings) != limit {
					break
				}
				offset += limit
			}
		}
	case "replays":
		outFmt := "%-40v%-20v%-40v%-9v%-8v%-23v\n"
		fmt.Fprintf(os.Stdout, outFmt, "ID", "Task", "Recording", "Status", "Clock", "Date")
		for _, pattern := range patterns {
			offset := 0
			for {
				replays, err := cli.ListReplays(&client.ListReplaysOptions{
					Pattern: pattern,
					Fields:  []string{"task", "recording", "status", "clock", "date"},
					Offset:  offset,
					Limit:   limit,
				})
				if err != nil {
					return err
				}

				for _, r := range replays {
					fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822))
				}
				if len(replays) != limit {
					break
				}
				offset += limit
			}
		}
	default:
		return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings' or 'replays'?", kind)
	}
	return nil

}
Code example #21
File: fs_ls.go Project: carriercomm/nomad
func (f *FSListCommand) Run(args []string) int {
	var verbose bool
	var machine bool
	var job bool
	flags := f.Meta.FlagSet("fs-list", FlagSetClient)
	flags.Usage = func() { f.Ui.Output(f.Help()) }
	flags.BoolVar(&verbose, "verbose", false, "")
	flags.BoolVar(&machine, "H", false, "")
	flags.BoolVar(&job, "job", false, "")

	if err := flags.Parse(args); err != nil {
		return 1
	}
	args = flags.Args()

	if len(args) < 1 {
		f.Ui.Error("allocation id is a required parameter")
		return 1
	}

	path := "/"
	if len(args) == 2 {
		path = args[1]
	}

	client, err := f.Meta.Client()
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
		return 1
	}

	// If -job is specified, use random allocation, otherwise use provided allocation
	allocID := args[0]
	if job {
		allocID, err = getRandomJobAlloc(client, args[0])
		if err != nil {
			f.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
			return 1
		}
	}

	// Truncate the id unless full length is requested
	length := shortId
	if verbose {
		length = fullId
	}
	// Query the allocation info
	if len(allocID) == 1 {
		f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
		return 1
	}
	if len(allocID)%2 == 1 {
		// Identifiers must be of even length, so we strip off the last byte
		// to provide a consistent user experience.
		allocID = allocID[:len(allocID)-1]
	}

	allocs, _, err := client.Allocations().PrefixList(allocID)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
		return 1
	}
	if len(allocs) == 0 {
		f.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
		return 1
	}
	if len(allocs) > 1 {
		// Format the allocs
		out := make([]string, len(allocs)+1)
		out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
		for i, alloc := range allocs {
			out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
				limit(alloc.ID, length),
				limit(alloc.EvalID, length),
				alloc.JobID,
				alloc.TaskGroup,
				alloc.DesiredStatus,
				alloc.ClientStatus,
			)
		}
		f.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
		return 0
	}
	// Prefix lookup matched a single allocation
	alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
		return 1
	}

	if alloc.DesiredStatus == "failed" {
		allocID := limit(alloc.ID, length)
		msg := fmt.Sprintf(`The allocation %q failed to be placed. To see the cause, run: 
nomad alloc-status %s`, allocID, allocID)
		f.Ui.Error(msg)
		return 0
	}
	// Get the file at the given path
	files, _, err := client.AllocFS().List(alloc, path, nil)
	if err != nil {
		f.Ui.Error(fmt.Sprintf("Error listing alloc dir: %v", err))
		return 1
	}

	// Display the file information in a tabular format
	out := make([]string, len(files)+1)
	out[0] = "Mode|Size|Modified Time|Name"
	for i, file := range files {
		fn := file.Name
		if file.IsDir {
			fn = fmt.Sprintf("%s/", fn)
		}
		var size string
		if machine {
			size = fmt.Sprintf("%d", file.Size)
		} else {
			size = humanize.Bytes(uint64(file.Size))
		}
		out[i+1] = fmt.Sprintf("%s|%s|%s|%s",
			file.FileMode,
			size,
			formatTime(file.ModTime),
			fn,
		)
	}

	f.Ui.Output(formatList(out))
	return 0
}
Code example #22
File: gen_gdl90.go Project: morhall/stratux
func printStats() {
	statTimer := time.NewTicker(30 * time.Second)
	for {
		<-statTimer.C
		var memstats runtime.MemStats
		runtime.ReadMemStats(&memstats)
		log.Printf("stats [started: %s]\n", humanize.RelTime(time.Time{}, stratuxClock.Time, "ago", "from now"))
		log.Printf(" - CPUTemp=%.02f deg C, MemStats.Alloc=%s, MemStats.Sys=%s, totalNetworkMessagesSent=%s\n", globalStatus.CPUTemp, humanize.Bytes(uint64(memstats.Alloc)), humanize.Bytes(uint64(memstats.Sys)), humanize.Comma(int64(totalNetworkMessagesSent)))
		log.Printf(" - UAT/min %s/%s [maxSS=%.02f%%], ES/min %s/%s\n, Total traffic targets tracked=%s", humanize.Comma(int64(globalStatus.UAT_messages_last_minute)), humanize.Comma(int64(globalStatus.UAT_messages_max)), float64(maxSignalStrength)/10.0, humanize.Comma(int64(globalStatus.ES_messages_last_minute)), humanize.Comma(int64(globalStatus.ES_messages_max)), humanize.Comma(int64(len(seenTraffic))))
		if globalSettings.GPS_Enabled {
			log.Printf(" - Last GPS fix: %s, GPS solution type: %d using %d satellites (%d/%d seen/tracked), NACp: %d, est accuracy %.02f m\n", stratuxClock.HumanizeTime(mySituation.LastFixLocalTime), mySituation.quality, mySituation.Satellites, mySituation.SatellitesSeen, mySituation.SatellitesTracked, mySituation.NACp, mySituation.Accuracy)
			log.Printf(" - GPS vertical velocity: %.02f ft/sec; GPS vertical accuracy: %v m\n", mySituation.GPSVertVel, mySituation.AccuracyVert)
		}
	}
}
Code example #23
File: fileinfo.go Project: TarunParimi/lsp
func (fi FileInfo) representSize() string {
	return humanize.Bytes(uint64(fi.f.Size()))
}
Code example #24
File: gen_gdl90.go Project: speedbird21/stratux
func printStats() {
	statTimer := time.NewTicker(30 * time.Second)
	for {
		<-statTimer.C
		var memstats runtime.MemStats
		runtime.ReadMemStats(&memstats)
		log.Printf("stats [up since: %s]\n", humanize.Time(timeStarted))
		log.Printf(" - CPUTemp=%.02f deg C, MemStats.Alloc=%s, MemStats.Sys=%s, totalNetworkMessagesSent=%s\n", globalStatus.CPUTemp, humanize.Bytes(uint64(memstats.Alloc)), humanize.Bytes(uint64(memstats.Sys)), humanize.Comma(int64(totalNetworkMessagesSent)))
		log.Printf(" - UAT/min %s/%s [maxSS=%.02f%%], ES/min %s/%s\n, Total traffic targets tracked=%s", humanize.Comma(int64(globalStatus.UAT_messages_last_minute)), humanize.Comma(int64(globalStatus.UAT_messages_max)), float64(maxSignalStrength)/10.0, humanize.Comma(int64(globalStatus.ES_messages_last_minute)), humanize.Comma(int64(globalStatus.ES_messages_max)), humanize.Comma(int64(len(seenTraffic))))
		if globalSettings.GPS_Enabled {
			log.Printf(" - Last GPS fix: %s, GPS solution type: %d, NACp: %d, est accuracy %.02f m\n", humanize.Time(mySituation.LastFixLocalTime), mySituation.quality, mySituation.NACp, mySituation.Accuracy)
		}
	}
}
Code example #25
File: http.go Project: EaterOfCode/ClueGetter
func httpHandlerMessage(w http.ResponseWriter, r *http.Request) {
	queueId := r.URL.Path[len("/message/"):]
	instances, err := httpParseFilterInstance(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	row := Rdbms.QueryRow(`
		SELECT m.session, m.date, COALESCE(m.body_size,0), CONCAT(m.sender_local, '@', m.sender_domain) sender,
				m.rcpt_count, m.verdict, m.verdict_msg,
				COALESCE(m.rejectScore,0), COALESCE(m.rejectScoreThreshold,0), COALESCE(m.tempfailScore,0),
				(COALESCE(m.rejectScore,0) + COALESCE(m.tempfailScore,0)) scoreCombined,
				COALESCE(m.tempfailScoreThreshold,0), s.ip, s.reverse_dns, s.helo, s.sasl_username,
				s.sasl_method, s.cert_issuer, s.cert_subject, s.cipher_bits, s.cipher, s.tls_version,
				cc.hostname mtaHostname, cc.daemon_name mtaDaemonName
			FROM message m
				LEFT JOIN session s ON s.id = m.session
				LEFT JOIN cluegetter_client cc on s.cluegetter_client = cc.id
			WHERE s.cluegetter_instance IN (`+strings.Join(instances, ",")+`)
				AND m.id = ?`, queueId)
	msg := &httpMessage{Recipients: make([]*httpMessageRecipient, 0)}
	err = row.Scan(&msg.SessionId, &msg.Date, &msg.BodySize, &msg.Sender, &msg.RcptCount,
		&msg.Verdict, &msg.VerdictMsg, &msg.RejectScore, &msg.RejectScoreThreshold,
		&msg.TempfailScore, &msg.ScoreCombined, &msg.TempfailScoreThreshold,
		&msg.Ip, &msg.ReverseDns, &msg.Helo, &msg.SaslUsername, &msg.SaslMethod,
		&msg.CertIssuer, &msg.CertSubject, &msg.CipherBits, &msg.Cipher, &msg.TlsVersion,
		&msg.MtaHostname, &msg.MtaDaemonName)
	if err != nil {
		http.Error(w, "Page Not Found: "+err.Error(), http.StatusNotFound)
		return
	}
	msg.BodySizeStr = humanize.Bytes(uint64(msg.BodySize))

	recipientRows, _ := Rdbms.Query(
		"SELECT r.id, r.local, r.domain FROM recipient r "+
			"LEFT JOIN message_recipient mr ON mr.recipient = r.id "+
			"LEFT JOIN message m ON m.id = mr.message WHERE message = ?", queueId)
	defer recipientRows.Close()
	for recipientRows.Next() {
		recipient := &httpMessageRecipient{}
		recipientRows.Scan(&recipient.Id, &recipient.Local, &recipient.Domain)
		if recipient.Domain == "" {
			recipient.Email = recipient.Local
		} else {
			recipient.Email = recipient.Local + "@" + recipient.Domain
		}
		msg.Recipients = append(msg.Recipients, recipient)
	}

	headerRows, _ := Rdbms.Query("SELECT name, body FROM message_header WHERE message = ?", queueId)
	defer headerRows.Close()
	for headerRows.Next() {
		header := &httpMessageHeader{}
		headerRows.Scan(&header.Name, &header.Body)
		msg.Headers = append(msg.Headers, header)
	}

	checkResultRows, err := Rdbms.Query(
		`SELECT module, verdict, score, weighted_score, COALESCE(duration, 0.0),
			determinants FROM message_result WHERE message = ?`, queueId)
	if err != nil {
		panic("Error while retrieving check results in httpHandlerMessage(): " + err.Error())
	}
	defer checkResultRows.Close()
	for checkResultRows.Next() {
		checkResult := &httpMessageCheckResult{}
		checkResultRows.Scan(&checkResult.Module, &checkResult.Verdict, &checkResult.Score,
			&checkResult.WeightedScore, &checkResult.Duration, &checkResult.Determinants)
		msg.CheckResults = append(msg.CheckResults, checkResult)
	}

	data := struct {
		*HttpViewData
		Message *httpMessage
	}{
		HttpViewData: httpGetViewData(),
		Message:      msg,
	}

	httpRenderOutput(w, r, "message.html", data, msg)
}
Code example #26
File: stemcell.go Project: pivotalservices/bosh-hub
func (s StemcellSource) FormattedSize() string          { return humanize.Bytes(s.Size) }
Code example #27
File: http.go Project: rayyang2000/ClueGetter
func httpHandlerMessage(w http.ResponseWriter, r *http.Request) {
	queueId := r.URL.Path[len("/message/"):]
	row := Rdbms.QueryRow(`
		SELECT m.session, m.date, coalesce(m.body_size,0), m.sender_local || '@' || m.sender_domain sender,
				m.rcpt_count, m.verdict, m.verdict_msg,
				COALESCE(m.rejectScore,0), COALESCE(m.rejectScoreThreshold,0), COALESCE(m.tempfailScore,0),
				(COALESCE(m.rejectScore,0) + COALESCE(m.tempfailScore,0)) scoreCombined,
				COALESCE(m.tempfailScoreThreshold,0), s.ip, s.reverse_dns, s.sasl_username, s.sasl_method,
				s.cert_issuer, s.cert_subject, s.cipher_bits, s.cipher, s.tls_version,
				cc.hostname mtaHostname, cc.daemon_name mtaDaemonName
			FROM message m
				LEFT JOIN session s ON s.id = m.session
				LEFT JOIN cluegetter_client cc on s.cluegetter_client = cc.id
			WHERE m.id = ?`, queueId)
	msg := &httpMessage{Recipients: make([]*httpMessageRecipient, 0)}
	row.Scan(&msg.SessionId, &msg.Date, &msg.BodySize, &msg.Sender, &msg.RcptCount,
		&msg.Verdict, &msg.VerdictMsg, &msg.RejectScore, &msg.RejectScoreThreshold,
		&msg.TempfailScore, &msg.ScoreCombined, &msg.TempfailScoreThreshold,
		&msg.Ip, &msg.ReverseDns, &msg.SaslUsername, &msg.SaslMethod,
		&msg.CertIssuer, &msg.CertSubject, &msg.CipherBits, &msg.Cipher, &msg.TlsVersion,
		&msg.MtaHostname, &msg.MtaDaemonName)
	msg.BodySizeStr = humanize.Bytes(uint64(msg.BodySize))

	recipientRows, _ := Rdbms.Query(
		"SELECT r.id, r.local, r.domain FROM recipient r "+
			"LEFT JOIN message_recipient mr ON mr.recipient = r.id "+
			"LEFT JOIN message m ON m.id = mr.message WHERE message = ?", queueId)
	defer recipientRows.Close()
	for recipientRows.Next() {
		recipient := &httpMessageRecipient{}
		recipientRows.Scan(&recipient.Id, &recipient.Local, &recipient.Domain)
		if recipient.Domain == "" {
			recipient.Email = recipient.Local
		} else {
			recipient.Email = recipient.Local + "@" + recipient.Domain
		}
		msg.Recipients = append(msg.Recipients, recipient)
	}

	headerRows, _ := Rdbms.Query("SELECT name, body FROM message_header WHERE message = ?", queueId)
	defer headerRows.Close()
	for headerRows.Next() {
		header := &httpMessageHeader{}
		headerRows.Scan(&header.Name, &header.Body)
		msg.Headers = append(msg.Headers, header)
	}

	checkResultRows, _ := Rdbms.Query(
		`SELECT module, verdict, score, weighted_score, COALESCE(duration, 0.0),
			determinants FROM message_result WHERE message = ?`, queueId)
	defer checkResultRows.Close()
	for checkResultRows.Next() {
		checkResult := &httpMessageCheckResult{}
		checkResultRows.Scan(&checkResult.Module, &checkResult.Verdict, &checkResult.Score,
			&checkResult.WeightedScore, &checkResult.Duration, &checkResult.Determinants)
		msg.CheckResults = append(msg.CheckResults, checkResult)
	}

	if r.FormValue("json") == "1" {
		httpReturnJson(w, msg)
		return
	}

	tplSkeleton, _ := assets.Asset("htmlTemplates/skeleton.html")
	tplMsg, _ := assets.Asset("htmlTemplates/message.html")
	tpl := template.New("skeleton.html")
	tpl.Parse(string(tplMsg))
	tpl.Parse(string(tplSkeleton))

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := tpl.ExecuteTemplate(w, "skeleton.html", msg); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
Code example #28
File: proxy.go Project: moul/advanced-ssh-config
func proxyGo(host *config.Host, dryRun bool) error {
	stats := ConnectionStats{
		CreatedAt: time.Now(),
	}
	connectHookArgs := ConnectHookArgs{
		Host:  host,
		Stats: &stats,
	}

	Logger.Debugf("Preparing host object")
	if err := hostPrepare(host); err != nil {
		return err
	}

	if dryRun {
		return fmt.Errorf("dry-run: Golang native TCP connection to '%s:%s'", host.HostName, host.Port)
	}

	// BeforeConnect hook
	Logger.Debugf("Calling BeforeConnect hooks")
	beforeConnectDrivers, err := host.Hooks.BeforeConnect.InvokeAll(connectHookArgs)
	if err != nil {
		Logger.Errorf("BeforeConnect hook failed: %v", err)
	}
	defer beforeConnectDrivers.Close()

	Logger.Debugf("Connecting to %s:%s", host.HostName, host.Port)
	conn, err := net.Dial("tcp", fmt.Sprintf("%s:%s", host.HostName, host.Port))
	if err != nil {
		// OnConnectError hook
		connectHookArgs.Error = err
		Logger.Debugf("Calling OnConnectError hooks")
		onConnectErrorDrivers, err := host.Hooks.OnConnectError.InvokeAll(connectHookArgs)
		if err != nil {
			Logger.Errorf("OnConnectError hook failed: %v", err)
		}
		defer onConnectErrorDrivers.Close()

		return err
	}
	Logger.Debugf("Connected to %s:%s", host.HostName, host.Port)
	stats.ConnectedAt = time.Now()

	// OnConnect hook
	Logger.Debugf("Calling OnConnect hooks")
	onConnectDrivers, err := host.Hooks.OnConnect.InvokeAll(connectHookArgs)
	if err != nil {
		Logger.Errorf("OnConnect hook failed: %v", err)
	}
	defer onConnectDrivers.Close()

	// Ignore SIGHUP
	signal.Ignore(syscall.SIGHUP)

	waitGroup := sync.WaitGroup{}
	result := exportReadWrite{}

	ctx, cancel := context.WithCancel(context.Background())
	ctx = context.WithValue(ctx, "sync", &waitGroup)

	waitGroup.Add(2)
	c1 := readAndWrite(ctx, conn, os.Stdout)
	c2 := readAndWrite(ctx, os.Stdin, conn)
	select {
	case result = <-c1:
		stats.WrittenBytes = result.written
	case result = <-c2:
	}
	if result.err != nil && result.err == io.EOF {
		result.err = nil
	}

	conn.Close()
	cancel()
	waitGroup.Wait()
	select {
	case res := <-c1:
		stats.WrittenBytes = res.written
	default:
	}

	stats.DisconnectedAt = time.Now()
	stats.ConnectionDuration = stats.DisconnectedAt.Sub(stats.ConnectedAt)
	averageSpeed := float64(stats.WrittenBytes) / stats.ConnectionDuration.Seconds()
	// round duration
	stats.ConnectionDuration = ((stats.ConnectionDuration + time.Second/2) / time.Second) * time.Second
	stats.AverageSpeed = math.Ceil(averageSpeed*1000) / 1000
	// human
	stats.WrittenBytesHuman = humanize.Bytes(stats.WrittenBytes)
	connectionDurationHuman := humanize.RelTime(stats.DisconnectedAt, stats.ConnectedAt, "", "")
	stats.ConnectionDurationHuman = strings.Replace(connectionDurationHuman, "now", "0 sec", -1)
	stats.AverageSpeedHuman = humanize.Bytes(uint64(stats.AverageSpeed)) + "/s"

	// OnDisconnect hook
	Logger.Debugf("Calling OnDisconnect hooks")
	onDisconnectDrivers, err := host.Hooks.OnDisconnect.InvokeAll(connectHookArgs)
	if err != nil {
		Logger.Errorf("OnDisconnect hook failed: %v", err)
	}
	defer onDisconnectDrivers.Close()

	Logger.Debugf("Byte written %v", stats.WrittenBytes)
	return result.err
}
Code example #29
File: system.go Project: natrim/grainbot
// InitSystem register's dice commands on module load
func InitSystem(mod *modules.Module) {
	owner := &modules.OwnerPermission{}
	mod.AddResponse(quitreg, func(r *modules.Response) {
		r.Respond("okey, " + r.Nick + "! Goodbye everypony!")
		syscall.Kill(syscall.Getpid(), syscall.SIGINT)
	}, owner)
	mod.AddResponse(restartreg, func(r *modules.Response) {
		r.Respond("okey, " + r.Nick + "! Reebooot!")
		syscall.Kill(syscall.Getpid(), syscall.SIGUSR2)
	}, owner)
	mod.AddResponse(joinpartreg, func(r *modules.Response) {
		if r.Matches[1] == "join" {
			if len(r.Matches)-1 >= 3 && r.Matches[3] != "" {
				r.Server.Join("#" + r.Matches[3])
				r.Respond("okey, " + r.Nick + "! let'z join " + "#" + r.Matches[3])
			} else {
				r.Mention("tell me where to join!")
			}
		} else if r.Matches[1] == "part" {
			if len(r.Matches)-1 >= 3 && r.Matches[3] != "" {
				r.Respond("okey, " + r.Nick + "! let'z leave " + "#" + r.Matches[3])
				r.Server.Part("#" + r.Matches[3])
			} else if r.Channel != "" {
				r.Respond("okey, " + r.Nick + " leaving from here!")
				r.Server.Part(r.Channel)
			} else {
				r.Mention("tell me from what to leave!")
			}
		}

	}, owner)
	mod.AddResponse(nickreg, func(r *modules.Response) {
		r.Server.Nick(r.Matches[1])
		r.Respond("okey, " + r.Nick + "! Let'z talk as another pony!")
	}, owner)

	mod.AddResponse(statsreg, func(r *modules.Response) {
		mem := &runtime.MemStats{}
		runtime.ReadMemStats(mem)
		r.Respond("PID: " + strconv.Itoa(syscall.Getpid()) + ", last (re)start: " + human.Time(startTime) + ", sys memory: " + human.Bytes(mem.Sys))
	}, owner)
}
Code example #30
File: badged.go Project: iron-io/imagelayers-graph
func (bd badgeD) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	url := r.URL.Path
	if len(url) <= 1 {
		http.NotFound(w, r)
		return
	}

	if !strings.HasSuffix(url, ".svg") {
		http.NotFound(w, r)
		return
	}

	parts := strings.Split(strings.TrimSuffix(strings.TrimPrefix(url, "/"), ".svg"), ":")
	if len(parts) != 2 {
		log.Println("error parsing parts for", url)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}

	image, tag := parts[0], parts[1]

	if str, ok := bd.cache[image+tag]; ok {
		w.Header().Set("Content-type", "image/svg+xml")
		fmt.Fprint(w, str)
		return
	}

	const contentTypeReq = "application/json;charset=UTF-8"
	bodyReq := strings.NewReader(fmt.Sprintf(`{"repos":[{"name":"%s","tag":"%s"}]}`, image, tag))
	resp, err := http.Post(bd.urlReq, contentTypeReq, bodyReq)
	if err != nil {
		log.Println("error posting call:", err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()

	var data []struct {
		Repo struct {
			Name   string `json:"name"`
			Tag    string `json:"tag"`
			Size   uint64 `json:"size"`
			Count  int    `json:"count"`
			Status int    `json:"status"`
		} `json:"repo"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		log.Println("error parsing JSON response:", err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}

	if len(data) == 0 {
		log.Println("can't find image")
		http.NotFound(w, r)
		return
	}

	w.Header().Set("Content-type", "image/svg+xml")
	var buf bytes.Buffer
	bd.renderer.Execute(&buf, struct {
		Size   string
		Layers string
	}{humanize.Bytes(data[0].Repo.Size), fmt.Sprint(data[0].Repo.Count, " layers")})

	bd.cache[image+tag] = buf.String()
	fmt.Fprint(w, buf.String())
}