Code example #1
File: scan.go Project: yosiat/cockroach
func (n *scanNode) unmarshalValue(kv client.KeyValue) (parser.Datum, bool) {
	kind, ok := n.colKind[n.colID]
	if !ok {
		n.err = fmt.Errorf("column-id \"%d\" does not exist", n.colID)
		return nil, false
	}
	if kv.Exists() {
		switch kind {
		case ColumnType_INT:
			return parser.DInt(kv.ValueInt()), true
		case ColumnType_BOOL:
			return parser.DBool(kv.ValueInt() != 0), true
		case ColumnType_FLOAT:
			return parser.DFloat(math.Float64frombits(uint64(kv.ValueInt()))), true
		case ColumnType_STRING, ColumnType_BYTES:
			return parser.DString(kv.ValueBytes()), true
		case ColumnType_DATE:
			var t time.Time
			if err := t.UnmarshalBinary(kv.ValueBytes()); err != nil {
				return nil, false
			}
			return parser.DDate{Time: t}, true
		case ColumnType_TIMESTAMP:
			var t time.Time
			if err := t.UnmarshalBinary(kv.ValueBytes()); err != nil {
				return nil, false
			}
			return parser.DTimestamp{Time: t}, true
		case ColumnType_INTERVAL:
			return parser.DInterval{Duration: time.Duration(kv.ValueInt())}, true
		}
	}
	return parser.DNull, true
}
Code example #2
File: virtualmtime.go Project: kattunga/syncthing
func (r *VirtualMtimeRepo) GetMtime(path string, diskMtime time.Time) time.Time {
	data, exists := r.ns.Bytes(path)
	if !exists {
		// Absence of a debug print is significant enough in itself here
		return diskMtime
	}

	var mtime time.Time
	if err := mtime.UnmarshalBinary(data[:len(data)/2]); err != nil {
		panic(fmt.Sprintf("Can't unmarshal stored mtime at path %s: %v", path, err))
	}

	if mtime.Equal(diskMtime) {
		if err := mtime.UnmarshalBinary(data[len(data)/2:]); err != nil {
			panic(fmt.Sprintf("Can't unmarshal stored mtime at path %s: %v", path, err))
		}

		if debug {
			l.Debugf("virtual mtime: return %v instead of %v for path: %s", mtime, diskMtime, path)
		}
		return mtime
	}

	if debug {
		l.Debugf("virtual mtime: record exists, but mismatch inDisk: %v dbDisk: %v for path: %s", diskMtime, mtime, path)
	}
	return diskMtime
}
Code example #3
File: redis.go Project: gofmt/oauth2
// LoadAccess retrieves previously stored access data by its code.
func (s *RedisStorage) LoadAccess(code string) (*osin.AccessData, error) {
	logs.Debug("LoadAccess: %s", code)

	d_map, err := s.client.HGetAllMap(code).Result()
	if len(d_map) == 0 || err != nil {
		return nil, errors.New("Authorize not found")
	}

	client, err := s.getClient(d_map["client"])
	if err != nil {
		return nil, err
	}
	expires_in, _ := strconv.Atoi(d_map["expires_in"])
	created_at := new(time.Time)
	created_at.UnmarshalBinary([]byte(d_map["created_at"]))

	d := &osin.AccessData{
		Client:      client,
		ExpiresIn:   int32(expires_in),
		Scope:       d_map["scope"],
		RedirectUri: d_map["redirect_uri"],
		CreatedAt:   *created_at,
		UserData:    d_map["user_data"],
	}
	return d, nil
}
Code example #4
File: persistence.go Project: nubunto/vise
func CheckExpiration() error {
	return db.Update(func(tx *bolt.Tx) error {
		files := tx.Bucket([]byte("files"))
		filesCursor := files.Cursor()
		for fileToken, _ := filesCursor.First(); fileToken != nil; fileToken, _ = filesCursor.Next() {
			fileBucket := files.Bucket(fileToken)
			log.Println("Scanning file token", string(fileToken))

			possibleDeletionDate := time.Now()

			var deleteDate time.Time
			err := deleteDate.UnmarshalBinary(fileBucket.Get([]byte("delete-date")))
			if err != nil {
				log.Println("Delete date unmarshal error:", err)
				continue
			}
			y, m, d := possibleDeletionDate.Date()
			yy, mm, dd := deleteDate.Date()
			if y == yy && m == mm && d == dd {
				log.Println("Removing file...")
				err = os.RemoveAll(path.Join(uppath.UploadedPath, string(fileToken)))
				if err != nil {
					log.Println("Failed to remove file:", fileToken, " -", err)
					continue
				}
				files.DeleteBucket(fileToken)
			}
		}
		return nil
	})
}
Code example #5
File: directory.go Project: disorganizer/brig
func (d *Directory) FromProto(pnd *wire.Node) error {
	pbd := pnd.Directory

	modTime := time.Time{}
	if err := modTime.UnmarshalBinary(pnd.ModTime); err != nil {
		return err
	}

	d.id = pnd.ID
	d.modTime = modTime
	d.parent = pbd.Parent
	d.size = pnd.NodeSize
	d.hash = &Hash{pnd.Hash}
	d.name = pnd.Name
	d.children = make(map[string]*Hash)

	// Be cautious, input might come from everywhere:
	links := pbd.Links
	if len(pbd.Names) != len(links) {
		return fmt.Errorf("Malformed input: More or less names than links in `%s`", d.name)
	}

	// Find our place in the world:
	for idx, name := range pbd.Names {
		d.children[name] = &Hash{links[idx]}
	}

	return nil
}
Code example #6
File: datetime.go Project: spkg/local
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (dt *DateTime) UnmarshalBinary(data []byte) error {
	var t time.Time
	if err := t.UnmarshalBinary(data); err != nil {
		return err
	}
	*dt = DateTimeFromTime(t)
	return nil
}
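As a quick aside on the pattern in example #6: a wrapper type satisfies encoding.BinaryMarshaler / encoding.BinaryUnmarshaler simply by delegating to time.Time, and callers can round-trip it as sketched below. The LocalDateTime type here is a hypothetical stand-in for illustration, not the actual spkg/local API.

package main

import (
	"fmt"
	"time"
)

// LocalDateTime is a hypothetical wrapper used only to illustrate the
// round trip; it is not the spkg/local DateTime type.
type LocalDateTime struct {
	t time.Time
}

// MarshalBinary delegates to time.Time's binary encoding.
func (dt LocalDateTime) MarshalBinary() ([]byte, error) {
	return dt.t.MarshalBinary()
}

// UnmarshalBinary restores the wrapped time from its binary encoding.
func (dt *LocalDateTime) UnmarshalBinary(data []byte) error {
	var t time.Time
	if err := t.UnmarshalBinary(data); err != nil {
		return err
	}
	dt.t = t
	return nil
}

func main() {
	orig := LocalDateTime{t: time.Now()}

	data, err := orig.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var decoded LocalDateTime
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}

	fmt.Println("round trip equal:", orig.t.Equal(decoded.t))
}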
Code example #7
File: wire_format.go Project: cs2dsb/udp-sd
func DeserializeTime(chunk *Chunk, out *time.Time) error {
	timeField, err := UnwrapField(chunk)
	if err != nil {
		return err
	}

	err = out.UnmarshalBinary(timeField)
	return err
}
Code example #8
File: icmp.go Project: jwendel/ping
func (p *icmpEcho) Decode() (time.Time, error) {
	t := time.Time{}
	err := t.UnmarshalBinary(p.Data[:15])
	if err != nil {
		fmt.Printf("icmpEcho.UnmarshalBinary problem: %v\n", err)
	}

	return t, err
}
Code example #9
File: namespaced.go Project: kattunga/syncthing
// Time returns the stored value interpreted as a time.Time and a boolean
// that is false if no value was stored at the key.
func (n NamespacedKV) Time(key string) (time.Time, bool) {
	var t time.Time
	keyBs := append(n.prefix, []byte(key)...)
	valBs, err := n.db.Get(keyBs, nil)
	if err != nil {
		return t, false
	}
	err = t.UnmarshalBinary(valBs)
	return t, err == nil
}
Code example #10
File: device.go Project: qbit/syncthing
func (s *DeviceStatisticsReference) GetLastSeen() time.Time {
	value, err := s.db.Get(s.key(deviceStatisticTypeLastSeen), nil)
	if err != nil {
		if err != leveldb.ErrNotFound {
			l.Warnln("DeviceStatisticsReference: Failed loading last seen value for", s.device, ":", err)
		}
		return time.Unix(0, 0)
	}

	rtime := time.Time{}
	err = rtime.UnmarshalBinary(value)
	if err != nil {
		l.Warnln("DeviceStatisticsReference: Failed parsing last seen value for", s.device, ":", err)
		return time.Unix(0, 0)
	}
	if debug {
		l.Debugln("stats.DeviceStatisticsReference.GetLastSeen:", s.device, rtime)
	}
	return rtime
}
Code example #11
File: file.go Project: disorganizer/brig
func (f *File) FromProto(pnd *wire.Node) error {
	pfi := pnd.File
	if pfi == nil {
		return fmt.Errorf("File attribute is empty. This is likely not a real file.")
	}

	modTime := time.Time{}
	if err := modTime.UnmarshalBinary(pnd.ModTime); err != nil {
		return err
	}

	f.id = pnd.ID
	f.size = pnd.NodeSize
	f.modTime = modTime
	f.hash = &Hash{pnd.Hash}
	f.parent = pfi.Parent
	f.name = pnd.Name
	f.key = pfi.Key
	return nil
}
Code example #12
File: persistence.go Project: nubunto/vise
func Inspect() ([]interface{}, error) {
	ret := make([]interface{}, 0)
	err := db.View(func(tx *bolt.Tx) error {
		files := tx.Bucket([]byte("files"))
		files.ForEach(func(fileToken, _ []byte) error {
			fileInfo := files.Bucket(fileToken)
			days, _ := strconv.Atoi(string(fileInfo.Get([]byte("expires-in"))))
			var creationTime, deleteDate time.Time
			var err error
			err = creationTime.UnmarshalBinary(fileInfo.Get([]byte("creation-time")))
			if err != nil {
				log.Println("Created date unmarshal error:", err)
			}
			err = deleteDate.UnmarshalBinary(fileInfo.Get([]byte("delete-date")))
			if err != nil {
				log.Println("Delete date unmarshal error:", err)
			}
			data := struct {
				Days       int
				Filename   string
				Created    string
				DeleteDate string
			}{
				days,
				string(fileInfo.Get([]byte("filename"))),
				creationTime.String(),
				deleteDate.String(),
			}
			ret = append(ret, data)
			return nil
		})
		return nil
	})
	if err != nil {
		return nil, err
	}
	return ret, nil
}
Code example #13
File: type.go Project: rod6/rodis
func parseMetadata(metadata []byte) (byte, *time.Time, error) {
	if len(metadata) < 2 {
		return None, nil, ErrMetaFormat
	}
	if metadata[0] != MetaVersion {
		return None, nil, ErrMetaFormat
	}

	tipe := metadata[1] & 0x0F // lower 4 bits of metadata[1] is type
	hasExpire := metadata[1]&byte(0xF0) == byte(0x10)

	if !hasExpire {
		return tipe, nil, nil
	}

	var expireAt time.Time
	err := expireAt.UnmarshalBinary(metadata[2:])
	if err != nil {
		return None, nil, ErrMetaFormat
	}

	return tipe, &expireAt, nil
}
Code example #14
File: main.go Project: sjn1978/go-fuzz
func Fuzz(data []byte) int {
	var t time.Time
	if err := t.UnmarshalText(data); err != nil {
		return 0
	}
	data1, err := t.MarshalText()
	if err != nil {
		panic(err)
	}
	var t1 time.Time
	if err := t1.UnmarshalText(data1); err != nil {
		panic(err)
	}
	if !fuzz.DeepEqual(t, t1) {
		fmt.Printf("t0: %#v\n", t)
		fmt.Printf("t1: %#v\n", t1)
		panic("bad MarshalText")
	}

	data2, err := t.GobEncode()
	if err != nil {
		panic(err)
	}
	var t2 time.Time
	if err := t2.GobDecode(data2); err != nil {
		panic(err)
	}
	if !fuzz.DeepEqual(t, t2) {
		fmt.Printf("t0: %#v\n", t)
		fmt.Printf("t2: %#v\n", t2)
		panic("bad GobEncode")
	}

	data3, err := t.MarshalBinary()
	if err != nil {
		panic(err)
	}
	var t3 time.Time
	if err := t3.UnmarshalBinary(data3); err != nil {
		panic(err)
	}
	if !fuzz.DeepEqual(t, t3) {
		fmt.Printf("t0: %#v\n", t)
		fmt.Printf("t3: %#v\n", t3)
		panic("bad MarshalBinary")
	}

	data4, err := t.MarshalJSON()
	if err != nil {
		panic(err)
	}
	var t4 time.Time
	if err := t4.UnmarshalJSON(data4); err != nil {
		panic(err)
	}
	if !fuzz.DeepEqual(t, t4) {
		fmt.Printf("t0: %#v\n", t)
		fmt.Printf("t4: %#v\n", t4)
		panic("bad MarshalJSON")
	}

	data5, err := t.MarshalText()
	if err != nil {
		panic(err)
	}
	var t5 time.Time
	if err := t5.UnmarshalText(data5); err != nil {
		panic(err)
	}
	if !fuzz.DeepEqual(t, t5) {
		fmt.Printf("t0: %#v\n", t)
		fmt.Printf("t5: %#v\n", t5)
		panic("bad MarshalText")
	}

	data6 := t.Format(time.RFC3339Nano)
	t6, err := time.Parse(time.RFC3339Nano, data6)
	if err != nil {
		panic(err)
	}
	if !fuzz.DeepEqual(t, t6) {
		fmt.Printf("t0: %#v\n", t)
		fmt.Printf("t6: %#v\n", t6)
		panic("bad Format")
	}
	return 1
}
Code example #15
File: main.go Project: pjvds/edgy
func main() {
	flag.Parse()
	tidy.Configure().LogFromLevelSpecifiedByEnvironment().To(tidy.Console).MustBuildDefault()

	app := cli.NewApp()
	app.Name = "edgy command line interface"
	app.Commands = []cli.Command{
		cli.Command{
			Name: "publish",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "hosts",
					Value:  "localhost:5050",
					EnvVar: "EDGY_HOSTS",
				},
				cli.StringFlag{
					Name:  "topic",
					Value: "",
				},
				cli.StringFlag{
					Name:  "key",
					Value: "",
				},
				cli.StringFlag{
					Name:  "payload",
					Value: "",
				},
			},
			Action: func(ctx *cli.Context) {
				hosts := ctx.String("hosts")
				topic := ctx.String("topic")
				key := ctx.String("key")
				payload := ctx.String("payload")

				if len(hosts) == 0 {
					fmt.Fprintf(os.Stderr, "missing hosts")
					return
				}
				if len(topic) == 0 {
					fmt.Fprintf(os.Stderr, "missing topic")
					return
				}
				if len(key) == 0 {
					fmt.Fprintf(os.Stderr, "missing partition key")
					return
				}
				if len(payload) == 0 {
					fmt.Fprintf(os.Stderr, "missing payload")
					return
				}

				if payload == "-" {
					stdin, err := ioutil.ReadAll(os.Stdin)
					if err != nil {
						fmt.Fprintf(os.Stderr, "failed to read payload from stdin")
						return
					}
					if len(stdin) == 0 {
						fmt.Fprintf(os.Stderr, "empty payload from stdin")
						return

					}
					payload = string(stdin)
				}

				builder, err := client.NewCluster().FromHosts(hosts)
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				cluster, err := builder.Build()
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				producer, err := client.NewProducer(cluster, client.ProducerConfig{
					QueueTime: 0,
					QueueSize: 1,
				})
				if err != nil {
					println("failed to create producer: " + err.Error())
					return
				}

				if err := producer.Append(topic, int(xxhash.Checksum32([]byte(key))), []byte(payload)).Wait(); err != nil {
					println("failed: " + err.Error())
				}
			},
		},
		cli.Command{
			Name: "writebench",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "hosts",
					Value:  "localhost:5050",
					EnvVar: "EDGY_HOSTS",
				},
				cli.StringFlag{
					Name:  "topic",
					Value: "writebench",
				},
				cli.IntFlag{
					Name:  "num",
					Value: 1e6,
				},
				cli.IntFlag{
					Name:  "size",
					Value: 50,
				},
				cli.IntFlag{
					Name:  "queue.size",
					Value: 1000,
				},
				cli.DurationFlag{
					Name:  "queue.time",
					Value: 50 * time.Millisecond,
				},
			},
			Action: func(ctx *cli.Context) {
				hosts := ctx.String("hosts")
				num := ctx.Int("num")
				topic := ctx.String("topic")
				size := ctx.Int("size")
				queueSize := ctx.Int("queue.size")
				queueTime := ctx.Duration("queue.time")

				builder, err := client.NewCluster().FromHosts(hosts)
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				cluster, err := builder.Build()
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				producer, err := client.NewProducer(cluster, client.ProducerConfig{
					QueueTime: queueTime,
					QueueSize: queueSize,
				})
				if err != nil {
					println("failed to create producer: " + err.Error())
					return
				}

				work := sync.WaitGroup{}
				work.Add(num)

				started := time.Now()
				payload := randombytes.Make(size)

				for n := 0; n < num; n++ {
					result := producer.Append(topic, n, payload)

					go func(result client.AppendResult) {
						result.Wait()
						work.Done()
					}(result)
				}

				work.Wait()
				elapsed := time.Now().Sub(started)
				msgsPerSecond := float64(num) / elapsed.Seconds()
				totalMb := float64(num*size) / (1e6)

				fmt.Printf("run time: %v\n", elapsed)
				fmt.Printf("total msgs: %v\n", num)
				fmt.Printf("msgs/s: %v\n", msgsPerSecond)
				fmt.Printf("total transfered: %v\n", totalMb)
				fmt.Printf("MB/s: %v\n", totalMb/elapsed.Seconds())
				fmt.Printf("done!")
			},
		},

		cli.Command{
			Name: "latency-single",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "hosts",
					Value:  "localhost:5050",
					EnvVar: "EDGY_HOSTS",
				},
				cli.StringFlag{
					Name:  "topic",
					Value: "latencybench",
				},
				cli.StringFlag{
					Name:  "payload",
					Value: "foobar",
				},
				cli.IntFlag{
					Name:  "queue.size",
					Value: 1,
				},
				cli.DurationFlag{
					Name:  "queue.time",
					Value: 0,
				},
				cli.DurationFlag{
					Name:  "timeout",
					Value: 10 * time.Second,
				},
			},
			Action: func(ctx *cli.Context) {
				hosts := ctx.String("hosts")
				topic := ctx.String("topic")
				queueSize := ctx.Int("queue.size")
				queueTime := ctx.Duration("queue.time")
				timeout := ctx.Duration("timeout")
				histogram := hdrhistogram.New(0, time.Minute.Nanoseconds(), 5)

				builder, err := client.NewCluster().FromHosts(hosts)
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				cluster, err := builder.Build()
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				producer, err := client.NewProducer(cluster, client.ProducerConfig{
					QueueTime: queueTime,
					QueueSize: queueSize,
				})
				if err != nil {
					println("failed to create producer: " + err.Error())
					return
				}
				consumer, err := cluster.Consume(true, topic)
				statsTimer := time.NewTicker(time.Second)

				for {
					timeoutChannel := time.After(timeout)

					id := []byte(uuid.New())
					appendedAt := time.Now()

					producer.Append(topic, 0, id)

					select {
					case messages := <-consumer.Messages():
						for _, rawMessage := range messages.Messages.Messages() {
							if bytes.Equal(id, rawMessage[storage.HEADER_LENGTH:]) {
								latency := time.Since(appendedAt)
								println(latency.String())

								histogram.RecordValue(latency.Nanoseconds() / 1e6)
								continue
							}
						}
					case <-statsTimer.C:
						fmt.Printf("p95: %vms\n", histogram.ValueAtQuantile(95))
						fmt.Printf("p50: %vms\n", histogram.ValueAtQuantile(50))
						fmt.Printf("mean: %vms\n", histogram.Mean())
						fmt.Printf("min: %vms\n", histogram.Min())
						fmt.Printf("max: %vms\n", histogram.Max())
						fmt.Printf("count: %v\n", histogram.TotalCount())
					case <-timeoutChannel:
						println("timeout")
					}
				}
			},
		},

		cli.Command{
			Name: "latency-stream",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "hosts",
					Value:  "localhost:5050",
					EnvVar: "EDGY_HOSTS",
				},
				cli.StringFlag{
					Name:  "topic",
					Value: "latencybench",
				},
				cli.StringFlag{
					Name:  "payload",
					Value: "foobar",
				},
				cli.IntFlag{
					Name:  "queue.size",
					Value: 1000,
				},
				cli.DurationFlag{
					Name:  "queue.time",
					Value: 50 * time.Millisecond,
				},
			},
			Action: func(ctx *cli.Context) {
				hosts := ctx.String("hosts")
				topic := ctx.String("topic")
				queueSize := ctx.Int("queue.size")
				queueTime := ctx.Duration("queue.time")

				builder, err := client.NewCluster().FromHosts(hosts)
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				cluster, err := builder.Build()
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				producer, err := client.NewProducer(cluster, client.ProducerConfig{
					QueueTime: queueTime,
					QueueSize: queueSize,
				})
				if err != nil {
					println("failed to create producer: " + err.Error())
					return
				}
				consumer, err := cluster.Consume(true, topic)

				go func() {
					for {
						payload, _ := time.Now().UTC().MarshalBinary()
						producer.Append(topic, 0, payload)
					}
				}()

				timeInMessage := new(time.Time)
				timer := time.NewTicker(1 * time.Second)
				histogram := hdrhistogram.New(0, time.Minute.Nanoseconds(), 5)

				for {
					select {
					case messages := <-consumer.Messages():
						for _, rawMessage := range messages.Messages.Messages() {
							payload := rawMessage[storage.HEADER_LENGTH:]
							if err := timeInMessage.UnmarshalBinary(payload); err != nil {
								fmt.Printf("skipping message: %v\n", err.Error())
								continue
							}

							histogram.RecordValue(time.Since(*timeInMessage).Nanoseconds() / 1e6)
						}
					case <-timer.C:
						fmt.Printf("p95: %vms\n", histogram.ValueAtQuantile(0.95))
						fmt.Printf("p50: %vms\n", histogram.ValueAtQuantile(0.50))
						fmt.Printf("mean: %vms\n", histogram.Mean())
						fmt.Printf("min: %vms\n", histogram.Min())
						fmt.Printf("max: %vms\n", histogram.Max())
						fmt.Printf("count: %v\n", histogram.TotalCount())
					}
				}
			},
		},

		cli.Command{
			Name: "consume",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "hosts",
					Value:  "localhost:5050",
					EnvVar: "EDGY_HOSTS",
				},
				cli.StringFlag{
					Name:  "topics",
					Value: "writebench",
				},
				cli.BoolFlag{
					Name: "devnull",
				},
				cli.BoolFlag{
					Name: "continuous",
				},
			},
			Action: func(ctx *cli.Context) {
				hosts := ctx.String("hosts")
				topics := ctx.String("topics")
				devnull := ctx.Bool("devnull")
				continuous := ctx.Bool("continuous")

				builder, err := client.NewCluster().FromHosts(hosts)
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				cluster, err := builder.Build()
				if err != nil {
					fmt.Printf("cannot build cluster: %v\n", err.Error())
					return
				}

				consumer, err := cluster.Consume(continuous, strings.Split(topics, ",")...)
				if err != nil {
					println(err.Error())
					return
				}

				var messageCounter, byteCounter int64
				startedAt := time.Now()

				for message := range consumer.Messages() {

					if !devnull {
						for _, rawMessage := range message.Messages.Messages() {
							value := string(rawMessage[storage.HEADER_LENGTH:])
							fmt.Fprintln(os.Stdout, value)
						}
					}

					messageCounter += int64(message.Messages.MessageCount())
					byteCounter += message.Messages.DataLen64()
				}

				elapsed := time.Since(startedAt)
				msgsPerSecond := float64(messageCounter) / elapsed.Seconds()
				totalMb := float64(byteCounter) / (1e6)

				fmt.Fprintf(os.Stderr, "run time: %v\n", elapsed)
				fmt.Fprintf(os.Stderr, "total msgs: %v\n", messageCounter)
				fmt.Fprintf(os.Stderr, "msgs/s: %f\n", msgsPerSecond)
				fmt.Fprintf(os.Stderr, "total transfered: %vmb\n", totalMb)
				fmt.Fprintf(os.Stderr, "MB/s: %v\n", totalMb/elapsed.Seconds())
				fmt.Fprintf(os.Stderr, "done!")
			},
		},
	}

	app.Run(os.Args)
}
Code example #16
File: dump.go Project: letiemble/syncthing
func dump(ldb *db.Instance) {
	it := ldb.NewIterator(nil, nil)
	for it.Next() {
		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := binary.BigEndian.Uint32(key[1:])
			device := binary.BigEndian.Uint32(key[1+4:])
			name := nulString(key[1+4+4:])
			fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)

			var f protocol.FileInfo
			err := f.Unmarshal(it.Value())
			if err != nil {
				log.Fatal(err)
			}
			fmt.Printf(" V:%v\n", f)

		case db.KeyTypeGlobal:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			var flv db.VersionList
			flv.Unmarshal(it.Value())
			fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)

		case db.KeyTypeBlock:
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[1+4 : 1+4+32]
			name := nulString(key[1+4+32:])
			fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))

		case db.KeyTypeDeviceStatistic:
			fmt.Printf("[dstat] K:%x V:%x\n", it.Key(), it.Value())

		case db.KeyTypeFolderStatistic:
			fmt.Printf("[fstat] K:%x V:%x\n", it.Key(), it.Value())

		case db.KeyTypeVirtualMtime:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			val := it.Value()
			var real, virt time.Time
			real.UnmarshalBinary(val[:len(val)/2])
			virt.UnmarshalBinary(val[len(val)/2:])
			fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, real, virt)

		case db.KeyTypeFolderIdx:
			key := binary.BigEndian.Uint32(it.Key()[1:])
			fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())

		case db.KeyTypeDeviceIdx:
			key := binary.BigEndian.Uint32(it.Key()[1:])
			val := it.Value()
			if len(val) == 0 {
				fmt.Printf("[deviceidx] K:%d V:<nil>\n", key)
			} else {
				dev := protocol.DeviceIDFromBytes(val)
				fmt.Printf("[deviceidx] K:%d V:%s\n", key, dev)
			}

		default:
			fmt.Printf("[???]\n  %x\n  %x\n", it.Key(), it.Value())
		}
	}
}
Code example #17
File: commit.go Project: disorganizer/brig
func (cm *Commit) FromProto(pnd *wire.Node) error {
	pcm := pnd.Commit
	if pcm == nil {
		return fmt.Errorf("No commit attr in protobuf. Probably not a commit.")
	}

	author := &Author{}
	if err := author.FromProto(pcm.Author); err != nil {
		return err
	}

	modTime := time.Time{}
	if err := modTime.UnmarshalBinary(pnd.ModTime); err != nil {
		return err
	}

	hash, err := multihash.Cast(pnd.Hash)
	if err != nil {
		return err
	}

	root, err := multihash.Cast(pcm.Root)
	if err != nil {
		return err
	}

	var parent multihash.Multihash
	if len(pcm.Parent) > 0 {
		parent, err = multihash.Cast(pcm.Parent)
		if err != nil {
			return err
		}
	}

	var changeset []*CheckpointLink

	for _, pcl := range pcm.Changeset {
		cl := &CheckpointLink{}
		if err := cl.FromProto(pcl); err != nil {
			return err
		}

		changeset = append(changeset, cl)
	}

	protoMergeInfo := pcm.Merge
	if protoMergeInfo != nil {
		mergeInfo := &Merge{}
		if err := mergeInfo.FromProto(protoMergeInfo); err != nil {
			return err
		}

		cm.merge = mergeInfo
	}

	// Set commit data if everything worked:
	cm.id = pnd.ID
	cm.message = pcm.Message
	cm.author = author
	cm.modTime = modTime
	cm.hash = &Hash{hash}
	cm.root = &Hash{root}

	if parent != nil {
		cm.parent = &Hash{parent}
	}
	return nil
}
Code example #18
File: constant.go Project: mc0/okq
func main() {
	flag.Parse()
	agg.CreateInterrupt(1)
	n := runtime.NumCPU()
	runtime.GOMAXPROCS(n)
	okq.Debug = true

	queueCh := make(chan string)
	qs := make([]string, 10)
	for i := range qs {
		qs[i] = randString()
	}
	go func() {
		for {
			for i := range qs {
				queueCh <- qs[i]
			}
		}
	}()

	triggerJobCh := make(chan bool, n*10)
	for i := 0; i < n*10; i++ {
		triggerJobCh <- true
	}

	pushFlag := okq.Normal
	if *noBlock {
		log.Println("using NOBLOCK")
		pushFlag = okq.NoBlock
	}
	for i := 0; i < n; i++ {
		go func() {
			cl := okq.New(*addr)
			for range triggerJobCh {
				eventB, err := time.Now().MarshalBinary()
				if err != nil {
					log.Fatal(err)
				}
				err = cl.Push(<-queueCh, string(eventB), pushFlag)
				if err != nil {
					log.Fatal(err)
				}
			}
		}()
	}

	fn := func(e *okq.Event) bool {
		eventB := []byte(e.Contents)
		var then time.Time
		if err := then.UnmarshalBinary(eventB); err != nil {
			log.Fatal(err)
		}
		agg.Agg("event", time.Since(then).Seconds())
		triggerJobCh <- true
		return true
	}

	var chwg sync.WaitGroup
	for i := 0; i < n; i++ {
		chwg.Add(1)
		go func() {
			cl := okq.New(*addr)
			err := cl.Consumer(fn, stopCh, qs...)
			if err != nil {
				log.Fatalf("got error consuming: %s", err)
			}
		}()
	}
	chwg.Wait()
}