Example #1
File: delete.go Project: keryoo/go-hbase
func (this *Delete) toProto() pb.Message {
	d := &proto.MutationProto{
		Row:        this.key,
		MutateType: proto.MutationProto_DELETE.Enum(),
	}

	for i, v := range this.families {
		cv := &proto.MutationProto_ColumnValue{
			Family:         v,
			QualifierValue: make([]*proto.MutationProto_ColumnValue_QualifierValue, 0),
		}

		if len(this.qualifiers[i]) == 0 {
			cv.QualifierValue = append(cv.QualifierValue, &proto.MutationProto_ColumnValue_QualifierValue{
				Qualifier:  nil,
				Timestamp:  pb.Uint64(uint64(math.MaxInt64)),
				DeleteType: proto.MutationProto_DELETE_FAMILY.Enum(),
			})
		}

		for _, v := range this.qualifiers[i] {
			cv.QualifierValue = append(cv.QualifierValue, &proto.MutationProto_ColumnValue_QualifierValue{
				Qualifier:  v,
				Timestamp:  pb.Uint64(uint64(math.MaxInt64)),
				DeleteType: proto.MutationProto_DELETE_MULTIPLE_VERSIONS.Enum(),
			})
		}

		d.ColumnValue = append(d.ColumnValue, cv)
	}

	return d
}
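The snippets on this page all build proto2 messages, where optional scalar fields are pointers. Below is a minimal, self-contained sketch (not taken from any of the projects above) of the pointer-helper pattern they rely on; it assumes the classic github.com/golang/protobuf/proto runtime, which several snippets appear to import under the alias pb.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// proto.Uint64 simply returns a *uint64 pointing at its argument, which is
	// how optional uint64 fields are populated in the examples on this page.
	ts := proto.Uint64(42)
	ok := proto.Bool(true)
	fmt.Println(*ts, *ok) // 42 true
}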
Example #2
// Encodes the SnapshotRecoveryRequest to a buffer. Returns the number of bytes
// written and any error that may have occurred.
func (req *SnapshotRecoveryRequest) Encode(w io.Writer) (int, error) {

	protoPeers := make([]*protobuf.SnapshotRecoveryRequest_Peer, len(req.Peers))

	for i, peer := range req.Peers {
		protoPeers[i] = &protobuf.SnapshotRecoveryRequest_Peer{
			Name:             proto.String(peer.Name),
			ConnectionString: proto.String(peer.ConnectionString),
		}
	}

	pb := &protobuf.SnapshotRecoveryRequest{
		LeaderName: proto.String(req.LeaderName),
		LastIndex:  proto.Uint64(req.LastIndex),
		LastTerm:   proto.Uint64(req.LastTerm),
		Peers:      protoPeers,
		State:      req.State,
	}
	p, err := proto.Marshal(pb)
	if err != nil {
		return -1, err
	}

	return w.Write(p)
}
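A hedged usage sketch for the Encode method above: the field values and the bytes.Buffer destination are illustrative, and only the plain struct fields visible in the snippet (LeaderName, LastIndex, LastTerm) are assumed, along with the standard bytes and log packages.

var buf bytes.Buffer
req := &SnapshotRecoveryRequest{
	LeaderName: "node-1",
	LastIndex:  42,
	LastTerm:   3,
}
// Encode marshals the request and writes it to buf, returning the byte count.
n, err := req.Encode(&buf)
if err != nil {
	log.Fatalf("encode failed: %v", err)
}
log.Printf("wrote %d bytes", n)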
Example #3
func (s *GameMasterService) FindGame(body []byte) ([]byte, error) {
	req := &game_master_service.FindGameRequest{}
	if err := proto.Unmarshal(body, req); err != nil {
		return nil, err
	}
	token := s.sess.receivedToken
	fmt.Println(req.String())
	advNotification := false
	if req.AdvancedNotification != nil {
		advNotification = *req.AdvancedNotification
	}
	player := req.Player[0]
	notify := NewNotification(NotifyFindGameRequest, map[string]interface{}{
		"advanced_notification": advNotification,
	})
	// TODO: care about game_properties and other stuff
	notify.Attributes = append(notify.Attributes, player.Attribute...)
	s.sess.OnceNotified(NotifyFindGameResponse, func(n *Notification) {
		m := n.Map()
		res := &game_master_service.FindGameResponse{}
		res.Queued = proto.Bool(m["queued"].(bool))
		res.RequestId = proto.Uint64(m["requestId"].(uint64))
		res.FactoryId = proto.Uint64(0)
		buf, err := proto.Marshal(res)
		if err != nil {
			panic(err)
		}
		s.sess.Respond(token, buf)
	})
	s.sess.ServerNotifications <- notify
	return nil, nil
}
Example #4
func (bw *BlockWriter) writeBlockWriteRequest(w io.Writer) error {
	targets := bw.currentPipeline()[1:]

	op := &hdfs.OpWriteBlockProto{
		Header: &hdfs.ClientOperationHeaderProto{
			BaseHeader: &hdfs.BaseHeaderProto{
				Block: bw.block.GetB(),
				Token: bw.block.GetBlockToken(),
			},
			ClientName: proto.String(bw.clientName),
		},
		Targets:               targets,
		Stage:                 bw.currentStage().Enum(),
		PipelineSize:          proto.Uint32(uint32(len(targets))),
		MinBytesRcvd:          proto.Uint64(bw.block.GetB().GetNumBytes()),
		MaxBytesRcvd:          proto.Uint64(uint64(bw.offset)), // I don't understand these two fields
		LatestGenerationStamp: proto.Uint64(uint64(bw.generationTimestamp())),
		RequestedChecksum: &hdfs.ChecksumProto{
			Type:             hdfs.ChecksumTypeProto_CHECKSUM_CRC32.Enum(),
			BytesPerChecksum: proto.Uint32(outboundChunkSize),
		},
	}

	return writeBlockOpRequest(w, writeBlockOp, op)
}
Example #5
// Update the datastore with the user's current state.
func (server *Server) UpdateFrozenUser(client *Client, state *mumbleproto.UserState) {
	// Do a full sync if there's no UserState message provided, or if there is
	// one and it includes a registration operation.
	user := client.user
	nanos := time.Now().Unix()
	if state == nil || state.UserId != nil {
		fu, err := user.Freeze()
		if err != nil {
			server.Fatal(err)
		}
		fu.LastActive = proto.Uint64(uint64(nanos))
		err = server.freezelog.Put(fu)
		if err != nil {
			server.Fatal(err)
		}
	} else {
		fu := &freezer.User{}
		fu.Id = proto.Uint32(user.Id)
		if state.ChannelId != nil {
			fu.LastChannelId = proto.Uint32(uint32(client.Channel.Id))
		}
		if state.TextureHash != nil {
			fu.TextureBlob = proto.String(user.TextureBlob)
		}
		if state.CommentHash != nil {
			fu.CommentBlob = proto.String(user.CommentBlob)
		}
		fu.LastActive = proto.Uint64(uint64(nanos))
		err := server.freezelog.Put(fu)
		if err != nil {
			server.Fatal(err)
		}
	}
	server.numLogOps += 1
}
Example #6
File: client.go Project: nevun/pond
// usageString returns a description of the amount of space taken up by a body
// with the given contents and a bool indicating overflow.
func (draft *Draft) usageString() (string, bool) {
	var replyToId *uint64
	if draft.inReplyTo != 0 {
		replyToId = proto.Uint64(1)
	}
	var dhPub [32]byte

	msg := &pond.Message{
		Id:               proto.Uint64(0),
		Time:             proto.Int64(1 << 62),
		Body:             []byte(draft.body),
		BodyEncoding:     pond.Message_RAW.Enum(),
		InReplyTo:        replyToId,
		MyNextDh:         dhPub[:],
		Files:            draft.attachments,
		DetachedFiles:    draft.detachments,
		SupportedVersion: proto.Int32(protoVersion),
	}

	serialized, err := proto.Marshal(msg)
	if err != nil {
		panic("error while serialising candidate Message: " + err.Error())
	}

	s := fmt.Sprintf("%s of %s bytes", prettyNumber(uint64(len(serialized))), prettyNumber(pond.MaxSerializedMessage))
	return s, len(serialized) > pond.MaxSerializedMessage
}
Example #7
File: themis_rpc.go Project: yzl11/vessel
func (rpc *themisRPC) commitRow(tbl, row []byte, mutations []*columnMutation,
	prewriteTs, commitTs uint64, primaryOffset int) error {
	req := &ThemisCommitRequest{}
	req.ThemisCommit = &ThemisCommit{
		Row:          row,
		PrewriteTs:   pb.Uint64(prewriteTs),
		CommitTs:     pb.Uint64(commitTs),
		PrimaryIndex: pb.Int(primaryOffset),
	}

	for _, m := range mutations {
		req.ThemisCommit.Mutations = append(req.ThemisCommit.Mutations, m.toCell())
	}
	var res ThemisCommitResponse
	err := rpc.call("commitRow", tbl, row, req, &res)
	if err != nil {
		return errors.Trace(err)
	}
	ok := res.GetResult()
	if !ok {
		if primaryOffset == -1 {
			return errors.Errorf("commit secondary failed, tbl: %s row: %q ts: %d", tbl, row, commitTs)
		}
		return errors.Errorf("commit primary failed, tbl: %s row: %q ts: %d", tbl, row, commitTs)
	}
	return nil
}
Example #8
File: get.go Project: yzl11/vessel
func (g *Get) ToProto() pb.Message {
	get := &proto.Get{
		Row: g.Row,
	}

	if g.TsRangeFrom != 0 && g.TsRangeTo != 0 && g.TsRangeFrom <= g.TsRangeTo {
		get.TimeRange = &proto.TimeRange{
			From: pb.Uint64(g.TsRangeFrom),
			To:   pb.Uint64(g.TsRangeTo),
		}
	}

	for v := range g.Families {
		col := &proto.Column{
			Family: []byte(v),
		}
		var quals [][]byte
		for qual := range g.FamilyQuals[v] {
			quals = append(quals, []byte(qual))
		}
		col.Qualifier = quals
		get.Column = append(get.Column, col)
	}
	get.MaxVersions = pb.Uint32(uint32(g.Versions))
	return get
}
Example #9
File: lock.go Project: anywhy/tidb
// If key == nil, only the commit is performed and the returned value is nil.
func (l *txnLock) commitThenGet(commitVersion uint64) ([]byte, error) {
	req := &pb.Request{
		Type: pb.MessageType_CmdCommitThenGet.Enum(),
		CmdCommitGetReq: &pb.CmdCommitThenGetRequest{
			Key:           l.key,
			LockVersion:   proto.Uint64(l.pl.version),
			CommitVersion: proto.Uint64(commitVersion),
			GetVersion:    proto.Uint64(l.ver),
		},
	}
	var backoffErr error
	for backoff := regionMissBackoff(); backoffErr == nil; backoffErr = backoff() {
		region, err := l.store.regionCache.GetRegion(l.key)
		if err != nil {
			return nil, errors.Trace(err)
		}
		resp, err := l.store.SendKVReq(req, region.VerID())
		if err != nil {
			return nil, errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			continue
		}
		cmdCommitGetResp := resp.GetCmdCommitGetResp()
		if cmdCommitGetResp == nil {
			return nil, errors.Trace(errBodyMissing)
		}
		if keyErr := cmdCommitGetResp.GetError(); keyErr != nil {
			return nil, errors.Errorf("unexpected commit err: %s", keyErr.String())
		}
		return cmdCommitGetResp.GetValue(), nil
	}
	return nil, errors.Annotate(backoffErr, txnRetryableMark)
}
Example #10
File: network.go Project: carriercomm/pond
func (c *client) sendDraft(draft *Draft) (uint64, time.Time, error) {
	to := c.contacts[draft.to]

	// Zero length bodies are ACKs.
	if len(draft.body) == 0 {
		draft.body = " "
	}

	id := c.randId()
	created := c.Now()
	message := &pond.Message{
		Id:               proto.Uint64(id),
		Time:             proto.Int64(created.Unix()),
		Body:             []byte(draft.body),
		BodyEncoding:     pond.Message_RAW.Enum(),
		Files:            draft.attachments,
		DetachedFiles:    draft.detachments,
		SupportedVersion: proto.Int32(protoVersion),
	}

	if r := draft.inReplyTo; r != 0 {
		message.InReplyTo = proto.Uint64(r)
	}

	if to.ratchet == nil {
		var nextDHPub [32]byte
		curve25519.ScalarBaseMult(&nextDHPub, &to.currentDHPrivate)
		message.MyNextDh = nextDHPub[:]
	}

	err := c.send(to, message)
	return id, created, err
}
Example #11
// Creates a new log entry associated with a log.
func newLogEntry(log *Log, event *ev, index uint64, term uint64, command Command) (*LogEntry, error) {
	var buf bytes.Buffer
	var commandName string
	if command != nil {
		commandName = command.CommandName()
		if encoder, ok := command.(CommandEncoder); ok {
			if err := encoder.Encode(&buf); err != nil {
				return nil, err
			}
		} else {
			if err := json.NewEncoder(&buf).Encode(command); err != nil {
				return nil, err
			}
		}
	}

	pb := &protobuf.LogEntry{
		Index:       proto.Uint64(index),
		Term:        proto.Uint64(term),
		CommandName: proto.String(commandName),
		Command:     buf.Bytes(),
	}

	e := &LogEntry{
		pb:    pb,
		log:   log,
		event: event,
	}

	return e, nil
}
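A hedged illustration of the two encoding paths in newLogEntry above. The noopCommand type is hypothetical, and it assumes the Command interface only requires CommandName(), which may not hold for every version of the library.

// Hypothetical command with no CommandEncoder implementation: for such a
// command, newLogEntry falls back to json.NewEncoder and stores the JSON
// bytes in the protobuf Command field.
type noopCommand struct {
	Value string `json:"value"`
}

func (c *noopCommand) CommandName() string { return "noop" }

// Usage (illustrative):
//   entry, err := newLogEntry(log, nil, 1, 1, &noopCommand{Value: "x"})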
Example #12
File: rpc.go Project: duzhanyuan/tidb
func (h *rpcHandler) checkContext(ctx *kvrpcpb.Context) *errorpb.Error {
	region, leaderID := h.cluster.GetRegion(ctx.GetRegionId())
	// No region found.
	if region == nil {
		return &errorpb.Error{
			Message: proto.String("region not found"),
			RegionNotFound: &errorpb.RegionNotFound{
				RegionId: proto.Uint64(ctx.GetRegionId()),
			},
		}
	}
	var storePeer, leaderPeer *metapb.Peer
	for _, p := range region.Peers {
		if p.GetStoreId() == h.storeID {
			storePeer = p
		}
		if p.GetId() == leaderID {
			leaderPeer = p
		}
	}
	// The Store does not contain a Peer of the Region.
	if storePeer == nil {
		return &errorpb.Error{
			Message: proto.String("region not found"),
			RegionNotFound: &errorpb.RegionNotFound{
				RegionId: proto.Uint64(ctx.GetRegionId()),
			},
		}
	}
	// No leader.
	if leaderPeer == nil {
		return &errorpb.Error{
			Message: proto.String("no leader"),
			NotLeader: &errorpb.NotLeader{
				RegionId: proto.Uint64(ctx.GetRegionId()),
			},
		}
	}
	// The Peer on the Store is not leader.
	if storePeer.GetId() != leaderPeer.GetId() {
		return &errorpb.Error{
			Message: proto.String("not leader"),
			NotLeader: &errorpb.NotLeader{
				RegionId: proto.Uint64(ctx.GetRegionId()),
				Leader:   leaderPeer,
			},
		}
	}
	// Region epoch does not match.
	if !proto.Equal(region.GetRegionEpoch(), ctx.GetRegionEpoch()) {
		return &errorpb.Error{
			Message:    proto.String("stale epoch"),
			StaleEpoch: &errorpb.StaleEpoch{},
		}
	}
	h.startKey, h.endKey = region.StartKey, region.EndKey
	return nil
}
Example #13
// TestSuccessPath tests that the basic workflow works.
func TestSuccessPath(t *testing.T) {
	pbs := []*pb.LanzRecord{
		{
			ConfigRecord: &pb.ConfigRecord{
				Timestamp:    proto.Uint64(146591697107544),
				LanzVersion:  proto.Uint32(1),
				NumOfPorts:   proto.Uint32(146),
				SegmentSize:  proto.Uint32(512),
				MaxQueueSize: proto.Uint32(524288000),
				PortConfigRecord: []*pb.ConfigRecord_PortConfigRecord{
					{
						IntfName:      proto.String("Cpu"),
						SwitchId:      proto.Uint32(2048),
						PortId:        proto.Uint32(4096),
						InternalPort:  proto.Bool(false),
						HighThreshold: proto.Uint32(50000),
						LowThreshold:  proto.Uint32(25000),
					},
				},
				GlobalUsageReportingEnabled: proto.Bool(true),
			},
		},
		{
			CongestionRecord: &pb.CongestionRecord{
				Timestamp: proto.Uint64(146591697107546),
				IntfName:  proto.String("Cpu"),
				SwitchId:  proto.Uint32(2048),
				PortId:    proto.Uint32(4096),
				QueueSize: proto.Uint32(30000),
			},
		},
		{
			ErrorRecord: &pb.ErrorRecord{
				Timestamp:    proto.Uint64(146591697107549),
				ErrorMessage: proto.String("Error"),
			},
		},
	}

	conn := &testConnector{reader: bytes.NewReader(pbsToStream(pbs))}
	ch := make(chan *pb.LanzRecord)
	c, done := launchClient(ch, conn)
	for i, p := range pbs {
		r, ok := <-ch
		if !ok {
			t.Fatalf("Unexpected closed channel")
		}
		if !test.DeepEqual(p, r) {
			t.Fatalf("Test case %d: expected %v, but got %v", i, p, r)
		}
	}
	c.Stop()
	<-done
	if conn.open {
		t.Fatalf("Connection still open after stopping")
	}
}
Example #14
File: exec.go Project: contester/runlib
func fillResult(result *subprocess.SubprocessResult, response *contester_proto.LocalExecutionResult) {
	if result.TotalProcesses > 0 {
		response.TotalProcesses = proto.Uint64(result.TotalProcesses)
	}
	response.ReturnCode = proto.Uint32(result.ExitCode)
	response.Flags = parseSuccessCode(result.SuccessCode)
	response.Time = parseTime(result)
	response.Memory = proto.Uint64(result.PeakMemory)
	response.StdOut, _ = contester_proto.NewBlob(result.Output)
	response.StdErr, _ = contester_proto.NewBlob(result.Error)
}
Example #15
File: message.go Project: jrossi/gollum
// Serialize generates a byte slice containing all data that can be preserved
// over shutdown (i.e. no data directly referencing runtime components).
func (msg Message) Serialize() ([]byte, error) {
	serializable := &SerializedMessage{
		StreamID:     proto.Uint64(uint64(msg.StreamID)),
		PrevStreamID: proto.Uint64(uint64(msg.PrevStreamID)),
		Timestamp:    proto.Int64(msg.Timestamp.UnixNano()),
		Sequence:     proto.Uint64(msg.Sequence),
		Data:         msg.Data,
	}

	return proto.Marshal(serializable)
}
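A hedged round-trip sketch for Serialize above: unmarshal the produced bytes back into the generated SerializedMessage type. The type and field names come from the snippet; the surrounding control flow is illustrative.

data, err := msg.Serialize()
if err != nil {
	return err
}
restored := &SerializedMessage{}
if err := proto.Unmarshal(data, restored); err != nil {
	return err
}
// restored now carries pointer-valued fields (StreamID, Sequence, ...) that
// mirror what Serialize packed above.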
Example #16
File: protobuf.go Project: jchris/indexing
// protobufEncode encodes the payload message into a protobuf byte array. The
// returned `data` can be transported to the other end and decoded back into a
// Payload message.
func protobufEncode(payload interface{}) (data []byte, err error) {
	pl := protobuf.Payload{
		Version: proto.Uint32(uint32(ProtobufVersion())),
	}

	switch val := payload.(type) {
	case []*c.VbKeyVersions:
		pl.Vbkeys = make([]*protobuf.VbKeyVersions, 0, len(val))
		for _, vb := range val { // for each VbKeyVersions
			pvb := &protobuf.VbKeyVersions{
				Bucketname: proto.String(vb.Bucket),
				Vbucket:    proto.Uint32(uint32(vb.Vbucket)),
				Vbuuid:     proto.Uint64(vb.Vbuuid),
			}
			pvb.Kvs = make([]*protobuf.KeyVersions, 0, len(vb.Kvs))
			for _, kv := range vb.Kvs { // for each mutation
				pkv := &protobuf.KeyVersions{
					Seqno: proto.Uint64(kv.Seqno),
				}
				if len(kv.Docid) > 0 {
					pkv.Docid = kv.Docid
				}
				if len(kv.Uuids) == 0 {
					continue
				}
				l := len(kv.Uuids)
				pkv.Uuids = make([]uint64, 0, l)
				pkv.Commands = make([]uint32, 0, l)
				pkv.Keys = make([][]byte, 0, l)
				pkv.Oldkeys = make([][]byte, 0, l)
				for i, uuid := range kv.Uuids { // for each key-version
					pkv.Uuids = append(pkv.Uuids, uuid)
					pkv.Commands = append(pkv.Commands, uint32(kv.Commands[i]))
					pkv.Keys = append(pkv.Keys, kv.Keys[i])
					pkv.Oldkeys = append(pkv.Oldkeys, kv.Oldkeys[i])
				}
				pvb.Kvs = append(pvb.Kvs, pkv)
			}
			pl.Vbkeys = append(pl.Vbkeys, pvb)
		}

	case *c.VbConnectionMap:
		pl.Vbmap = &protobuf.VbConnectionMap{
			Bucket:   proto.String(val.Bucket),
			Vbuuids:  val.Vbuuids,
			Vbuckets: c.Vbno16to32(val.Vbuckets),
		}
	}

	if err == nil {
		data, err = proto.Marshal(&pl)
	}
	return
}
Example #17
// Creates a new AppendEntries response.
func newAppendEntriesResponse(term uint64, success bool, index uint64, commitIndex uint64) *AppendEntriesResponse {
	pb := &protobuf.AppendEntriesResponse{
		Term:        proto.Uint64(term),
		Index:       proto.Uint64(index),
		Success:     proto.Bool(success),
		CommitIndex: proto.Uint64(commitIndex),
	}

	return &AppendEntriesResponse{
		pb: pb,
	}
}
Example #18
func (w *protoResponseWriter) Stats(rows, unique uint64, min, max []byte) error {
	res := &protobuf.StatisticsResponse{
		Stats: &protobuf.IndexStatistics{
			KeysCount:       proto.Uint64(rows),
			UniqueKeysCount: proto.Uint64(unique),
			KeyMin:          min,
			KeyMax:          max,
		},
	}

	return protobuf.EncodeAndWrite(w.conn, *w.encBuf, res)
}
Example #19
// Token login
func (service *superRpc) AuthByToken(req *msg.OAtuhTokenLogin, res *msg.OAuth2Response) error {
	data := service.super.FindBySinaID(req.GetToken())
	if data == nil {
		res.RetCode = proto.Uint32(uint32(msg.OAtuhRetCode_TOKEN_NOT_FOUND))
	} else {
		res.RetCode = proto.Uint32(uint32(msg.OAtuhRetCode_AUTH_OK))
		res.Accid = proto.Uint64(data.GetAccid())
		res.AuthSid = proto.Uint64(0)
		res.User = data
	}
	return nil
}
Example #20
File: scan.go Project: keryoo/go-hbase
func (s *Scan) getData(nextStart []byte) []*ResultRow {
	if s.closed {
		return nil
	}

	server, location := s.getServerAndLocation(s.table, nextStart)

	req := &proto.ScanRequest{
		Region: &proto.RegionSpecifier{
			Type:  proto.RegionSpecifier_REGION_NAME.Enum(),
			Value: []byte(location.name),
		},
		NumberOfRows: pb.Uint32(uint32(s.numCached)),
		Scan:         &proto.Scan{},
	}

	if s.id > 0 {
		req.ScannerId = pb.Uint64(s.id)
	} else {
		if s.StartRow != nil {
			req.Scan.StartRow = s.StartRow
		}
		if s.StopRow != nil {
			req.Scan.StopRow = s.StopRow
		}
		if s.timeRange != nil {
			req.Scan.TimeRange = &proto.TimeRange{
				From: pb.Uint64(uint64(s.timeRange.From.UnixNano() / 1e6)),
				To:   pb.Uint64(uint64(s.timeRange.To.UnixNano() / 1e6)),
			}
		}
	}

	for i, v := range s.families {
		req.Scan.Column = append(req.Scan.Column, &proto.Column{
			Family:    v,
			Qualifier: s.qualifiers[i],
		})
	}

	log.Debug("sending scan request: [server=%s] [id=%d]", server.name, s.id)

	cl := newCall(req)
	server.call(cl)

	log.Debug("sent scan request: [server=%s] [id=%d]", server.name, s.id)

	select {
	case msg := <-cl.responseCh:
		return s.processResponse(msg)
	}
}
Example #21
func SelectRoleCallback(conn *GxTcpConn, info *LoginInfo, msg *GxMessage) {
	rdClient := PopRedisClient()
	defer PushRedisClient(rdClient)

	var req SelectRoleReq

	err := msg.UnpackagePbmsg(&req)
	if err != nil {
		SendPbMessage(conn, false, msg.GetId(), msg.GetCmd(), msg.GetSeq(), RetFail, nil)
		return
	}

	if req.RoleId == nil {
		SendPbMessage(conn, false, msg.GetId(), msg.GetCmd(), msg.GetSeq(), RetMsgFormatError, nil)
		return
	}

	if req.Info != nil && req.GetInfo().Token != nil {
		// reconnecting
		ret := DisconnLogin(rdClient, req.GetInfo().GetToken(), info)
		if ret != RetSucc {
			SendPbMessage(conn, false, msg.GetId(), msg.GetCmd(), msg.GetSeq(), ret, nil)
			return
		}
	}

	role := new(Role)
	err = role.Get(rdClient, req.GetRoleId())
	if err != nil {
		Debug("role %d is not existst", req.GetRoleId())
		SendPbMessage(conn, false, msg.GetId(), msg.GetCmd(), msg.GetSeq(), RetRoleNotExists, nil)
		return
	}

	info.RoleId = req.GetRoleId()
	info.Save(rdClient)

	SendPbMessage(conn, false, msg.GetId(), msg.GetCmd(), msg.GetSeq(), RetSucc, &SelectRoleRsp{
		Role: &RoleCommonInfo{
			Id:         proto.Uint32(role.Id),
			Name:       proto.String(role.Name),
			Level:      proto.Uint32(role.Level),
			VocationId: proto.Uint32(role.VocationId),
			Expr:       proto.Uint64(role.Expr),
			GodValue:   proto.Uint64(role.GodValue),
			Prestige:   proto.Uint64(role.Prestige),
			Gold:       proto.Uint64(role.Gold),
			Crystal:    proto.Uint64(role.Crystal),
		},
	})

}
Example #22
// Encode writes the response to a writer.
// Returns the number of bytes written and any error that occurs.
func (req *SnapshotRecoveryResponse) Encode(w io.Writer) (int, error) {
	pb := &protobuf.SnapshotRecoveryResponse{
		Term:        proto.Uint64(req.Term),
		Success:     proto.Bool(req.Success),
		CommitIndex: proto.Uint64(req.CommitIndex),
	}
	p, err := proto.Marshal(pb)
	if err != nil {
		return -1, err
	}

	return w.Write(p)
}
Example #23
func newBlockReadOp(block *hdfs.LocatedBlockProto, offset, length uint64) *hdfs.OpReadBlockProto {
	return &hdfs.OpReadBlockProto{
		Header: &hdfs.ClientOperationHeaderProto{
			BaseHeader: &hdfs.BaseHeaderProto{
				Block: block.GetB(),
				Token: block.GetBlockToken(),
			},
			ClientName: proto.String(ClientName),
		},
		Offset: proto.Uint64(offset),
		Len:    proto.Uint64(length),
	}
}
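A hedged usage sketch for newBlockReadOp above, covering a whole located block from offset 0. The GetB().GetNumBytes() accessor also appears in the BlockWriter example earlier on this page; the surrounding variables are illustrative.

length := block.GetB().GetNumBytes()
op := newBlockReadOp(block, 0, length)
// op would then be framed and written to the datanode connection by the caller.
_ = op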
Example #24
// Handle a client's subscribe request.  The response may specify a timeout,
// after which an active client must resubscribe to renew their session.
func OnUtilSubscribe(s *Session, body []byte) *Packet {
	if s.timeout == 0 {
		s.timeout = 120 * time.Second
	}
	if s.route == 0 {
		s.route = 1
	}
	s.subscribed = time.Now()
	res := util.SubscribeResponse{}
	res.Route = proto.Uint64(s.route)
	res.KeepAliveSecs = proto.Uint64(uint64(s.timeout.Seconds()))
	return EncodePacket(util.SubscribeResponse_ID, &res)
}
Example #25
// Encodes the SnapshotRequest to a buffer. Returns the number of bytes
// written and any error that may have occurred.
func (req *SnapshotRequest) Encode(w io.Writer) (int, error) {
	pb := &protobuf.SnapshotRequest{
		LeaderName: proto.String(req.LeaderName),
		LastIndex:  proto.Uint64(req.LastIndex),
		LastTerm:   proto.Uint64(req.LastTerm),
	}
	p, err := proto.Marshal(pb)
	if err != nil {
		return -1, err
	}

	return w.Write(p)
}
Example #26
File: subscription.go Project: beheh/stove
// Handle a client's subscribe request.  The response may specify a timeout,
// after which an active client must resubscribe to renew their session.
func OnUtilSubscribe(s *Session, body []byte) ([]byte, error) {
	if s.timeout == 0 {
		s.timeout = 120 * time.Second
	}
	if s.route == 0 {
		s.route = 1
	}
	s.subscribed = time.Now()
	res := hsproto.PegasusUtil_SubscribeResponse{}
	res.Route = proto.Uint64(s.route)
	res.SupportedFeatures = proto.Uint64(3)
	res.KeepAliveSecs = proto.Uint64(uint64(s.timeout.Seconds()))
	return EncodeUtilResponse(315, &res)
}
Example #27
func expire(now time.Time) {
	for k, v := range bindings {
		v.Age = proto.Uint64(uint64(now.Sub(v.added)))
		if !v.expiration.IsZero() {
			ttl := int64(v.expiration.Sub(now))
			if ttl <= 0 {
				delete(bindings, k)
				verbose.Printf("Expired binding: %s\n", k)
			} else {
				v.Ttl = proto.Uint64(uint64(ttl))
			}
		}
	}
}
Example #28
func TestSerialDeserial(t *testing.T) {
	volumeMessage := &VolumeInformationMessage{
		Id:               proto.Uint32(12),
		Size:             proto.Uint64(2341234),
		Collection:       proto.String("benchmark"),
		FileCount:        proto.Uint64(2341234),
		DeleteCount:      proto.Uint64(234),
		DeletedByteCount: proto.Uint64(21234),
		ReadOnly:         proto.Bool(false),
		ReplicaPlacement: proto.Uint32(210),
		Version:          proto.Uint32(2),
	}
	var volumeMessages []*VolumeInformationMessage
	volumeMessages = append(volumeMessages, volumeMessage)

	joinMessage := &JoinMessage{
		IsInit:         proto.Bool(true),
		Ip:             proto.String("127.0.3.12"),
		Port:           proto.Uint32(34546),
		PublicUrl:      proto.String("localhost:2342"),
		MaxVolumeCount: proto.Uint32(210),
		MaxFileKey:     proto.Uint64(324234423),
		DataCenter:     proto.String("dc1"),
		Rack:           proto.String("rack2"),
		Volumes:        volumeMessages,
	}

	data, err := proto.Marshal(joinMessage)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	newMessage := &JoinMessage{}
	err = proto.Unmarshal(data, newMessage)
	if err != nil {
		log.Fatal("unmarshaling error: ", err)
	}
	log.Println("The pb data size is", len(data))

	jsonData, jsonError := json.Marshal(joinMessage)
	if jsonError != nil {
		log.Fatal("json marshaling error: ", jsonError)
	}
	log.Println("The json data size is", len(jsonData), string(jsonData))

	// Now joinMessage and newMessage contain the same data.
	if *joinMessage.PublicUrl != *newMessage.PublicUrl {
		log.Fatalf("data mismatch %q != %q", *joinMessage.PublicUrl, *newMessage.PublicUrl)
	}
}
Example #29
func (c *txnCommitter) commitSingleRegion(bo *Backoffer, batch batchKeys) error {
	req := &pb.Request{
		Type: pb.MessageType_CmdCommit.Enum(),
		CmdCommitReq: &pb.CmdCommitRequest{
			StartVersion:  proto.Uint64(c.startTS),
			Keys:          batch.keys,
			CommitVersion: proto.Uint64(c.commitTS),
		},
	}

	resp, err := c.store.SendKVReq(bo, req, batch.region)
	if err != nil {
		return errors.Trace(err)
	}
	if regionErr := resp.GetRegionError(); regionErr != nil {
		err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
		if err != nil {
			return errors.Trace(err)
		}
		// re-split keys and commit again.
		err = c.commitKeys(bo, batch.keys)
		return errors.Trace(err)
	}
	commitResp := resp.GetCmdCommitResp()
	if commitResp == nil {
		return errors.Trace(errBodyMissing)
	}
	if keyErr := commitResp.GetError(); keyErr != nil {
		c.mu.RLock()
		defer c.mu.RUnlock()
		err = errors.Errorf("commit failed: %v", keyErr.String())
		if c.mu.committed {
			// No secondary key could be rolled back after its primary key is committed.
			// There must be a serious bug somewhere.
			log.Errorf("txn failed commit key after primary key committed: %v, tid: %d", err, c.startTS)
			return errors.Trace(err)
		}
		// The transaction may be rolled back by concurrent transactions.
		log.Warnf("txn failed commit primary key: %v, retry later, tid: %d", err, c.startTS)
		return errors.Annotate(err, txnRetryableMark)
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	// Group that contains primary key is always the first.
	// We mark transaction's status committed when we receive the first success response.
	c.mu.committed = true
	return nil
}
Example #30
File: auth.go Project: beheh/stove
func (s *AuthServerService) FinishQueue() {
	update := hsproto.BnetProtocolAuthentication_LogonQueueUpdateRequest{}
	update.Position = proto.Uint32(0)
	update.EstimatedTime = proto.Uint64(0)
	update.EtaDeviationInSec = proto.Uint64(0)
	updateBody, err := proto.Marshal(&update)
	if err != nil {
		log.Panicf("FinishQueue: %v", err)
	}
	updateHeader := s.sess.MakeRequestHeader(s.client, 12, len(updateBody))
	s.sess.QueuePacket(updateHeader, updateBody)

	endHeader := s.sess.MakeRequestHeader(s.client, 13, 0)
	s.sess.QueuePacket(endHeader, nil)
}