func makeRequest(fix fixture, wants, has float64) (*pb.GetCapacityResponse, error) {
	req := &pb.GetCapacityRequest{
		ClientId: proto.String("client"),
		Resource: []*pb.ResourceRequest{
			{
				ResourceId: proto.String("resource"),
				Priority:   proto.Int64(1),
				Has: &pb.Lease{
					ExpiryTime:      proto.Int64(0),
					RefreshInterval: proto.Int64(0),
					Capacity:        proto.Float64(0),
				},
				Wants: proto.Float64(wants),
			},
		},
	}

	if has > 0 {
		req.Resource[0].Has = &pb.Lease{
			ExpiryTime:      proto.Int64(time.Now().Add(1 * time.Minute).Unix()),
			RefreshInterval: proto.Int64(5),
			Capacity:        proto.Float64(has),
		}
	}

	return fix.client.GetCapacity(context.Background(), req)
}
// TODO: Use gauge-go result object rather than ProtoExecutionResult
func executeFunc(fn reflect.Value, args ...interface{}) (res *m.ProtoExecutionResult) {
	rargs := make([]reflect.Value, len(args))
	for i, a := range args {
		rargs[i] = reflect.ValueOf(a)
	}
	res = &m.ProtoExecutionResult{}
	T = &testingT{}
	start := time.Now()

	defer func() {
		// A panic in the step implementation is reported as a failed result
		// instead of crashing the runner.
		if r := recover(); r != nil {
			res.ScreenShot = getScreenshot()
			res.Failed = proto.Bool(true)
			res.ExecutionTime = proto.Int64(time.Since(start).Nanoseconds())
			res.StackTrace = proto.String(strings.SplitN(string(debug.Stack()), "\n", 9)[8])
			res.ErrorMessage = proto.String(fmt.Sprintf("%s", r))
		}
		T = &testingT{}
	}()

	fn.Call(rargs)
	res.Failed = proto.Bool(false)
	if len(T.errors) != 0 {
		res.ScreenShot = getScreenshot()
		res.Failed = proto.Bool(true)
		res.StackTrace = proto.String(T.getStacktraces())
		res.ErrorMessage = proto.String(T.getErrors())
	}
	res.ExecutionTime = proto.Int64(time.Since(start).Nanoseconds())
	return res
}
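// executeFunc above relies on a deferred recover to convert a panicking step
// implementation into a failed result. A stripped-down sketch of that pattern
// (safeCall is a hypothetical helper for illustration, not part of gauge-go):
func safeCall(fn func()) (failed bool, errMsg string) {
	defer func() {
		// recover returns non-nil only while unwinding from a panic;
		// named return values can still be set here.
		if r := recover(); r != nil {
			failed = true
			errMsg = fmt.Sprint(r)
		}
	}()
	fn()
	return // failed stays false if fn returned normally
}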
func (u *User) Leave() {
	notifyMsg := new(gs_protocol.NotifyQuitMsg)
	if DEBUG {
		gsutil.Log("Leave user id : ", gsutil.Itoa64(u.userID))
	}
	notifyMsg.UserID = proto.Int64(u.userID)

	if u.room != nil {
		if DEBUG {
			gsutil.Log("Leave room id : ", gsutil.Itoa64(u.room.roomID))
		}
		notifyMsg.RoomID = proto.Int64(u.room.roomID)

		msg, err := proto.Marshal(notifyMsg)
		gsutil.CheckError(err)

		// note: potential race between the broadcast goroutine and the ClientSender goroutine
		u.room.Leave(u.userID)

		// notify all members in the room
		u.SendToAll(NewMessage(u.userID, gs_protocol.Type_NotifyQuit, msg))
		if DEBUG {
			gsutil.Log("NotifyQuit message sent")
		}
	}
	if DEBUG {
		gsutil.Log("Leave func end")
	}
}
// toAuthDBRevision converts AuthReplicationState to AuthDBRevision proto.
func toAuthDBRevision(rs *model.AuthReplicationState) *AuthDBRevision {
	return &AuthDBRevision{
		PrimaryId:  proto.String(rs.PrimaryID),
		AuthDbRev:  proto.Int64(rs.AuthDBRev),
		ModifiedTs: proto.Int64(timeToTimestamp(rs.ModifiedTimestamp)),
	}
}
func TestProto3SetDefaults(t *testing.T) {
	in := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: new(tpb.SubDefaults),
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": new(tpb.SubDefaults),
		},
	}

	got := proto.Clone(in).(*pb.Message)
	proto.SetDefaults(got)

	// There are no defaults in proto3. Everything should be the zero value, but
	// we need to remember to set defaults for nested proto2 messages.
	want := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": {N: proto.Int64(7)},
		},
	}

	if !proto.Equal(got, want) {
		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
	}
}
// ConsensusUpdate implements paxos.Watcher interface.
func (this *Election) ConsensusUpdate(uid string, index int64, value []byte) {
	round := this.ElectionRoundFromPaxosUID(uid)
	if round <= this.CurrentRound() {
		return
	}
	winner := string(value)

	lock, errLock := this.ctlr.LockAll()
	if errLock != nil {
		return
	}
	defer lock.Unlock()

	change := thispb.ElectionChange{}
	change.ElectionRound = proto.Int64(round)
	change.ElectionWinner = proto.String(winner)
	if err := this.doUpdateElection(&change); err != nil {
		this.Fatalf("could not update new election round status: %v", err)
	}

	if this.InCommittee() {
		change := thispb.CommitteeChange{}
		change.NewElectionRound = proto.Int64(round)
		change.NewElectionWinner = proto.String(winner)
		if err := this.doUpdateCommittee(&change); err != nil {
			this.Fatalf("could not update committee with new election status: %v", err)
		}
	}
}
// NewPost creates a message header for a post message.
func (this *Messenger) NewPost() *msgpb.Header {
	header := &msgpb.Header{}
	header.MessageId = proto.Int64(atomic.AddInt64(&this.lastMessageID, 1))
	header.MessengerId = proto.String(this.uid)
	header.CreateTimestampNsecs = proto.Int64(time.Now().UnixNano())
	return header
}
// setExecutionResultForConcept accumulates execution time across the concept's
// steps and marks the concept failed as soon as any step or nested concept fails.
func (executor *specExecutor) setExecutionResultForConcept(protoConcept *gauge_messages.ProtoConcept) {
	var conceptExecutionTime int64
	for _, step := range protoConcept.GetSteps() {
		if step.GetItemType() == gauge_messages.ProtoItem_Concept {
			stepExecResult := step.GetConcept().GetConceptExecutionResult().GetExecutionResult()
			conceptExecutionTime += stepExecResult.GetExecutionTime()
			if stepExecResult.GetFailed() {
				conceptExecutionResult := &gauge_messages.ProtoStepExecutionResult{
					ExecutionResult: stepExecResult,
					Skipped:         proto.Bool(false),
				}
				conceptExecutionResult.ExecutionResult.ExecutionTime = proto.Int64(conceptExecutionTime)
				protoConcept.ConceptExecutionResult = conceptExecutionResult
				protoConcept.ConceptStep.StepExecutionResult = conceptExecutionResult
				return
			}
		} else if step.GetItemType() == gauge_messages.ProtoItem_Step {
			stepExecResult := step.GetStep().GetStepExecutionResult().GetExecutionResult()
			conceptExecutionTime += stepExecResult.GetExecutionTime()
			if stepExecResult.GetFailed() {
				conceptExecutionResult := &gauge_messages.ProtoStepExecutionResult{
					ExecutionResult: stepExecResult,
					Skipped:         proto.Bool(false),
				}
				conceptExecutionResult.ExecutionResult.ExecutionTime = proto.Int64(conceptExecutionTime)
				protoConcept.ConceptExecutionResult = conceptExecutionResult
				protoConcept.ConceptStep.StepExecutionResult = conceptExecutionResult
				return
			}
		}
	}
	// No failures: record the accumulated execution time on a passing result.
	protoConcept.ConceptExecutionResult = &gauge_messages.ProtoStepExecutionResult{
		ExecutionResult: &gauge_messages.ProtoExecutionResult{
			Failed:        proto.Bool(false),
			ExecutionTime: proto.Int64(conceptExecutionTime),
		},
	}
	protoConcept.ConceptStep.StepExecutionResult = protoConcept.ConceptExecutionResult
	protoConcept.ConceptStep.StepExecutionResult.Skipped = proto.Bool(false)
}
func TestLeaseLengthAndRefreshInterval(t *testing.T) {
	const (
		leaseLength     = 342
		refreshInterval = 5
	)

	store, algo := NewLeaseStore("test"), ProportionalShare(&pb.Algorithm{
		LeaseLength:     proto.Int64(leaseLength),
		RefreshInterval: proto.Int64(refreshInterval),
	})
	now := time.Now()

	lease := algo(store, 100, Request{
		Client:     "b",
		Wants:      10,
		Subclients: 1,
	})

	leaseLengthSec := lease.Expiry.Unix() - now.Unix()
	if math.Abs(float64(leaseLengthSec-leaseLength)) > 1 {
		t.Errorf("lease.Expiry = %v (%d seconds), want %d seconds", lease.Expiry, leaseLengthSec, leaseLength)
	}
	if lease.RefreshInterval.Seconds() != refreshInterval {
		t.Errorf("lease.RefreshInterval = %v, want %d seconds", lease.RefreshInterval, refreshInterval)
	}
}
func (c *httpContext) logf(level int64, levelName, format string, args ...interface{}) {
	s := fmt.Sprintf(format, args...)
	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
	log.Println(levelName + ": " + s)

	// Truncate long log lines.
	const maxLogLine = 8192
	if len(s) > maxLogLine {
		suffix := fmt.Sprintf("...(length %d)", len(s))
		s = s[:maxLogLine-len(suffix)] + suffix
	}

	buf, err := proto.Marshal(&lpb.UserAppLogGroup{
		LogLine: []*lpb.UserAppLogLine{
			{
				TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
				Level:         proto.Int64(level),
				Message:       proto.String(s),
			},
		},
	})
	if err != nil {
		log.Printf("appengine_internal.flushLog: failed marshaling AppLogGroup: %v", err)
		return
	}

	req := &lpb.FlushRequest{
		Logs: buf,
	}
	res := &basepb.VoidProto{}
	if err := c.Call("logservice", "Flush", req, res, nil); err != nil {
		log.Printf("appengine_internal.flushLog: failed Flush RPC: %v", err)
	}
}
func Action1Handler(user *User, data []byte) {
	// request body unmarshaling
	req := new(gs_protocol.ReqAction1)
	err := proto.Unmarshal(data, req)
	gs.CheckError(err)

	// TODO create business logic for Action1 Type
	if DEBUG {
		gs.Log("Action1 userID : ", gs.Itoa64(req.GetUserID()))
	}

	// broadcast message
	notifyMsg := new(gs_protocol.NotifyAction1Msg)
	notifyMsg.UserID = proto.Int64(user.userID)
	msg, err := proto.Marshal(notifyMsg)
	gs.CheckError(err)
	user.SendToAll(NewMessage(user.userID, gs_protocol.Type_NotifyAction1, msg))

	// response body marshaling
	res := new(gs_protocol.ResAction1)
	res.UserID = proto.Int64(user.userID)
	res.Result = proto.Int32(1) // is success?
	msg, err = proto.Marshal(res)
	gs.CheckError(err)
	user.Push(NewMessage(user.userID, gs_protocol.Type_DefinedAction1, msg))
}
func (lm *logManager) commitTransaction(tid TransactionID) error {
	cm, ok := lm.currMutexes[tid]
	if !ok {
		return fmt.Errorf("transaction with ID %d is not currently running", tid)
	}

	// Write out COMMIT and END log entries
	lm.addLogEntry(&pb.LogEntry{
		Tid:       proto.Int64(int64(tid)),
		EntryType: pb.LogEntry_COMMIT.Enum(),
	})
	lm.addLogEntry(&pb.LogEntry{
		Tid:       proto.Int64(int64(tid)),
		EntryType: pb.LogEntry_END.Enum(),
	})

	// Flush out log
	if err := lm.flushLog(); err != nil {
		return fmt.Errorf("error while flushing log: %v", err)
	}

	// Release all locks and remove from current transactions
	for _, rw := range cm {
		rw.unlock()
	}
	delete(lm.currMutexes, tid)
	return nil
}
func TestRunBroadcastFive(t *testing.T) {
	c := make(chan Packet, 100)
	var r run
	r.seqn = 1
	r.out = c
	r.addr = []*net.UDPAddr{
		MustResolveUDPAddr("udp", "1.2.3.4:5"),
		MustResolveUDPAddr("udp", "2.3.4.5:6"),
		MustResolveUDPAddr("udp", "3.4.5.6:7"),
		MustResolveUDPAddr("udp", "4.5.6.7:8"),
		MustResolveUDPAddr("udp", "5.6.7.8:9"),
	}

	r.broadcast(newInvite(1))
	c <- Packet{}

	exp := Msg{
		Seqn: proto.Int64(1),
		Cmd:  invite,
		Crnd: proto.Int64(1),
	}

	addr := make([]*net.UDPAddr, len(r.addr))
	for i := 0; i < len(r.addr); i++ {
		p := <-c
		addr[i] = p.Addr
		assert.Equal(t, exp, p.Msg)
	}

	assert.Equal(t, Packet{}, <-c)
	assert.Equal(t, r.addr, addr)
}
// insert inserts a record into the btree.
func (t *Btree) insert(record TreeLog) error {
	tnode, err := t.getTreeNode(t.GetRoot())
	if err != nil {
		if err.Error() != "no data" {
			return err
		}
		// Empty tree: create a new leaf root.
		nnode := t.newTreeNode()
		nnode.NodeType = proto.Int32(isLeaf)
		_, err = nnode.insertRecord(record, t)
		if err == nil {
			t.Nodes[nnode.GetId()], err = proto.Marshal(nnode)
		}
		t.Root = proto.Int64(nnode.GetId())
		return err
	}

	clonednode, err := tnode.insertRecord(record, t)
	if err == nil && len(clonednode.GetKeys()) > int(t.GetNodeMax()) {
		// Root overflow: split and grow the tree with a new internal root.
		nnode := t.newTreeNode()
		nnode.NodeType = proto.Int32(isNode)
		key, left, right := clonednode.split(t)
		nnode.insertOnce(key, left, right, t)
		t.Nodes[nnode.GetId()], err = proto.Marshal(nnode)
		t.Root = proto.Int64(nnode.GetId())
	} else {
		t.Root = proto.Int64(clonednode.GetId())
	}
	return err
}
func CreateHandler(user *User, data []byte) {
	// request body unmarshaling
	req := new(gs_protocol.ReqCreate)
	err := proto.Unmarshal(data, req)
	gs.CheckError(err)

	if user.userID != req.GetUserID() {
		if DEBUG {
			gs.Log("Fail room create, user id mismatch")
		}
		return
	}

	// room create
	roomID := GetRandomRoomID()
	r := NewRoom(roomID)
	r.users.Set(user.userID, user) // insert user
	user.room = r                  // set room
	rooms.Set(roomID, r)           // set room into global shared map

	if DEBUG {
		gs.Log("Get rand room id : ", gs.Itoa64(roomID))
	}

	// response body marshaling
	res := new(gs_protocol.ResCreate)
	res.RoomID = proto.Int64(roomID)
	res.UserID = proto.Int64(user.userID)
	if DEBUG {
		gs.Log("Room create, room id : ", gs.Itoa64(roomID))
	}
	msg, err := proto.Marshal(res)
	gs.CheckError(err)
	user.Push(NewMessage(user.userID, gs_protocol.Type_Create, msg))
}
func TestRunBroadcastThree(t *testing.T) {
	c := make(chan Packet, 100)
	var r run
	r.seqn = 1
	r.out = c
	r.addr = []*net.UDPAddr{
		MustResolveUDPAddr("udp", "1.2.3.4:5"),
		MustResolveUDPAddr("udp", "2.3.4.5:6"),
		MustResolveUDPAddr("udp", "3.4.5.6:7"),
	}

	r.broadcast(newInvite(1))
	c <- Packet{}

	exp := msg{
		Seqn: proto.Int64(1),
		Cmd:  invite,
		Crnd: proto.Int64(1),
	}

	addr := make([]*net.UDPAddr, len(r.addr))
	for i := 0; i < len(r.addr); i++ {
		p := <-c
		addr[i] = p.Addr

		var got msg
		err := proto.Unmarshal(p.Data, &got)
		assert.Equal(t, nil, err)
		assert.Equal(t, exp, got)
	}

	assert.Equal(t, Packet{}, <-c)
	assert.Equal(t, r.addr, addr)
}
func (s *Server) requestVotes() {
	theOtherNodes := s.theOtherNodes()
	// on re-election, reset the vote channel
	s.getVoteResponseChan = make(chan wrappedVoteResponse, len(theOtherNodes))
	for id, node := range theOtherNodes {
		go func(id int32, node Node) { // send vote requests simultaneously
			for { // not responded, keep trying
				logger.Printf("node %d send to node %d\n", s.id, id)
				pb := &RequestVoteRequest{
					CandidateID:  proto.Int32(s.id),
					Term:         proto.Int64(s.currentTerm),
					LastLogIndex: proto.Int64(s.log.lastLogIndex()),
					LastLogTerm:  proto.Int64(s.log.lastLogTerm()),
				}
				responseProto, err := node.rpcRequestVote(s, pb)
				if err == nil {
					if responseProto.GetVoteGranted() {
						logger.Printf("node %d got node %d granted\n", s.id, id)
					} else {
						logger.Printf("node %d got node %d reject\n", s.id, id)
					}
					s.getVoteResponseChan <- wrappedVoteResponse{
						id:                  id,
						RequestVoteResponse: responseProto,
					}
					return
				}
				logger.Println("vote response error:", err.Error())
			}
		}(id, node)
	}
}
// Phase1RPC handles ClassicPaxos.Phase1 rpc.
func (this *Paxos) Phase1RPC(header *msgpb.Header, request *thispb.Phase1Request) (status error) {
	if !this.IsAcceptor() {
		this.Errorf("this paxos instance is not an acceptor; rejecting %s", header)
		return errs.ErrInvalid
	}

	lock, errLock := this.ctlr.TimedLock(msg.RequestTimeout(header), "acceptor")
	if errLock != nil {
		return errLock
	}
	defer lock.Unlock()

	clientID := header.GetMessengerId()

	respond := func() error {
		response := thispb.Phase1Response{}
		response.PromisedBallot = proto.Int64(this.promisedBallot)
		if this.votedBallot >= 0 {
			response.VotedBallot = proto.Int64(this.votedBallot)
			response.VotedValue = this.votedValue
		}
		message := thispb.PaxosMessage{}
		message.Phase1Response = &response
		errSend := msg.SendResponseProto(this.msn, header, &message)
		if errSend != nil {
			this.Errorf("could not send phase1 response to %s: %v", clientID, errSend)
			return errSend
		}
		return nil
	}

	ballot := request.GetBallotNumber()
	if ballot < this.promisedBallot {
		this.Warningf("phase1 request from %s is ignored due to stale ballot %d", clientID, ballot)
		return respond()
	}
	if ballot == this.promisedBallot {
		this.Warningf("duplicate phase1 request from client %s with an already "+
			"promised ballot number %d", clientID, ballot)
		return respond()
	}

	// Save the promise into the wal.
	change := thispb.AcceptorChange{}
	change.PromisedBallot = proto.Int64(ballot)
	if err := this.doUpdateAcceptor(&change); err != nil {
		this.Errorf("could not update acceptor state: %v", err)
		return err
	}

	this.Infof("this acceptor has now promised higher ballot %d from %s", ballot, clientID)
	return respond()
}
func addExecutionTimes(stepExecResult *gauge_messages.ProtoStepExecutionResult, execResults ...*gauge_messages.ProtoExecutionResult) {
	for _, execResult := range execResults {
		currentScenarioExecTime := stepExecResult.ExecutionResult.ExecutionTime
		if currentScenarioExecTime == nil {
			stepExecResult.ExecutionResult.ExecutionTime = proto.Int64(execResult.GetExecutionTime())
		} else {
			stepExecResult.ExecutionResult.ExecutionTime = proto.Int64(*currentScenarioExecTime + execResult.GetExecutionTime())
		}
	}
}
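// A standalone sketch of the nil-vs-set pointer accumulation that
// addExecutionTimes above performs: with proto2-style *int64 fields, nil means
// "no time recorded yet", so the first value initializes the pointer and later
// values add to it. sumExecutionTimes is a hypothetical helper for
// illustration only, not part of gauge.
func sumExecutionTimes(total *int64, times ...int64) *int64 {
	for _, t := range times {
		if total == nil {
			total = proto.Int64(t) // first measurement: allocate and set
		} else {
			total = proto.Int64(*total + t) // later measurements accumulate
		}
	}
	return total
}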
func BuildMessage(from int, to string, data []byte, tp Msg_Type, tick int64) *Msg {
	m := &Msg{
		From: proto.Int64(int64(from)),
		To:   proto.String(to),
		Tp:   &tp,
		D:    data,
		Ct:   proto.Int64(tick),
	}
	return m
}
func (lm *logManager) abortTransaction(tid TransactionID) (err error) {
	cm, ok := lm.currMutexes[tid]
	if !ok {
		err = fmt.Errorf("transaction with ID %d is not currently running", tid)
		return
	}

	// Write out ABORT entry
	lm.addLogEntry(&pb.LogEntry{
		Tid:       proto.Int64(int64(tid)),
		EntryType: pb.LogEntry_ABORT.Enum(),
	})

	// Undo updates (and write log entries)
	entries := &lm.log.Entry
	iterateEntries := (*entries)[:]
iterate:
	for i := len(iterateEntries) - 1; i >= 0; i-- {
		e := iterateEntries[i]
		if *e.Tid == int64(tid) {
			switch *e.EntryType {
			case pb.LogEntry_UPDATE:
				// Undo UPDATE records
				oldValue, newValue, err := lm.updateStoreMapValue(cm, Key(*e.Key), Value(e.OldValue))
				if err != nil {
					return err
				}
				lm.addLogEntry(&pb.LogEntry{
					Tid:       proto.Int64(int64(tid)),
					EntryType: pb.LogEntry_UNDO.Enum(),
					Key:       e.Key,
					OldValue:  oldValue, // e.NewValue
					NewValue:  newValue, // e.OldValue
					UndoLsn:   e.Lsn,
				})
			case pb.LogEntry_BEGIN:
				// Stop when BEGIN record is reached
				break iterate
			}
		}
	}

	lm.addLogEntry(&pb.LogEntry{
		Tid:       proto.Int64(int64(tid)),
		EntryType: pb.LogEntry_END.Enum(),
	})

	// Flush out log
	lm.flushLog()

	// Release all locks and remove from current transactions
	for _, rw := range cm {
		rw.unlock()
	}
	delete(lm.currMutexes, tid)
	return
}
func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
	req := &pb.LogReadRequest{}
	req.AppId = &appID
	if !params.StartTime.IsZero() {
		req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
	}
	if !params.EndTime.IsZero() {
		req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
	}
	if len(params.Offset) > 0 {
		var offset pb.LogOffset
		if err := proto.Unmarshal(params.Offset, &offset); err != nil {
			return nil, fmt.Errorf("bad Offset: %v", err)
		}
		req.Offset = &offset
	}
	if params.Incomplete {
		req.IncludeIncomplete = &params.Incomplete
	}
	if params.AppLogs {
		req.IncludeAppLogs = &params.AppLogs
	}
	if params.ApplyMinLevel {
		req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
	}
	if params.Versions == nil {
		// If no versions were specified, default to the default module at
		// the major version being used by this module.
		if i := strings.Index(versionID, "."); i >= 0 {
			versionID = versionID[:i]
		}
		req.VersionId = []string{versionID}
	} else {
		req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
		for _, v := range params.Versions {
			var m *string
			if i := strings.Index(v, ":"); i >= 0 {
				m, v = proto.String(v[:i]), v[i+1:]
			}
			req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
				ModuleId:  m,
				VersionId: proto.String(v),
			})
		}
	}
	if params.RequestIDs != nil {
		ids := make([][]byte, len(params.RequestIDs))
		for i, v := range params.RequestIDs {
			ids[i] = []byte(v)
		}
		req.RequestId = ids
	}
	return req, nil
}
func (this *Paxos) doSaveAcceptor(state *thispb.AcceptorState) {
	state.PromisedBallot = proto.Int64(this.promisedBallot)
	state.VotedBallot = proto.Int64(this.votedBallot)
	state.VotedValue = append([]byte{}, this.votedValue...)
	for ballot, learnerSet := range this.learnerAckMap {
		for learner := range learnerSet {
			state.AckedBallotList = append(state.AckedBallotList, ballot)
			state.AckedLearnerList = append(state.AckedLearnerList, learner)
		}
	}
}
func (s *Server) sendAppendRequestTo(id int32, node Node) (*AppendEntriesResponse, error) {
	appendRequest := AppendEntriesRequest{
		LeaderID:     proto.Int32(s.id),
		Term:         proto.Int64(s.currentTerm),
		PrevLogIndex: proto.Int64(s.log.prevLogIndex(s.nextIndex.get(id))),
		PrevLogTerm:  proto.Int64(s.log.prevLogTerm(s.nextIndex.get(id))),
		CommitIndex:  proto.Int64(s.log.commitIndex),
		Entries:      s.log.entriesAfer(s.nextIndex.get(id)),
	}
	return node.rpcAppendEntries(s, &appendRequest)
}
// GetKeyRange issues a GETKEYRANGE command and returns channels that will
// receive the matching keys and the completion status.
func (self *NetworkClient) GetKeyRange(startKey []byte, endKey []byte) (<-chan [][]byte, <-chan error, error) {
	var startKeyInclusive bool = true
	var endKeyInclusive bool = true
	var maxReturned int32 = 200
	var reverse bool = false

	cmd := &kproto.Command{
		Header: &kproto.Command_Header{
			ConnectionID: proto.Int64(self.connectionId),
			Sequence:     proto.Int64(self.sequence),
			MessageType:  kproto.Command_GETKEYRANGE.Enum(),
		},
		Body: &kproto.Command_Body{
			Range: &kproto.Command_Range{
				StartKey:          startKey,
				StartKeyInclusive: &startKeyInclusive,
				EndKey:            endKey,
				EndKeyInclusive:   &endKeyInclusive,
				MaxReturned:       &maxReturned,
				Reverse:           &reverse,
			},
		},
	}

	cmd_bytes, err := proto.Marshal(cmd)
	if err != nil {
		return nil, nil, err
	}

	msg := &kproto.Message{
		AuthType: kproto.Message_HMACAUTH.Enum(),
		HmacAuth: &kproto.Message_HMACauth{
			Identity: proto.Int64(self.userId),
			Hmac:     calculate_hmac(self.secret, cmd_bytes),
		},
		CommandBytes: cmd_bytes,
	}

	err = network.Send(self.conn, msg, nil)
	if err != nil {
		return nil, nil, err
	}

	// TODO: return single chan
	status := make(chan error, 1)
	array := make(chan [][]byte, 1)
	pending := PendingOperation{sequence: self.sequence, receiver: status, array: array}
	self.notifier <- pending
	self.sequence += 1

	return array, status, nil
}
func ToProto(channelStatuses []*util.ChannelStatus) (ret []*cmd.ChannelStatus) {
	for _, stat := range channelStatuses {
		ret = append(ret, &cmd.ChannelStatus{
			Length:    proto.Int64(stat.Length),
			StartTime: proto.Int64(stat.StartTime.Unix()),
			StopTime:  proto.Int64(stat.StopTime.Unix()),
			Name:      proto.String(stat.Name),
		})
	}
	return
}
func makeResourceTemplate(name string, algo pb.Algorithm_Kind) *pb.ResourceTemplate {
	return &pb.ResourceTemplate{
		IdentifierGlob: proto.String(name),
		Capacity:       proto.Float64(100),
		Algorithm: &pb.Algorithm{
			Kind:                 algo.Enum(),
			RefreshInterval:      proto.Int64(5),
			LeaseLength:          proto.Int64(20),
			LearningModeDuration: proto.Int64(0),
		},
	}
}
func (log *Log) newConfigEntry(term int64, nodes []Node, state string) (*LogEntry, error) {
	var bytesBuffer bytes.Buffer
	if err := gob.NewEncoder(&bytesBuffer).Encode(nodes); err != nil {
		return nil, err
	}

	pbEntry := &LogEntry{
		Index:       proto.Int64(log.lastLogIndex() + 1),
		Term:        proto.Int64(term),
		CommandName: proto.String(state),
		Command:     bytesBuffer.Bytes(),
	}
	return pbEntry, nil
}
func (log *Log) newLogEntry(term int64, command Command) (*LogEntry, error) {
	var bytesBuffer bytes.Buffer
	if err := json.NewEncoder(&bytesBuffer).Encode(command); err != nil {
		return nil, err
	}

	pbEntry := &LogEntry{
		Index:       proto.Int64(log.lastLogIndex() + 1),
		Term:        proto.Int64(term),
		CommandName: proto.String(command.Name()),
		Command:     bytesBuffer.Bytes(),
	}
	return pbEntry, nil
}
// IndexToProto converts a model.IndexInfo to a tipb.IndexInfo.
func IndexToProto(t *model.TableInfo, idx *model.IndexInfo) *tipb.IndexInfo {
	pi := &tipb.IndexInfo{
		TableId: proto.Int64(t.ID),
		IndexId: proto.Int64(idx.ID),
		Unique:  proto.Bool(idx.Unique),
	}
	cols := make([]*tipb.ColumnInfo, 0, len(idx.Columns))
	for _, c := range idx.Columns {
		cols = append(cols, columnToProto(t.Columns[c.Offset]))
	}
	pi.Columns = cols
	return pi
}
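// Every function in this section populates proto2-generated structs whose
// optional scalar fields are pointers, using the helpers from
// github.com/golang/protobuf/proto. A minimal self-contained sketch of what
// those helpers do and why they exist: a nil pointer distinguishes "field not
// set" from a set-but-zero value, which plain scalars cannot express.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	n := proto.Int64(42)   // returns *int64 pointing at 42
	s := proto.String("x") // returns *string pointing at "x"
	b := proto.Bool(false) // set, but zero-valued: not the same as nil

	fmt.Println(*n, *s, *b) // 42 x false

	var unset *int64
	fmt.Println(unset == nil, b == nil) // true false: nil means "field not set"
}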