// handleFetchData handles a request for the current node's metadata.
func (r *rpc) handleFetchData(req *internal.FetchDataRequest) (*internal.FetchDataResponse, error) {
	var (
		b    []byte
		data *Data
		err  error
	)
	for {
		data = r.store.cachedData()

		// If the client's index differs from ours, return the new data.
		if data.Index != req.GetIndex() {
			b, err = data.MarshalBinary()
			if err != nil {
				return nil, err
			}
			break
		}

		// The client is already up to date; return immediately unless it
		// asked to block until the data changes.
		if !req.GetBlocking() {
			break
		}

		if err := r.store.WaitForDataChanged(); err != nil {
			return nil, err
		}
	}

	return &internal.FetchDataResponse{
		Header: &internal.ResponseHeader{
			OK: proto.Bool(true),
		},
		Index: proto.Uint64(data.Index),
		Term:  proto.Uint64(data.Term),
		Data:  b,
	}, nil
}
// handleJoinRequest handles a request to join the cluster.
func (r *rpc) handleJoinRequest(req *internal.JoinRequest) (*internal.JoinResponse, error) {
	r.traceCluster("join request from: %v", *req.Addr)

	node, err := func() (*NodeInfo, error) {
		// Attempt to create the node.
		node, err := r.store.CreateNode(*req.Addr)

		// If it exists, return the existing node.
		if err == ErrNodeExists {
			return r.store.NodeByHost(*req.Addr)
		} else if err != nil {
			return nil, fmt.Errorf("create node: %v", err)
		}

		// FIXME: jwilder: adding raft nodes is tricky since going
		// from 1 node (leader) to two kills the cluster because
		// quorum is lost after adding the second node. For now,
		// we can only add non-raft-enabled nodes.

		// If we have fewer than 3 nodes, add them as raft peers.
		// if len(r.store.Peers()) < MaxRaftNodes {
		// 	if err = r.store.AddPeer(*req.Addr); err != nil {
		// 		return node, fmt.Errorf("add peer: %v", err)
		// 	}
		// }
		return node, err
	}()

	nodeID := uint64(0)
	if node != nil {
		nodeID = node.ID
	}

	if err != nil {
		return nil, err
	}

	return &internal.JoinResponse{
		Header: &internal.ResponseHeader{
			OK: proto.Bool(true),
		},
		//EnableRaft: proto.Bool(contains(r.store.Peers(), *req.Addr)),
		EnableRaft: proto.Bool(false),
		RaftNodes:  r.store.Peers(),
		NodeID:     proto.Uint64(nodeID),
	}, err
}
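// NOTE: illustrative sketch, not part of the original source. It shows how a
// joining node could reach handleJoinRequest through the same r.call path
// that fetchMetaData uses below; the method name `join` and the response
// type switch are assumptions modeled on fetchMetaData.
func (r *rpc) join(leaderAddr, myAddr string) (*internal.JoinResponse, error) {
	resp, err := r.call(leaderAddr, &internal.JoinRequest{
		Addr: proto.String(myAddr),
	})
	if err != nil {
		return nil, err
	}

	switch t := resp.(type) {
	case *internal.JoinResponse:
		return t, nil
	case *internal.ErrorResponse:
		return nil, fmt.Errorf("rpc failed: %s", t.GetHeader().GetError())
	default:
		return nil, fmt.Errorf("rpc failed: unknown response type: %v", t.String())
	}
}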
func newTestMessage() *pb.MyMessage {
	msg := &pb.MyMessage{
		Count: proto.Int32(42),
		Name:  proto.String("Dave"),
		Quote: proto.String(`"I didn't want to go."`),
		Pet:   []string{"bunny", "kitty", "horsey"},
		Inner: &pb.InnerMessage{
			Host:      proto.String("footrest.syd"),
			Port:      proto.Int32(7001),
			Connected: proto.Bool(true),
		},
		Others: []*pb.OtherMessage{
			{
				Key:   proto.Int64(0xdeadbeef),
				Value: []byte{1, 65, 7, 12},
			},
			{
				Weight: proto.Float32(6.022),
				Inner: &pb.InnerMessage{
					Host: proto.String("lesha.mtv"),
					Port: proto.Int32(8002),
				},
			},
		},
		Bikeshed: pb.MyMessage_BLUE.Enum(),
		Somegroup: &pb.MyMessage_SomeGroup{
			GroupField: proto.Int32(8),
		},
		// One normally wouldn't do this.
		// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
		XXX_unrecognized: []byte{13<<3 | 0, 4},
	}
	ext := &pb.Ext{
		Data: proto.String("Big gobs for big rats"),
	}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
		panic(err)
	}
	greetings := []string{"adg", "easy", "cow"}
	if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
		panic(err)
	}

	// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
	b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
	if err != nil {
		panic(err)
	}
	b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
	proto.SetRawExtension(msg, 201, b)

	// Extensions can be plain fields, too, so let's test that.
	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
	proto.SetRawExtension(msg, 202, b)

	return msg
}
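// NOTE: illustrative sketch, not part of the original source. It unpacks raw
// bytes like the XXX_unrecognized value set above to show why {13<<3 | 0, 4}
// encodes an undeclared varint field: the first varint is the key (field
// number 13, wire type 0) and the second is the value 4. proto.DecodeVarint
// is the inverse of the proto.EncodeVarint calls used above.
func decodeUnrecognized(raw []byte) (field, wireType, value uint64) {
	key, n := proto.DecodeVarint(raw)      // key = 13<<3 | 0 = 104
	value, _ = proto.DecodeVarint(raw[n:]) // value = 4
	return key >> 3, key & 7, value        // field 13, wire type 0, value 4
}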
// sendError writes an ErrorResponse carrying the given message back to the client.
func (r *rpc) sendError(conn net.Conn, msg string) {
	r.traceCluster(msg)
	resp := &internal.ErrorResponse{
		Header: &internal.ResponseHeader{
			OK:    proto.Bool(false),
			Error: proto.String(msg),
		},
	}

	r.sendResponse(conn, internal.RPCType_Error, resp)
}
// marshal serializes to a protobuf representation.
func (ui UserInfo) marshal() *internal.UserInfo {
	pb := &internal.UserInfo{
		Name:  proto.String(ui.Name),
		Hash:  proto.String(ui.Hash),
		Admin: proto.Bool(ui.Admin),
	}

	for database, privilege := range ui.Privileges {
		pb.Privileges = append(pb.Privileges, &internal.UserPrivilege{
			Database:  proto.String(database),
			Privilege: proto.Int32(int32(privilege)),
		})
	}

	return pb
}
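// NOTE: illustrative sketch, not part of the original source. A plausible
// inverse of marshal above, assuming the generated Get* accessors on
// internal.UserInfo and a Privileges map keyed by database name; the concrete
// privilege type (influxql.Privilege here) is an assumption.
func (ui *UserInfo) unmarshal(pb *internal.UserInfo) {
	ui.Name = pb.GetName()
	ui.Hash = pb.GetHash()
	ui.Admin = pb.GetAdmin()

	ui.Privileges = make(map[string]influxql.Privilege)
	for _, p := range pb.GetPrivileges() {
		ui.Privileges[p.GetDatabase()] = influxql.Privilege(p.GetPrivilege())
	}
}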
// fetchMetaData returns the latest copy of the meta store data from the current
// leader.
func (r *rpc) fetchMetaData(blocking bool) (*Data, error) {
	assert(r.store != nil, "store is nil")

	// Retrieve the current known leader.
	leader := r.store.Leader()
	if leader == "" {
		return nil, errors.New("no leader")
	}

	var index, term uint64
	data := r.store.cachedData()
	if data != nil {
		index = data.Index
		term = data.Term
	}
	resp, err := r.call(leader, &internal.FetchDataRequest{
		Index:    proto.Uint64(index),
		Term:     proto.Uint64(term),
		Blocking: proto.Bool(blocking),
	})
	if err != nil {
		return nil, err
	}

	switch t := resp.(type) {
	case *internal.FetchDataResponse:
		// If data is nil, the index and term we sent match the leader's.
		if t.GetData() == nil {
			return nil, nil
		}
		ms := &Data{}
		if err := ms.UnmarshalBinary(t.GetData()); err != nil {
			return nil, fmt.Errorf("rpc unmarshal metadata: %v", err)
		}
		return ms, nil
	case *internal.ErrorResponse:
		return nil, fmt.Errorf("rpc failed: %s", t.GetHeader().GetError())
	default:
		return nil, fmt.Errorf("rpc failed: unknown response type: %v", t.String())
	}
}
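// NOTE: illustrative sketch, not part of the original source. It shows how a
// caller might long-poll fetchMetaData: with blocking=true the leader holds
// the request until its data index changes, and a nil *Data with a nil error
// means the cached copy is already current. The function name is an assumption.
func (r *rpc) pollMetaData() (*Data, error) {
	for {
		data, err := r.fetchMetaData(true)
		if err != nil {
			return nil, err
		}
		if data == nil {
			// Index and term matched the leader; nothing new, poll again.
			continue
		}
		return data, nil
	}
}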
import (
	"testing"

	"github.com/nathanielc/morgoth/Godeps/_workspace/src/github.com/gogo/protobuf/proto"

	pb "github.com/nathanielc/morgoth/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata"
)

var cloneTestMessage = &pb.MyMessage{
	Count: proto.Int32(42),
	Name:  proto.String("Dave"),
	Pet:   []string{"bunny", "kitty", "horsey"},
	Inner: &pb.InnerMessage{
		Host:      proto.String("niles"),
		Port:      proto.Int32(9099),
		Connected: proto.Bool(true),
	},
	Others: []*pb.OtherMessage{
		{
			Value: []byte("some bytes"),
		},
	},
	Somegroup: &pb.MyMessage_SomeGroup{
		GroupField: proto.Int32(6),
	},
	RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
}

func init() {
	ext := &pb.Ext{
		Data: proto.String("extension"),
	}
	if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
		panic("SetExtension: " + err.Error())
	}
}
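// NOTE: illustrative sketch, not part of the original source. It shows the
// kind of check clone tests perform on cloneTestMessage: proto.Clone makes a
// deep copy, so the copy must compare equal to the original while sharing no
// mutable state. The test name is an assumption.
func TestCloneSketch(t *testing.T) {
	m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
	if !proto.Equal(m, cloneTestMessage) {
		t.Errorf("Clone(%v) = %v, want equal", cloneTestMessage, m)
	}

	// Mutating the clone must not affect the original.
	m.Pet[0] = "puppy"
	if cloneTestMessage.Pet[0] == "puppy" {
		t.Error("clone shares Pet slice with original")
	}
}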
// handleRPCConn reads a command from the connection and executes it.
func (r *rpc) handleRPCConn(conn net.Conn) {
	defer conn.Close()

	// RPC connections should execute on the leader. If we are not the leader,
	// proxy the connection to the leader so that clients can connect to any
	// node in the cluster.
	r.traceCluster("rpc connection from: %v", conn.RemoteAddr())
	if !r.store.IsLeader() {
		r.proxyLeader(conn.(*net.TCPConn))
		return
	}

	// Read and execute request.
	typ, resp, err := func() (internal.RPCType, proto.Message, error) {
		// Read request size.
		var sz uint64
		if err := binary.Read(conn, binary.BigEndian, &sz); err != nil {
			return internal.RPCType_Error, nil, fmt.Errorf("read size: %s", err)
		}

		if sz == 0 {
			return 0, nil, fmt.Errorf("invalid message size: %d", sz)
		}

		if sz >= MaxMessageSize {
			return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz)
		}

		// Read request.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(conn, buf); err != nil {
			return internal.RPCType_Error, nil, fmt.Errorf("read request: %s", err)
		}

		// Determine the RPC type.
		rpcType := internal.RPCType(btou64(buf[0:8]))
		buf = buf[8:]

		r.traceCluster("recv %v request on: %v", rpcType, conn.RemoteAddr())
		switch rpcType {
		case internal.RPCType_FetchData:
			var req internal.FetchDataRequest
			if err := proto.Unmarshal(buf, &req); err != nil {
				return internal.RPCType_Error, nil, fmt.Errorf("fetch request unmarshal: %v", err)
			}
			resp, err := r.handleFetchData(&req)
			return rpcType, resp, err
		case internal.RPCType_Join:
			var req internal.JoinRequest
			if err := proto.Unmarshal(buf, &req); err != nil {
				return internal.RPCType_Error, nil, fmt.Errorf("join request unmarshal: %v", err)
			}
			resp, err := r.handleJoinRequest(&req)
			return rpcType, resp, err
		default:
			return internal.RPCType_Error, nil, fmt.Errorf("unknown rpc type: %v", rpcType)
		}
	}()

	// Handle unexpected RPC errors.
	if err != nil {
		resp = &internal.ErrorResponse{
			Header: &internal.ResponseHeader{
				OK: proto.Bool(false),
			},
		}
		typ = internal.RPCType_Error
	}

	// Set the status header and error message.
	if reply, ok := resp.(Reply); ok {
		reply.GetHeader().OK = proto.Bool(err == nil)
		if err != nil {
			reply.GetHeader().Error = proto.String(err.Error())
		}
	}

	r.sendResponse(conn, typ, resp)
}
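// NOTE: illustrative sketch, not part of the original source. It makes the
// wire framing that handleRPCConn expects explicit: an 8-byte big-endian
// total size, then an 8-byte big-endian RPC type, then the marshaled
// protobuf body (the size prefix covers the type plus the body). The helper
// name `writeRequest` is an assumption.
func writeRequest(conn net.Conn, typ internal.RPCType, req proto.Message) error {
	body, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	// Frame = 8-byte RPC type + protobuf body.
	frame := make([]byte, 8, 8+len(body))
	binary.BigEndian.PutUint64(frame, uint64(typ))
	frame = append(frame, body...)

	// The length prefix counts the whole frame, matching the size check above.
	if err := binary.Write(conn, binary.BigEndian, uint64(len(frame))); err != nil {
		return err
	}
	_, err = conn.Write(frame)
	return err
}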