// readTCP is used to read the start of a TCP stream. // it decrypts and decompresses the stream if necessary func (m *Memberlist) readTCP(conn net.Conn) (messageType, io.Reader, *codec.Decoder, error) { // Created a buffered reader var bufConn io.Reader = bufio.NewReader(conn) // Read the message type buf := [1]byte{0} if _, err := bufConn.Read(buf[:]); err != nil { return 0, nil, nil, err } msgType := messageType(buf[0]) // Check if the message is encrypted if msgType == encryptMsg { if !m.config.EncryptionEnabled() { return 0, nil, nil, fmt.Errorf("Remote state is encrypted and encryption is not configured") } plain, err := m.decryptRemoteState(bufConn) if err != nil { return 0, nil, nil, err } // Reset message type and bufConn msgType = messageType(plain[0]) bufConn = bytes.NewReader(plain[1:]) } else if m.config.EncryptionEnabled() { return 0, nil, nil, fmt.Errorf("Encryption is configured but remote state is not encrypted") } // Get the msgPack decoders hd := codec.MsgpackHandle{} dec := codec.NewDecoder(bufConn, &hd) // Check if we have a compressed message if msgType == compressMsg { var c compress if err := dec.Decode(&c); err != nil { return 0, nil, nil, err } decomp, err := decompressBuffer(&c) if err != nil { return 0, nil, nil, err } // Reset the message type msgType = messageType(decomp[0]) // Create a new bufConn bufConn = bytes.NewReader(decomp[1:]) // Create a new decoder dec = codec.NewDecoder(bufConn, &hd) } return msgType, bufConn, dec, nil }
func decodePeerMsg(buf []byte) []string { var data []string if err := codec.NewDecoder(bytes.NewReader(buf), &mh).Decode(&data); err != nil { kingpin.Errorf("Error while decoding (generic msgpack) message: %s\n", err.Error()) } return data }
func printMsgPackData(log raft.Log, msgtype string, buf []byte) { var data interface{} if err := codec.NewDecoder(bytes.NewReader(buf), &mh).Decode(&data); err != nil { kingpin.Errorf("Error while decoding (generic msgpack) message: %s\n", err.Error()) } printJson(log, msgtype, data) }
// joins the raft leader and sets up infrastructure for // processing commands // can return ErrNotLeader func newConnToLeader(conn net.Conn, advertiseAddr string, lg *log.Logger) (*connToLeader, error) { // send join command h := &codec.MsgpackHandle{} ret := &connToLeader{ c: conn, e: codec.NewEncoder(conn, h), d: codec.NewDecoder(conn, h), l: new(sync.Mutex), lg: lg, pending: make(chan *commandCallback, 64), } join := &joinReq{ PeerAddr: advertiseAddr, } err := ret.e.Encode(join) if err != nil { ret.c.Close() return nil, err } joinResp := &joinResp{} err = ret.d.Decode(joinResp) if err != nil { ret.lg.Printf("Error connecting to leader at %s : %s", conn.RemoteAddr().String(), err) ret.c.Close() return nil, err } go ret.readResponses() return ret, nil }
// handleSnapshotRequest reads the request from the conn and dispatches it. This // will be called from a goroutine after an incoming stream is determined to be // a snapshot request. func (s *Server) handleSnapshotRequest(conn net.Conn) error { var args structs.SnapshotRequest dec := codec.NewDecoder(conn, &codec.MsgpackHandle{}) if err := dec.Decode(&args); err != nil { return fmt.Errorf("failed to decode request: %v", err) } var reply structs.SnapshotResponse snap, err := s.dispatchSnapshotRequest(&args, conn, &reply) if err != nil { reply.Error = err.Error() goto RESPOND } defer func() { if err := snap.Close(); err != nil { s.logger.Printf("[ERR] consul: Failed to close snapshot: %v", err) } }() RESPOND: enc := codec.NewEncoder(conn, &codec.MsgpackHandle{}) if err := enc.Encode(&reply); err != nil { return fmt.Errorf("failed to encode response: %v", err) } if snap != nil { if _, err := io.Copy(conn, snap); err != nil { return fmt.Errorf("failed to stream snapshot: %v", err) } } return nil }
// getConn is used to get a connection from the pool. func (n *NetworkTransport) getConn(target string) (*netConn, error) { // Check for a pooled conn if conn := n.getPooledConn(target); conn != nil { return conn, nil } // Dial a new connection conn, err := n.stream.Dial(target, n.timeout) if err != nil { return nil, err } // Wrap the conn netConn := &netConn{ target: target, conn: conn, r: bufio.NewReader(conn), w: bufio.NewWriter(conn), } // Setup encoder/decoders netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) // Done return netConn, nil }
// listen is a long running routine that listens for new clients func (i *AgentRPC) listen() { for { conn, err := i.listener.Accept() if err != nil { if i.stop { return } i.logger.Printf("[ERR] agent.rpc: Failed to accept client: %v", err) continue } i.logger.Printf("[INFO] agent.rpc: Accepted client: %v", conn.RemoteAddr()) // Wrap the connection in a client client := &rpcClient{ name: conn.RemoteAddr().String(), conn: conn, reader: bufio.NewReader(conn), writer: bufio.NewWriter(conn), } client.dec = codec.NewDecoder(client.reader, msgpackHandle) client.enc = codec.NewEncoder(client.writer, msgpackHandle) // Register the client i.Lock() if !i.stop { i.clients[client.name] = client go i.handleClient(client) } else { conn.Close() } i.Unlock() } }
func MockDecoder(buf []byte) interface{} { out := new(MockData) err := codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out) if err != nil { panic(err) } return out }
// NewCodec returns a MsgpackCodec that can be used as either // a Client or Server rpc Codec. It also provides controls for // enabling and disabling buffering for both reads and writes. func NewCodec(bufReads, bufWrites bool, conn io.ReadWriteCloser) *MsgpackCodec { cc := &MsgpackCodec{ conn: conn, } if bufReads { cc.bufR = bufio.NewReader(conn) cc.dec = codec.NewDecoder(cc.bufR, msgpackHandle) } else { cc.dec = codec.NewDecoder(cc.conn, msgpackHandle) } if bufWrites { cc.bufW = bufio.NewWriter(conn) cc.enc = codec.NewEncoder(cc.bufW, msgpackHandle) } else { cc.enc = codec.NewEncoder(cc.conn, msgpackHandle) } return cc }
// SnapshotRPC is a streaming client function for performing a snapshot RPC
// request to a remote server. It will create a fresh connection for each
// request, send the request header, and then stream in any data from the
// reader (for a restore). It will then parse the received response header, and
// if there's no error will return an io.ReadCloser (that you must close) with
// the streaming output (for a snapshot). If the reply contains an error, this
// will always return an error as well, so you don't need to check the error
// inside the filled-in reply.
func SnapshotRPC(pool *ConnPool, dc string, addr net.Addr, args *structs.SnapshotRequest, in io.Reader, reply *structs.SnapshotResponse) (io.ReadCloser, error) {
	conn, hc, err := pool.Dial(dc, addr)
	if err != nil {
		return nil, err
	}

	// keep will disarm the defer on success if we are returning the caller
	// our connection to stream the output.
	var keep bool
	defer func() {
		if !keep {
			conn.Close()
		}
	}()

	// Write the snapshot RPC byte to set the mode, then perform the
	// request.
	if _, err := conn.Write([]byte{byte(rpcSnapshot)}); err != nil {
		return nil, fmt.Errorf("failed to write stream type: %v", err)
	}

	// Push the header encoded as msgpack, then stream the input.
	enc := codec.NewEncoder(conn, &codec.MsgpackHandle{})
	if err := enc.Encode(&args); err != nil {
		return nil, fmt.Errorf("failed to encode request: %v", err)
	}
	if _, err := io.Copy(conn, in); err != nil {
		return nil, fmt.Errorf("failed to copy snapshot in: %v", err)
	}

	// Our RPC protocol requires support for a half-close in order to signal
	// the other side that they are done reading the stream, since we don't
	// know the size in advance. This saves us from having to buffer just to
	// calculate the size.
	if hc != nil {
		if err := hc.CloseWrite(); err != nil {
			return nil, fmt.Errorf("failed to half close snapshot connection: %v", err)
		}
	} else {
		return nil, fmt.Errorf("snapshot connection requires half-close support")
	}

	// Pull the header decoded as msgpack. The caller can continue to read
	// the conn to stream the remaining data.
	dec := codec.NewDecoder(conn, &codec.MsgpackHandle{})
	if err := dec.Decode(reply); err != nil {
		return nil, fmt.Errorf("failed to decode response: %v", err)
	}
	if reply.Error != "" {
		return nil, errors.New(reply.Error)
	}

	// Success: ownership of conn transfers to the caller, so disarm the
	// deferred close above.
	keep = true
	return conn, nil
}
func decodeQueryEventRecord(buf []byte) (EventRecord, error) { var qr QueryEventRecord handle := codec.MsgpackHandle{RawToString: true, WriteExt: true} if err := codec.NewDecoder(bytes.NewReader(buf), &handle).Decode(&qr); err != nil { log.Printf("decoding failed") return nil, err } return EventRecord(qr), nil }
// See raft.FSM. func (m *MockFSM) Restore(in io.ReadCloser) error { m.Lock() defer m.Unlock() defer in.Close() hd := codec.MsgpackHandle{} dec := codec.NewDecoder(in, &hd) m.logs = nil return dec.Decode(&m.logs) }
// decodeTags is used to decode a tag map func (t *tribe) decodeTags(buf []byte) map[string]string { tags := make(map[string]string) r := bytes.NewReader(buf) dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) if err := dec.Decode(&tags); err != nil { t.logger.WithFields(log.Fields{ "_block": "decode-tags", "error": err, }).Error("Failed to decode tags") } return tags }
// decodeTags is used to decode a tag map func (s *Serf) decodeTags(buf []byte) map[string]string { tags := make(map[string]string) // Backwards compatibility mode if len(buf) == 0 || buf[0] != tagMagicByte { tags["role"] = string(buf) return tags } // Decode the tags r := bytes.NewReader(buf[1:]) dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) if err := dec.Decode(&tags); err != nil { s.logger.Printf("[ERR] serf: Failed to decode tags: %v", err) } return tags }
// ClientFromConfig is used to create a new RPC client given the // configuration object. This will return a client, or an error if // the connection could not be established. func ClientFromConfig(c *Config) (*RPCClient, error) { // Setup the defaults if c.Timeout == 0 { c.Timeout = DefaultTimeout } // Try to dial to serf conn, err := net.DialTimeout("tcp", c.Addr, c.Timeout) if err != nil { return nil, err } // Create the client client := &RPCClient{ seq: 0, timeout: c.Timeout, conn: conn.(*net.TCPConn), reader: bufio.NewReader(conn), writer: bufio.NewWriter(conn), dispatch: make(map[uint64]seqHandler), shutdownCh: make(chan struct{}), } client.dec = codec.NewDecoder(client.reader, &codec.MsgpackHandle{RawToString: true, WriteExt: true}) client.enc = codec.NewEncoder(client.writer, &codec.MsgpackHandle{RawToString: true, WriteExt: true}) go client.listen() // Do the initial handshake if err := client.handshake(); err != nil { client.Close() return nil, err } // Do the initial authentication if needed if c.AuthKey != "" { if err := client.auth(c.AuthKey); err != nil { client.Close() return nil, err } } return client, err }
// listen is a long running routine that listens for new clients func (i *AgentIPC) listen() { for { conn, err := i.listener.Accept() if err != nil { if i.stop { return } i.logger.Printf("[ERR] agent.ipc: Failed to accept client: %v", err) continue } i.logger.Printf("[INFO] agent.ipc: Accepted client: %v", conn.RemoteAddr()) metrics.IncrCounter([]string{"agent", "ipc", "accept"}, 1) // Wrap the connection in a client client := &IPCClient{ name: conn.RemoteAddr().String(), conn: conn, reader: bufio.NewReader(conn), writer: bufio.NewWriter(conn), eventStreams: make(map[uint64]*eventStream), pendingQueries: make(map[uint64]*serf.Query), } client.dec = codec.NewDecoder(client.reader, &codec.MsgpackHandle{RawToString: true, WriteExt: true}) client.enc = codec.NewEncoder(client.writer, &codec.MsgpackHandle{RawToString: true, WriteExt: true}) if err != nil { i.logger.Printf("[ERR] agent.ipc: Failed to create decoder: %v", err) conn.Close() continue } // Register the client i.Lock() if !i.stop { i.clients[client.name] = client go i.handleClient(client) } else { conn.Close() } i.Unlock() } }
// NotifyPingComplete is called when this node successfully completes a direct ping // of a peer node. func (p *pingDelegate) NotifyPingComplete(other *memberlist.Node, rtt time.Duration, payload []byte) { if payload == nil || len(payload) == 0 { return } // Verify ping version in the header. version := payload[0] if version != PingVersion { log.Printf("[ERR] serf: Unsupported ping version: %v", version) return } // Process the remainder of the message as a coordinate. r := bytes.NewReader(payload[1:]) dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) var coord coordinate.Coordinate if err := dec.Decode(&coord); err != nil { log.Printf("[ERR] serf: Failed to decode coordinate from ping: %v", err) } // Apply the update. Since this is a coordinate coming from some place // else we harden this and look for dimensionality problems proactively. before := p.serf.coordClient.GetCoordinate() if before.IsCompatibleWith(&coord) { after := p.serf.coordClient.Update(other.Name, &coord, rtt) // Publish some metrics to give us an idea of how much we are // adjusting each time we update. d := float32(before.DistanceTo(after).Seconds() * 1.0e3) metrics.AddSample([]string{"serf", "coordinate", "adjustment-ms"}, d) // Cache the coordinate for the other node, and add our own // to the cache as well since it just got updated. This lets // users call GetCachedCoordinate with our node name, which is // more friendly. p.serf.coordCacheLock.Lock() p.serf.coordCache[other.Name] = &coord p.serf.coordCache[p.serf.config.NodeName] = p.serf.coordClient.GetCoordinate() p.serf.coordCacheLock.Unlock() } else { log.Printf("[ERR] serf: Rejected bad coordinate: %v\n", coord) } }
// handleConn is used to handle an inbound connection for its lifespan.
// It sets up buffered I/O plus msgpack codecs over the connection and
// processes commands until the peer hangs up or an error occurs.
func (n *NetworkTransport) handleConn(conn net.Conn) {
	defer conn.Close()
	r := bufio.NewReader(conn)
	w := bufio.NewWriter(conn)
	dec := codec.NewDecoder(r, &codec.MsgpackHandle{})
	enc := codec.NewEncoder(w, &codec.MsgpackHandle{})

	for {
		if err := n.handleCommand(r, dec, enc); err != nil {
			// EOF is the normal way the peer ends the stream; only
			// log unexpected errors.
			if err != io.EOF {
				n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err)
			}
			return
		}
		// Flush after every command so the response actually reaches
		// the peer before we block on the next decode.
		if err := w.Flush(); err != nil {
			n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err)
			return
		}
	}
}
func TestTimeTable_SerializeDeserialize(t *testing.T) { tt := NewTimeTable(time.Second, time.Minute) // Witness some data start := time.Now() plusOne := start.Add(time.Minute) plusTwo := start.Add(2 * time.Minute) plusFive := start.Add(5 * time.Minute) plusThirty := start.Add(30 * time.Minute) plusHour := start.Add(60 * time.Minute) tt.Witness(2, start) tt.Witness(10, plusOne) tt.Witness(20, plusTwo) tt.Witness(30, plusFive) tt.Witness(40, plusThirty) tt.Witness(50, plusHour) var buf bytes.Buffer enc := codec.NewEncoder(&buf, msgpackHandle) err := tt.Serialize(enc) if err != nil { t.Fatalf("err: %v", err) } dec := codec.NewDecoder(&buf, msgpackHandle) tt2 := NewTimeTable(time.Second, time.Minute) err = tt2.Deserialize(dec) if err != nil { t.Fatalf("err: %v", err) } if !reflect.DeepEqual(tt.table, tt2.table) { t.Fatalf("bad: %#v %#v", tt, tt2) } }
// NewRPCClient is used to create a new RPC client given the address. // This will properly dial, handshake, and start listening func NewRPCClient(addr string) (*RPCClient, error) { var conn net.Conn var err error if envAddr := os.Getenv("CONSUL_RPC_ADDR"); envAddr != "" { addr = envAddr } // Try to dial to agent mode := "tcp" if strings.HasPrefix(addr, "/") { mode = "unix" } if conn, err = net.Dial(mode, addr); err != nil { return nil, err } // Create the client client := &RPCClient{ seq: 0, conn: conn, reader: bufio.NewReader(conn), writer: bufio.NewWriter(conn), dispatch: make(map[uint64]seqHandler), shutdownCh: make(chan struct{}), } client.dec = codec.NewDecoder(client.reader, msgpackHandle) client.enc = codec.NewEncoder(client.writer, msgpackHandle) go client.listen() // Do the initial handshake if err := client.handshake(); err != nil { client.Close() return nil, err } return client, err }
// Decode reverses the encode operation on a byte slice input func decode(buf []byte, out interface{}) error { r := bytes.NewReader(buf) hd := codec.MsgpackHandle{} dec := codec.NewDecoder(r, &hd) return dec.Decode(out) }
// Restore rebuilds the FSM state from a snapshot stream. The stream is a
// snapshotHeader followed by a sequence of (1-byte SnapshotType,
// msgpack-encoded record) pairs until EOF. The restore transaction is
// aborted via defer unless Commit is reached at the end.
func (n *nomadFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	newState, err := state.NewStateStore(n.logOutput)
	if err != nil {
		return err
	}
	n.state = newState

	// Start the state restore
	restore, err := newState.Restore()
	if err != nil {
		return err
	}
	defer restore.Abort()

	// Create a decoder
	// NOTE(review): the type byte below is read from `old` directly while
	// this decoder also reads from `old` — this relies on the decoder not
	// buffering ahead of the records it decodes; confirm before swapping
	// codec implementations.
	dec := codec.NewDecoder(old, structs.MsgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the message type; EOF here is the normal end of stream.
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode the record that follows based on its type byte.
		switch SnapshotType(msgType[0]) {
		case TimeTableSnapshot:
			if err := n.timetable.Deserialize(dec); err != nil {
				return fmt.Errorf("time table deserialize failed: %v", err)
			}

		case NodeSnapshot:
			node := new(structs.Node)
			if err := dec.Decode(node); err != nil {
				return err
			}
			if err := restore.NodeRestore(node); err != nil {
				return err
			}

		case JobSnapshot:
			job := new(structs.Job)
			if err := dec.Decode(job); err != nil {
				return err
			}
			if err := restore.JobRestore(job); err != nil {
				return err
			}

		case EvalSnapshot:
			eval := new(structs.Evaluation)
			if err := dec.Decode(eval); err != nil {
				return err
			}
			if err := restore.EvalRestore(eval); err != nil {
				return err
			}

		case AllocSnapshot:
			alloc := new(structs.Allocation)
			if err := dec.Decode(alloc); err != nil {
				return err
			}
			if err := restore.AllocRestore(alloc); err != nil {
				return err
			}

		case IndexSnapshot:
			idx := new(state.IndexEntry)
			if err := dec.Decode(idx); err != nil {
				return err
			}
			if err := restore.IndexRestore(idx); err != nil {
				return err
			}

		case PeriodicLaunchSnapshot:
			launch := new(structs.PeriodicLaunch)
			if err := dec.Decode(launch); err != nil {
				return err
			}
			if err := restore.PeriodicLaunchRestore(launch); err != nil {
				return err
			}

		default:
			// NOTE(review): msgType is a []byte so %v renders e.g. "[7]";
			// msgType[0] would read more naturally — confirm before changing.
			return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
		}
	}

	// Commit the state restore
	restore.Commit()
	return nil
}
// Decode is used to decode a MsgPack encoded object func Decode(buf []byte, out interface{}) error { return codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out) }
// Restore rebuilds the FSM state from a snapshot stream: a snapshotHeader
// followed by (1-byte MessageType, msgpack-encoded record) pairs until EOF.
// The restore transaction is aborted via defer unless Commit is reached.
func (c *consulFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	stateNew, err := state.NewStateStore(c.gc)
	if err != nil {
		return err
	}
	c.state = stateNew

	// Set up a new restore transaction
	restore := c.state.Restore()
	defer restore.Abort()

	// Create a decoder
	// NOTE(review): the type byte below is read from `old` directly while
	// this decoder also reads from `old` — this relies on the decoder not
	// buffering ahead of the records it decodes.
	dec := codec.NewDecoder(old, msgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the message type; EOF here is the normal end of stream.
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode the record that follows based on its type byte.
		switch structs.MessageType(msgType[0]) {
		case structs.RegisterRequestType:
			var req structs.RegisterRequest
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := restore.Registration(header.LastIndex, &req); err != nil {
				return err
			}

		case structs.KVSRequestType:
			var req structs.DirEntry
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := restore.KVS(&req); err != nil {
				return err
			}

		case structs.TombstoneRequestType:
			var req structs.DirEntry
			if err := dec.Decode(&req); err != nil {
				return err
			}

			// For historical reasons, these are serialized in the
			// snapshots as KV entries. We want to keep the snapshot
			// format compatible with pre-0.6 versions for now.
			stone := &state.Tombstone{
				Key:   req.Key,
				Index: req.ModifyIndex,
			}
			if err := restore.Tombstone(stone); err != nil {
				return err
			}

		case structs.SessionRequestType:
			var req structs.Session
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := restore.Session(&req); err != nil {
				return err
			}

		case structs.ACLRequestType:
			var req structs.ACL
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := restore.ACL(&req); err != nil {
				return err
			}

		case structs.CoordinateBatchUpdateType:
			var req structs.Coordinates
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := restore.Coordinates(header.LastIndex, req); err != nil {
				return err
			}

		case structs.PreparedQueryRequestType:
			var req structs.PreparedQuery
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := restore.PreparedQuery(&req); err != nil {
				return err
			}

		default:
			// NOTE(review): msgType is a []byte so %v renders e.g. "[3]";
			// msgType[0] would read more naturally — confirm before changing.
			return fmt.Errorf("Unrecognized msg type: %v", msgType)
		}
	}

	restore.Commit()
	return nil
}
// serves a follower from a leader server
// we tell all servers to go elsewhere if we are not leader
//
// Flow: read the follower's joinReq, try to add it as a raft peer, reply
// with joinResp (carrying the real leader's address if we are not it),
// then loop decoding raft.Log commands, applying each and queueing the
// ApplyFuture for sendResponses to report back.
func serveFollower(lg *log.Logger, follower net.Conn, leader *server) {
	ch := &codec.MsgpackHandle{}
	decode := codec.NewDecoder(follower, ch)
	encode := codec.NewEncoder(follower, ch)
	jReq := &joinReq{}
	jResp := &joinResp{}

	// Read the follower's join request off the wire.
	err := decode.Decode(jReq)
	if err != nil {
		lg.Printf("Error serving follower at %s : %s", follower.RemoteAddr(), err)
		return
	}

	// register with leader
	isLeader := true
	if leader.IsLeader() {
		lf := leader.raft.VerifyLeader()
		err := lf.Error()
		if err != nil {
			// NOTE(review): on VerifyLeader failure isLeader is set false
			// but execution still falls through to resolve and AddPeer
			// below — confirm this is intended rather than an early-out.
			lg.Printf("Error while verifying leader on host %s : %s", leader.rpcLayer.Addr().String(), err)
			isLeader = false
		}
		peerAddr, err := net.ResolveTCPAddr("tcp", jReq.PeerAddr)
		if err != nil {
			lg.Printf("Couldn't resolve pathname %s processing join from %s", jReq.PeerAddr, follower.RemoteAddr().String())
			follower.Close()
			return
		}
		addFuture := leader.raft.AddPeer(peerAddr)
		err = addFuture.Error()
		// An already-known peer is fine; only other errors are fatal.
		if err == raft.ErrKnownPeer {
			lg.Printf("Tried to add already existing peer %s, continuing", peerAddr)
		}
		if err != nil && err != raft.ErrKnownPeer {
			lg.Printf("Error adding peer %s : %s, terminating conn", peerAddr, err)
			follower.Close()
			return
		}
	} else {
		isLeader = false
	}
	if !isLeader {
		// send response indicating leader is someone else, then return
		lg.Printf("Node %s not leader, refusing connection to peer %s", leader.rpcLayer.Addr().String(), jReq.PeerAddr)
		leaderAddr := leader.raft.Leader()
		if leaderAddr != nil {
			jResp.LeaderHost = leaderAddr.String()
		}
		encode.Encode(jResp)
		follower.Close()
		return
	}

	// send join resp
	err = encode.Encode(jResp)
	if err != nil {
		lg.Printf("Error sending joinResp : %s", err)
		follower.Close()
		return
	}

	// read commands; futures are consumed by the sendResponses goroutine.
	err = nil
	futures := make(chan raft.ApplyFuture, 16)
	defer func() {
		// die
		follower.Close()
		close(futures)
	}()
	go sendResponses(futures, lg, encode, follower)
	for {
		cmdReq := &raft.Log{}
		err = decode.Decode(cmdReq)
		if err != nil {
			lg.Printf("Error reading command from node %s : '%s', closing conn", follower.RemoteAddr().String(), err.Error())
			follower.Close()
			return
		}
		// exec with leader
		future := leader.raft.Apply(cmdReq.Data, 1*time.Minute)
		futures <- future
	}
}
func decodeMessage(buf []byte, out interface{}) error { var handle codec.MsgpackHandle return codec.NewDecoder(bytes.NewReader(buf), &handle).Decode(out) }
// recvRemoteState is used to read the remote state from a connection
// during a push/pull exchange. It returns the peer's join intent, its
// node states, and any user-state payload. The stream may be encrypted
// and/or compressed; both layers are unwrapped here before the
// push/pull header and node states are decoded.
func (m *Memberlist) readRemoteState(conn net.Conn) (bool, []pushNodeState, []byte, error) {
	// Setup a deadline
	conn.SetDeadline(time.Now().Add(m.config.TCPTimeout))

	// Created a buffered reader
	var bufConn io.Reader = bufio.NewReader(conn)

	// Read the message type
	buf := [1]byte{0}
	if _, err := bufConn.Read(buf[:]); err != nil {
		return false, nil, nil, err
	}
	msgType := messageType(buf[0])

	// Check if the message is encrypted; the encryption setting must
	// match on both sides or the exchange is rejected.
	if msgType == encryptMsg {
		if !m.config.EncryptionEnabled() {
			return false, nil, nil, fmt.Errorf("Remote state is encrypted and encryption is not configured")
		}

		plain, err := m.decryptRemoteState(bufConn)
		if err != nil {
			return false, nil, nil, err
		}

		// Reset message type and bufConn; the decrypted payload carries
		// its own type byte.
		msgType = messageType(plain[0])
		bufConn = bytes.NewReader(plain[1:])
	} else if m.config.EncryptionEnabled() {
		return false, nil, nil, fmt.Errorf("Encryption is configured but remote state is not encrypted")
	}

	// Get the msgPack decoders
	hd := codec.MsgpackHandle{}
	dec := codec.NewDecoder(bufConn, &hd)

	// Check if we have a compressed message
	if msgType == compressMsg {
		var c compress
		if err := dec.Decode(&c); err != nil {
			return false, nil, nil, err
		}
		decomp, err := decompressBuffer(&c)
		if err != nil {
			return false, nil, nil, err
		}

		// Reset the message type; the decompressed payload again starts
		// with a type byte.
		msgType = messageType(decomp[0])

		// Create a new bufConn
		bufConn = bytes.NewReader(decomp[1:])

		// Create a new decoder
		dec = codec.NewDecoder(bufConn, &hd)
	}

	// Quit if not push/pull
	if msgType != pushPullMsg {
		err := fmt.Errorf("received invalid msgType (%d)", msgType)
		return false, nil, nil, err
	}

	// Read the push/pull header
	var header pushPullHeader
	if err := dec.Decode(&header); err != nil {
		return false, nil, nil, err
	}

	// Allocate space for the transfer
	remoteNodes := make([]pushNodeState, header.Nodes)

	// Try to decode all the states
	for i := 0; i < header.Nodes; i++ {
		if err := dec.Decode(&remoteNodes[i]); err != nil {
			return false, remoteNodes, nil, err
		}
	}

	// Read the remote user state into a buffer
	var userBuf []byte
	if header.UserStateLen > 0 {
		userBuf = make([]byte, header.UserStateLen)
		bytes, err := io.ReadAtLeast(bufConn, userBuf, header.UserStateLen)
		if err == nil && bytes != header.UserStateLen {
			err = fmt.Errorf(
				"Failed to read full user state (%d / %d)",
				bytes, header.UserStateLen)
		}
		if err != nil {
			return false, remoteNodes, nil, err
		}
	}

	// For proto versions < 2, there is no port provided. Mask old
	// behavior by using the configured port
	for idx := range remoteNodes {
		if m.ProtocolVersion() < 2 || remoteNodes[idx].Port == 0 {
			remoteNodes[idx].Port = uint16(m.config.BindPort)
		}
	}

	return header.Join, remoteNodes, userBuf, nil
}
// TestTCPPushPull exercises the TCP push/pull exchange end to end: it
// seeds the local memberlist with one suspect node, dials its own TCP
// listener, pushes three alive node states over the wire, then reads
// back the response and verifies the single seeded node round-trips.
func TestTCPPushPull(t *testing.T) {
	m := GetMemberlist(t)
	defer m.Shutdown()
	// Seed the local member list with one suspect node so the server
	// side has state to push back to us.
	m.nodes = append(m.nodes, &nodeState{
		Node: Node{
			Name: "Test 0",
			Addr: net.ParseIP(m.config.BindAddr),
			Port: uint16(m.config.BindPort),
		},
		Incarnation: 0,
		State:       stateSuspect,
		StateChange: time.Now().Add(-1 * time.Second),
	})

	addr := fmt.Sprintf("%s:%d", m.config.BindAddr, m.config.BindPort)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		t.Fatalf("unexpected err %s", err)
	}
	defer conn.Close()

	// Build three alive local node states to push.
	localNodes := make([]pushNodeState, 3)
	localNodes[0].Name = "Test 0"
	localNodes[0].Addr = net.ParseIP(m.config.BindAddr)
	localNodes[0].Port = uint16(m.config.BindPort)
	localNodes[0].Incarnation = 1
	localNodes[0].State = stateAlive
	localNodes[1].Name = "Test 1"
	localNodes[1].Addr = net.ParseIP(m.config.BindAddr)
	localNodes[1].Port = uint16(m.config.BindPort)
	localNodes[1].Incarnation = 1
	localNodes[1].State = stateAlive
	localNodes[2].Name = "Test 2"
	localNodes[2].Addr = net.ParseIP(m.config.BindAddr)
	localNodes[2].Port = uint16(m.config.BindPort)
	localNodes[2].Incarnation = 1
	localNodes[2].State = stateAlive

	// Send our node state
	header := pushPullHeader{Nodes: 3}
	hd := codec.MsgpackHandle{}
	enc := codec.NewEncoder(conn, &hd)

	// Send the push/pull indicator
	conn.Write([]byte{byte(pushPullMsg)})

	if err := enc.Encode(&header); err != nil {
		t.Fatalf("unexpected err %s", err)
	}
	for i := 0; i < header.Nodes; i++ {
		if err := enc.Encode(&localNodes[i]); err != nil {
			t.Fatalf("unexpected err %s", err)
		}
	}

	// Read the message type
	var msgType messageType
	if err := binary.Read(conn, binary.BigEndian, &msgType); err != nil {
		t.Fatalf("unexpected err %s", err)
	}

	var bufConn io.Reader = conn
	msghd := codec.MsgpackHandle{}
	dec := codec.NewDecoder(bufConn, &msghd)

	// Check if we have a compressed message
	if msgType == compressMsg {
		var c compress
		if err := dec.Decode(&c); err != nil {
			t.Fatalf("unexpected err %s", err)
		}
		decomp, err := decompressBuffer(&c)
		if err != nil {
			t.Fatalf("unexpected err %s", err)
		}

		// Reset the message type; the decompressed payload starts with
		// its own type byte.
		msgType = messageType(decomp[0])

		// Create a new bufConn
		bufConn = bytes.NewReader(decomp[1:])

		// Create a new decoder
		dec = codec.NewDecoder(bufConn, &hd)
	}

	// Quit if not push/pull
	if msgType != pushPullMsg {
		t.Fatalf("bad message type")
	}

	if err := dec.Decode(&header); err != nil {
		t.Fatalf("unexpected err %s", err)
	}

	// Allocate space for the transfer
	remoteNodes := make([]pushNodeState, header.Nodes)

	// Try to decode all the states
	for i := 0; i < header.Nodes; i++ {
		if err := dec.Decode(&remoteNodes[i]); err != nil {
			t.Fatalf("unexpected err %s", err)
		}
	}

	// We expect exactly the single seeded node back, unchanged.
	if len(remoteNodes) != 1 {
		t.Fatalf("bad response")
	}

	n := &remoteNodes[0]
	if n.Name != "Test 0" {
		t.Fatalf("bad name")
	}
	if bytes.Compare(n.Addr, net.ParseIP(m.config.BindAddr)) != 0 {
		t.Fatal("bad addr")
	}
	if n.Incarnation != 0 {
		t.Fatal("bad incarnation")
	}
	if n.State != stateSuspect {
		t.Fatal("bad state")
	}
}
// Restore rebuilds the FSM state from a snapshot stream into a fresh
// state store created under a temporary path, replacing the current one.
// The stream is a snapshotHeader followed by (1-byte MessageType,
// msgpack-encoded record) pairs until EOF.
func (c *consulFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a temporary path for the state store
	tmpPath, err := ioutil.TempDir(c.path, "state")
	if err != nil {
		return err
	}

	// Create a new state store
	state, err := NewStateStorePath(c.gc, tmpPath, c.logOutput)
	if err != nil {
		return err
	}
	c.state.Close()
	c.state = state

	// Create a decoder
	// NOTE(review): the type byte below is read from `old` directly while
	// this decoder also reads from `old` — this relies on the decoder not
	// buffering ahead of the records it decodes.
	dec := codec.NewDecoder(old, msgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the message type; EOF here is the normal end of stream.
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode the record that follows based on its type byte.
		switch structs.MessageType(msgType[0]) {
		case structs.RegisterRequestType:
			var req structs.RegisterRequest
			if err := dec.Decode(&req); err != nil {
				return err
			}
			c.applyRegister(&req, header.LastIndex)

		case structs.KVSRequestType:
			var req structs.DirEntry
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := c.state.KVSRestore(&req); err != nil {
				return err
			}

		case structs.SessionRequestType:
			var req structs.Session
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := c.state.SessionRestore(&req); err != nil {
				return err
			}

		case structs.ACLRequestType:
			var req structs.ACL
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := c.state.ACLRestore(&req); err != nil {
				return err
			}

		case structs.TombstoneRequestType:
			var req structs.DirEntry
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := c.state.TombstoneRestore(&req); err != nil {
				return err
			}

		default:
			// NOTE(review): msgType is a []byte so %v renders e.g. "[3]";
			// msgType[0] would read more naturally — confirm before changing.
			return fmt.Errorf("Unrecognized msg type: %v", msgType)
		}
	}

	return nil
}