Example #1
// rpcRelayBlock is an RPC that accepts a block from a peer.
// COMPATv0.5.1
func (cs *ConsensusSet) rpcRelayBlock(conn modules.PeerConn) error {
	err := cs.tg.Add()
	if err != nil {
		return err
	}
	defer cs.tg.Done()

	// Decode the block from the connection.
	var b types.Block
	err = encoding.ReadObject(conn, &b, types.BlockSizeLimit)
	if err != nil {
		return err
	}

	// Submit the block to the consensus set and broadcast it.
	err = cs.managedAcceptBlock(b)
	if err == errOrphan {
		// If the block is an orphan, try to find the parents. The block
		// received from the peer is discarded and will be downloaded again if
		// the parent is found.
		go func() {
			err := cs.gateway.RPC(conn.RPCAddr(), "SendBlocks", cs.managedReceiveBlocks)
			if err != nil {
				cs.log.Debugln("WARN: failed to get parents of orphan block:", err)
			}
		}()
	}
	if err != nil {
		return err
	}
	cs.managedBroadcastBlock(b)
	return nil
}
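Every handler above decodes peer input through encoding.ReadObject(conn, &obj, maxLen), where the final argument caps how many bytes the peer can force us to read. Below is a minimal standalone sketch of that idea, assuming only the standard library: gob and net.Pipe stand in for Sia's encoding package and a real peer connection, and readObjectCapped is a hypothetical helper, not a Sia function.

package main

import (
	"encoding/gob"
	"fmt"
	"io"
	"net"
)

// readObjectCapped decodes one gob-encoded value from conn, refusing to read
// more than maxLen bytes so a malicious peer cannot force an unbounded read.
// (Illustrative only; Sia's encoding.ReadObject uses its own binary format.)
func readObjectCapped(conn net.Conn, obj interface{}, maxLen int64) error {
	return gob.NewDecoder(io.LimitReader(conn, maxLen)).Decode(obj)
}

func main() {
	client, server := net.Pipe()
	go func() {
		defer client.Close()
		// The "peer" sends a small value.
		_ = gob.NewEncoder(client).Encode("hello")
	}()

	var msg string
	if err := readObjectCapped(server, &msg, 1<<10); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("received:", msg)
}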
Example #2
File: rpc.go Project: pcoindev/Sia
// threadedHandleConn reads header data from a connection, then routes it to the
// appropriate handler for further processing.
func (g *Gateway) threadedHandleConn(conn modules.PeerConn) {
	defer conn.Close()
	var id rpcID
	if err := encoding.ReadObject(conn, &id, 8); err != nil {
		return
	}
	// call registered handler for this ID
	lockid := g.mu.RLock()
	fn, ok := g.handlers[id]
	g.mu.RUnlock(lockid)
	if !ok {
		g.log.Printf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RemoteAddr(), id)
		return
	}

	// call fn
	err := fn(conn)
	// don't log benign errors
	if err == modules.ErrDuplicateTransactionSet || err == modules.ErrBlockKnown {
		err = nil
	}
	if err != nil {
		g.log.Printf("WARN: incoming RPC \"%v\" failed: %v", id, err)
	}
}
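The dispatcher above reads a fixed 8-byte identifier, looks the handler up in a map under a read lock, and logs failures instead of returning them. Here is a small self-contained sketch of the same routing pattern, assuming illustrative types and names throughout (rpcID, dispatcher, and register here are stand-ins, not Sia's gateway types, and the handshake is plain bytes rather than Sia's encoding):

package main

import (
	"fmt"
	"io"
	"log"
	"net"
	"sync"
)

type rpcID [8]byte

type handlerFunc func(net.Conn) error

type dispatcher struct {
	mu       sync.RWMutex
	handlers map[rpcID]handlerFunc
}

// register installs fn as the handler for name, truncating or zero-padding
// the name to the fixed 8-byte identifier width.
func (d *dispatcher) register(name string, fn handlerFunc) {
	var id rpcID
	copy(id[:], name)
	d.mu.Lock()
	if d.handlers == nil {
		d.handlers = make(map[rpcID]handlerFunc)
	}
	d.handlers[id] = fn
	d.mu.Unlock()
}

// handleConn reads the identifier, routes to the registered handler, and logs
// (rather than returns) failures, mirroring threadedHandleConn above.
func (d *dispatcher) handleConn(conn net.Conn) {
	defer conn.Close()
	var id rpcID
	if _, err := io.ReadFull(conn, id[:]); err != nil {
		log.Printf("WARN: could not read RPC identifier: %v", err)
		return
	}
	d.mu.RLock()
	fn, ok := d.handlers[id]
	d.mu.RUnlock()
	if !ok {
		log.Printf("WARN: unknown RPC %q requested", id[:])
		return
	}
	if err := fn(conn); err != nil {
		log.Printf("WARN: RPC %q failed: %v", id[:], err)
	}
}

func main() {
	var d dispatcher
	d.register("Ping", func(conn net.Conn) error {
		_, err := conn.Write([]byte("pong"))
		return err
	})

	client, server := net.Pipe()
	go d.handleConn(server)

	// The caller writes the identifier first, then speaks the RPC's protocol.
	var id rpcID
	copy(id[:], "Ping")
	client.Write(id[:])
	reply := make([]byte, 4)
	io.ReadFull(client, reply)
	fmt.Println(string(reply)) // pong
	client.Close()
}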
Example #3
// requestNodes is the calling end of the ShareNodes RPC.
func (g *Gateway) requestNodes(conn modules.PeerConn) error {
	var nodes []modules.NetAddress
	if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*maxAddrLength); err != nil {
		return err
	}
	g.log.Printf("INFO: %v sent us %v nodes", conn.RemoteAddr(), len(nodes))
	id := g.mu.Lock()
	for _, node := range nodes {
		g.addNode(node)
	}
	g.save()
	g.mu.Unlock(id)
	return nil
}
Example #4
// requestNodes is the calling end of the ShareNodes RPC.
func (g *Gateway) requestNodes(conn modules.PeerConn) error {
	var nodes []modules.NetAddress
	if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength); err != nil {
		return err
	}
	g.mu.Lock()
	for _, node := range nodes {
		err := g.addNode(node)
		if err != nil && err != errNodeExists && err != errOurAddress {
			g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node)
		}
	}
	g.save()
	g.mu.Unlock()
	return nil
}
Example #5
// shareNodes is the receiving end of the ShareNodes RPC. It writes up to 10
// randomly selected nodes to the caller.
func (g *Gateway) shareNodes(conn modules.PeerConn) error {
	conn.SetDeadline(time.Now().Add(connStdDeadline))

	// Assemble a list of nodes to send to the peer.
	var nodes []modules.NetAddress
	func() {
		g.mu.RLock()
		defer g.mu.RUnlock()

		// Create a random permutation of nodes from the gateway to iterate
		// through.
		gnodes := make([]modules.NetAddress, 0, len(g.nodes))
		for node := range g.nodes {
			gnodes = append(gnodes, node)
		}
		perm, err := crypto.Perm(len(g.nodes))
		if err != nil {
			g.log.Severe("Unable to get random permutation for sharing nodes")
		}

		// Iterate through the random permutation of nodes and select the
		// desirable ones.
		remoteNA := modules.NetAddress(conn.RemoteAddr().String())
		for _, i := range perm {
			// Don't share local peers with remote peers. That means that if 'node'
			// is loopback, it will only be shared if the remote peer is also
			// loopback. And if 'node' is private, it will only be shared if the
			// remote peer is either the loopback or is also private.
			node := gnodes[i]
			if node.IsLoopback() && !remoteNA.IsLoopback() {
				continue
			}
			if node.IsLocal() && !remoteNA.IsLocal() {
				continue
			}

			nodes = append(nodes, node)
			if uint64(len(nodes)) == maxSharedNodes {
				break
			}
		}
	}()
	return encoding.WriteObject(conn, nodes)
}
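Examples #3 and #4 are the calling end of the ShareNodes RPC and Example #5 is the receiving end. A minimal sketch of the same two-ended exchange, assuming gob over an in-memory pipe in place of Sia's encoding package and plain strings in place of modules.NetAddress:

package main

import (
	"encoding/gob"
	"fmt"
	"io"
	"net"
)

const (
	maxSharedNodes = 10
	maxAddrLength  = 100
)

// shareNodes is the receiving end: it sends at most maxSharedNodes addresses.
func shareNodes(conn net.Conn, known []string) error {
	if len(known) > maxSharedNodes {
		known = known[:maxSharedNodes]
	}
	return gob.NewEncoder(conn).Encode(known)
}

// requestNodes is the calling end: it reads the list, refusing to read more
// than maxSharedNodes*maxAddrLength bytes from the peer.
func requestNodes(conn net.Conn) ([]string, error) {
	var nodes []string
	limited := io.LimitReader(conn, maxSharedNodes*maxAddrLength)
	if err := gob.NewDecoder(limited).Decode(&nodes); err != nil {
		return nil, err
	}
	return nodes, nil
}

func main() {
	caller, receiver := net.Pipe()
	go func() {
		defer receiver.Close()
		shareNodes(receiver, []string{"10.0.0.1:9981", "10.0.0.2:9981"})
	}()
	nodes, err := requestNodes(caller)
	if err != nil {
		fmt.Println("requestNodes failed:", err)
		return
	}
	fmt.Printf("peer sent us %v nodes: %v\n", len(nodes), nodes)
	caller.Close()
}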
Example #6
// relayBlock is an RPC that accepts a block from a peer.
func (cs *ConsensusSet) relayBlock(conn modules.PeerConn) error {
	// Decode the block from the connection.
	var b types.Block
	err := encoding.ReadObject(conn, &b, types.BlockSizeLimit)
	if err != nil {
		return err
	}

	// Submit the block to the consensus set.
	err = cs.AcceptBlock(b)
	if err == errOrphan {
		// If the block is an orphan, try to find the parents. The block
		// received from the peer is discarded and will be downloaded again if
		// the parent is found.
		go cs.gateway.RPC(modules.NetAddress(conn.RemoteAddr().String()), "SendBlocks", cs.threadedReceiveBlocks)
	}
	if err != nil {
		return err
	}
	return nil
}
Example #7
// requestNodes is the calling end of the ShareNodes RPC.
func (g *Gateway) requestNodes(conn modules.PeerConn) error {
	conn.SetDeadline(time.Now().Add(connStdDeadline))

	var nodes []modules.NetAddress
	if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength); err != nil {
		return err
	}

	g.mu.Lock()
	for _, node := range nodes {
		err := g.addNode(node)
		if err != nil && err != errNodeExists && err != errOurAddress {
			g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node)
		}
	}
	err := g.save()
	if err != nil {
		g.log.Println("WARN: failed to save nodelist after requesting nodes:", err)
	}
	g.mu.Unlock()
	return nil
}
Example #8
// threadedHandleConn reads header data from a connection, then routes it to the
// appropriate handler for further processing.
func (g *Gateway) threadedHandleConn(conn modules.PeerConn) {
	defer conn.Close()
	var id rpcID
	if err := encoding.ReadObject(conn, &id, 8); err != nil {
		g.log.Printf("WARN: could not read RPC identifier from incoming conn %v: %v", conn.RemoteAddr(), err)
		return
	}
	// call registered handler for this ID
	lockid := g.mu.RLock()
	fn, ok := g.handlers[id]
	g.mu.RUnlock(lockid)
	if !ok {
		g.log.Printf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RemoteAddr(), id)
		return
	}

	if err := fn(conn); err != nil {
		g.log.Printf("WARN: incoming RPC \"%v\" failed: %v", id, err)
	}
}
Example #9
// threadedRPCRelayHeader is an RPC that accepts a block header from a peer.
func (cs *ConsensusSet) threadedRPCRelayHeader(conn modules.PeerConn) error {
	err := cs.tg.Add()
	if err != nil {
		return err
	}
	wg := new(sync.WaitGroup)
	defer func() {
		go func() {
			wg.Wait()
			cs.tg.Done()
		}()
	}()

	// Decode the block header from the connection.
	var h types.BlockHeader
	err = encoding.ReadObject(conn, &h, types.BlockHeaderSize)
	if err != nil {
		return err
	}

	// Start verification inside of a bolt View tx.
	cs.mu.RLock()
	err = cs.db.View(func(tx *bolt.Tx) error {
		// Do some relatively inexpensive checks to validate the header
		return cs.validateHeader(boltTxWrapper{tx}, h)
	})
	cs.mu.RUnlock()
	if err == errOrphan {
		// If the header is an orphan, try to find the parents. Call needs to
		// be made in a separate goroutine as execution requires calling an
		// exported gateway method - threadedRPCRelayHeader was likely called
		// from an exported gateway function.
		//
		// NOTE: In general this is bad design. Rather than recycling other
		// calls, the whole protocol should have been kept in a single RPC.
		// Because it is not, we have to do weird threading to prevent
		// deadlocks, and we also have to be concerned every time the code in
		// managedReceiveBlocks is adjusted.
		wg.Add(1)
		go func() {
			err := cs.gateway.RPC(conn.RPCAddr(), "SendBlocks", cs.managedReceiveBlocks)
			if err != nil {
				cs.log.Debugln("WARN: failed to get parents of orphan header:", err)
			}
			wg.Done()
		}()
		return nil
	} else if err != nil {
		return err
	}

	// If the header is valid and extends the heaviest chain, fetch the
	// corresponding block. Call needs to be made in a separate goroutine
	// because an exported call to the gateway is used, which is a deadlock
	// risk given that rpcRelayHeader is called from the gateway.
	//
	// NOTE: In general this is bad design. Rather than recycling other calls,
	// the whole protocol should have been kept in a single RPC. Because it is
	// not, we have to do weird threading to prevent deadlocks, and we also
	// have to be concerned every time the code in managedReceiveBlock is
	// adjusted.
	wg.Add(1)
	go func() {
		err := cs.gateway.RPC(conn.RPCAddr(), "SendBlk", cs.managedReceiveBlock(h.ID()))
		if err != nil {
			cs.log.Debugln("WARN: failed to get header's corresponding block:", err)
		}
		wg.Done()
	}()
	return nil
}
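The defer in the example above withholds cs.tg.Done() until every goroutine spawned by the handler has exited, so the follow-up RPCs cannot outlive the consensus set's shutdown. A small sketch of that accounting trick, assuming a plain sync.WaitGroup as a stand-in for Sia's ThreadGroup:

package main

import (
	"fmt"
	"sync"
	"time"
)

func handler(tg *sync.WaitGroup) error {
	tg.Add(1) // cs.tg.Add() in the real code
	wg := new(sync.WaitGroup)
	defer func() {
		// Release the thread-group slot only after all background work ends.
		go func() {
			wg.Wait()
			tg.Done()
		}()
	}()

	// Background work that must not outlive shutdown.
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(10 * time.Millisecond) // stands in for a follow-up RPC
	}()
	return nil // the handler itself returns right away
}

func main() {
	var tg sync.WaitGroup
	handler(&tg)
	tg.Wait() // shutdown blocks until the background goroutine is done
	fmt.Println("all handler work finished")
}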
Example #10
// managedReceiveBlocks is the calling end of the SendBlocks RPC, without the
// threadgroup wrapping.
func (cs *ConsensusSet) managedReceiveBlocks(conn modules.PeerConn) (returnErr error) {
	// Set a deadline after which SendBlocks will timeout. During IBD, especially,
	// SendBlocks will timeout. This is by design so that IBD switches peers to
	// prevent any one peer from stalling IBD.
	err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout))
	// Ignore errors returned by SetDeadline if the conn is a pipe in testing.
	// Pipes do not support Set{,Read,Write}Deadline and should only be used in
	// testing.
	if opErr, ok := err.(*net.OpError); ok && opErr.Op == "set" && opErr.Net == "pipe" && build.Release == "testing" {
		err = nil
	}
	if err != nil {
		return err
	}
	stalled := true
	defer func() {
		// TODO: Timeout errors returned by muxado do not conform to the net.Error
		// interface and therefore we cannot check if the error is a timeout using
		// the Timeout() method. Once muxado issue #14 is resolved change the below
		// condition to:
		//     if netErr, ok := returnErr.(net.Error); ok && netErr.Timeout() && stalled { ... }
		if stalled && returnErr != nil && (returnErr.Error() == "Read timeout" || returnErr.Error() == "Write timeout") {
			returnErr = errSendBlocksStalled
		}
	}()

	// Get blockIDs to send.
	var history [32]types.BlockID
	cs.mu.RLock()
	err = cs.db.View(func(tx *bolt.Tx) error {
		history = blockHistory(tx)
		return nil
	})
	cs.mu.RUnlock()
	if err != nil {
		return err
	}

	// Send the block ids.
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}

	// Broadcast the last block accepted. This functionality is in a defer to
	// ensure that a block is always broadcast if any blocks are accepted. This
	// is to stop an attacker from preventing block broadcasts.
	chainExtended := false
	defer func() {
		cs.mu.RLock()
		synced := cs.synced
		cs.mu.RUnlock()
		if chainExtended && synced {
			// The last block received will be the current block since
			// managedAcceptBlock only returns nil if a block extends the longest chain.
			currentBlock := cs.managedCurrentBlock()
			// COMPATv0.5.1 - broadcast the block to all peers <= v0.5.1 and block header to all peers > v0.5.1
			var relayBlockPeers, relayHeaderPeers []modules.Peer
			for _, p := range cs.gateway.Peers() {
				if build.VersionCmp(p.Version, "0.5.1") <= 0 {
					relayBlockPeers = append(relayBlockPeers, p)
				} else {
					relayHeaderPeers = append(relayHeaderPeers, p)
				}
			}
			go cs.gateway.Broadcast("RelayBlock", currentBlock, relayBlockPeers)
			go cs.gateway.Broadcast("RelayHeader", currentBlock.Header(), relayHeaderPeers)
		}
	}()

	// Read blocks off of the wire and add them to the consensus set until
	// there are no more blocks available.
	moreAvailable := true
	for moreAvailable {
		// Read a slice of blocks from the wire.
		var newBlocks []types.Block
		if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil {
			return err
		}
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}

		// Integrate the blocks into the consensus set.
		for _, block := range newBlocks {
			stalled = false
			// Call managedAcceptBlock instead of AcceptBlock so as not to broadcast
			// every block.
			acceptErr := cs.managedAcceptBlock(block)
			// Set a flag to indicate that we should broadcast the last block received.
			if acceptErr == nil {
				chainExtended = true
			}
			// ErrNonExtendingBlock must be ignored until headers-first block
			// sharing is implemented, block already in database should also be
			// ignored.
			if acceptErr == modules.ErrNonExtendingBlock || acceptErr == modules.ErrBlockKnown {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}
		}
	}
	return nil
}
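The wire protocol shown above is: send a fixed-size history of recent block IDs, then repeatedly read a batch of blocks followed by a moreAvailable flag until the peer reports it has nothing further to send. A minimal standalone sketch of that loop, assuming gob over an in-memory pipe and strings in place of real blocks:

package main

import (
	"encoding/gob"
	"fmt"
	"net"
)

// receiveBlocks is the calling end: it announces where its chain ends, then
// keeps reading (batch, moreAvailable) pairs until the sender is done.
func receiveBlocks(conn net.Conn, history [3]string) error {
	enc := gob.NewEncoder(conn)
	dec := gob.NewDecoder(conn)

	// Tell the peer where our chain currently ends.
	if err := enc.Encode(history); err != nil {
		return err
	}

	// Read batches until the peer says there is nothing further to send.
	moreAvailable := true
	for moreAvailable {
		var batch []string
		if err := dec.Decode(&batch); err != nil {
			return err
		}
		if err := dec.Decode(&moreAvailable); err != nil {
			return err
		}
		fmt.Println("accepted batch:", batch)
	}
	return nil
}

func main() {
	caller, responder := net.Pipe()
	// The responder side sends two batches, then signals completion.
	go func() {
		defer responder.Close()
		enc := gob.NewEncoder(responder)
		dec := gob.NewDecoder(responder)
		var history [3]string
		dec.Decode(&history)
		enc.Encode([]string{"block4", "block5"})
		enc.Encode(true)
		enc.Encode([]string{"block6"})
		enc.Encode(false)
	}()
	if err := receiveBlocks(caller, [3]string{"block1", "block2", "block3"}); err != nil {
		fmt.Println("receiveBlocks failed:", err)
	}
	caller.Close()
}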