Example #1
// shareNodes is the receiving end of the ShareNodes RPC. It writes up to 10
// randomly selected nodes to the caller.
func (g *Gateway) shareNodes(conn modules.PeerConn) error {
	if err := conn.SetDeadline(time.Now().Add(connStdDeadline)); err != nil {
		return err
	}

	// Assemble a list of nodes to send to the peer.
	var nodes []modules.NetAddress
	func() {
		g.mu.RLock()
		defer g.mu.RUnlock()

		// Create a random permutation of nodes from the gateway to iterate
		// through.
		gnodes := make([]modules.NetAddress, 0, len(g.nodes))
		for node := range g.nodes {
			gnodes = append(gnodes, node)
		}
		perm, err := crypto.Perm(len(g.nodes))
		if err != nil {
			g.log.Severe("Unable to get random permutation for sharing nodes")
		}

		// Iterate through the random permutation of nodes and select the
		// desirable ones.
		remoteNA := modules.NetAddress(conn.RemoteAddr().String())
		for _, i := range perm {
			// Don't share local peers with remote peers. That means that if 'node'
			// is loopback, it will only be shared if the remote peer is also
			// loopback. And if 'node' is private, it will only be shared if the
			// remote peer is either the loopback or is also private.
			node := gnodes[i]
			if node.IsLoopback() && !remoteNA.IsLoopback() {
				continue
			}
			if node.IsLocal() && !remoteNA.IsLocal() {
				continue
			}

			nodes = append(nodes, node)
			if uint64(len(nodes)) == maxSharedNodes {
				break
			}
		}
	}()
	return encoding.WriteObject(conn, nodes)
}
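shareNodes above is the serving half of the exchange; requestNodes in the next example is the dialing half. The sketch below mimics that round trip in a self-contained way, with net.Pipe and encoding/gob standing in for modules.PeerConn and Sia's encoding.WriteObject/ReadObject; the addresses and names are illustrative, not Sia's wire format.

package main

import (
	"encoding/gob"
	"fmt"
	"net"
)

func main() {
	// net.Pipe returns an in-memory, synchronous connection pair, so the
	// serving end must run concurrently with the requesting end.
	server, client := net.Pipe()

	go func() {
		defer server.Close()
		// Serving end: write a bounded list of peer addresses, as
		// shareNodes does with encoding.WriteObject.
		nodes := []string{"10.0.0.1:9981", "10.0.0.2:9981"}
		if err := gob.NewEncoder(server).Encode(nodes); err != nil {
			fmt.Println("encode failed:", err)
		}
	}()

	// Requesting end: decode the list, as requestNodes does with
	// encoding.ReadObject.
	var nodes []string
	if err := gob.NewDecoder(client).Decode(&nodes); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("received nodes:", nodes)
}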
Example #2
// requestNodes is the calling end of the ShareNodes RPC.
func (g *Gateway) requestNodes(conn modules.PeerConn) error {
	if err := conn.SetDeadline(time.Now().Add(connStdDeadline)); err != nil {
		return err
	}

	var nodes []modules.NetAddress
	if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength); err != nil {
		return err
	}

	g.mu.Lock()
	for _, node := range nodes {
		err := g.addNode(node)
		if err != nil && err != errNodeExists && err != errOurAddress {
			g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node)
		}
	}
	err := g.save()
	if err != nil {
		g.log.Println("WARN: failed to save nodelist after requesting nodes:", err)
	}
	g.mu.Unlock()
	return nil
}
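The maxLen argument passed to encoding.ReadObject (maxSharedNodes*modules.MaxEncodedNetAddressLength above) bounds how much a peer may send before decoding; without it, a malicious peer could stream an arbitrarily large payload. The standard-library sketch below shows the same guard, assuming a simple uint64 length prefix on the wire; the prefix format and limits here are illustrative, not a description of Sia's encoding.

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// readPrefixed reads one length-prefixed payload, rejecting any advertised
// length above maxLen before allocating a buffer, which mirrors the defensive
// bound requestNodes passes to encoding.ReadObject.
func readPrefixed(r io.Reader, maxLen uint64) ([]byte, error) {
	var n uint64
	if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
		return nil, err
	}
	if n > maxLen {
		return nil, errors.New("payload exceeds maximum allowed length")
	}
	buf := make([]byte, n)
	_, err := io.ReadFull(r, buf)
	return buf, err
}

func main() {
	var b bytes.Buffer
	binary.Write(&b, binary.LittleEndian, uint64(5))
	b.WriteString("hello")
	payload, err := readPrefixed(&b, 16)
	fmt.Println(string(payload), err)

	// An oversized prefix is rejected before the body is read.
	b.Reset()
	binary.Write(&b, binary.LittleEndian, uint64(1<<40))
	_, err = readPrefixed(&b, 16)
	fmt.Println(err)
}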
Example #3
// managedReceiveBlocks is the calling end of the SendBlocks RPC, without the
// threadgroup wrapping.
func (cs *ConsensusSet) managedReceiveBlocks(conn modules.PeerConn) (returnErr error) {
	// Set a deadline after which SendBlocks will time out. During IBD
	// especially, SendBlocks will time out. This is by design, so that IBD
	// switches peers to prevent any one peer from stalling IBD.
	err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout))
	// Ignore errors returned by SetDeadline if the conn is a pipe in testing.
	// Pipes do not support Set{,Read,Write}Deadline and should only be used in
	// testing.
	if opErr, ok := err.(*net.OpError); ok && opErr.Op == "set" && opErr.Net == "pipe" && build.Release == "testing" {
		err = nil
	}
	if err != nil {
		return err
	}
	stalled := true
	defer func() {
		// TODO: Timeout errors returned by muxado do not conform to the net.Error
		// interface and therefore we cannot check if the error is a timeout using
		// the Timeout() method. Once muxado issue #14 is resolved change the below
		// condition to:
		//     if netErr, ok := returnErr.(net.Error); ok && netErr.Timeout() && stalled { ... }
		if stalled && returnErr != nil && (returnErr.Error() == "Read timeout" || returnErr.Error() == "Write timeout") {
			returnErr = errSendBlocksStalled
		}
	}()

	// Get blockIDs to send.
	var history [32]types.BlockID
	cs.mu.RLock()
	err = cs.db.View(func(tx *bolt.Tx) error {
		history = blockHistory(tx)
		return nil
	})
	cs.mu.RUnlock()
	if err != nil {
		return err
	}

	// Send the block ids.
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}

	// Broadcast the last block accepted. This functionality is in a defer to
	// ensure that a block is always broadcast if any blocks are accepted. This
	// is to stop an attacker from preventing block broadcasts.
	chainExtended := false
	defer func() {
		cs.mu.RLock()
		synced := cs.synced
		cs.mu.RUnlock()
		if chainExtended && synced {
			// The last block received will be the current block since
			// managedAcceptBlock only returns nil if a block extends the longest chain.
			currentBlock := cs.managedCurrentBlock()
			// COMPATv0.5.1 - broadcast the block to all peers <= v0.5.1 and block header to all peers > v0.5.1
			var relayBlockPeers, relayHeaderPeers []modules.Peer
			for _, p := range cs.gateway.Peers() {
				if build.VersionCmp(p.Version, "0.5.1") <= 0 {
					relayBlockPeers = append(relayBlockPeers, p)
				} else {
					relayHeaderPeers = append(relayHeaderPeers, p)
				}
			}
			go cs.gateway.Broadcast("RelayBlock", currentBlock, relayBlockPeers)
			go cs.gateway.Broadcast("RelayHeader", currentBlock.Header(), relayHeaderPeers)
		}
	}()

	// Read blocks off of the wire and add them to the consensus set until
	// there are no more blocks available.
	moreAvailable := true
	for moreAvailable {
		// Read a slice of blocks from the wire.
		var newBlocks []types.Block
		if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil {
			return err
		}
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}

		// Integrate the blocks into the consensus set.
		for _, block := range newBlocks {
			stalled = false
			// Call managedAcceptBlock instead of AcceptBlock so as not to broadcast
			// every block.
			acceptErr := cs.managedAcceptBlock(block)
			// Set a flag to indicate that we should broadcast the last block received.
			if acceptErr == nil {
				chainExtended = true
			}
			// ErrNonExtendingBlock must be ignored until headers-first block
			// sharing is implemented; a block that is already in the database
			// should also be ignored.
			if acceptErr == modules.ErrNonExtendingBlock || acceptErr == modules.ErrBlockKnown {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}
		}
	}
	return nil
}
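managedReceiveBlocks is only the receiving end; its loop expects the sender to alternate a slice of blocks with a moreAvailable flag until the flag is false. The self-contained sketch below shows that framing, with gob and plain strings standing in for Sia's encoding package and types.Block; the sending side here is schematic, not Sia's actual sendBlocks implementation.

package main

import (
	"encoding/gob"
	"fmt"
	"net"
)

func main() {
	send, recv := net.Pipe()

	// Sending side (schematic): each iteration writes one batch of blocks
	// followed by a flag saying whether more batches remain.
	go func() {
		defer send.Close()
		enc := gob.NewEncoder(send)
		batches := [][]string{{"block1", "block2"}, {"block3"}}
		for i, batch := range batches {
			if err := enc.Encode(batch); err != nil {
				return
			}
			if err := enc.Encode(i != len(batches)-1); err != nil {
				return
			}
		}
	}()

	// Receiving side: the same loop shape as managedReceiveBlocks.
	dec := gob.NewDecoder(recv)
	moreAvailable := true
	for moreAvailable {
		var batch []string
		if err := dec.Decode(&batch); err != nil {
			fmt.Println("read batch:", err)
			return
		}
		if err := dec.Decode(&moreAvailable); err != nil {
			fmt.Println("read flag:", err)
			return
		}
		fmt.Println("accepted batch:", batch)
	}
}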