func TestShareNodes(t *testing.T) { g1 := newTestingGateway("TestShareNodes1", t) defer g1.Close() g2 := newTestingGateway("TestShareNodes2", t) defer g2.Close() // add a node to g2 id := g2.mu.Lock() g2.addNode(dummyNode) g2.mu.Unlock(id) // connect err := g1.Connect(g2.Address()) if err != nil { t.Fatal("couldn't connect:", err) } // g1 should have received the node time.Sleep(100 * time.Millisecond) id = g1.mu.Lock() if g1.addNode(dummyNode) == nil { t.Fatal("gateway did not receive nodes during Connect:", g1.nodes) } g1.mu.Unlock(id) // remove all nodes from both peers id = g1.mu.Lock() g1.nodes = map[modules.NetAddress]struct{}{} g1.mu.Unlock(id) id = g2.mu.Lock() g2.nodes = map[modules.NetAddress]struct{}{} g2.mu.Unlock(id) // SharePeers should now return no peers var nodes []modules.NetAddress err = g1.RPC(g2.Address(), "ShareNodes", func(conn modules.PeerConn) error { return encoding.ReadObject(conn, &nodes, maxSharedNodes*maxAddrLength) }) if err != nil { t.Fatal(err) } if len(nodes) != 0 { t.Fatal("gateway gave non-existent addresses:", nodes) } // sharing should be capped at maxSharedNodes for i := 0; i < maxSharedNodes+10; i++ { g2.addNode(modules.NetAddress("111.111.111.111:" + strconv.Itoa(i))) } err = g1.RPC(g2.Address(), "ShareNodes", func(conn modules.PeerConn) error { return encoding.ReadObject(conn, &nodes, maxSharedNodes*maxAddrLength) }) if err != nil { t.Fatal(err) } if len(nodes) != maxSharedNodes { t.Fatalf("gateway gave wrong number of nodes: expected %v, got %v", maxSharedNodes, len(nodes)) } }
// receiveBlocks is the calling end of the SendBlocks RPC.
//
// Wire protocol: send our recent block-ID history, then repeatedly read a
// batch of blocks followed by a moreAvailable flag until the remote reports
// that no more blocks are available. Each received block is integrated into
// the consensus set under the write lock.
func (s *ConsensusSet) receiveBlocks(conn modules.PeerConn) error {
	// get blockIDs to send
	lockID := s.mu.RLock()
	if !s.db.open {
		s.mu.RUnlock(lockID)
		return errors.New("database not open")
	}
	history := s.blockHistory()
	s.mu.RUnlock(lockID)
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}

	// loop until no more blocks are available
	moreAvailable := true
	for moreAvailable {
		var newBlocks []types.Block
		if err := encoding.ReadObject(conn, &newBlocks, MaxCatchUpBlocks*types.BlockSizeLimit); err != nil {
			return err
		}
		// moreAvailable is a single encoded boolean (1 byte max).
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}

		// integrate received blocks.
		for _, block := range newBlocks {
			// Blocks received during synchronize aren't trusted; activate full
			// verification.
			lockID := s.mu.Lock()
			// Re-check db.open under the write lock; the database may have
			// been closed between iterations.
			if !s.db.open {
				s.mu.Unlock(lockID)
				return errors.New("database not open")
			}
			acceptErr := s.acceptBlock(block)
			s.mu.Unlock(lockID)
			// ErrNonExtendingBlock must be ignored until headers-first block
			// sharing is implemented.
			if acceptErr == modules.ErrNonExtendingBlock {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}

			// Yield the processor to give other processes time to grab a lock.
			// The Lock/Unlock cycle in this loop is very tight, and has been
			// known to prevent interrupts from getting lock access quickly.
			runtime.Gosched()
		}
	}
	return nil
}
// managedRPCRecentRevision sends the most recent known file contract
// revision, including signatures, to the renter, for the file contract with
// the input id.
//
// Protocol: read the contract id, send a random challenge, read the renter's
// signed response, verify it, then send acceptance followed by the revision
// and its signatures. On verification failure a negotiation rejection is
// written to the renter instead.
func (h *Host) managedRPCRecentRevision(conn net.Conn) (types.FileContractID, *storageObligation, error) {
	// Set the negotiation deadline.
	conn.SetDeadline(time.Now().Add(modules.NegotiateRecentRevisionTime))

	// Receive the file contract id from the renter.
	var fcid types.FileContractID
	err := encoding.ReadObject(conn, &fcid, uint64(len(fcid)))
	if err != nil {
		return types.FileContractID{}, nil, err
	}

	// Send a challenge to the renter to verify that the renter has write
	// access to the revision being opened.
	var challenge crypto.Hash
	_, err = rand.Read(challenge[:])
	if err != nil {
		return types.FileContractID{}, nil, err
	}
	err = encoding.WriteObject(conn, challenge)
	if err != nil {
		return types.FileContractID{}, nil, err
	}

	// Read the signed response from the renter.
	var challengeResponse crypto.Signature
	err = encoding.ReadObject(conn, &challengeResponse, uint64(len(challengeResponse)))
	if err != nil {
		return types.FileContractID{}, nil, err
	}
	// Verify the response. In the process, fetch the related storage
	// obligation, file contract revision, and transaction signatures.
	so, recentRevision, revisionSigs, err := h.verifyChallengeResponse(fcid, challenge, challengeResponse)
	if err != nil {
		// Report the failure to the renter as a negotiation rejection.
		return types.FileContractID{}, nil, modules.WriteNegotiationRejection(conn, err)
	}

	// Send the file contract revision and the corresponding signatures to the
	// renter.
	err = modules.WriteNegotiationAcceptance(conn)
	if err != nil {
		return types.FileContractID{}, nil, err
	}
	err = encoding.WriteObject(conn, recentRevision)
	if err != nil {
		return types.FileContractID{}, nil, err
	}
	err = encoding.WriteObject(conn, revisionSigs)
	if err != nil {
		return types.FileContractID{}, nil, err
	}
	return fcid, so, nil
}
// negotiateRevision sends the revision and new piece data to the host.
// It returns the revision transaction as signed by the host, or an error if
// any step of the exchange fails.
func negotiateRevision(conn net.Conn, rev types.FileContractRevision, piece []byte, secretKey crypto.SecretKey) (types.Transaction, error) {
	conn.SetDeadline(time.Now().Add(5 * time.Minute)) // sufficient to transfer 4 MB over 100 kbps
	defer conn.SetDeadline(time.Time{})               // reset timeout after each revision
	// create transaction containing the revision
	signedTxn := types.Transaction{
		FileContractRevisions: []types.FileContractRevision{rev},
		TransactionSignatures: []types.TransactionSignature{{
			ParentID:       crypto.Hash(rev.ParentID),
			CoveredFields:  types.CoveredFields{FileContractRevisions: []uint64{0}},
			PublicKeyIndex: 0, // renter key is always first -- see negotiateContract
		}},
	}
	// sign the transaction
	encodedSig, _ := crypto.SignHash(signedTxn.SigHash(0), secretKey) // no error possible
	signedTxn.TransactionSignatures[0].Signature = encodedSig[:]
	// send the transaction
	if err := encoding.WriteObject(conn, signedTxn); err != nil {
		return types.Transaction{}, errors.New("couldn't send revision transaction: " + err.Error())
	}
	// host sends acceptance
	var response string
	if err := encoding.ReadObject(conn, &response, 128); err != nil {
		return types.Transaction{}, errors.New("couldn't read host acceptance: " + err.Error())
	}
	if response != modules.AcceptResponse {
		return types.Transaction{}, errors.New("host rejected revision: " + response)
	}
	// transfer piece
	if _, err := conn.Write(piece); err != nil {
		return types.Transaction{}, errors.New("couldn't transfer piece: " + err.Error())
	}
	// read txn signed by host
	var signedHostTxn types.Transaction
	if err := encoding.ReadObject(conn, &signedHostTxn, types.BlockSizeLimit); err != nil {
		return types.Transaction{}, errors.New("couldn't read signed revision transaction: " + err.Error())
	}
	// NOTE(review): this comparison assumes the transaction ID is unaffected
	// by the host's added signature -- confirm against types.Transaction.ID.
	if signedHostTxn.ID() != signedTxn.ID() {
		return types.Transaction{}, errors.New("host sent bad signed transaction")
	}
	return signedHostTxn, nil
}
// TestNegotiateRevisionStopResponse tests that when the host sends
// StopResponse, the renter continues processing the revision instead of
// immediately terminating.
func TestNegotiateRevisionStopResponse(t *testing.T) {
	// simulate a renter-host connection
	rConn, hConn := net.Pipe()

	// handle the host's half of the pipe; each step mirrors one step of
	// negotiateRevision on the renter side.
	go func() {
		defer hConn.Close()
		// read revision
		encoding.ReadObject(hConn, new(types.FileContractRevision), 1<<22)
		// write acceptance
		modules.WriteNegotiationAcceptance(hConn)
		// read txn signature
		encoding.ReadObject(hConn, new(types.TransactionSignature), 1<<22)
		// write StopResponse
		modules.WriteNegotiationStop(hConn)
		// write txn signature
		encoding.WriteObject(hConn, types.TransactionSignature{})
	}()

	// since the host wrote StopResponse, we should proceed to validating the
	// transaction. This will return a known error because we are supplying an
	// empty revision.
	_, err := negotiateRevision(rConn, types.FileContractRevision{}, crypto.SecretKey{})
	if err != types.ErrFileContractWindowStartViolation {
		t.Fatalf("expected %q, got \"%v\"", types.ErrFileContractWindowStartViolation, err)
	}
	rConn.Close()

	// same as above, but send an error instead of StopResponse. The error
	// should be returned by negotiateRevision immediately (if it is not, we
	// should expect to see a transaction validation error instead).
	rConn, hConn = net.Pipe()
	go func() {
		defer hConn.Close()
		encoding.ReadObject(hConn, new(types.FileContractRevision), 1<<22)
		modules.WriteNegotiationAcceptance(hConn)
		encoding.ReadObject(hConn, new(types.TransactionSignature), 1<<22)
		// write a sentinel error
		modules.WriteNegotiationRejection(hConn, errors.New("sentinel"))
		encoding.WriteObject(hConn, types.TransactionSignature{})
	}()
	expectedErr := "host did not accept transaction signature: sentinel"
	_, err = negotiateRevision(rConn, types.FileContractRevision{}, crypto.SecretKey{})
	if err == nil || err.Error() != expectedErr {
		t.Fatalf("expected %q, got \"%v\"", expectedErr, err)
	}
	rConn.Close()
}
// rpcSendBlk is an RPC that sends the requested block to the requesting peer. func (cs *ConsensusSet) rpcSendBlk(conn modules.PeerConn) error { err := cs.tg.Add() if err != nil { return err } defer cs.tg.Done() // Decode the block id from the connection. var id types.BlockID err = encoding.ReadObject(conn, &id, crypto.HashSize) if err != nil { return err } // Lookup the corresponding block. var b types.Block cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { pb, err := getBlockMap(tx, id) if err != nil { return err } b = pb.Block return nil }) cs.mu.RUnlock() if err != nil { return err } // Encode and send the block to the caller. err = encoding.WriteObject(conn, b) if err != nil { return err } return nil }
// TODO: maintain compatibility func (h *Host) handleConn(conn net.Conn) { defer conn.Close() // Set an initial duration that is generous, but finite. RPCs can extend // this if so desired. conn.SetDeadline(time.Now().Add(5 * time.Minute)) var id types.Specifier if err := encoding.ReadObject(conn, &id, 16); err != nil { return } var err error switch id { case modules.RPCSettings: err = h.rpcSettings(conn) case modules.RPCUpload: err = h.rpcUpload(conn) case modules.RPCRevise: err = h.rpcRevise(conn) case modules.RPCDownload: err = h.rpcDownload(conn) default: h.log.Printf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RemoteAddr(), id) return } if err != nil { h.log.Printf("WARN: incoming RPC \"%v\" failed: %v", id, err) } }
// threadedHandleConn reads header data from a connection, then routes it to the // appropriate handler for further processing. func (g *Gateway) threadedHandleConn(conn modules.PeerConn) { defer conn.Close() var id rpcID if err := encoding.ReadObject(conn, &id, 8); err != nil { return } // call registered handler for this ID lockid := g.mu.RLock() fn, ok := g.handlers[id] g.mu.RUnlock(lockid) if !ok { g.log.Printf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RemoteAddr(), id) return } // call fn err := fn(conn) // don't log benign errors if err == modules.ErrDuplicateTransactionSet || err == modules.ErrBlockKnown { err = nil } if err != nil { g.log.Printf("WARN: incoming RPC \"%v\" failed: %v", id, err) } }
// rpcRetrieve is an RPC that uploads a specified file to a client. // // Mutexes are applied carefully to avoid locking during I/O. All necessary // interaction with the host involves looking up the filepath of the file being // requested. This is done all at once. func (h *Host) rpcRetrieve(conn net.Conn) error { // Get the filename. var contractID types.FileContractID err := encoding.ReadObject(conn, &contractID, crypto.HashSize) if err != nil { return err } // Verify the file exists, using a mutex while reading the host. lockID := h.mu.RLock() contractObligation, exists := h.obligationsByID[contractID] if !exists { h.mu.RUnlock(lockID) return errors.New("no record of that file") } path := filepath.Join(h.saveDir, contractObligation.Path) h.mu.RUnlock(lockID) // Open the file. file, err := os.Open(path) if err != nil { return err } defer file.Close() // Transmit the file. _, err = io.CopyN(conn, file, int64(contractObligation.FileContract.FileSize)) if err != nil { return err } return nil }
// rpcRelayBlock is an RPC that accepts a block from a peer.
// COMPATv0.5.1
func (cs *ConsensusSet) rpcRelayBlock(conn modules.PeerConn) error {
	err := cs.tg.Add()
	if err != nil {
		return err
	}
	defer cs.tg.Done()

	// Decode the block from the connection.
	var b types.Block
	err = encoding.ReadObject(conn, &b, types.BlockSizeLimit)
	if err != nil {
		return err
	}

	// Submit the block to the consensus set and broadcast it.
	err = cs.managedAcceptBlock(b)
	if err == errOrphan {
		// If the block is an orphan, try to find the parents. The block
		// received from the peer is discarded and will be downloaded again if
		// the parent is found.
		// NOTE: errOrphan is still returned from this RPC below after the
		// background fetch is launched.
		go func() {
			err := cs.gateway.RPC(conn.RPCAddr(), "SendBlocks", cs.managedReceiveBlocks)
			if err != nil {
				cs.log.Debugln("WARN: failed to get parents of orphan block:", err)
			}
		}()
	}
	if err != nil {
		return err
	}
	// Only broadcast blocks that were accepted without error.
	cs.managedBroadcastBlock(b)
	return nil
}
// rpcDownload is an RPC that uploads requested segments of a file. After the
// RPC has been initiated, the host will read and process requests in a loop
// until the 'stop' signal is received or the connection times out.
func (h *Host) rpcDownload(conn net.Conn) error {
	// Read the contract ID.
	var contractID types.FileContractID
	err := encoding.ReadObject(conn, &contractID, crypto.HashSize)
	if err != nil {
		return err
	}
	// Verify the file exists, using a mutex while reading the host.
	h.mu.RLock()
	co, exists := h.obligationsByID[contractID]
	if !exists {
		h.mu.RUnlock()
		return errors.New("no record of that file")
	}
	h.mu.RUnlock()

	// Open the file.
	file, err := os.Open(co.Path)
	if err != nil {
		return err
	}
	defer file.Close()

	// Process requests until 'stop' signal is received.
	var request modules.DownloadRequest
	for {
		// Each request is an (offset, length) pair; 16 is the max encoded size.
		if err := encoding.ReadObject(conn, &request, 16); err != nil {
			return err
		}
		// Check for termination signal.
		// TODO: perform other sanity checks on offset/length?
		if request.Length == 0 {
			break
		}

		// Extend the deadline for each segment served.
		conn.SetDeadline(time.Now().Add(5 * time.Minute)) // sufficient to transfer 4 MB over 100 kbps

		// Write segment to conn.
		segment := io.NewSectionReader(file, int64(request.Offset), int64(request.Length))
		_, err := io.Copy(conn, segment)
		if err != nil {
			return err
		}
	}
	return nil
}
// acceptConn adds a connecting node as a peer.
//
// The handshake exchanges versions; if the peer's version is acceptable, the
// peer is added, possibly after evicting an existing peer to stay under
// fullyConnectedThreshold.
func (g *Gateway) acceptConn(conn net.Conn) {
	addr := modules.NetAddress(conn.RemoteAddr().String())
	g.log.Printf("INFO: %v wants to connect", addr)

	// read version
	var remoteVersion string
	if err := encoding.ReadObject(conn, &remoteVersion, maxAddrLength); err != nil {
		conn.Close()
		g.log.Printf("INFO: %v wanted to connect, but we could not read their version: %v", addr, err)
		return
	}
	// check that version is acceptable
	// NOTE: this version must be bumped whenever the gateway or consensus
	// breaks compatibility.
	if build.VersionCmp(remoteVersion, "0.3.3") < 0 {
		encoding.WriteObject(conn, "reject")
		conn.Close()
		g.log.Printf("INFO: %v wanted to connect, but their version (%v) was unacceptable", addr, remoteVersion)
		return
	}
	// respond with our version
	if err := encoding.WriteObject(conn, build.Version); err != nil {
		conn.Close()
		g.log.Printf("INFO: could not write version ack to %v: %v", addr, err)
		return
	}

	// If we are already fully connected, kick out an old peer to make room
	// for the new one. Importantly, prioritize kicking a peer with the same
	// IP as the connecting peer. This protects against Sybil attacks.
	id := g.mu.Lock()
	if len(g.peers) >= fullyConnectedThreshold {
		// first choose a random peer, preferably inbound. If have only
		// outbound peers, we'll wind up kicking an outbound peer; but
		// subsequent inbound connections will kick each other instead of
		// continuing to replace outbound peers.
		kick, err := g.randomInboundPeer()
		if err != nil {
			kick, _ = g.randomPeer()
		}
		// if another peer shares this IP, choose that one instead
		for p := range g.peers {
			if p.Host() == addr.Host() {
				kick = p
				break
			}
		}
		g.peers[kick].sess.Close()
		delete(g.peers, kick)
		g.log.Printf("INFO: disconnected from %v to make room for %v", kick, addr)
	}
	// add the peer
	g.addPeer(&peer{addr: addr, sess: muxado.Server(conn), inbound: true})
	g.mu.Unlock(id)

	g.log.Printf("INFO: accepted connection from new peer %v (v%v)", addr, remoteVersion)
}
// relayTransactionSet is an RPC that accepts a transaction set from a peer. If // the accept is successful, the transaction will be relayed to the gateway's // other peers. func (tp *TransactionPool) relayTransactionSet(conn modules.PeerConn) error { var ts []types.Transaction err := encoding.ReadObject(conn, &ts, types.BlockSizeLimit) if err != nil { return err } return tp.AcceptTransactionSet(ts) }
// threadedReceiveBlocks is the calling end of the SendBlocks RPC.
//
// Wire protocol: send our 32-entry block-ID history, then repeatedly read a
// batch of blocks followed by a moreAvailable flag until the remote reports
// no more blocks.
func (cs *ConsensusSet) threadedReceiveBlocks(conn modules.PeerConn) error {
	// Get blockIDs to send.
	var history [32]types.BlockID
	err := cs.db.View(func(tx *bolt.Tx) error {
		history = blockHistory(tx)
		return nil
	})
	if err != nil {
		return err
	}

	// Send the block ids.
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}

	// Read blocks off of the wire and add them to the consensus set until
	// there are no more blocks available.
	moreAvailable := true
	for moreAvailable {
		// Read a slice of blocks from the wire.
		var newBlocks []types.Block
		if err := encoding.ReadObject(conn, &newBlocks, MaxCatchUpBlocks*types.BlockSizeLimit); err != nil {
			return err
		}
		// moreAvailable is a single encoded boolean (1 byte max).
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}

		// Integrate the blocks into the consensus set.
		for _, block := range newBlocks {
			acceptErr := cs.AcceptBlock(block)
			// ErrNonExtendingBlock must be ignored until headers-first block
			// sharing is implemented, block already in database should also be
			// ignored.
			if acceptErr == modules.ErrNonExtendingBlock || acceptErr == modules.ErrBlockKnown {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}
		}
	}
	return nil
}
// managedRPCRenew is an RPC that allows a renter to renew a file contract. The // protocol is identical to standard contract negotiation, except that the // Merkle root is copied over from the old contract. func (h *Host) managedRPCRenew(conn net.Conn) error { // Terminate connection if host is not accepting contracts. h.mu.RLock() accepting := h.settings.AcceptingContracts h.mu.RUnlock() if !accepting { return nil } // read ID of contract to be renewed var fcid types.FileContractID if err := encoding.ReadObject(conn, &fcid, crypto.HashSize); err != nil { return errors.New("couldn't read contract ID: " + err.Error()) } h.mu.RLock() obligation, exists := h.obligationsByID[fcid] h.mu.RUnlock() if !exists { return errors.New("no record of that contract") } // need to protect against simultaneous renewals of the same contract obligation.mu.Lock() defer obligation.mu.Unlock() // copy over old file data h.mu.RLock() h.fileCounter++ filename := filepath.Join(h.persistDir, strconv.Itoa(int(h.fileCounter))) h.mu.RUnlock() // TODO: What happens if the copy operation fails partway through? Does // there need to be garbage collection at startup for failed uploads that // might still be on disk? old, err := os.Open(obligation.Path) if err != nil { return err } renewed, err := os.Create(filename) if err != nil { return err } _, err = io.Copy(renewed, old) if err != nil { return err } err = h.managedNegotiateContract(conn, obligation.fileSize(), obligation.merkleRoot(), filename) if err != nil { // Negotiation failed, delete the copied file. err2 := os.Remove(filename) if err2 != nil { return errors.New(err.Error() + " and " + err2.Error()) } return err } return nil }
// TestThreadedHandleConn exercises the gateway's RPC routing with a bad rpcID,
// an unknown rpcID, and a valid rpcID. The RPCs are issued over a raw muxado
// stream so that the rpcID framing can be controlled directly.
func TestThreadedHandleConn(t *testing.T) {
	g1 := newTestingGateway("TestThreadedHandleConn1", t)
	defer g1.Close()
	g2 := newTestingGateway("TestThreadedHandleConn2", t)
	defer g2.Close()

	err := g1.Connect(g2.Address())
	if err != nil {
		t.Fatal("failed to connect:", err)
	}
	g2.RegisterRPC("Foo", func(conn modules.PeerConn) error {
		var i uint64
		err := encoding.ReadObject(conn, &i, 8)
		if err != nil {
			return err
		} else if i == 0xdeadbeef {
			return encoding.WriteObject(conn, "foo")
		} else {
			return encoding.WriteObject(conn, "bar")
		}
	})

	// custom rpc fn (doesn't automatically write rpcID)
	// NOTE(review): g1.peers is read here without holding g1's mutex --
	// presumably safe in this test, but verify against the Gateway's locking
	// conventions.
	rpcFn := func(fn func(modules.PeerConn) error) error {
		conn, err := g1.peers[g2.Address()].open()
		if err != nil {
			return err
		}
		defer conn.Close()
		return fn(conn)
	}

	// bad rpcID
	err = rpcFn(func(conn modules.PeerConn) error {
		return encoding.WriteObject(conn, [3]byte{1, 2, 3})
	})
	if err != nil {
		t.Fatal("rpcFn failed:", err)
	}

	// unknown rpcID
	err = rpcFn(func(conn modules.PeerConn) error {
		return encoding.WriteObject(conn, handlerName("bar"))
	})
	if err != nil {
		t.Fatal("rpcFn failed:", err)
	}

	// valid rpcID
	err = rpcFn(func(conn modules.PeerConn) error {
		return encoding.WriteObject(conn, handlerName("Foo"))
	})
	if err != nil {
		t.Fatal("rpcFn failed:", err)
	}
}
// Connect establishes a persistent connection to a peer, and adds it to the // Gateway's peer list. func (g *Gateway) Connect(addr modules.NetAddress) error { if addr == g.Address() { return errors.New("can't connect to our own address") } id := g.mu.RLock() _, exists := g.peers[addr] g.mu.RUnlock(id) if exists { return errors.New("peer already added") } conn, err := net.DialTimeout("tcp", string(addr), dialTimeout) if err != nil { return err } // send our version if err := encoding.WriteObject(conn, "0.3.3"); err != nil { return err } // read version ack var remoteVersion string if err := encoding.ReadObject(conn, &remoteVersion, maxAddrLength); err != nil { return err } else if remoteVersion == "reject" { return errors.New("peer rejected connection") } // decide whether to accept this version if build.VersionCmp(remoteVersion, "0.3.3") < 0 { conn.Close() return errors.New("unacceptable version: " + remoteVersion) } g.log.Println("INFO: connected to new peer", addr) id = g.mu.Lock() g.addPeer(&peer{addr: addr, sess: muxado.Client(conn), inbound: false}) g.mu.Unlock(id) // call initRPCs id = g.mu.RLock() var wg sync.WaitGroup wg.Add(len(g.initRPCs)) for name, fn := range g.initRPCs { go func(name string, fn modules.RPCFunc) { // errors here are non-fatal g.RPC(addr, name, fn) wg.Done() }(name, fn) } g.mu.RUnlock(id) wg.Wait() return nil }
// verifyRecentRevision confirms that the host and contractor agree upon the current
// state of the contract being revised.
//
// Protocol: send the contract ID, read the host's challenge, sign and return
// it, read acceptance, then read and verify the host's copy of the last
// revision against ours.
func verifyRecentRevision(conn net.Conn, contract modules.RenterContract) error {
	// send contract ID
	if err := encoding.WriteObject(conn, contract.ID); err != nil {
		return errors.New("couldn't send contract ID: " + err.Error())
	}
	// read challenge
	var challenge crypto.Hash
	if err := encoding.ReadObject(conn, &challenge, 32); err != nil {
		return errors.New("couldn't read challenge: " + err.Error())
	}
	// sign and return
	sig, err := crypto.SignHash(challenge, contract.SecretKey)
	if err != nil {
		return err
	} else if err := encoding.WriteObject(conn, sig); err != nil {
		return errors.New("couldn't send challenge response: " + err.Error())
	}
	// read acceptance
	if err := modules.ReadNegotiationAcceptance(conn); err != nil {
		return errors.New("host did not accept revision request: " + err.Error())
	}

	// read last revision and signatures
	var lastRevision types.FileContractRevision
	var hostSignatures []types.TransactionSignature
	if err := encoding.ReadObject(conn, &lastRevision, 2048); err != nil {
		return errors.New("couldn't read last revision: " + err.Error())
	}
	if err := encoding.ReadObject(conn, &hostSignatures, 2048); err != nil {
		return errors.New("couldn't read host signatures: " + err.Error())
	}

	// Check that the unlock hashes match; if they do not, something is
	// seriously wrong. Otherwise, check that the revision numbers match.
	if lastRevision.UnlockConditions.UnlockHash() != contract.LastRevision.UnlockConditions.UnlockHash() {
		return errors.New("unlock conditions do not match")
	} else if lastRevision.NewRevisionNumber != contract.LastRevision.NewRevisionNumber {
		// A revision-number mismatch is recoverable; report both numbers.
		return &recentRevisionError{contract.LastRevision.NewRevisionNumber, lastRevision.NewRevisionNumber}
	}
	// NOTE: we can fake the blockheight here because it doesn't affect
	// verification; it just needs to be above the fork height and below the
	// contract expiration (which was checked earlier).
	return modules.VerifyFileContractRevisionTransactionSignatures(lastRevision, hostSignatures, contract.FileContract.WindowStart-1)
}
// receiveBlocks is the calling end of the SendBlocks RPC.
//
// Wire protocol: send our block-ID history, then repeatedly read a batch of
// blocks followed by a moreAvailable flag until the remote reports no more
// blocks.
func (s *State) receiveBlocks(conn modules.PeerConn) error {
	// get blockIDs to send
	lockID := s.mu.RLock()
	history := s.blockHistory()
	s.mu.RUnlock(lockID)
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}

	// loop until no more blocks are available
	moreAvailable := true
	for moreAvailable {
		var newBlocks []types.Block
		if err := encoding.ReadObject(conn, &newBlocks, MaxCatchUpBlocks*types.BlockSizeLimit); err != nil {
			return err
		}
		// moreAvailable is a single encoded boolean (1 byte max).
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}

		// integrate received blocks.
		for _, block := range newBlocks {
			// Blocks received during synchronize aren't trusted; activate full
			// verification.
			lockID := s.mu.Lock()
			s.verificationRigor = fullVerification
			acceptErr := s.acceptBlock(block)
			s.mu.Unlock(lockID)
			// these errors are benign
			if acceptErr == modules.ErrNonExtendingBlock || acceptErr == ErrBlockKnown {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}
		}
	}
	return nil
}
// negotiateRevision sends a revision and actions to the host for approval,
// completing one iteration of the revision loop. It returns the fully signed
// revision transaction; if the host responded with ErrStopResponse, that
// error is returned alongside the (still valid) transaction.
func negotiateRevision(conn net.Conn, rev types.FileContractRevision, secretKey crypto.SecretKey) (types.Transaction, error) {
	// create transaction containing the revision
	signedTxn := types.Transaction{
		FileContractRevisions: []types.FileContractRevision{rev},
		TransactionSignatures: []types.TransactionSignature{{
			ParentID:       crypto.Hash(rev.ParentID),
			CoveredFields:  types.CoveredFields{FileContractRevisions: []uint64{0}},
			PublicKeyIndex: 0, // renter key is always first -- see formContract
		}},
	}
	// sign the transaction
	encodedSig, _ := crypto.SignHash(signedTxn.SigHash(0), secretKey) // no error possible
	signedTxn.TransactionSignatures[0].Signature = encodedSig[:]

	// send the revision
	if err := encoding.WriteObject(conn, rev); err != nil {
		return types.Transaction{}, errors.New("couldn't send revision: " + err.Error())
	}
	// read acceptance
	if err := modules.ReadNegotiationAcceptance(conn); err != nil {
		return types.Transaction{}, errors.New("host did not accept revision: " + err.Error())
	}

	// send the new transaction signature
	if err := encoding.WriteObject(conn, signedTxn.TransactionSignatures[0]); err != nil {
		return types.Transaction{}, errors.New("couldn't send transaction signature: " + err.Error())
	}
	// read the host's acceptance and transaction signature
	// NOTE: if the host sends ErrStopResponse, we should continue processing
	// the revision, but return the error anyway.
	responseErr := modules.ReadNegotiationAcceptance(conn)
	if responseErr != nil && responseErr != modules.ErrStopResponse {
		return types.Transaction{}, errors.New("host did not accept transaction signature: " + responseErr.Error())
	}
	var hostSig types.TransactionSignature
	if err := encoding.ReadObject(conn, &hostSig, 16e3); err != nil {
		return types.Transaction{}, errors.New("couldn't read host's signature: " + err.Error())
	}

	// add the signature to the transaction and verify it
	// NOTE: we can fake the blockheight here because it doesn't affect
	// verification; it just needs to be above the fork height and below the
	// contract expiration (which was checked earlier).
	verificationHeight := rev.NewWindowStart - 1
	signedTxn.TransactionSignatures = append(signedTxn.TransactionSignatures, hostSig)
	if err := signedTxn.StandaloneValid(verificationHeight); err != nil {
		return types.Transaction{}, err
	}

	// if the host sent ErrStopResponse, return it
	return signedTxn, responseErr
}
// requestNodes is the calling end of the ShareNodes RPC. func (g *Gateway) requestNodes(conn modules.PeerConn) error { var nodes []modules.NetAddress if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*maxAddrLength); err != nil { return err } id := g.mu.Lock() for _, node := range nodes { g.addNode(node) } g.save() g.mu.Unlock(id) return nil }
// threadedHandleConn handles an incoming connection to the host, typically an
// RPC. Per-RPC call counters and error counters are maintained atomically.
func (h *Host) threadedHandleConn(conn net.Conn) {
	h.resourceLock.RLock()
	defer h.resourceLock.RUnlock()
	if h.closed {
		// Host is shutting down; drop the connection.
		return
	}

	// Set an initial duration that is generous, but finite. RPCs can extend
	// this if desired.
	err := conn.SetDeadline(time.Now().Add(5 * time.Minute))
	if err != nil {
		h.log.Println("WARN: could not set deadline on connection:", err)
		return
	}
	defer conn.Close()

	// Read a specifier indicating which action is being called.
	var id types.Specifier
	if err := encoding.ReadObject(conn, &id, 16); err != nil {
		atomic.AddUint64(&h.atomicUnrecognizedCalls, 1)
		atomic.AddUint64(&h.atomicErroredCalls, 1)
		h.log.Printf("WARN: incoming conn %v was malformed", conn.RemoteAddr())
		return
	}

	// Route to the matching RPC handler, bumping its call counter.
	switch id {
	case modules.RPCDownload:
		atomic.AddUint64(&h.atomicDownloadCalls, 1)
		err = h.managedRPCDownload(conn)
	case modules.RPCRenew:
		atomic.AddUint64(&h.atomicRenewCalls, 1)
		err = h.managedRPCRenew(conn)
	case modules.RPCRevise:
		atomic.AddUint64(&h.atomicReviseCalls, 1)
		err = h.managedRPCRevise(conn)
	case modules.RPCSettings:
		atomic.AddUint64(&h.atomicSettingsCalls, 1)
		err = h.managedRPCSettings(conn)
	case modules.RPCUpload:
		atomic.AddUint64(&h.atomicUploadCalls, 1)
		err = h.managedRPCUpload(conn)
	default:
		atomic.AddUint64(&h.atomicErroredCalls, 1)
		h.log.Printf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RemoteAddr(), id)
		return
	}
	if err != nil {
		atomic.AddUint64(&h.atomicErroredCalls, 1)
		h.log.Printf("WARN: incoming RPC \"%v\" failed: %v", id, err)
	}
}
// requestNodes is the calling end of the ShareNodes RPC. func (g *Gateway) requestNodes(conn modules.PeerConn) error { var nodes []modules.NetAddress if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*maxAddrLength); err != nil { return err } g.log.Printf("INFO: %v sent us %v nodes", conn.RemoteAddr(), len(nodes)) id := g.mu.Lock() for _, node := range nodes { g.addNode(node) } g.save() g.mu.Unlock(id) return nil }
func TestBroadcast(t *testing.T) { g1 := newTestingGateway("TestBroadcast1", t) defer g1.Close() g2 := newTestingGateway("TestBroadcast2", t) defer g2.Close() g3 := newTestingGateway("TestBroadcast3", t) defer g3.Close() err := g1.Connect(g2.Address()) if err != nil { t.Fatal("failed to connect:", err) } err = g1.Connect(g3.Address()) if err != nil { t.Fatal("failed to connect:", err) } var g2Payload, g3Payload string doneChan := make(chan struct{}) g2.RegisterRPC("Recv", func(conn modules.PeerConn) error { encoding.ReadObject(conn, &g2Payload, 100) doneChan <- struct{}{} return nil }) g3.RegisterRPC("Recv", func(conn modules.PeerConn) error { encoding.ReadObject(conn, &g3Payload, 100) doneChan <- struct{}{} return nil }) g1.Broadcast("Recv", "foo") <-doneChan <-doneChan if g2Payload != "foo" || g3Payload != "foo" { t.Fatal("broadcast failed:", g2Payload, g3Payload) } }
// ReadNegotiationAcceptance reads an accept/reject response from r (usually a // net.Conn). If the response is not AcceptResponse, ReadNegotiationAcceptance // returns the response as an error. If the response is StopResponse, // ErrStopResponse is returned, allowing for direct error comparison. // // Note that since errors returned by ReadNegotiationAcceptance are newly // allocated, they cannot be compared to other errors in the traditional // fashion. func ReadNegotiationAcceptance(r io.Reader) error { var resp string err := encoding.ReadObject(r, &resp, NegotiateMaxErrorSize) if err != nil { return err } switch resp { case AcceptResponse: return nil case StopResponse: return ErrStopResponse default: return errors.New(resp) } }
// requestNodes is the calling end of the ShareNodes RPC. func (g *Gateway) requestNodes(conn modules.PeerConn) error { var nodes []modules.NetAddress if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength); err != nil { return err } g.mu.Lock() for _, node := range nodes { err := g.addNode(node) if err != nil && err != errNodeExists && err != errOurAddress { g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node) } } g.save() g.mu.Unlock() return nil }
// managedReceiveBlock takes a block id and returns an RPCFunc that requests that // block and then calls AcceptBlock on it. The returned function should be used // as the calling end of the SendBlk RPC. Note that although the function // itself does not do any locking, it is still prefixed with "threaded" because // the function it returns calls the exported method AcceptBlock. func (cs *ConsensusSet) managedReceiveBlock(id types.BlockID) modules.RPCFunc { return func(conn modules.PeerConn) error { if err := encoding.WriteObject(conn, id); err != nil { return err } var block types.Block if err := encoding.ReadObject(conn, &block, types.BlockSizeLimit); err != nil { return err } if err := cs.managedAcceptBlock(block); err != nil { return err } cs.managedBroadcastBlock(block) return nil } }
// threadedProbeHosts tries to fetch the settings of each host queued in the
// scan pool. On a successful probe the host's settings, reliability, and
// weight are refreshed, and the host may be promoted into the active set. On
// a failed probe the host's reliability is penalized.
func (hdb *HostDB) threadedProbeHosts() {
	for hostEntry := range hdb.scanPool {
		// Request settings from the queued host entry. The dial, write, and
		// read are wrapped in a closure so the connection is closed promptly
		// after each probe rather than at function exit.
		var settings modules.HostSettings
		err := func() error {
			conn, err := net.DialTimeout("tcp", string(hostEntry.IPAddress), hostRequestTimeout)
			if err != nil {
				return err
			}
			defer conn.Close()
			err = encoding.WriteObject(conn, [8]byte{'S', 'e', 't', 't', 'i', 'n', 'g', 's'})
			if err != nil {
				return err
			}
			return encoding.ReadObject(conn, &settings, maxSettingsLen)
		}()
		// Now that network communication is done, lock the hostdb to modify the
		// host entry.
		id := hdb.mu.Lock()
		{
			// An unreachable host is penalized and skipped.
			if err != nil {
				hdb.decrementReliability(hostEntry.IPAddress, UnreachablePenalty)
				hdb.mu.Unlock(id)
				continue
			}
			// Update the host settings, reliability, and weight. The old IPAddress
			// must be preserved.
			settings.IPAddress = hostEntry.HostSettings.IPAddress
			hostEntry.HostSettings = settings
			hostEntry.reliability = MaxReliability
			hostEntry.weight = hdb.hostWeight(*hostEntry)
			// Promote the host into the active set only if it is not already
			// active, is present in allHosts, and 'MaxActiveHosts' has not
			// been reached.
			_, exists1 := hdb.activeHosts[hostEntry.IPAddress]
			_, exists2 := hdb.allHosts[hostEntry.IPAddress]
			if !exists1 && exists2 && len(hdb.activeHosts) < MaxActiveHosts {
				hdb.insertNode(hostEntry)
				hdb.notifySubscribers()
			}
		}
		hdb.mu.Unlock(id)
	}
}
// relayNode is the recipient end of the RelayNode RPC. It reads a node, adds // it to the Gateway's node list, and relays it to each of the Gateway's // peers. If the node is already in the node list, it is not relayed. func (g *Gateway) relayNode(conn modules.PeerConn) error { // read address var addr modules.NetAddress if err := encoding.ReadObject(conn, &addr, maxAddrLength); err != nil { return err } // add node id := g.mu.Lock() defer g.mu.Unlock(id) if err := g.addNode(addr); err != nil { return err } g.save() // relay go g.Broadcast("RelayNode", addr) return nil }
// acceptConnVersionHandshake performs the version handshake and should be // called on the side accepting a connection request. The remote version is // only returned if err == nil. func acceptConnVersionHandshake(conn net.Conn, version string) (remoteVersion string, err error) { // Read remote version. if err := encoding.ReadObject(conn, &remoteVersion, build.MaxEncodedVersionLength); err != nil { return "", fmt.Errorf("failed to read remote version: %v", err) } // Check that their version is acceptable. if err := acceptableVersion(remoteVersion); err != nil { if err := encoding.WriteObject(conn, "reject"); err != nil { return "", fmt.Errorf("failed to write reject: %v", err) } return "", err } // Send our version. if err := encoding.WriteObject(conn, version); err != nil { return "", fmt.Errorf("failed to write version: %v", err) } return remoteVersion, nil }