Example #1
// threadedAcceptConn adds a connecting node as a peer.
func (g *Gateway) threadedAcceptConn(conn net.Conn) {
	if g.threads.Add() != nil {
		conn.Close()
		return
	}
	defer g.threads.Done()
	conn.SetDeadline(time.Now().Add(connStdDeadline))

	addr := modules.NetAddress(conn.RemoteAddr().String())
	g.log.Debugf("INFO: %v wants to connect", addr)

	remoteVersion, err := acceptConnVersionHandshake(conn, build.Version)
	if err != nil {
		g.log.Debugf("INFO: %v wanted to connect but version handshake failed: %v", addr, err)
		conn.Close()
		return
	}

	if build.VersionCmp(remoteVersion, handshakeUpgradeVersion) < 0 {
		err = g.managedAcceptConnOldPeer(conn, remoteVersion)
	} else {
		err = g.managedAcceptConnNewPeer(conn, remoteVersion)
	}
	if err != nil {
		g.log.Debugf("INFO: %v wanted to connect, but failed: %v", addr, err)
		conn.Close()
		return
	}
	// Handshake successful; remove the deadline.
	conn.SetDeadline(time.Time{})

	g.log.Debugf("INFO: accepted connection from new peer %v (v%v)", addr, remoteVersion)
}
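Example #1 brackets the version handshake with a deadline, then clears it by passing the zero time.Time once the peer is accepted. A minimal sketch of that pattern (withHandshakeDeadline and its duration parameter are illustrative, not part of the original code; imports: net, time):

// withHandshakeDeadline bounds the handshake with a deadline, then clears
// it so the accepted connection can live indefinitely.
func withHandshakeDeadline(conn net.Conn, d time.Duration, handshake func(net.Conn) error) error {
	if err := conn.SetDeadline(time.Now().Add(d)); err != nil {
		return err
	}
	if err := handshake(conn); err != nil {
		return err
	}
	// The zero time.Time removes the deadline entirely.
	return conn.SetDeadline(time.Time{})
}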
Example #2
// threadedAcceptConn adds a connecting node as a peer.
func (g *Gateway) threadedAcceptConn(conn net.Conn) {
	if g.threads.Add() != nil {
		conn.Close()
		return
	}
	defer g.threads.Done()

	addr := modules.NetAddress(conn.RemoteAddr().String())
	g.log.Debugf("INFO: %v wants to connect", addr)

	remoteVersion, err := acceptConnVersionHandshake(conn, build.Version)
	if err != nil {
		g.log.Debugf("INFO: %v wanted to connect but version handshake failed: %v", addr, err)
		conn.Close()
		return
	}

	if build.VersionCmp(remoteVersion, "1.0.0") < 0 {
		err = g.managedAcceptConnOldPeer(conn, remoteVersion)
	} else {
		err = g.managedAcceptConnNewPeer(conn, remoteVersion)
	}
	if err != nil {
		g.log.Debugf("INFO: %v wanted to connect, but failed: %v", addr, err)
		conn.Close()
		return
	}

	g.log.Debugf("INFO: accepted connection from new peer %v (v%v)", addr, remoteVersion)
}
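Examples #1 and #2 gate all work on g.threads, a thread group that rejects new workers once shutdown has begun. The real type lives in Sia's sync package; a minimal sketch of the contract these examples assume (illustrative only; imports: errors, sync):

type threadGroup struct {
	mu      sync.Mutex
	wg      sync.WaitGroup
	stopped bool
}

// Add registers a worker, failing if the group has been stopped.
func (tg *threadGroup) Add() error {
	tg.mu.Lock()
	defer tg.mu.Unlock()
	if tg.stopped {
		return errors.New("thread group was stopped")
	}
	tg.wg.Add(1)
	return nil
}

// Done releases a worker previously registered with Add.
func (tg *threadGroup) Done() { tg.wg.Done() }

// Stop rejects future Adds and blocks until all workers call Done.
func (tg *threadGroup) Stop() {
	tg.mu.Lock()
	tg.stopped = true
	tg.mu.Unlock()
	tg.wg.Wait()
}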
Example #3
// acceptConn adds a connecting node as a peer.
func (g *Gateway) acceptConn(conn net.Conn) {
	addr := modules.NetAddress(conn.RemoteAddr().String())
	g.log.Printf("INFO: %v wants to connect", addr)

	// read version
	var remoteVersion string
	if err := encoding.ReadObject(conn, &remoteVersion, maxAddrLength); err != nil {
		conn.Close()
		g.log.Printf("INFO: %v wanted to connect, but we could not read their version: %v", addr, err)
		return
	}

	// check that version is acceptable
	// NOTE: this version must be bumped whenever the gateway or consensus
	// breaks compatibility.
	if build.VersionCmp(remoteVersion, "0.3.3") < 0 {
		encoding.WriteObject(conn, "reject")
		conn.Close()
		g.log.Printf("INFO: %v wanted to connect, but their version (%v) was unacceptable", addr, remoteVersion)
		return
	}

	// respond with our version
	if err := encoding.WriteObject(conn, build.Version); err != nil {
		conn.Close()
		g.log.Printf("INFO: could not write version ack to %v: %v", addr, err)
		return
	}

	// If we are already fully connected, kick out an old peer to make room
	// for the new one. Importantly, prioritize kicking a peer with the same
	// IP as the connecting peer. This protects against Sybil attacks.
	id := g.mu.Lock()
	if len(g.peers) >= fullyConnectedThreshold {
		// First choose a random peer, preferably inbound. If we have only
		// outbound peers, we'll wind up kicking an outbound peer; but
		// subsequent inbound connections will kick each other instead of
		// continuing to replace outbound peers.
		kick, err := g.randomInboundPeer()
		if err != nil {
			kick, _ = g.randomPeer()
		}
		// if another peer shares this IP, choose that one instead
		for p := range g.peers {
			if p.Host() == addr.Host() {
				kick = p
				break
			}
		}
		g.peers[kick].sess.Close()
		delete(g.peers, kick)
		g.log.Printf("INFO: disconnected from %v to make room for %v", kick, addr)
	}
	// add the peer
	g.addPeer(&peer{addr: addr, sess: muxado.Server(conn), inbound: true})
	g.mu.Unlock(id)

	g.log.Printf("INFO: accepted connection from new peer %v (v%v)", addr, remoteVersion)
}
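The kick selection in Example #3 encodes three ordered preferences: a peer sharing the connecting host (the Sybil defense), then a random inbound peer, then any peer at all. A hypothetical helper expressing that order (pickPeerToKick is not in the original code; the caller must hold g.mu):

func (g *Gateway) pickPeerToKick(addr modules.NetAddress) modules.NetAddress {
	// Prefer a peer on the same host as the incoming connection.
	for p := range g.peers {
		if p.Host() == addr.Host() {
			return p
		}
	}
	// Otherwise prefer a random inbound peer; fall back to any peer.
	kick, err := g.randomInboundPeer()
	if err != nil {
		kick, _ = g.randomPeer()
	}
	return kick
}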
Example #4
// checkForUpdate checks a centralized server for a more recent version of
// Sia. If an update is available, it returns true, along with the newer
// version.
func checkForUpdate() (bool, string, error) {
	manifest, err := fetchManifest("current")
	if err != nil {
		return false, "", err
	}
	version := manifest[0]
	return build.VersionCmp(build.Version, version) < 0, version, nil
}
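Every example here leans on build.VersionCmp following the usual Cmp convention: negative when the first version is lower, zero when equal, positive when higher. A rough sketch of a compatible comparison, assuming dot-separated numeric fields with missing fields treated as zero (the real implementation may differ in edge cases; imports: strconv, strings):

// versionCmp compares dot-separated numeric versions, e.g.
// versionCmp("0.5.1", "0.6.0") == -1 and versionCmp("1.0", "1.0.0") == 0.
func versionCmp(a, b string) int {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) || i < len(bs); i++ {
		var ai, bi int
		if i < len(as) {
			ai, _ = strconv.Atoi(as[i])
		}
		if i < len(bs) {
			bi, _ = strconv.Atoi(bs[i])
		}
		switch {
		case ai < bi:
			return -1
		case ai > bi:
			return 1
		}
	}
	return 0
}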
Example #5
// acceptableVersion returns an error if the version is unacceptable.
func acceptableVersion(version string) error {
	if !build.IsVersion(version) {
		return invalidVersionError(version)
	}
	if build.VersionCmp(version, minAcceptableVersion) < 0 {
		return insufficientVersionError(version)
	}
	return nil
}
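The invalidVersionError and insufficientVersionError helpers are not shown in this example. One plausible definition, assuming string-backed error types that carry the offending version (an assumption, not confirmed source):

type invalidVersionError string

func (v invalidVersionError) Error() string {
	return "invalid version: " + string(v)
}

type insufficientVersionError string

func (v insufficientVersionError) Error() string {
	return "unacceptable version: " + string(v)
}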
Example #6
// Connect establishes a persistent connection to a peer, and adds it to the
// Gateway's peer list.
func (g *Gateway) Connect(addr modules.NetAddress) error {
	if addr == g.Address() {
		return errors.New("can't connect to our own address")
	}

	id := g.mu.RLock()
	_, exists := g.peers[addr]
	g.mu.RUnlock(id)
	if exists {
		return errors.New("peer already added")
	}

	conn, err := net.DialTimeout("tcp", string(addr), dialTimeout)
	if err != nil {
		return err
	}
	// send our version
	if err := encoding.WriteObject(conn, "0.3.3"); err != nil {
		conn.Close()
		return err
	}
	// read version ack
	var remoteVersion string
	if err := encoding.ReadObject(conn, &remoteVersion, maxAddrLength); err != nil {
		conn.Close()
		return err
	} else if remoteVersion == "reject" {
		conn.Close()
		return errors.New("peer rejected connection")
	}
	// decide whether to accept this version
	if build.VersionCmp(remoteVersion, "0.3.3") < 0 {
		conn.Close()
		return errors.New("unacceptable version: " + remoteVersion)
	}

	g.log.Println("INFO: connected to new peer", addr)

	id = g.mu.Lock()
	g.addPeer(&peer{addr: addr, sess: muxado.Client(conn), inbound: false})
	g.mu.Unlock(id)

	// call initRPCs
	id = g.mu.RLock()
	var wg sync.WaitGroup
	wg.Add(len(g.initRPCs))
	for name, fn := range g.initRPCs {
		go func(name string, fn modules.RPCFunc) {
			// errors here are non-fatal
			g.RPC(addr, name, fn)
			wg.Done()
		}(name, fn)
	}
	g.mu.RUnlock(id)
	wg.Wait()

	return nil
}
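Examples #3, #6, and #9 lock through a mutex whose Lock returns an id that must be handed back to Unlock, a debugging aid for catching mismatched lock/unlock pairs (later examples use a plain sync.RWMutex). A minimal sketch of the id-checking idea (illustrative only, not Sia's actual implementation):

type checkedMutex struct {
	mu     sync.Mutex
	nextID uint64
	holder uint64
}

// Lock acquires the mutex and returns an id identifying this acquisition.
func (m *checkedMutex) Lock() uint64 {
	m.mu.Lock()
	m.nextID++
	m.holder = m.nextID
	return m.holder
}

// Unlock releases the mutex, panicking if the id is not the current holder's.
func (m *checkedMutex) Unlock(id uint64) {
	if id != m.holder {
		panic("mutex unlocked with stale id")
	}
	m.holder = 0
	m.mu.Unlock()
}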
Example #7
// managedBroadcastBlock will broadcast a block to the consensus set's peers.
func (cs *ConsensusSet) managedBroadcastBlock(b types.Block) {
	// COMPATv0.5.1 - broadcast the block to all peers <= v0.5.1 and block header to all peers > v0.5.1.
	var relayBlockPeers, relayHeaderPeers []modules.Peer
	for _, p := range cs.gateway.Peers() {
		if build.VersionCmp(p.Version, "0.5.1") <= 0 {
			relayBlockPeers = append(relayBlockPeers, p)
		} else {
			relayHeaderPeers = append(relayHeaderPeers, p)
		}
	}
	go cs.gateway.Broadcast("RelayBlock", b, relayBlockPeers)
	go cs.gateway.Broadcast("RelayHeader", b.Header(), relayHeaderPeers)
}
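The version split in Example #7 reappears verbatim inside Example #11. A hypothetical helper that factors it out (peersByVersion is not part of the original code):

// peersByVersion partitions peers around a version cutoff: peers at or
// below the cutoff get full blocks, peers above it get headers.
func peersByVersion(peers []modules.Peer, cutoff string) (atOrBelow, above []modules.Peer) {
	for _, p := range peers {
		if build.VersionCmp(p.Version, cutoff) <= 0 {
			atOrBelow = append(atOrBelow, p)
		} else {
			above = append(above, p)
		}
	}
	return
}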
Example #8
// Editor initiates the contract revision process with a host, and returns
// an Editor.
func (c *Contractor) Editor(contract modules.RenterContract) (Editor, error) {
	c.mu.RLock()
	height := c.blockHeight
	c.mu.RUnlock()
	if height > contract.EndHeight() {
		return nil, errors.New("contract has already ended")
	}
	host, ok := c.hdb.Host(contract.NetAddress)
	if !ok {
		return nil, errors.New("no record of that host")
	}
	if host.StoragePrice.Cmp(maxStoragePrice) > 0 {
		return nil, errTooExpensive
	}
	// cap host.Collateral on new hosts
	if build.VersionCmp(host.Version, "0.6.0") > 0 {
		if host.Collateral.Cmp(maxUploadCollateral) > 0 {
			host.Collateral = maxUploadCollateral
		}
	}

	// create editor
	e, err := proto.NewEditor(host, contract, height)
	if proto.IsRevisionMismatch(err) {
		// try again with the cached revision
		c.mu.RLock()
		cached, ok := c.cachedRevisions[contract.ID]
		c.mu.RUnlock()
		if !ok {
			// nothing we can do; return original error
			return nil, err
		}
		c.log.Printf("host %v has different revision for %v; retrying with cached revision", contract.NetAddress, contract.ID)
		contract.LastRevision = cached.revision
		contract.MerkleRoots = cached.merkleRoots
		e, err = proto.NewEditor(host, contract, height)
	}
	if err != nil {
		return nil, err
	}
	// supply a SaveFn that saves the revision to the contractor's persist
	// (the existing revision will be overwritten when SaveFn is called)
	e.SaveFn = c.saveRevision(contract.ID)

	return &hostEditor{
		editor:     e,
		contract:   contract,
		contractor: c,
	}, nil
}
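The collateral cap in Example #8 is an ordinary clamp on a big-integer-backed type: compare against the maximum and substitute it when exceeded. In isolation, assuming math/big values (capAt is illustrative; Sia's types.Currency wraps a big integer):

// capAt returns x clamped from above by max.
func capAt(x, max *big.Int) *big.Int {
	if x.Cmp(max) > 0 {
		return max
	}
	return x
}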
Example #9
// acceptConn adds a connecting node as a peer.
func (g *Gateway) acceptConn(conn net.Conn) {
	addr := modules.NetAddress(conn.RemoteAddr().String())
	g.log.Printf("INFO: %v wants to connect", addr)

	// don't connect to an IP address more than once
	if build.Release != "testing" {
		id := g.mu.RLock()
		for p := range g.peers {
			if p.Host() == addr.Host() {
				g.mu.RUnlock(id)
				conn.Close()
				g.log.Printf("INFO: rejected connection from %v: already connected", addr)
				return
			}
		}
		g.mu.RUnlock(id)
	}

	// read version
	var remoteVersion string
	if err := encoding.ReadObject(conn, &remoteVersion, maxAddrLength); err != nil {
		conn.Close()
		g.log.Printf("INFO: %v wanted to connect, but we could not read their version: %v", addr, err)
		return
	}

	// decide whether to accept
	// NOTE: this version must be bumped whenever the gateway or consensus
	// breaks compatibility.
	if build.VersionCmp(remoteVersion, "0.3.3") < 0 {
		encoding.WriteObject(conn, "reject")
		conn.Close()
		g.log.Printf("INFO: %v wanted to connect, but their version (%v) was unacceptable", addr, remoteVersion)
		return
	}

	// respond with our version
	if err := encoding.WriteObject(conn, "0.3.3"); err != nil {
		conn.Close()
		g.log.Printf("INFO: could not write version ack to %v: %v", addr, err)
		return
	}

	// If we are already fully connected, kick out an old inbound peer to make
	// room for the new one. Among other things, this ensures that bootstrap
	// nodes will always be connectible. Worst case, you'll connect, receive a
	// node list, and immediately get booted. But once you have the node list
	// you should be able to connect to less full peers.
	id := g.mu.Lock()
	if len(g.peers) >= fullyConnectedThreshold {
		oldPeer, err := g.randomInboundPeer()
		if err == nil {
			g.peers[oldPeer].sess.Close()
			delete(g.peers, oldPeer)
			g.log.Printf("INFO: disconnected from %v to make room for %v", oldPeer, addr)
		}
	}
	// add the peer
	g.addPeer(&peer{addr: addr, sess: muxado.Server(conn), inbound: true})
	g.mu.Unlock(id)

	g.log.Printf("INFO: accepted connection from new peer %v (v%v)", addr, remoteVersion)
}
Example #10
// Connect establishes a persistent connection to a peer, and adds it to the
// Gateway's peer list.
func (g *Gateway) Connect(addr modules.NetAddress) error {
	if err := g.threads.Add(); err != nil {
		return err
	}
	defer g.threads.Done()

	if addr == g.Address() {
		return errors.New("can't connect to our own address")
	}
	if err := addr.IsValid(); err != nil {
		return errors.New("can't connect to invalid address")
	}
	if net.ParseIP(addr.Host()) == nil {
		return errors.New("address must be an IP address")
	}

	g.mu.RLock()
	_, exists := g.peers[addr]
	g.mu.RUnlock()
	if exists {
		return errors.New("peer already added")
	}

	conn, err := net.DialTimeout("tcp", string(addr), dialTimeout)
	if err != nil {
		return err
	}
	remoteVersion, err := connectVersionHandshake(conn, build.Version)
	if err != nil {
		conn.Close()
		return err
	}

	if build.VersionCmp(remoteVersion, "1.0.0") < 0 {
		err = g.managedConnectOldPeer(conn, remoteVersion, addr)
	} else {
		err = g.managedConnectNewPeer(conn, remoteVersion, addr)
	}
	if err != nil {
		conn.Close()
		return err
	}

	g.log.Debugln("INFO: connected to new peer", addr)

	// call initRPCs
	g.mu.RLock()
	for name, fn := range g.initRPCs {
		go func(name string, fn modules.RPCFunc) {
			if g.threads.Add() != nil {
				return
			}
			defer g.threads.Done()

			err := g.RPC(addr, name, fn)
			if err != nil {
				g.log.Debugf("INFO: RPC %q on peer %q failed: %v", name, addr, err)
			}
		}(name, fn)
	}
	g.mu.RUnlock()

	return nil
}
Example #11
// managedReceiveBlocks is the calling end of the SendBlocks RPC, without the
// threadgroup wrapping.
func (cs *ConsensusSet) managedReceiveBlocks(conn modules.PeerConn) (returnErr error) {
	// Set a deadline after which SendBlocks will time out. During IBD,
	// especially, SendBlocks will time out. This is by design so that IBD
	// switches peers to prevent any one peer from stalling IBD.
	err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout))
	// Ignore errors returned by SetDeadline if the conn is a pipe in testing.
	// Pipes do not support Set{,Read,Write}Deadline and should only be used in
	// testing.
	if opErr, ok := err.(*net.OpError); ok && opErr.Op == "set" && opErr.Net == "pipe" && build.Release == "testing" {
		err = nil
	}
	if err != nil {
		return err
	}
	stalled := true
	defer func() {
		// TODO: Timeout errors returned by muxado do not conform to the net.Error
		// interface and therefore we cannot check if the error is a timeout using
		// the Timeout() method. Once muxado issue #14 is resolved change the below
		// condition to:
		//     if netErr, ok := returnErr.(net.Error); ok && netErr.Timeout() && stalled { ... }
		if stalled && returnErr != nil && (returnErr.Error() == "Read timeout" || returnErr.Error() == "Write timeout") {
			returnErr = errSendBlocksStalled
		}
	}()

	// Get blockIDs to send.
	var history [32]types.BlockID
	cs.mu.RLock()
	err = cs.db.View(func(tx *bolt.Tx) error {
		history = blockHistory(tx)
		return nil
	})
	cs.mu.RUnlock()
	if err != nil {
		return err
	}

	// Send the block ids.
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}

	// Broadcast the last block accepted. This functionality is in a defer to
	// ensure that a block is always broadcast if any blocks are accepted. This
	// is to stop an attacker from preventing block broadcasts.
	chainExtended := false
	defer func() {
		cs.mu.RLock()
		synced := cs.synced
		cs.mu.RUnlock()
		if chainExtended && synced {
			// The last block received will be the current block since
			// managedAcceptBlock only returns nil if a block extends the longest chain.
			currentBlock := cs.managedCurrentBlock()
			// COMPATv0.5.1 - broadcast the block to all peers <= v0.5.1 and block header to all peers > v0.5.1
			var relayBlockPeers, relayHeaderPeers []modules.Peer
			for _, p := range cs.gateway.Peers() {
				if build.VersionCmp(p.Version, "0.5.1") <= 0 {
					relayBlockPeers = append(relayBlockPeers, p)
				} else {
					relayHeaderPeers = append(relayHeaderPeers, p)
				}
			}
			go cs.gateway.Broadcast("RelayBlock", currentBlock, relayBlockPeers)
			go cs.gateway.Broadcast("RelayHeader", currentBlock.Header(), relayHeaderPeers)
		}
	}()

	// Read blocks off of the wire and add them to the consensus set until
	// there are no more blocks available.
	moreAvailable := true
	for moreAvailable {
		// Read a slice of blocks from the wire.
		var newBlocks []types.Block
		if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil {
			return err
		}
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}

		// Integrate the blocks into the consensus set.
		for _, block := range newBlocks {
			stalled = false
			// Call managedAcceptBlock instead of AcceptBlock so as not to broadcast
			// every block.
			acceptErr := cs.managedAcceptBlock(block)
			// Set a flag to indicate that we should broadcast the last block received.
			if acceptErr == nil {
				chainExtended = true
			}
			// ErrNonExtendingBlock must be ignored until headers-first block
			// sharing is implemented; a block that is already in the database
			// should also be ignored.
			if acceptErr == modules.ErrNonExtendingBlock || acceptErr == modules.ErrBlockKnown {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}
		}
	}
	return nil
}
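The TODO in Example #11 already spells out the check it wants once muxado returns errors conforming to net.Error. As a standalone helper, the intended condition would look like this (isTimeout is hypothetical):

// isTimeout reports whether err is a network timeout per the net.Error
// interface; muxado's string-typed timeout errors currently defeat this.
func isTimeout(err error) bool {
	netErr, ok := err.(net.Error)
	return ok && netErr.Timeout()
}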
Example #12
// managedConnect establishes a persistent connection to a peer, and adds it to
// the Gateway's peer list.
func (g *Gateway) managedConnect(addr modules.NetAddress) error {
	// Perform verification on the input address.
	g.mu.RLock()
	gaddr := g.myAddr
	g.mu.RUnlock()
	if addr == gaddr {
		return errors.New("can't connect to our own address")
	}
	if err := addr.IsStdValid(); err != nil {
		return errors.New("can't connect to invalid address")
	}
	if net.ParseIP(addr.Host()) == nil {
		return errors.New("address must be an IP address")
	}
	g.mu.RLock()
	_, exists := g.peers[addr]
	g.mu.RUnlock()
	if exists {
		return errPeerExists
	}

	// Dial the peer and perform peer initialization.
	conn, err := g.dial(addr)
	if err != nil {
		return err
	}

	// Perform peer initialization.
	remoteVersion, err := connectVersionHandshake(conn, build.Version)
	if err != nil {
		conn.Close()
		return err
	}
	if build.VersionCmp(remoteVersion, handshakeUpgradeVersion) < 0 {
		err = g.managedConnectOldPeer(conn, remoteVersion, addr)
	} else {
		err = g.managedConnectNewPeer(conn, remoteVersion, addr)
	}
	if err != nil {
		conn.Close()
		return err
	}
	g.log.Debugln("INFO: connected to new peer", addr)

	// Connection successful; clear the timeout so as to maintain a
	// persistent connection to this peer.
	conn.SetDeadline(time.Time{})

	// call initRPCs
	g.mu.RLock()
	for name, fn := range g.initRPCs {
		go func(name string, fn modules.RPCFunc) {
			if g.threads.Add() != nil {
				return
			}
			defer g.threads.Done()

			err := g.managedRPC(addr, name, fn)
			if err != nil {
				g.log.Debugf("INFO: RPC %q on peer %q failed: %v", name, addr, err)
			}
		}(name, fn)
	}
	g.mu.RUnlock()

	return nil
}