Example #1
// newPeer creates a new peer from an established connection object, and a
// pointer to the main server.
func newPeer(conn net.Conn, server *server, addr *lnwire.NetAddress,
	inbound bool) (*peer, error) {

	nodePub := addr.IdentityKey

	p := &peer{
		conn:        conn,
		lightningID: wire.ShaHash(fastsha256.Sum256(nodePub.SerializeCompressed())),
		addr:        addr,

		id:      atomic.AddInt32(&numNodes, 1),
		inbound: inbound,

		server: server,

		lastNMessages: make(map[lnwire.Message]struct{}),

		sendQueueSync: make(chan struct{}, 1),
		sendQueue:     make(chan outgoinMsg, 1),
		outgoingQueue: make(chan outgoinMsg, outgoingQueueLen),

		barrierInits:     make(chan wire.OutPoint),
		newChanBarriers:  make(map[wire.OutPoint]chan struct{}),
		activeChannels:   make(map[wire.OutPoint]*lnwallet.LightningChannel),
		htlcManagers:     make(map[wire.OutPoint]chan lnwire.Message),
		chanSnapshotReqs: make(chan *chanSnapshotReq),
		newChannels:      make(chan *lnwallet.LightningChannel, 1),

		localCloseChanReqs:  make(chan *closeLinkReq),
		remoteCloseChanReqs: make(chan *lnwire.CloseRequest),

		queueQuit: make(chan struct{}),
		quit:      make(chan struct{}),
	}

	// Initialize the pending channel identifier properly depending on
	// whether this connection is inbound or outbound. This value will be
	// incremented to track pending channels; starting the inbound side at
	// 1 << 63 keeps the two peers' identifier ranges from colliding.
	if inbound {
		p.nextPendingChannelID = 1 << 63
	} else {
		p.nextPendingChannelID = 0
	}

	// Fetch and then load all the active channels we have with this
	// remote peer from the database.
	activeChans, err := server.chanDB.FetchOpenChannels(p.addr.IdentityKey)
	if err != nil {
		peerLog.Errorf("unable to fetch active chans "+
			"for peer %v: %v", p, err)
		return nil, err
	}
	peerLog.Debugf("Loaded %v active channels from database with peerID(%v)",
		len(activeChans), p.id)
	if err := p.loadActiveChannels(activeChans); err != nil {
		return nil, err
	}

	return p, nil
}
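For context, a hedged sketch of a possible call site: an accept loop that wraps each inbound connection in a peer. The listener, the remote identity key, srvrLog, and p.Start are assumptions for illustration, not part of the function above.

// listenForInbound is a hypothetical accept loop: each inbound connection
// is wrapped in a peer once the remote identity key is known.
func (s *server) listenForInbound(listener net.Listener, remoteID *btcec.PublicKey) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			srvrLog.Errorf("unable to accept connection: %v", err)
			return
		}

		nodeAddr := &lnwire.NetAddress{
			IdentityKey: remoteID, // normally learned during the handshake
			Address:     conn.RemoteAddr(),
		}
		p, err := newPeer(conn, s, nodeAddr, true)
		if err != nil {
			srvrLog.Errorf("unable to create peer: %v", err)
			conn.Close()
			continue
		}

		if err := p.Start(); err != nil {
			srvrLog.Errorf("unable to start peer: %v", err)
			p.Disconnect()
		}
	}
}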
Example #2
// AddDebugInvoice adds a debug invoice for the specified amount, identified
// by the passed preimage. Once this invoice is added, sub-systems within the
// daemon that add/forward HTLC's will be able to obtain the proper preimage
// required for redemption in the case that we're the final destination.
func (i *invoiceRegistry) AddDebugInvoice(amt btcutil.Amount, preimage wire.ShaHash) {
	paymentHash := wire.ShaHash(fastsha256.Sum256(preimage[:]))

	invoice := &channeldb.Invoice{
		CreationDate: time.Now(),
		Terms: channeldb.ContractTerm{
			Value:           amt,
			PaymentPreimage: preimage,
		},
	}

	i.Lock()
	i.debugInvoices[paymentHash] = invoice
	i.Unlock()

	ltndLog.Debugf("Adding debug invoice %v", newLogClosure(func() string {
		return spew.Sdump(invoice)
	}))
}
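Since the invoice is keyed by sha256(preimage), a payer that knows the preimage can derive the same payment hash independently. A minimal sketch of that relationship, assuming a registry value and the packages imported above; the helper name is hypothetical:

// seedDebugInvoice illustrates the preimage/hash relationship: the registry
// keys the invoice by sha256(preimage).
func seedDebugInvoice(registry *invoiceRegistry) {
	var preimage wire.ShaHash
	copy(preimage[:], bytes.Repeat([]byte{1}, 32))

	registry.AddDebugInvoice(btcutil.Amount(10000), preimage)

	// A payer that knows the preimage derives the identical payment hash
	// on their side before extending the HTLC.
	payHash := wire.ShaHash(fastsha256.Sum256(preimage[:]))
	ltndLog.Infof("debug invoice payable to %x", payHash[:])
}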
Example #3
// SendPayment dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network. A single RPC invocation creates a persistent
// bi-directional stream allowing clients to rapidly send payments through the
// Lightning Network with a single persistent connection.
func (r *rpcServer) SendPayment(paymentStream lnrpc.Lightning_SendPaymentServer) error {
	const queryTimeout = time.Second * 10
	errChan := make(chan error, 1)
	payChan := make(chan *lnrpc.SendRequest)

	// Launch a new goroutine to handle reading new payment requests from
	// the client. This way we can handle errors independently of blocking
	// and waiting for the next payment request to come through.
	go func() {
		for {
			select {
			case <-r.quit:
				errChan <- nil
				return
			default:
				// Receive the next pending payment within the
				// stream sent by the client. If we read the
				// EOF sentinel, then the client has closed the
				// stream, and we can exit normally.
				nextPayment, err := paymentStream.Recv()
				if err == io.EOF {
					errChan <- nil
					return
				} else if err != nil {
					errChan <- err
					return
				}

				payChan <- nextPayment
			}
		}
	}()

	for {
		select {
		case err := <-errChan:
			return err
		case nextPayment := <-payChan:
			// Query the routing table for a potential path to the
			// destination node. If a path is ultimately
			// unavailable, then an error will be returned.
			destNode := hex.EncodeToString(nextPayment.Dest)
			targetVertex := graph.NewID(destNode)
			path, err := r.server.routingMgr.FindPath(targetVertex,
				queryTimeout)
			if err != nil {
				return err
			}
			rpcsLog.Tracef("[sendpayment] selected route: %v", path)

			// If we're in debug HTLC mode, then all outgoing
			// HTLC's will pay to the same debug rHash. Otherwise,
			// we pay to the rHash specified within the RPC
			// request.
			var rHash [32]byte
			if cfg.DebugHTLC {
				rHash = debugHash
			} else {
				copy(rHash[:], nextPayment.PaymentHash)
			}

			// Generate the raw encoded sphinx packet to be
			// included along with the HTLC add message.  We snip
			// off the first hop from the path as within the
			// routing table's star graph, we're always the first
			// hop.
			sphinxPacket, err := generateSphinxPacket(path[1:], rHash[:])
			if err != nil {
				return err
			}

			// Craft an HTLC packet to send to the routing
			// sub-system. The meta-data within this packet will be
			// used to route the payment through the network.
			htlcAdd := &lnwire.HTLCAddRequest{
				Amount:           lnwire.CreditsAmount(nextPayment.Amt),
				RedemptionHashes: [][32]byte{rHash},
				OnionBlob:        sphinxPacket,
			}
			firstHopPub, err := hex.DecodeString(path[1].String())
			if err != nil {
				return err
			}
			destAddr := wire.ShaHash(fastsha256.Sum256(firstHopPub))
			htlcPkt := &htlcPacket{
				dest: destAddr,
				msg:  htlcAdd,
			}

			// TODO(roasbeef): semaphore to limit num outstanding
			// goroutines.
			go func() {
				// Finally, send this next packet to the
				// routing layer in order to complete the next
				// payment.
				// TODO(roasbeef): this should go through the
				// L3 router once multi-hop is in place.
				if err := r.server.htlcSwitch.SendHTLC(htlcPkt); err != nil {
					errChan <- err
					return
				}

				// TODO(roasbeef): proper responses
				resp := &lnrpc.SendResponse{}
				if err := paymentStream.Send(resp); err != nil {
					errChan <- err
					return
				}
			}()
		}
	}
}
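On the other end of this RPC, a client drives the same stream by interleaving Send and Recv. A sketch assuming a generated lnrpc client over an established gRPC connection; the helper name, destination key, and amount are placeholders:

// sendOnePayment is a hypothetical client-side helper: it dispatches a
// single payment over the stream and waits for its response.
func sendOnePayment(client lnrpc.LightningClient, destPubKey []byte,
	rHash [32]byte) error {

	stream, err := client.SendPayment(context.Background())
	if err != nil {
		return err
	}

	req := &lnrpc.SendRequest{
		Dest:        destPubKey, // compressed pubkey of the destination
		Amt:         1000,
		PaymentHash: rHash[:],
	}
	if err := stream.Send(req); err != nil {
		return err
	}

	// Each request sent on the stream is answered asynchronously with a
	// SendResponse once the HTLC has been dispatched.
	_, err = stream.Recv()
	return err
}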
Example #4
// createTestChannels creates two test channels funded with 10 BTC, with 5 BTC
// allocated to each side.
func createTestChannels(revocationWindow int) (*LightningChannel, *LightningChannel, func(), error) {
	aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
		testWalletPrivKey)
	bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
		bobsPrivKey)

	channelCapacity := btcutil.Amount(10 * 1e8)
	channelBal := channelCapacity / 2
	csvTimeoutAlice := uint32(5)
	csvTimeoutBob := uint32(4)

	witnessScript, _, err := GenFundingPkScript(aliceKeyPub.SerializeCompressed(),
		bobKeyPub.SerializeCompressed(), int64(channelCapacity))
	if err != nil {
		return nil, nil, nil, err
	}

	prevOut := &wire.OutPoint{
		Hash:  wire.ShaHash(testHdSeed),
		Index: 0,
	}
	fundingTxIn := wire.NewTxIn(prevOut, nil, nil)

	bobElkrem := elkrem.NewElkremSender(deriveElkremRoot(bobKeyPriv, bobKeyPub, aliceKeyPub))
	bobFirstRevoke, err := bobElkrem.AtIndex(0)
	if err != nil {
		return nil, nil, nil, err
	}
	bobRevokeKey := DeriveRevocationPubkey(aliceKeyPub, bobFirstRevoke[:])

	aliceElkrem := elkrem.NewElkremSender(deriveElkremRoot(aliceKeyPriv, aliceKeyPub, bobKeyPub))
	aliceFirstRevoke, err := aliceElkrem.AtIndex(0)
	if err != nil {
		return nil, nil, nil, err
	}
	aliceRevokeKey := DeriveRevocationPubkey(bobKeyPub, aliceFirstRevoke[:])

	aliceCommitTx, err := CreateCommitTx(fundingTxIn, aliceKeyPub,
		bobKeyPub, aliceRevokeKey, csvTimeoutAlice, channelBal, channelBal)
	if err != nil {
		return nil, nil, nil, err
	}
	bobCommitTx, err := CreateCommitTx(fundingTxIn, bobKeyPub,
		aliceKeyPub, bobRevokeKey, csvTimeoutBob, channelBal, channelBal)
	if err != nil {
		return nil, nil, nil, err
	}

	alicePath, err := ioutil.TempDir("", "alicedb")
	if err != nil {
		return nil, nil, nil, err
	}
	dbAlice, err := channeldb.Open(alicePath, &chaincfg.TestNet3Params)
	if err != nil {
		return nil, nil, nil, err
	}

	bobPath, err := ioutil.TempDir("", "bobdb")
	if err != nil {
		return nil, nil, nil, err
	}
	dbBob, err := channeldb.Open(bobPath, &chaincfg.TestNet3Params)
	if err != nil {
		return nil, nil, nil, err
	}

	aliceChannelState := &channeldb.OpenChannel{
		IdentityPub:            aliceKeyPub,
		ChanID:                 prevOut,
		OurCommitKey:           aliceKeyPub,
		TheirCommitKey:         bobKeyPub,
		Capacity:               channelCapacity,
		OurBalance:             channelBal,
		TheirBalance:           channelBal,
		OurCommitTx:            aliceCommitTx,
		FundingOutpoint:        prevOut,
		OurMultiSigKey:         aliceKeyPub,
		TheirMultiSigKey:       bobKeyPub,
		FundingWitnessScript:   witnessScript,
		LocalCsvDelay:          csvTimeoutAlice,
		RemoteCsvDelay:         csvTimeoutBob,
		TheirCurrentRevocation: bobRevokeKey,
		LocalElkrem:            aliceElkrem,
		RemoteElkrem:           &elkrem.ElkremReceiver{},
		Db:                     dbAlice,
	}
	bobChannelState := &channeldb.OpenChannel{
		IdentityPub:            bobKeyPub,
		ChanID:                 prevOut,
		OurCommitKey:           bobKeyPub,
		TheirCommitKey:         aliceKeyPub,
		Capacity:               channelCapacity,
		OurBalance:             channelBal,
		TheirBalance:           channelBal,
		OurCommitTx:            bobCommitTx,
		FundingOutpoint:        prevOut,
		OurMultiSigKey:         bobKeyPub,
		TheirMultiSigKey:       aliceKeyPub,
		FundingWitnessScript:   witnessScript,
		LocalCsvDelay:          csvTimeoutBob,
		RemoteCsvDelay:         csvTimeoutAlice,
		TheirCurrentRevocation: aliceRevokeKey,
		LocalElkrem:            bobElkrem,
		RemoteElkrem:           &elkrem.ElkremReceiver{},
		Db:                     dbBob,
	}

	cleanUpFunc := func() {
		os.RemoveAll(bobPath)
		os.RemoveAll(alicePath)
	}

	aliceSigner := &mockSigner{aliceKeyPriv}
	bobSigner := &mockSigner{bobKeyPriv}

	notifier := &mockNotfier{}

	channelAlice, err := NewLightningChannel(aliceSigner, nil, notifier, aliceChannelState)
	if err != nil {
		return nil, nil, nil, err
	}
	channelBob, err := NewLightningChannel(bobSigner, nil, notifier, bobChannelState)
	if err != nil {
		return nil, nil, nil, err
	}

	// Now that the channels are open, simulate the start of a session by
	// having Alice and Bob extend their revocation windows to each other.
	err = initRevocationWindows(channelAlice, channelBob, revocationWindow)
	if err != nil {
		return nil, nil, nil, err
	}

	return channelAlice, channelBob, cleanUpFunc, nil
}
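A test would typically defer the returned cleanup closure so the temporary databases are removed even on failure. A minimal sketch of that usage (the test name is illustrative and the body is elided):

func TestChannelWorkflow(t *testing.T) {
	aliceChannel, bobChannel, cleanUp, err := createTestChannels(3)
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}
	defer cleanUp()

	// Exercise the channels here: add an HTLC on one side, receive it on
	// the other, then drive the commitment update dance between them.
	_ = aliceChannel
	_ = bobChannel
}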
Example #5
	"github.com/btcsuite/fastsha256"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/roasbeef/btcd/wire"
	"github.com/roasbeef/btcutil"
)

var (
	// debugPre is the default debug preimage which is inserted into the
	// invoice registry if the --debughtlc flag is activated on start up.
	// All nodes initialized with the flag active will immediately settle
	// any incoming HTLC whose rHash corresponds to the debug preimage.
	debugPre, _ = wire.NewShaHash(bytes.Repeat([]byte{1}, 32))

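	// debugHash is the SHA-256 of the debug preimage. Incoming HTLC's
	// paying to this hash can be settled immediately in debug mode.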
	debugHash = wire.ShaHash(fastsha256.Sum256(debugPre[:]))
)

// invoiceRegistry is a central registry of all the outstanding invoices
// created by the daemon. The registry is a thin wrapper around a map in order
// to ensure that all updates/reads are thread safe.
type invoiceRegistry struct {
	sync.RWMutex

	cdb *channeldb.DB

	clientMtx           sync.Mutex
	nextClientID        uint32
	notificationClients map[uint32]*invoiceSubscription

	// debugInvoices is a map which stores special "debug" invoices that
	// should only be created/used when the --debughtlc flag is active.
	debugInvoices map[wire.ShaHash]*channeldb.Invoice
}
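The excerpt cuts off at the struct above; for illustration, a lookup would likely consult the debug map before falling back to the database. The method below is a sketch of that shape under those assumptions, not the registry's actual implementation:

// lookupInvoice is a hypothetical helper: debug invoices shadow the
// persistent store, so debug HTLC's settle without touching the DB.
func (i *invoiceRegistry) lookupInvoice(rHash wire.ShaHash) (*channeldb.Invoice, error) {
	i.RLock()
	invoice, ok := i.debugInvoices[rHash]
	i.RUnlock()

	if ok {
		return invoice, nil
	}

	return i.cdb.LookupInvoice(rHash)
}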
Example #6
// handleUpstreamMsg processes wire messages related to commitment state
// updates from the upstream peer. The upstream peer is the peer with whom we
// have a direct channel; these messages update our respective commitment
// chains.
func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
	switch htlcPkt := msg.(type) {
	// TODO(roasbeef): timeouts
	//  * fail if can't parse sphinx mix-header
	case *lnwire.HTLCAddRequest:
		// Before adding the new HTLC to the state machine, parse the
		// onion object in order to obtain the routing information.
		blobReader := bytes.NewReader(htlcPkt.OnionBlob)
		onionPkt := &sphinx.OnionPacket{}
		if err := onionPkt.Decode(blobReader); err != nil {
			peerLog.Errorf("unable to decode onion pkt: %v", err)
			p.Disconnect()
			return
		}

		// Attempt to process the Sphinx packet. We include the payment
		// hash of the HTLC as it's authenticated within the Sphinx
		// packet itself as associated data, in order to thwart replay
		// attacks. In the case of a replay, an attacker is *forced* to
		// use the same payment hash twice, thereby losing their money
		// entirely.
		rHash := htlcPkt.RedemptionHashes[0][:]
		sphinxPacket, err := state.sphinx.ProcessOnionPacket(onionPkt, rHash)
		if err != nil {
			peerLog.Errorf("unable to process onion pkt: %v", err)
			p.Disconnect()
			return
		}

		// TODO(roasbeef): perform sanity checks on per-hop payload
		//  * time-lock is sane, fee, chain, etc

		// We just received an add request from an upstream peer, so we
		// add it to our state machine, then add the HTLC to our
		// "settle" list in the event that we know the pre-image.
		index := state.channel.ReceiveHTLC(htlcPkt)

		switch sphinxPacket.Action {
		// We're the designated payment destination. Therefore we
		// attempt to see if we have an invoice locally which'll allow
		// us to settle this HTLC.
		case sphinx.ExitNode:
			rHash := htlcPkt.RedemptionHashes[0]
			invoice, err := p.server.invoices.LookupInvoice(rHash)
			if err != nil {
				// TODO(roasbeef): send a cancelHTLC message if we can't settle.
				peerLog.Errorf("unable to locate invoice: %v", err)
				p.Disconnect()
				return
			}

			// TODO(roasbeef): check values accept if >=
			state.htlcsToSettle[index] = invoice

		// There are additional hops left within this route, so we
		// track the next hop according to the index of this HTLC
		// within their log. When forwarding locked-in HTLC's to the
		// switch, we'll attach the routing information so the switch
		// can finalize the circuit.
		case sphinx.MoreHops:
			// TODO(roasbeef): send cancel + error if not in
			// routing table
			state.pendingCircuits[index] = sphinxPacket
		default:
			peerLog.Errorf("mal formed onion packet")
			p.Disconnect()
		}
	case *lnwire.HTLCSettleRequest:
		// TODO(roasbeef): this assumes no "multi-sig"
		pre := htlcPkt.RedemptionProofs[0]
		idx := uint32(htlcPkt.HTLCKey)
		if err := state.channel.ReceiveHTLCSettle(pre, idx); err != nil {
			// TODO(roasbeef): broadcast on-chain
			peerLog.Errorf("settle for outgoing HTLC rejected: %v", err)
			p.Disconnect()
			return
		}

		// TODO(roasbeef): add pre-image to DB in order to swipe
		// repeated r-values
	case *lnwire.CommitSignature:
		// We just received a new update to our local commitment chain,
		// validate this new commitment, closing the link if invalid.
		// TODO(roasbeef): use uint64 for indexes?
		logIndex := uint32(htlcPkt.LogIndex)
		sig := htlcPkt.CommitSig.Serialize()
		if err := state.channel.ReceiveNewCommitment(sig, logIndex); err != nil {
			peerLog.Errorf("unable to accept new commitment: %v", err)
			p.Disconnect()
			return
		}

		if state.numUnAcked > 0 {
			state.numUnAcked--
			// TODO(roasbeef): only start if numUnacked == 0?
			state.logCommitTimer = time.Tick(300 * time.Millisecond)
		} else {
			if _, err := p.updateCommitTx(state); err != nil {
				peerLog.Errorf("unable to update "+
					"commitment: %v", err)
				p.Disconnect()
				return
			}
		}

		// Finally, since we just accepted a new state, send the remote
		// peer a revocation for our prior state.
		nextRevocation, err := state.channel.RevokeCurrentCommitment()
		if err != nil {
			peerLog.Errorf("unable to revoke current commitment: %v", err)
			return
		}
		p.queueMsg(nextRevocation, nil)
	case *lnwire.CommitRevocation:
		// We've received a revocation from the remote chain, if valid,
		// this moves the remote chain forward, and expands our
		// revocation window.
		htlcsToForward, err := state.channel.ReceiveRevocation(htlcPkt)
		if err != nil {
			peerLog.Errorf("unable to accept revocation: %v", err)
			p.Disconnect()
			return
		}

		// If any of the HTLC's eligible for forwarding are pending
		// settlement or timing out previous outgoing payments, then we
		// can remove them from the pending set and signal the
		// requester (if one exists) that the payment has been fully
		// fulfilled.
		var bandwidthUpdate btcutil.Amount
		settledPayments := make(map[lnwallet.PaymentHash]struct{})
		numSettled := 0
		for _, htlc := range htlcsToForward {
			if p, ok := state.clearedHTCLs[htlc.ParentIndex]; ok {
				p.err <- nil
				delete(state.clearedHTCLs, htlc.ParentIndex)
			}

			// TODO(roasbeef): rework log entries to a shared
			// interface.
			if htlc.EntryType != lnwallet.Add {
				continue
			}

			// If we can't immediately settle this HTLC, then we
			// skip it and move on to the next one.
			invoice, ok := state.htlcsToSettle[htlc.Index]
			if !ok {
				continue
			}

			// Otherwise, we settle this HTLC within our local
			// state update log, then send the update entry to the
			// remote party.
			preimage := invoice.Terms.PaymentPreimage
			logIndex, err := state.channel.SettleHTLC(preimage)
			if err != nil {
				peerLog.Errorf("unable to settle htlc: %v", err)
				p.Disconnect()
				continue
			}

			settleMsg := &lnwire.HTLCSettleRequest{
				ChannelPoint:     state.chanPoint,
				HTLCKey:          lnwire.HTLCKey(logIndex),
				RedemptionProofs: [][32]byte{preimage},
			}
			p.queueMsg(settleMsg, nil)
			delete(state.htlcsToSettle, htlc.Index)

			bandwidthUpdate += invoice.Terms.Value
			settledPayments[htlc.RHash] = struct{}{}

			numSettled++
		}

		go func() {
			for _, htlc := range htlcsToForward {
				// We don't need to forward any HTLC's that we
				// just settled above.
				// TODO(roasbeef): key by index instead?
				if _, ok := settledPayments[htlc.RHash]; ok {
					continue
				}

				onionPkt := state.pendingCircuits[htlc.Index]
				delete(state.pendingCircuits, htlc.Index)

				// Send this fully activated HTLC to the htlc
				// switch to continue the chained clear/settle.
				pkt, err := logEntryToHtlcPkt(*state.chanPoint,
					htlc, onionPkt)
				if err != nil {
					peerLog.Errorf("unable to make htlc pkt: %v",
						err)
					continue
				}

				state.switchChan <- pkt
			}
		}()

		if numSettled == 0 {
			return
		}

		// Send an update to the htlc switch of our newly available
		// payment bandwidth.
		// TODO(roasbeef): ideally should wait for next state update.
		if bandwidthUpdate != 0 {
			p.server.htlcSwitch.UpdateLink(state.chanPoint,
				bandwidthUpdate)
		}

		// With all the settle updates added to the local and remote
		// HTLC logs, initiate a state transition by updating the
		// remote commitment chain.
		if sent, err := p.updateCommitTx(state); err != nil {
			peerLog.Errorf("unable to update commitment: %v", err)
			p.Disconnect()
			return
		} else if sent {
			// TODO(roasbeef): wait to delete from htlcsToSettle?
			state.numUnAcked++
		}

		// Notify the invoiceRegistry of the invoices we just settled
		// with this latest commitment update.
		// TODO(roasbeef): wait until next transition?
		for invoice := range settledPayments {
			err := p.server.invoices.SettleInvoice(wire.ShaHash(invoice))
			if err != nil {
				peerLog.Errorf("unable to settle invoice: %v", err)
			}
		}
	}
}
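The HTLCSettleRequest branch above relies on one invariant: a settle is only valid if the revealed preimage hashes to the HTLC's rHash. A self-contained sketch of that check; the function name is illustrative, and the real validation lives inside ReceiveHTLCSettle:

// verifySettlePreimage illustrates the core redemption check: the revealed
// preimage must hash to the payment hash the HTLC was extended with.
func verifySettlePreimage(preimage [32]byte, rHash wire.ShaHash) error {
	derived := wire.ShaHash(fastsha256.Sum256(preimage[:]))
	if derived != rHash {
		return fmt.Errorf("invalid preimage %x for payment hash %x",
			preimage[:], rHash[:])
	}

	return nil
}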