Example #1
// addHTLC takes a PaymentDescriptor, adds it to the set of HTLCs, and
// writes the result to disk.
// Assumes the data has already been validated!
// NOTE: The mutex **MUST** already be held when this is called.
func (l *LNChannel) addHTLC(h *PaymentDescriptor) (lnwire.HTLCKey, error) {
	// Sanity check.
	if h.State != ADD_PRESTAGE {
		return 0, fmt.Errorf("addHTLC can only add PRESTAGE")
	}

	// Make sure we're not overfull. We should *never* hit this: we
	// subtract 3 in case we need to add one to correct for even/odd.
	if l.ourLastKey >= ^lnwire.HTLCKey(0)-3 {
		return 0, fmt.Errorf("channel full")
	}

	// Assign a new key. We add 2 because keys alternate parity: one side
	// of the channel uses even keys, the other odd.
	l.ourLastKey += 2

	// Check whether the resulting parity is invalid for our side; if it
	// is, step to the next key.
	if l.ourLastKey%2 == 1 {
		if l.isEven {
			l.ourLastKey++
		}
	} else {
		if !l.isEven {
			l.ourLastKey++
		}
	}
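	// Worked example (hypothetical values): with isEven == true and
	// ourLastKey == 4, the increment yields 6, which is even and valid.
	// Had a prior correction left ourLastKey at 5, the increment would
	// yield 7 and the parity check would bump it to 8.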

	// Write the new HTLC and persist it.
	l.HTLCs[l.ourLastKey] = h
	disk() // save l.ourLastKey and the HTLCs

	return l.ourLastKey, nil
}
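The NOTE above leaves the locking contract entirely to the caller. A minimal sketch of an exported wrapper that honors that contract, assuming (this is an assumption, not shown in the source) that LNChannel embeds a sync.Mutex:

// AddHTLC is a hypothetical exported wrapper: it acquires the channel
// mutex that addHTLC requires and releases it once the HTLC has been
// staged and persisted.
func (l *LNChannel) AddHTLC(h *PaymentDescriptor) (lnwire.HTLCKey, error) {
	l.Lock()
	defer l.Unlock()
	return l.addHTLC(h)
}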
Example #2
// handleDownStreamPkt processes an HTLC packet sent from the downstream HTLC
// Switch. Possible messages sent by the switch include requests to forward
// new HTLCs, time out previously cleared HTLCs, and finally to settle
// currently cleared HTLCs with the upstream peer.
func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
	var isSettle bool
	switch htlc := pkt.msg.(type) {
	case *lnwire.HTLCAddRequest:
		// A new payment has been initiated via the
		// downstream channel, so we add the new HTLC
		// to our local log, then update the commitment
		// chains.
		htlc.ChannelPoint = state.chanPoint
		index := state.channel.AddHTLC(htlc)
		p.queueMsg(htlc, nil)

		state.pendingBatch = append(state.pendingBatch, &pendingPayment{
			htlc:  htlc,
			index: index,
			err:   pkt.err,
		})

	case *lnwire.HTLCSettleRequest:
		pre := htlc.RedemptionProofs[0]
		logIndex, err := state.channel.SettleHTLC(pre)
		if err != nil {
			// TODO(roasbeef): broadcast on-chain
			peerLog.Errorf("settle for incoming HTLC rejected: %v", err)
			p.Disconnect()
			return
		}

		htlc.ChannelPoint = state.chanPoint
		htlc.HTLCKey = lnwire.HTLCKey(logIndex)

		p.queueMsg(htlc, nil)
		isSettle = true
	}

	// If this newly added update exceeds the max batch size for adds, or
	// this is a settle request, then initiate a commitment update.
	// TODO(roasbeef): enforce max HTLCs-in-flight limit
	if len(state.pendingBatch) >= 10 || isSettle {
		if sent, err := p.updateCommitTx(state); err != nil {
			peerLog.Errorf("unable to update "+
				"commitment: %v", err)
			p.Disconnect()
			return
		} else if !sent {
			return
		}

		state.numUnAcked++
	}
}
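handleDownStreamPkt leans on two small carrier types: the packet handed over by the switch, and the pending-payment record used to answer the requester once the HTLC is irrevocably locked in. A minimal sketch of the shapes the handler appears to assume, with field types that are assumptions rather than confirmed definitions:

// htlcPacket pairs a wire message from the downstream switch with an
// error channel used to report the payment's outcome to the requester.
// (Sketch only; the real definition is not shown in these examples.)
type htlcPacket struct {
	msg lnwire.Message // the add/settle request to apply to the channel
	err chan error     // receives nil once the HTLC is fully locked in
}

// pendingPayment tracks an HTLC added to the local log but not yet
// irrevocably committed by both parties; Example #3 drains these when the
// matching revocation arrives.
type pendingPayment struct {
	htlc  *lnwire.HTLCAddRequest // the original add request
	index uint32                 // assumed index type within the local log
	err   chan error             // forwarded from the originating packet
}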
Example #3
// handleUpstreamMsg processes wire messages related to commitment state
// updates from the upstream peer. The upstream peer is the peer with whom we
// have a direct channel; these messages update our respective commitment
// chains.
func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
	switch htlcPkt := msg.(type) {
	// TODO(roasbeef): timeouts
	//  * fail if can't parse sphinx mix-header
	case *lnwire.HTLCAddRequest:
		// Before adding the new HTLC to the state machine, parse the
		// onion object in order to obtain the routing information.
		blobReader := bytes.NewReader(htlcPkt.OnionBlob)
		onionPkt := &sphinx.OnionPacket{}
		if err := onionPkt.Decode(blobReader); err != nil {
			peerLog.Errorf("unable to decode onion pkt: %v", err)
			p.Disconnect()
			return
		}

		// Attempt to process the Sphinx packet. We include the
		// payment hash of the HTLC, as it's authenticated within the
		// Sphinx packet itself as associated data, in order to thwart
		// replay attacks. In the case of a replay, an attacker is
		// *forced* to use the same payment hash twice, thereby losing
		// their money entirely.
		rHash := htlcPkt.RedemptionHashes[0][:]
		sphinxPacket, err := state.sphinx.ProcessOnionPacket(onionPkt, rHash)
		if err != nil {
			peerLog.Errorf("unable to process onion pkt: %v", err)
			p.Disconnect()
			return
		}

		// TODO(roasbeef): perform sanity checks on per-hop payload
		//  * time-lock is sane, fee, chain, etc

		// We just received an add request from an upstream peer, so
		// we add it to our state machine, then add the HTLC to our
		// "settle" list in the event that we know the pre-image.
		index := state.channel.ReceiveHTLC(htlcPkt)

		switch sphinxPacket.Action {
		// We're the designated payment destination. Therefore we
		// attempt to see if we have an invoice locally which'll allow
		// us to settle this HTLC.
		case sphinx.ExitNode:
			rHash := htlcPkt.RedemptionHashes[0]
			invoice, err := p.server.invoices.LookupInvoice(rHash)
			if err != nil {
				// TODO(roasbeef): send a cancelHTLC message if we can't settle.
				peerLog.Errorf("unable to locate invoice: %v", err)
				p.Disconnect()
				return
			}

			// TODO(roasbeef): check values accept if >=
			state.htlcsToSettle[index] = invoice

		// There are additional hops left within this route, so we
		// track the next hop according to the index of this HTLC
		// within their log. When forwarding locked-in HTLCs to the
		// switch, we'll attach the routing information so the switch
		// can finalize the circuit.
		case sphinx.MoreHops:
			// TODO(roasbeef): send cancel + error if not in
			// routing table
			state.pendingCircuits[index] = sphinxPacket
		default:
			peerLog.Errorf("mal formed onion packet")
			p.Disconnect()
		}
	case *lnwire.HTLCSettleRequest:
		// TODO(roasbeef): this assumes no "multi-sig"
		pre := htlcPkt.RedemptionProofs[0]
		idx := uint32(htlcPkt.HTLCKey)
		if err := state.channel.ReceiveHTLCSettle(pre, idx); err != nil {
			// TODO(roasbeef): broadcast on-chain
			peerLog.Errorf("settle for outgoing HTLC rejected: %v", err)
			p.Disconnect()
			return
		}

		// TODO(roasbeef): add pre-image to DB in order to swipe
		// repeated r-values
	case *lnwire.CommitSignature:
		// We just received a new update to our local commitment chain,
		// validate this new commitment, closing the link if invalid.
		// TODO(roasbeef): use uint64 for indexes?
		logIndex := uint32(htlcPkt.LogIndex)
		sig := htlcPkt.CommitSig.Serialize()
		if err := state.channel.ReceiveNewCommitment(sig, logIndex); err != nil {
			peerLog.Errorf("unable to accept new commitment: %v", err)
			p.Disconnect()
			return
		}

		if state.numUnAcked > 0 {
			state.numUnAcked--
			// TODO(roasbeef): only start if numUnacked == 0?
			state.logCommitTimer = time.Tick(300 * time.Millisecond)
		} else {
			if _, err := p.updateCommitTx(state); err != nil {
				peerLog.Errorf("unable to update "+
					"commitment: %v", err)
				p.Disconnect()
				return
			}
		}

		// Finally, since we just accepted a new state, send the remote
		// peer a revocation for our prior state.
		nextRevocation, err := state.channel.RevokeCurrentCommitment()
		if err != nil {
			peerLog.Errorf("unable to revoke current commitment: %v", err)
			return
		}
		p.queueMsg(nextRevocation, nil)
	case *lnwire.CommitRevocation:
		// We've received a revocation from the remote chain, if valid,
		// this moves the remote chain forward, and expands our
		// revocation window.
		htlcsToForward, err := state.channel.ReceiveRevocation(htlcPkt)
		if err != nil {
			peerLog.Errorf("unable to accept revocation: %v", err)
			p.Disconnect()
			return
		}

		// If any of the HTLCs eligible for forwarding are pending
		// settles or timeouts of previous outgoing payments, then we
		// can remove them from the pending set, and signal the
		// requester (if one exists) that the payment has been fully
		// fulfilled.
		var bandwidthUpdate btcutil.Amount
		settledPayments := make(map[lnwallet.PaymentHash]struct{})
		numSettled := 0
		for _, htlc := range htlcsToForward {
			if p, ok := state.clearedHTCLs[htlc.ParentIndex]; ok {
				p.err <- nil
				delete(state.clearedHTCLs, htlc.ParentIndex)
			}

			// TODO(roasbeef): rework log entries to a shared
			// interface.
			if htlc.EntryType != lnwallet.Add {
				continue
			}

			// If we can't immediately settle this HTLC, then we
			// can halt processing here.
			invoice, ok := state.htlcsToSettle[htlc.Index]
			if !ok {
				continue
			}

			// Otherwise, we settle this HTLC within our local
			// state update log, then send the update entry to the
			// remote party.
			preimage := invoice.Terms.PaymentPreimage
			logIndex, err := state.channel.SettleHTLC(preimage)
			if err != nil {
				peerLog.Errorf("unable to settle htlc: %v", err)
				p.Disconnect()
				continue
			}

			settleMsg := &lnwire.HTLCSettleRequest{
				ChannelPoint:     state.chanPoint,
				HTLCKey:          lnwire.HTLCKey(logIndex),
				RedemptionProofs: [][32]byte{preimage},
			}
			p.queueMsg(settleMsg, nil)
			delete(state.htlcsToSettle, htlc.Index)

			bandwidthUpdate += invoice.Terms.Value
			settledPayments[htlc.RHash] = struct{}{}

			numSettled++
		}

		go func() {
			for _, htlc := range htlcsToForward {
				// We don't need to forward any HTLC's that we
				// just settled above.
				// TODO(roasbeef): key by index instead?
				if _, ok := settledPayments[htlc.RHash]; ok {
					continue
				}

				onionPkt := state.pendingCircuits[htlc.Index]
				delete(state.pendingCircuits, htlc.Index)

				// Send this fully activated HTLC to the htlc
				// switch to continue the chained clear/settle.
				pkt, err := logEntryToHtlcPkt(*state.chanPoint,
					htlc, onionPkt)
				if err != nil {
					peerLog.Errorf("unable to make htlc pkt: %v",
						err)
					continue
				}

				state.switchChan <- pkt
			}
		}()

		if numSettled == 0 {
			return
		}

		// Send an update to the htlc switch of our newly available
		// payment bandwidth.
		// TODO(roasbeef): ideally should wait for next state update.
		if bandwidthUpdate != 0 {
			p.server.htlcSwitch.UpdateLink(state.chanPoint,
				bandwidthUpdate)
		}

		// With all the settle updates added to the local and remote
		// HTLC logs, initiate a state transition by updating the
		// remote commitment chain.
		if sent, err := p.updateCommitTx(state); err != nil {
			peerLog.Errorf("unable to update commitment: %v", err)
			p.Disconnect()
			return
		} else if sent {
			// TODO(roasbeef): wait to delete from htlcsToSettle?
			state.numUnAcked++
		}

		// Notify the invoiceRegistry of the invoices we just settled
		// with this latest commitment update.
		// TODO(roasbeef): wait until next transition?
		for invoice := range settledPayments {
			err := p.server.invoices.SettleInvoice(wire.ShaHash(invoice))
			if err != nil {
				peerLog.Errorf("unable to settle invoice: %v", err)
			}
		}
	}
}
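Both handlers read and mutate commitmentState without any locking, which is only safe if a single goroutine owns that state. A minimal sketch of such an owner loop, where the downstream and upstream channels (and the htlcManager name itself) are hypothetical stand-ins for the peer's actual plumbing:

// htlcManager is a hypothetical single-owner loop for commitmentState:
// packets from the switch and wire messages from the remote peer are
// serialized here, so handleDownStreamPkt and handleUpstreamMsg never race.
func (p *peer) htlcManager(state *commitmentState,
	downstream chan *htlcPacket, upstream chan lnwire.Message) {

	for {
		select {
		case pkt := <-downstream:
			p.handleDownStreamPkt(state, pkt)
		case msg := <-upstream:
			p.handleUpstreamMsg(state, msg)
		case <-state.logCommitTimer:
			// Periodic flush: commit adds that never reached the
			// batch threshold of 10 on their own.
			if _, err := p.updateCommitTx(state); err != nil {
				peerLog.Errorf("unable to update commitment: %v", err)
				p.Disconnect()
				return
			}
		}
	}
}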