func TestNeighborHelloMessageReadWrite(t *testing.T) {
	Id1 := graph.NewID(1)
	Id2 := graph.NewID(2)
	rt1 := rt.NewRoutingTable()
	rt1.AddChannel(Id1, Id2, graph.NewEdgeID("1"), &rt.ChannelInfo{1, 1})
	b := new(bytes.Buffer)
	msg1 := &NeighborHelloMessage{RT: rt1}
	_, err := WriteMessage(b, msg1, 0, wire.SimNet)
	if err != nil {
		t.Fatalf("Can't write message %v", err)
	}
	_, msg2, _, err := ReadMessage(b, 0, wire.SimNet)
	if err != nil {
		t.Fatalf("Can't read message %v", err)
	}
	msg2c, ok := msg2.(*NeighborHelloMessage)
	if !ok {
		t.Fatalf("Can't convert to *NeighborHelloMessage")
	}
	if msg2c.RT == nil {
		t.Fatal("After decoding RT should not be nil")
	}
	if !msg2c.RT.HasChannel(Id1, Id2, graph.NewEdgeID("1")) {
		t.Errorf("msg2.RT.HasChannel(Id1, Id2) = false, want true")
	}
	if !msg2c.RT.HasChannel(Id2, Id1, graph.NewEdgeID("1")) {
		t.Errorf("msg2.RT.HasChannel(Id2, Id1) = false, want true")
	}
}
func TestNeighborHelloMessageEncodeDecode(t *testing.T) {
	Id1 := graph.NewID(1)
	Id2 := graph.NewID(2)
	rt1 := rt.NewRoutingTable()
	rt1.AddChannel(Id1, Id2, graph.NewEdgeID("1"), &rt.ChannelInfo{1, 1})
	b := new(bytes.Buffer)
	msg1 := NeighborHelloMessage{RT: rt1}
	err := msg1.Encode(b, 0)
	if err != nil {
		t.Fatalf("Can't encode message ", err)
	}
	msg2 := new(NeighborHelloMessage)
	err = msg2.Decode(b, 0)
	if err != nil {
		t.Fatalf("Can't decode message ", err)
	}
	if msg2.RT == nil {
		t.Fatal("After decoding RT should not be nil")
	}
	if !msg2.RT.HasChannel(Id1, Id2, graph.NewEdgeID("1")) {
		t.Errorf("msg2.RT.HasChannel(Id1, Id2) = false, want true")
	}
	if !msg2.RT.HasChannel(Id2, Id1, graph.NewEdgeID("1")) {
		t.Errorf("msg2.RT.HasChannel(Id2, Id1) = false, want true")
	}
}
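Both tests above assert the same symmetry: after AddChannel inserts a single edge, HasChannel must report it in both directions on the decoded table. The helper below is a small sketch of that shared assertion; it assumes NewID and NewEdgeID return graph.ID and graph.EdgeID values (those type names are an assumption, not confirmed by this listing).

func assertChannelBothDirections(t *testing.T, table *rt.RoutingTable,
	a, b graph.ID, edge graph.EdgeID) {

	// Assumed types: graph.ID / graph.EdgeID are whatever graph.NewID and
	// graph.NewEdgeID return in the tests above.
	if !table.HasChannel(a, b, edge) {
		t.Errorf("HasChannel(%v, %v) = false, want true", a, b)
	}
	if !table.HasChannel(b, a, edge) {
		t.Errorf("HasChannel(%v, %v) = false, want true", b, a)
	}
}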
Example #3
// newServer creates a new instance of the server which is to listen using the
// passed listener address.
func newServer(listenAddrs []string, notifier chainntnfs.ChainNotifier,
	bio lnwallet.BlockChainIO, wallet *lnwallet.LightningWallet,
	chanDB *channeldb.DB) (*server, error) {

	privKey, err := wallet.GetIdentitykey()
	if err != nil {
		return nil, err
	}

	listeners := make([]net.Listener, len(listenAddrs))
	for i, addr := range listenAddrs {
		listeners[i], err = brontide.NewListener(privKey, addr)
		if err != nil {
			return nil, err
		}
	}

	serializedPubKey := privKey.PubKey().SerializeCompressed()
	s := &server{
		bio:           bio,
		chainNotifier: notifier,
		chanDB:        chanDB,
		fundingMgr:    newFundingManager(wallet),
		invoices:      newInvoiceRegistry(chanDB),
		lnwallet:      wallet,
		identityPriv:  privKey,
		// TODO(roasbeef): derive proper onion key based on rotation
		// schedule
		sphinx:      sphinx.NewRouter(privKey, activeNetParams.Params),
		lightningID: fastsha256.Sum256(serializedPubKey),
		listeners:   listeners,
		peers:       make(map[int32]*peer),
		newPeers:    make(chan *peer, 100),
		donePeers:   make(chan *peer, 100),
		queries:     make(chan interface{}),
		quit:        make(chan struct{}),
	}

	// If the debug HTLC flag is on, then we create a "master debug"
	// invoice to which all outgoing payments will be sent, and all
	// incoming HTLCs with the debug R-Hash are immediately settled.
	if cfg.DebugHTLC {
		kiloCoin := btcutil.Amount(btcutil.SatoshiPerBitcoin * 1000)
		s.invoices.AddDebugInvoice(kiloCoin, *debugPre)
		srvrLog.Debugf("Debug HTLC invoice inserted, preimage=%x, hash=%x",
			debugPre[:], debugHash[:])
	}

	s.utxoNursery = newUtxoNursery(notifier, wallet)

	// Create a new routing manager with ourselves as the sole node within
	// the graph.
	selfVertex := hex.EncodeToString(serializedPubKey)
	s.routingMgr = routing.NewRoutingManager(graph.NewID(selfVertex), nil)
	s.htlcSwitch = newHtlcSwitch(serializedPubKey, s.routingMgr)

	s.rpcServer = newRpcServer(s)

	return s, nil
}
Example #4
func getRoutingTable(ctxb context.Context, client lnrpc.LightningClient) (*rt.RoutingTable, error) {
	req := &lnrpc.ShowRoutingTableRequest{}
	resp, err := client.ShowRoutingTable(ctxb, req)
	if err != nil {
		return nil, err
	}

	r := rt.NewRoutingTable()
	for _, channel := range resp.Channels {
		r.AddChannel(
			graph.NewID(channel.Id1),
			graph.NewID(channel.Id2),
			graph.NewEdgeID(channel.Outpoint),
			&rt.ChannelInfo{channel.Capacity, channel.Weight},
		)
	}
	return r, nil
}
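A usage sketch for getRoutingTable: dial the daemon over gRPC, build the generated client, and query the returned table. The address, helper name, and the insecure dial option are illustrative assumptions; only getRoutingTable, lnrpc.NewLightningClient, and the rt/graph calls mirror code already on this page.

func checkChannel(id1, id2, edge string) error {
	// Hypothetical daemon address; adjust for the node being queried.
	conn, err := grpc.Dial("localhost:10009", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	client := lnrpc.NewLightningClient(conn)
	table, err := getRoutingTable(context.Background(), client)
	if err != nil {
		return err
	}

	// HasChannel mirrors the lookups used by the tests earlier on this page.
	present := table.HasChannel(graph.NewID(id1), graph.NewID(id2),
		graph.NewEdgeID(edge))
	fmt.Printf("channel %v -> %v (%v) present: %v\n", id1, id2, edge, present)
	return nil
}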
// handleFundingOpen processes the final message when the daemon is the
// responder to a single funder channel workflow. The SPV proof supplied by
// the initiating node is verified and, if correct, the channel is marked as
// open to the source peer.
func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) {
	f.resMtx.RLock()
	resCtx := f.activeReservations[fmsg.peer.id][fmsg.msg.ChannelID]
	f.resMtx.RUnlock()

	// The channel initiator has claimed the channel is now open, so we'll
	// verify the contained SPV proof for validity.
	// TODO(roasbeef): send off to the spv proof verifier, in the routing
	// sub-module.

	// Now that we've verified the initiator's proof, we'll commit the
	// channel state to disk, and notify the source peer of a newly opened
	// channel.
	openChan, err := resCtx.reservation.FinalizeReservation()
	if err != nil {
		fndgLog.Errorf("unable to finalize reservation: %v", err)
		fmsg.peer.Disconnect()
		return
	}

	// The reservation has been completed, therefore we can stop tracking
	// it within our active reservations map.
	f.resMtx.Lock()
	delete(f.activeReservations[fmsg.peer.id], fmsg.msg.ChannelID)
	f.resMtx.Unlock()

	fndgLog.Infof("FundingOpen: ChannelPoint(%v) with peerID(%v) is now open",
		resCtx.reservation.FundingOutpoint, fmsg.peer.id)

	// Notify the L3 routing manager of the newly active channel link.
	capacity := int64(resCtx.reservation.OurContribution().FundingAmount +
		resCtx.reservation.TheirContribution().FundingAmount)
	vertex := hex.EncodeToString(fmsg.peer.addr.IdentityKey.SerializeCompressed())
	fmsg.peer.server.routingMgr.OpenChannel(
		graph.NewID(vertex),
		graph.NewEdgeID(resCtx.reservation.FundingOutpoint().String()),
		&rt.ChannelInfo{
			Cpt: capacity,
		},
	)

	// Finally, notify the target peer of the newly open channel.
	fmsg.peer.newChannels <- openChan
}
Example #6
func writeToTempFile(r *rt.RoutingTable, file *os.File, self string) error {
	slc := []graph.ID{graph.NewID(self)}
	viz := visualizer.New(r.G, slc, nil, nil)
	viz.ApplyToNode = func(s string) string { return s }
	viz.ApplyToEdge = func(info interface{}) string {
		if info, ok := info.(*rt.ChannelInfo); ok {
			return fmt.Sprintf(`"%v"`, info.Capacity())
		}
		return "nil"
	}
	// These methods must be called if we plan to use shortcuts, autocomplete, etc.
	viz.BuildPrefixTree()
	viz.EnableShortcut(true)
	dot := viz.Draw()
	_, err := file.Write([]byte(dot))
	if err != nil {
		return err
	}
	err = file.Sync()
	if err != nil {
		return err
	}
	return nil
}
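A minimal usage sketch for writeToTempFile, reusing the rt and graph constructors shown elsewhere on this page; the node IDs, edge ID, and temp-file prefix below are made up for illustration.

func dumpExampleTable() error {
	self := "02aa"
	r := rt.NewRoutingTable()
	r.AddChannel(graph.NewID(self), graph.NewID("03bb"),
		graph.NewEdgeID("edge-1"), &rt.ChannelInfo{1, 1})

	// Render the table as Graphviz DOT into a temporary file.
	tmp, err := ioutil.TempFile("", "rt_dot")
	if err != nil {
		return err
	}
	defer tmp.Close()

	if err := writeToTempFile(r, tmp, self); err != nil {
		return err
	}
	fmt.Println("routing table written to", tmp.Name())
	return nil
}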
// handleFundingSignComplete processes the final message received in a single
// funder workflow. Once this message is processed, the funding transaction is
// broadcast. Once the funding transaction reaches a sufficient number of
// confirmations, a message is sent to the responding peer along with an SPV
// proof of transaction inclusion.
func (f *fundingManager) handleFundingSignComplete(fmsg *fundingSignCompleteMsg) {
	chanID := fmsg.msg.ChannelID

	f.resMtx.RLock()
	resCtx := f.activeReservations[fmsg.peer.id][chanID]
	f.resMtx.RUnlock()

	// The remote peer has responded with a signature for our commitment
	// transaction. We'll verify the signature for validity, then commit
	// the state to disk as we can now open the channel.
	commitSig := fmsg.msg.CommitSignature.Serialize()
	if err := resCtx.reservation.CompleteReservation(nil, commitSig); err != nil {
		fndgLog.Errorf("unable to complete reservation sign complete: %v", err)
		fmsg.peer.Disconnect()
		resCtx.err <- err
		return
	}

	fundingPoint := resCtx.reservation.FundingOutpoint()
	fndgLog.Infof("Finalizing pendingID(%v) over ChannelPoint(%v), "+
		"waiting for channel open on-chain", chanID, fundingPoint)

	// Send an update to the upstream client that the negotiation process
	// is over.
	// TODO(roasbeef): add abstraction over updates to accommodate
	// long-polling, or SSE, etc.
	resCtx.updates <- &lnrpc.OpenStatusUpdate{
		Update: &lnrpc.OpenStatusUpdate_ChanPending{
			ChanPending: &lnrpc.PendingUpdate{
				Txid: fundingPoint.Hash[:],
			},
		},
	}

	// Spawn a goroutine which will send the newly opened channel to the
	// source peer once the channel is open. A channel is considered "open"
	// once it reaches a sufficient number of confirmations.
	// TODO(roasbeef): semaphore to limit active chan open goroutines
	go func() {
		select {
		// TODO(roasbeef): need to persist pending broadcast channels,
		// send chan open proof during scan of blocks mined while down.
		case openChan := <-resCtx.reservation.DispatchChan():
			// This reservation is no longer pending as the funding
			// transaction has been fully confirmed.
			f.resMtx.Lock()
			delete(f.activeReservations[fmsg.peer.id], chanID)
			f.resMtx.Unlock()

			fndgLog.Infof("ChannelPoint(%v) with peerID(%v) is now active",
				fundingPoint, fmsg.peer.id)

			// Now that the channel is open, we need to notify a
			// number of parties of this event.

			// First we send the newly opened channel to the source
			// server peer.
			fmsg.peer.newChannels <- openChan

			// Next, we queue a message to notify the remote peer
			// that the channel is open. We additionally provide an
			// SPV proof allowing them to verify the transaction
			// inclusion.
			// TODO(roasbeef): obtain SPV proof from sub-system.
			//  * ChainNotifier constructs proof also?
			spvProof := []byte("fake proof")
			fundingOpen := lnwire.NewSingleFundingOpenProof(chanID, spvProof)
			fmsg.peer.queueMsg(fundingOpen, nil)

			// Register the new link with the L3 routing manager
			// so this new channel can be utilized during path
			// finding.
			chanInfo := openChan.StateSnapshot()
			capacity := int64(chanInfo.LocalBalance + chanInfo.RemoteBalance)
			pubSerialized := fmsg.peer.addr.IdentityKey.SerializeCompressed()
			vertex := hex.EncodeToString(pubSerialized)
			fmsg.peer.server.routingMgr.OpenChannel(
				graph.NewID(vertex),
				graph.NewEdgeID(fundingPoint.String()),
				&rt.ChannelInfo{
					Cpt: capacity,
				},
			)

			// Finally give the caller a final update notifying
			// them that the channel is now open.
			// TODO(roasbeef): helper funcs for proto construction
			resCtx.updates <- &lnrpc.OpenStatusUpdate{
				Update: &lnrpc.OpenStatusUpdate_ChanOpen{
					ChanOpen: &lnrpc.ChannelOpenUpdate{
						ChannelPoint: &lnrpc.ChannelPoint{
							FundingTxid: fundingPoint.Hash[:],
							OutputIndex: fundingPoint.Index,
						},
					},
				},
			}
			return
		case <-f.quit:
			return
		}
	}()
}
Example #8
// handleUnregisterLink unregisters a currently active link. If the deletion of
// this link leaves the interface empty, then the interface entry itself is
// also deleted.
func (h *htlcSwitch) handleUnregisterLink(req *unregisterLinkMsg) {
	hswcLog.Infof("unregistering active link, interface=%v, chan_point=%v",
		hex.EncodeToString(req.chanInterface[:]), req.chanPoint)

	chanInterface := req.chanInterface

	h.interfaceMtx.RLock()
	links := h.interfaces[chanInterface]
	h.interfaceMtx.RUnlock()

	// A request with a nil channel point indicates that all the current
	// links for this channel should be cleared.
	chansRemoved := make([]*wire.OutPoint, 0, len(links))
	if req.chanPoint == nil {
		hswcLog.Infof("purging all active links for interface %v",
			hex.EncodeToString(chanInterface[:]))

		for _, link := range links {
			h.chanIndexMtx.Lock()
			delete(h.chanIndex, *link.chanPoint)
			h.chanIndexMtx.Unlock()

			chansRemoved = append(chansRemoved, link.chanPoint)
		}
		links = nil
	} else {
		h.chanIndexMtx.Lock()
		delete(h.chanIndex, *req.chanPoint)
		h.chanIndexMtx.Unlock()

		for i := 0; i < len(links); i++ {
			chanLink := links[i]
			if chanLink.chanPoint == req.chanPoint {
				chansRemoved = append(chansRemoved, req.chanPoint)

				// We perform an in-place delete by sliding
				// every element down one, then slicing off the
				// last element. Additionally, we update the
				// slice reference within the source map to
				// ensure full deletion.
				copy(links[i:], links[i+1:])
				links[len(links)-1] = nil
				h.interfaceMtx.Lock()
				h.interfaces[chanInterface] = links[:len(links)-1]
				h.interfaceMtx.Unlock()

				break
			}
		}
	}

	// Purge the now inactive channels from the routing table.
	// TODO(roasbeef): routing layer should only see the links as a
	// summation of their capacity/etc
	//  * distinction between connection close and channel close
	for _, linkChan := range chansRemoved {
		err := h.router.RemoveChannel(
			graph.NewID(hex.EncodeToString(h.gateway)),
			graph.NewID(hex.EncodeToString(req.remoteID)),
			graph.NewEdgeID(linkChan.String()),
		)
		if err != nil {
			hswcLog.Errorf("unable to remove channel from "+
				"routing table: %v", err)
		}
	}

	// TODO(roasbeef): clean up/modify onion links
	//  * just have the interfaces index be keyed on hash160?

	if len(links) == 0 {
		hswcLog.Infof("interface %v has no active links, destroying",
			hex.EncodeToString(chanInterface[:]))
		h.interfaceMtx.Lock()
		delete(h.interfaces, chanInterface)
		h.interfaceMtx.Unlock()
	}

	if req.done != nil {
		req.done <- struct{}{}
	}
}
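The in-place removal above (shift the tail left, nil the last slot, reslice, and store the result back into the owning map) is a standard Go idiom. A standalone sketch with placeholder types:

// removeAt deletes element i from the slice stored under key, preserving
// order, letting the dropped element be garbage collected, and updating the
// map so future lookups see the shortened slice. string/int are placeholders.
func removeAt(m map[string][]*int, key string, i int) {
	s := m[key]
	copy(s[i:], s[i+1:])
	s[len(s)-1] = nil
	m[key] = s[:len(s)-1]
}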
Example #9
// SendPayment dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network. A single RPC invocation creates a persistent
// bi-directional stream allowing clients to rapidly send payments through the
// Lightning Network with a single persistent connection.
func (r *rpcServer) SendPayment(paymentStream lnrpc.Lightning_SendPaymentServer) error {
	const queryTimeout = time.Duration(time.Second * 10)
	errChan := make(chan error, 1)
	payChan := make(chan *lnrpc.SendRequest)

	// Launch a new goroutine to handle reading new payment requests from
	// the client. This way we can handle errors independently of blocking
	// and waiting for the next payment request to come through.
	go func() {
		for {
			select {
			case <-r.quit:
				errChan <- nil
				return
			default:
				// Receive the next pending payment within the
				// stream sent by the client. If we read the
				// EOF sentinel, then the client has closed the
				// stream, and we can exit normally.
				nextPayment, err := paymentStream.Recv()
				if err == io.EOF {
					errChan <- nil
					return
				} else if err != nil {
					errChan <- err
					return
				}

				payChan <- nextPayment
			}
		}
	}()

	for {
		select {
		case err := <-errChan:
			return err
		case nextPayment := <-payChan:
			// Query the routing table for a potential path to the
			// destination node. If a path is ultimately
			// unavailable, then an error will be returned.
			destNode := hex.EncodeToString(nextPayment.Dest)
			targetVertex := graph.NewID(destNode)
			path, err := r.server.routingMgr.FindPath(targetVertex,
				queryTimeout)
			if err != nil {
				return err
			}
			rpcsLog.Tracef("[sendpayment] selected route: %v", path)

			// If we're in debug HTLC mode, then all outgoing
			// HTLCs will pay to the same debug rHash. Otherwise,
			// we pay to the rHash specified within the RPC
			// request.
			var rHash [32]byte
			if cfg.DebugHTLC {
				rHash = debugHash
			} else {
				copy(rHash[:], nextPayment.PaymentHash)
			}

			// Generate the raw encoded sphinx packet to be
			// included along with the HTLC add message.  We snip
			// off the first hop from the path as within the
			// routing table's star graph, we're always the first
			// hop.
			sphinxPacket, err := generateSphinxPacket(path[1:], rHash[:])
			if err != nil {
				return err
			}

			// Craft an HTLC packet to send to the routing
			// sub-system. The meta-data within this packet will be
			// used to route the payment through the network.
			htlcAdd := &lnwire.HTLCAddRequest{
				Amount:           lnwire.CreditsAmount(nextPayment.Amt),
				RedemptionHashes: [][32]byte{rHash},
				OnionBlob:        sphinxPacket,
			}
			firstHopPub, err := hex.DecodeString(path[1].String())
			if err != nil {
				return err
			}
			destAddr := wire.ShaHash(fastsha256.Sum256(firstHopPub))
			htlcPkt := &htlcPacket{
				dest: destAddr,
				msg:  htlcAdd,
			}

			// TODO(roasbeef): semaphore to limit num outstanding
			// goroutines.
			go func() {
				// Finally, send this next packet to the
				// routing layer in order to complete the next
				// payment.
				// TODO(roasbeef): this should go through the
				// L3 router once multi-hop is in place.
				if err := r.server.htlcSwitch.SendHTLC(htlcPkt); err != nil {
					errChan <- err
					return
				}

				// TODO(roasbeef): proper responses
				resp := &lnrpc.SendResponse{}
				if err := paymentStream.Send(resp); err != nil {
					errChan <- err
					return
				}
			}()
		}
	}

	return nil
}
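From the client side, the bi-directional stream handled above is driven through the generated lnrpc stub: open the stream once, then interleave Send and Recv calls over it. The helper below is an illustrative sketch; the destination bytes and amount are placeholders, and only the SendRequest fields already referenced by the server code are used.

func sendOnePayment(client lnrpc.LightningClient, dest []byte, amt int64) error {
	stream, err := client.SendPayment(context.Background())
	if err != nil {
		return err
	}

	// Each SendRequest written to the stream is answered by a SendResponse.
	if err := stream.Send(&lnrpc.SendRequest{Dest: dest, Amt: amt}); err != nil {
		return err
	}
	if _, err := stream.Recv(); err != nil {
		return err
	}
	return stream.CloseSend()
}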
Example #10
// readHandler is responsible for reading messages off the wire in series, then
// properly dispatching the handling of the message to the proper sub-system.
//
// NOTE: This method MUST be run as a goroutine.
func (p *peer) readHandler() {
out:
	for atomic.LoadInt32(&p.disconnect) == 0 {
		nextMsg, _, err := p.readNextMessage()
		if err != nil {
			peerLog.Infof("unable to read message: %v", err)
			break out
		}

		var isChanUpdate bool
		var targetChan *wire.OutPoint

		switch msg := nextMsg.(type) {
		// TODO(roasbeef): consolidate into predicate (single vs dual)
		case *lnwire.SingleFundingRequest:
			p.server.fundingMgr.processFundingRequest(msg, p)
		case *lnwire.SingleFundingResponse:
			p.server.fundingMgr.processFundingResponse(msg, p)
		case *lnwire.SingleFundingComplete:
			p.server.fundingMgr.processFundingComplete(msg, p)
		case *lnwire.SingleFundingSignComplete:
			p.server.fundingMgr.processFundingSignComplete(msg, p)
		case *lnwire.SingleFundingOpenProof:
			p.server.fundingMgr.processFundingOpenProof(msg, p)
		case *lnwire.CloseRequest:
			p.remoteCloseChanReqs <- msg
		// TODO(roasbeef): interface for htlc update msgs
		//  * .(CommitmentUpdater)

		case *lnwire.ErrorGeneric:
			switch msg.Code {
			case lnwire.ErrorMaxPendingChannels:
				p.server.fundingMgr.processErrorGeneric(msg, p)
			default:
				peerLog.Warnf("ErrorGeneric(%v) handling isn't"+
					" implemented.", msg.Code)
			}
		case *lnwire.HTLCAddRequest:
			isChanUpdate = true
			targetChan = msg.ChannelPoint
		case *lnwire.HTLCSettleRequest:
			isChanUpdate = true
			targetChan = msg.ChannelPoint
		case *lnwire.CommitRevocation:
			isChanUpdate = true
			targetChan = msg.ChannelPoint
		case *lnwire.CommitSignature:
			isChanUpdate = true
			targetChan = msg.ChannelPoint
		case *lnwire.NeighborAckMessage,
			*lnwire.NeighborHelloMessage,
			*lnwire.NeighborRstMessage,
			*lnwire.NeighborUpdMessage,
			*lnwire.RoutingTableRequestMessage,
			*lnwire.RoutingTableTransferMessage:

			// Convert to a base routing message and set the sender and receiver.
			vertex := hex.EncodeToString(p.addr.IdentityKey.SerializeCompressed())
			p.server.routingMgr.ReceiveRoutingMessage(msg, graph.NewID(vertex))
		}

		if isChanUpdate {
			// We might be receiving an update to a newly funded
			// channel in which we were the responder. Therefore
			// we need to possibly block until the new channel has
			// propagated internally through the system.
			p.barrierMtx.RLock()
			barrier, ok := p.newChanBarriers[*targetChan]
			p.barrierMtx.RUnlock()
			if ok {
				peerLog.Tracef("waiting for chan barrier "+
					"signal for ChannelPoint(%v)", targetChan)
				select {
				case <-barrier:
				case <-p.quit: // TODO(roasbeef): add timer?
					break out
				}
				peerLog.Tracef("barrier for ChannelPoint(%v) "+
					"closed", targetChan)
			}

			// Dispatch the commitment update message to the proper
			// active goroutine dedicated to this channel.
			targetChan, ok := p.htlcManagers[*targetChan]
			if !ok {
				peerLog.Errorf("recv'd update for unknown channel %v",
					targetChan)
				continue
			}
			targetChan <- nextMsg
		}
	}

	p.Disconnect()

	p.wg.Done()
	peerLog.Tracef("readHandler for peer %v done", p)
}