func TestNeighborHelloMessageReadWrite(t *testing.T) {
	Id1 := graph.NewID(1)
	Id2 := graph.NewID(2)
	rt1 := rt.NewRoutingTable()
	rt1.AddChannel(Id1, Id2, graph.NewEdgeID("1"), &rt.ChannelInfo{1, 1})

	b := new(bytes.Buffer)
	msg1 := &NeighborHelloMessage{RT: rt1}
	_, err := WriteMessage(b, msg1, 0, wire.SimNet)
	if err != nil {
		t.Fatalf("Can't write message: %v", err)
	}

	_, msg2, _, err := ReadMessage(b, 0, wire.SimNet)
	if err != nil {
		t.Fatalf("Can't read message: %v", err)
	}
	msg2c, ok := msg2.(*NeighborHelloMessage)
	if !ok {
		t.Fatalf("Can't convert to *NeighborHelloMessage")
	}

	if msg2c.RT == nil {
		t.Fatal("After decoding RT should not be nil")
	}
	if !msg2c.RT.HasChannel(Id1, Id2, graph.NewEdgeID("1")) {
		t.Errorf("msg2c.RT.HasChannel(Id1, Id2) = false, want true")
	}
	if !msg2c.RT.HasChannel(Id2, Id1, graph.NewEdgeID("1")) {
		t.Errorf("msg2c.RT.HasChannel(Id2, Id1) = false, want true")
	}
}
func TestNeighborHelloMessageEncodeDecode(t *testing.T) {
	Id1 := graph.NewID(1)
	Id2 := graph.NewID(2)
	rt1 := rt.NewRoutingTable()
	rt1.AddChannel(Id1, Id2, graph.NewEdgeID("1"), &rt.ChannelInfo{1, 1})

	b := new(bytes.Buffer)
	msg1 := NeighborHelloMessage{RT: rt1}
	err := msg1.Encode(b, 0)
	if err != nil {
		t.Fatalf("Can't encode message: %v", err)
	}

	msg2 := new(NeighborHelloMessage)
	err = msg2.Decode(b, 0)
	if err != nil {
		t.Fatalf("Can't decode message: %v", err)
	}

	if msg2.RT == nil {
		t.Fatal("After decoding RT should not be nil")
	}
	if !msg2.RT.HasChannel(Id1, Id2, graph.NewEdgeID("1")) {
		t.Errorf("msg2.RT.HasChannel(Id1, Id2) = false, want true")
	}
	if !msg2.RT.HasChannel(Id2, Id1, graph.NewEdgeID("1")) {
		t.Errorf("msg2.RT.HasChannel(Id2, Id1) = false, want true")
	}
}
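// Both tests above assemble the same single-channel routing table by hand. A
// minimal sketch of a shared setup helper (newTestRoutingTable is a
// hypothetical name, not part of the package):
func newTestRoutingTable() *rt.RoutingTable {
	table := rt.NewRoutingTable()
	table.AddChannel(
		graph.NewID(1), graph.NewID(2), graph.NewEdgeID("1"),
		&rt.ChannelInfo{1, 1},
	)
	return table
}

// Usage within either test: msg := &NeighborHelloMessage{RT: newTestRoutingTable()}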
func getRoutingTable(ctxb context.Context, client lnrpc.LightningClient) (*rt.RoutingTable, error) {
	req := &lnrpc.ShowRoutingTableRequest{}
	resp, err := client.ShowRoutingTable(ctxb, req)
	if err != nil {
		return nil, err
	}

	r := rt.NewRoutingTable()
	for _, channel := range resp.Channels {
		r.AddChannel(
			graph.NewID(channel.Id1),
			graph.NewID(channel.Id2),
			graph.NewEdgeID(channel.Outpoint),
			&rt.ChannelInfo{channel.Capacity, channel.Weight},
		)
	}

	return r, nil
}
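// A minimal usage sketch for getRoutingTable from a command-line context. The
// dial target, grpc.WithInsecure, and the printRoutingTable name are
// illustrative assumptions, not part of this package:
func printRoutingTable(ctx context.Context) error {
	conn, err := grpc.Dial("localhost:10009", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	table, err := getRoutingTable(ctx, lnrpc.NewLightningClient(conn))
	if err != nil {
		return err
	}

	// The accessors available on rt.RoutingTable aren't shown in this
	// file, so inspection of the result is left abstract here.
	_ = table
	return nil
}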
// handleFundingOpen processes the final message when the daemon is the
// responder to a single funder channel workflow. The SPV proof supplied by
// the initiating node is verified, and if valid, the channel is marked as
// open to the source peer.
func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) {
	f.resMtx.RLock()
	resCtx := f.activeReservations[fmsg.peer.id][fmsg.msg.ChannelID]
	f.resMtx.RUnlock()

	// The channel initiator has claimed the channel is now open, so we'll
	// verify the contained SPV proof for validity.
	// TODO(roasbeef): send off to the spv proof verifier, in the routing
	// sub-module.

	// Now that we've verified the initiator's proof, we'll commit the
	// channel state to disk, and notify the source peer of a newly opened
	// channel.
	openChan, err := resCtx.reservation.FinalizeReservation()
	if err != nil {
		fndgLog.Errorf("unable to finalize reservation: %v", err)
		fmsg.peer.Disconnect()
		return
	}

	// The reservation has been completed, therefore we can stop tracking
	// it within our active reservations map.
	f.resMtx.Lock()
	delete(f.activeReservations[fmsg.peer.id], fmsg.msg.ChannelID)
	f.resMtx.Unlock()

	fndgLog.Infof("FundingOpen: ChannelPoint(%v) with peerID(%v) is now open",
		resCtx.reservation.FundingOutpoint(), fmsg.peer.id)

	// Notify the L3 routing manager of the newly active channel link.
	capacity := int64(resCtx.reservation.OurContribution().FundingAmount +
		resCtx.reservation.TheirContribution().FundingAmount)
	vertex := hex.EncodeToString(fmsg.peer.addr.IdentityKey.SerializeCompressed())
	fmsg.peer.server.routingMgr.OpenChannel(
		graph.NewID(vertex),
		graph.NewEdgeID(resCtx.reservation.FundingOutpoint().String()),
		&rt.ChannelInfo{
			Cpt: capacity,
		},
	)

	// Finally, notify the target peer of the newly open channel.
	fmsg.peer.newChannels <- openChan
}
// handleFundingSignComplete processes the final message received in a single
// funder workflow. Once this message is processed, the funding transaction is
// broadcast. Once the funding transaction reaches a sufficient number of
// confirmations, a message is sent to the responding peer along with an SPV
// proof of transaction inclusion.
func (f *fundingManager) handleFundingSignComplete(fmsg *fundingSignCompleteMsg) {
	chanID := fmsg.msg.ChannelID

	f.resMtx.RLock()
	resCtx := f.activeReservations[fmsg.peer.id][chanID]
	f.resMtx.RUnlock()

	// The remote peer has responded with a signature for our commitment
	// transaction. We'll verify the signature for validity, then commit
	// the state to disk as we can now open the channel.
	commitSig := fmsg.msg.CommitSignature.Serialize()
	if err := resCtx.reservation.CompleteReservation(nil, commitSig); err != nil {
		fndgLog.Errorf("unable to complete reservation sign complete: %v", err)
		fmsg.peer.Disconnect()
		resCtx.err <- err
		return
	}

	fundingPoint := resCtx.reservation.FundingOutpoint()
	fndgLog.Infof("Finalizing pendingID(%v) over ChannelPoint(%v), "+
		"waiting for channel open on-chain", chanID, fundingPoint)

	// Send an update to the upstream client that the negotiation process
	// is over.
	// TODO(roasbeef): add abstraction over updates to accommodate
	// long-polling, or SSE, etc.
	resCtx.updates <- &lnrpc.OpenStatusUpdate{
		Update: &lnrpc.OpenStatusUpdate_ChanPending{
			ChanPending: &lnrpc.PendingUpdate{
				Txid: fundingPoint.Hash[:],
			},
		},
	}

	// Spawn a goroutine which will send the newly open channel to the
	// source peer once the channel is open. A channel is considered "open"
	// once it reaches a sufficient number of confirmations.
	// TODO(roasbeef): semaphore to limit active chan open goroutines
	go func() {
		select {
		// TODO(roasbeef): need to persist pending broadcast channels,
		// send chan open proof during scan of blocks mined while down.
		case openChan := <-resCtx.reservation.DispatchChan():
			// This reservation is no longer pending as the funding
			// transaction has been fully confirmed.
			f.resMtx.Lock()
			delete(f.activeReservations[fmsg.peer.id], chanID)
			f.resMtx.Unlock()

			fndgLog.Infof("ChannelPoint(%v) with peerID(%v) is now active",
				fundingPoint, fmsg.peer.id)

			// Now that the channel is open, we need to notify a
			// number of parties of this event.

			// First we send the newly opened channel to the source
			// server peer.
			fmsg.peer.newChannels <- openChan

			// Next, we queue a message to notify the remote peer
			// that the channel is open. We additionally provide an
			// SPV proof allowing them to verify the transaction
			// inclusion.
			// TODO(roasbeef): obtain SPV proof from sub-system.
			//  * ChainNotifier constructs proof also?
			spvProof := []byte("fake proof")
			fundingOpen := lnwire.NewSingleFundingOpenProof(chanID, spvProof)
			fmsg.peer.queueMsg(fundingOpen, nil)

			// Register the new link with the L3 routing manager
			// so this new channel can be utilized during path
			// finding.
			chanInfo := openChan.StateSnapshot()
			capacity := int64(chanInfo.LocalBalance + chanInfo.RemoteBalance)
			pubSerialized := fmsg.peer.addr.IdentityKey.SerializeCompressed()
			vertex := hex.EncodeToString(pubSerialized)
			fmsg.peer.server.routingMgr.OpenChannel(
				graph.NewID(vertex),
				graph.NewEdgeID(fundingPoint.String()),
				&rt.ChannelInfo{
					Cpt: capacity,
				},
			)

			// Finally give the caller a final update notifying
			// them that the channel is now open.
			// TODO(roasbeef): helper funcs for proto construction
			resCtx.updates <- &lnrpc.OpenStatusUpdate{
				Update: &lnrpc.OpenStatusUpdate_ChanOpen{
					ChanOpen: &lnrpc.ChannelOpenUpdate{
						ChannelPoint: &lnrpc.ChannelPoint{
							FundingTxid: fundingPoint.Hash[:],
							OutputIndex: fundingPoint.Index,
						},
					},
				},
			}
			return
		case <-f.quit:
			return
		}
	}()
}
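// Note: handleFundingOpen and handleFundingSignComplete both register the
// newly opened channel with the L3 routing manager using an identical
// (vertex, edge, capacity) triple. A minimal sketch of a shared helper,
// assuming FundingOutpoint() yields a *wire.OutPoint; the name
// registerLinkWithRouter is hypothetical, not part of the package:
func registerLinkWithRouter(p *peer, fundingPoint *wire.OutPoint, capacity int64) {
	// Derive the graph vertex from the peer's serialized identity key,
	// mirroring the inline code in both handlers above.
	vertex := hex.EncodeToString(p.addr.IdentityKey.SerializeCompressed())
	p.server.routingMgr.OpenChannel(
		graph.NewID(vertex),
		graph.NewEdgeID(fundingPoint.String()),
		&rt.ChannelInfo{
			Cpt: capacity,
		},
	)
}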
// handleUnregisterLink unregisters a currently active link. If the deletion
// of this link leaves the interface empty, then the interface entry itself is
// also deleted.
func (h *htlcSwitch) handleUnregisterLink(req *unregisterLinkMsg) {
	hswcLog.Infof("unregistering active link, interface=%v, chan_point=%v",
		hex.EncodeToString(req.chanInterface[:]), req.chanPoint)

	chanInterface := req.chanInterface

	h.interfaceMtx.RLock()
	links := h.interfaces[chanInterface]
	h.interfaceMtx.RUnlock()

	// A request with a nil channel point indicates that all the current
	// links for this channel should be cleared.
	chansRemoved := make([]*wire.OutPoint, 0, len(links))
	if req.chanPoint == nil {
		hswcLog.Infof("purging all active links for interface %v",
			hex.EncodeToString(chanInterface[:]))

		for _, link := range links {
			h.chanIndexMtx.Lock()
			delete(h.chanIndex, *link.chanPoint)
			h.chanIndexMtx.Unlock()

			chansRemoved = append(chansRemoved, link.chanPoint)
		}
		links = nil
	} else {
		h.chanIndexMtx.Lock()
		delete(h.chanIndex, *req.chanPoint)
		h.chanIndexMtx.Unlock()

		for i := 0; i < len(links); i++ {
			chanLink := links[i]
			// Compare the outpoints by value rather than by
			// pointer, as the request's channel point won't share
			// an address with the stored link's channel point.
			if *chanLink.chanPoint == *req.chanPoint {
				chansRemoved = append(chansRemoved, req.chanPoint)

				// We perform an in-place delete by sliding
				// every element down one, then slicing off the
				// last element. Additionally, we update the
				// slice reference within the source map to
				// ensure full deletion.
				copy(links[i:], links[i+1:])
				links[len(links)-1] = nil
				h.interfaceMtx.Lock()
				h.interfaces[chanInterface] = links[:len(links)-1]
				h.interfaceMtx.Unlock()

				break
			}
		}
	}

	// Purge the now inactive channels from the routing table.
	// TODO(roasbeef): routing layer should only see the links as a
	// summation of their capacity/etc
	//  * distinction between connection close and channel close
	for _, linkChan := range chansRemoved {
		err := h.router.RemoveChannel(
			graph.NewID(hex.EncodeToString(h.gateway)),
			graph.NewID(hex.EncodeToString(req.remoteID)),
			graph.NewEdgeID(linkChan.String()),
		)
		if err != nil {
			hswcLog.Errorf("unable to remove channel from "+
				"routing table: %v", err)
		}
	}

	// TODO(roasbeef): clean up/modify onion links
	//  * just have the interfaces index be keyed on hash160?

	if len(links) == 0 {
		hswcLog.Infof("interface %v has no active links, destroying",
			hex.EncodeToString(chanInterface[:]))
		h.interfaceMtx.Lock()
		delete(h.interfaces, chanInterface)
		h.interfaceMtx.Unlock()
	}

	if req.done != nil {
		req.done <- struct{}{}
	}
}
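// The link removal above relies on Go's copy-and-truncate idiom for deleting
// an element from a slice in place. A self-contained illustration of the
// technique (removeAt is a hypothetical helper, not used by the switch):
func removeAt(links []*wire.OutPoint, i int) []*wire.OutPoint {
	// Slide every element after index i down by one...
	copy(links[i:], links[i+1:])
	// ...clear the now-duplicated final slot so it can be collected...
	links[len(links)-1] = nil
	// ...and slice off the last element.
	return links[:len(links)-1]
}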