func TestSendPings(t *testing.T) {
	d := newDefaultDaemon()
	defer shutdown(d)
	c := gnetConnection(addr)
	go d.Pool.Pool.ConnectionWriteLoop(c)
	d.Pool.Pool.Pool[1] = c
	assert.NotPanics(t, d.Pool.sendPings)
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 1)
	if len(d.Pool.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-d.Pool.Pool.SendResults
	assert.Equal(t, sr.Connection, c)
	assert.Nil(t, sr.Error)
	_, ok := sr.Message.(*PingMessage)
	assert.True(t, ok)
	assert.False(t, c.LastSent.IsZero())

	// No pings should be sent, since we just pinged
	lastSent := c.LastSent
	assert.NotPanics(t, d.Pool.sendPings)
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 0)
	assert.Equal(t, c.LastSent, lastSent)
}
func TestCacheExtractorRemoteServer(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(testServeCache))
	defer ts.Close()
	defer os.Remove(cacheExtractorArchive)
	defer os.Remove(cacheExtractorTestArchivedFile)
	os.Remove(cacheExtractorArchive)
	os.Remove(cacheExtractorTestArchivedFile)

	helpers.MakeFatalToPanic()

	cmd := CacheExtractorCommand{
		File: cacheExtractorArchive,
		URL:  ts.URL + "/cache.zip",
	}
	assert.NotPanics(t, func() { cmd.Execute(nil) })

	_, err := os.Stat(cacheExtractorTestArchivedFile)
	assert.NoError(t, err)

	os.Chtimes(cacheExtractorArchive, time.Now().Add(time.Hour), time.Now().Add(time.Hour))
	assert.NotPanics(t, func() { cmd.Execute(nil) }, "archive is up to date")
}
// TestMiddleware runs Middleware.HandlerFunc against supported and
// unsupported middleware function signatures.
func TestMiddleware(t *testing.T) {
	assert := assert.New(t)
	mw := Middleware{}

	mw.Func = func() {}
	assert.Panics(func() { mw.HandlerFunc() })
	ProdModeClosure(func() {
		assert.NotPanics(func() { mw.HandlerFunc() })
	})

	mw.Func = func(c *gin.Context) {}
	assert.IsType(*new(gin.HandlerFunc), mw.HandlerFunc())

	mw.Func = func(c Context) {}
	assert.IsType(*new(gin.HandlerFunc), mw.HandlerFunc())
	assert.NotPanics(func() { mw.HandlerFunc()(nil) })

	mw = LoggerMiddleware()
	assert.IsType(*new(gin.HandlerFunc), mw.HandlerFunc())
}
func TestVisorShutdown(t *testing.T) {
	defer cleanupVisor()
	c, _ := setupVisor()
	c.Disabled = true
	c.Config.BlockchainFile = testBlockchainFile
	c.Config.BlockSigsFile = testBlocksigsFile
	c.Config.WalletFile = testWalletFile
	v := NewVisor(c)
	assert.NotPanics(t, v.Shutdown)
	// Should not save anything
	assertFileNotExists(t, testBlockchainFile)
	assertFileNotExists(t, testBlocksigsFile)
	assertFileNotExists(t, testWalletFile)
	cleanupVisor()

	c.Disabled = false
	v = NewVisor(c)
	assert.NotPanics(t, v.Shutdown)
	assertFileExists(t, testBlockchainFile)
	assertFileExists(t, testBlocksigsFile)
	assertFileExists(t, testWalletFile)
	cleanupVisor()

	// If master, no wallet should be saved
	c = setupMasterVisor()
	c.Config.BlockchainFile = testBlockchainFile
	c.Config.BlockSigsFile = testBlocksigsFile
	c.Config.WalletFile = testWalletFile
	v = NewVisor(c)
	assert.NotPanics(t, v.Shutdown)
	assertFileExists(t, testBlockchainFile)
	assertFileExists(t, testBlocksigsFile)
	assertFileNotExists(t, testWalletFile)
	cleanupVisor()
}
func TestConnectToRandomPeer(t *testing.T) {
	d := newDefaultDaemon()
	defer shutdown(d)
	defer cleanupPeers()

	// Disabled
	d.Config.DisableOutgoingConnections = true
	assert.NotPanics(t, d.connectToRandomPeer)
	assert.Equal(t, len(d.pendingConnections), 0)

	// Enabled, but only private peers
	addr := "192.168.1.196:30954"
	p, err := d.Peers.Peers.AddPeer(addr)
	assert.Nil(t, err)
	p.Private = true
	d.Config.DisableOutgoingConnections = false
	assert.NotPanics(t, d.connectToRandomPeer)
	assert.Equal(t, len(d.pendingConnections), 0)

	// Enabled, and we have a public peer
	p.Private = false
	assert.NotPanics(t, d.connectToRandomPeer)
	assert.Equal(t, len(d.pendingConnections), 1)
	assert.NotNil(t, d.pendingConnections[addr])
}
func TestAuthenticationSetUp(t *testing.T) {
	assert.Panics(t, func() { Authentication([]byte("")) })
	assert.NotPanics(t, func() { Authentication(TEST_AUTH_KEY) })

	validTokenText, err := mockLoginToken("10", time.Now().Add(10*time.Second), TEST_AUTH_KEY)
	assert.NoError(t, err)
	handler := Authentication(TEST_AUTH_KEY)

	// Run the handler with a valid token
	c := createGinContext(validTokenText)
	assert.NotPanics(t, func() { handler(c) })
	assert.Empty(t, c.Errors)
	assert.Equal(t, "10", c.MustGet(AuthUserIDKey))
	assert.IsType(t, &jwt.Token{}, c.MustGet(AuthTokenKey))

	// Run the handler with an invalid token.
	// There is no easy way to create a gin writer that properly detects the response.
	// Go will panic when Authentication tries to add an error to an empty Writer object,
	// so we can assume that validation failed and the user is not allowed through :)
	c = createGinContext("RANDOM TOKEN TEXT")
	assert.Panics(t, func() { handler(c) })
}
func TestVisorBroadcastBlock(t *testing.T) {
	defer cleanupVisor()
	defer gnet.EraseMessages()
	p, gc := setupPool()
	vc, _ := setupVisor()
	vc.Disabled = true
	v := NewVisor(vc)

	// Should not send anything if disabled
	assert.NotPanics(t, func() { v.broadcastBlock(visor.SignedBlock{}, p) })
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	// Sending
	gc.Conn = NewDummyConn(addr)
	vc.Disabled = false
	v = NewVisor(vc)
	sb := v.Visor.GetGenesisBlock()
	assert.NotPanics(t, func() { v.broadcastBlock(sb, p) })
	go p.Pool.ConnectionWriteLoop(gc)
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Equal(t, sr.Connection, gc)
	_, ok := sr.Message.(*GiveBlocksMessage)
	assert.True(t, ok)
	assert.Nil(t, sr.Error)
	assert.False(t, gc.LastSent.IsZero())
}
func TestVisorBroadcastTransaction(t *testing.T) {
	defer cleanupVisor()
	defer gnet.EraseMessages()
	p, gc := setupPool()
	go p.Pool.ConnectionWriteLoop(gc)
	vc, _ := setupVisor()
	vc.Disabled = true
	v := NewVisor(vc)
	ut := createUnconfirmedTxn()
	assert.NotPanics(t, func() { v.broadcastTransaction(ut.Txn, p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	// Sending
	vc.Disabled = false
	gc.Conn = NewDummyConn(addr)
	v = NewVisor(vc)
	assert.NotPanics(t, func() { v.broadcastTransaction(ut.Txn, p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Nil(t, sr.Error)
	assert.Equal(t, sr.Connection, gc)
	_, ok := sr.Message.(*GiveTxnsMessage)
	assert.True(t, ok)
	assert.False(t, gc.LastSent.IsZero())
}
func TestInitPeers(t *testing.T) {
	defer cleanupPeers()
	c := NewPeersConfig()
	peers := NewPeers(c)

	// Write dummy peer db
	fn := "./" + pex.PeerDatabaseFilename
	cleanupPeers()
	f, err := os.Create(fn)
	assert.Nil(t, err)
	if err != nil {
		t.Fatalf("Error creating %s", fn)
	}
	_, err = f.Write([]byte(addr + " 0 0\n"))
	assert.Nil(t, err)
	f.Close()

	peers.Config.DataDirectory = "./"
	assert.NotPanics(t, func() { peers.Init() })
	assert.Equal(t, len(peers.Peers.Peerlist), 1)
	assert.NotNil(t, peers.Peers.Peerlist[addr])
	assert.False(t, peers.Peers.AllowLocalhost)

	peers.Config.AllowLocalhost = true
	assert.NotPanics(t, func() { peers.Init() })
	assert.True(t, peers.Peers.AllowLocalhost)
}
func TestVisorAnnounceBlocks(t *testing.T) {
	defer cleanupVisor()
	defer gnet.EraseMessages()
	p, gc := setupPool()
	vc, _ := setupVisor()
	go p.Pool.ConnectionWriteLoop(gc)

	// Disabled
	vc.Disabled = true
	v := NewVisor(vc)
	assert.NotPanics(t, func() { v.AnnounceBlocks(p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	// Valid send
	vc.Disabled = false
	gc.Conn = NewDummyConn(addr)
	v = NewVisor(vc)
	assert.False(t, v.Config.Disabled)
	assert.NotPanics(t, func() { v.AnnounceBlocks(p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Nil(t, sr.Error)
	assert.Equal(t, sr.Connection, gc)
	_, ok := sr.Message.(*AnnounceBlocksMessage)
	assert.True(t, ok)
	assert.False(t, gc.LastSent.IsZero())
}
func TestOnDisconnect(t *testing.T) {
	gnet.EraseMessages()
	d := newDefaultDaemon()
	c := gnetConnection(addr)
	var mirror uint32 = 100

	// Not blacklistable
	reason := gnet.DisconnectWriteFailed
	setupTestOnDisconnect(d, c, mirror)
	assert.NotPanics(t, func() { d.onGnetDisconnect(c, reason) })
	// Should not be in blacklist
	assert.Equal(t, len(d.Peers.Peers.Blacklist), 0)
	// Should no longer be in OutgoingConnections
	assert.Equal(t, len(d.OutgoingConnections), 0)
	// Should no longer be in d.ExpectingIntroductions
	assert.Equal(t, len(d.ExpectingIntroductions), 0)
	// Should be removed from the mirror, and the mirror dict for this ip
	// should be removed
	assert.Equal(t, len(d.mirrorConnections), 0)
	assert.Equal(t, len(d.ConnectionMirrors), 0)

	// Blacklistable
	reason = DisconnectIntroductionTimeout
	setupTestOnDisconnect(d, c, mirror)
	assert.NotPanics(t, func() { d.onGnetDisconnect(c, reason) })
	// Should be in blacklist
	assert.Equal(t, len(d.Peers.Peers.Blacklist), 1)
	assert.NotNil(t, d.Peers.Peers.Blacklist[addr])
	// Should no longer be in OutgoingConnections
	assert.Equal(t, len(d.OutgoingConnections), 0)
	// Should no longer be in d.ExpectingIntroductions
	assert.Equal(t, len(d.ExpectingIntroductions), 0)
	// Should be removed from the mirror, and the mirror dict for this ip
	// should be removed
	assert.Equal(t, len(d.mirrorConnections), 0)
	assert.Equal(t, len(d.ConnectionMirrors), 0)
	// Cleanup
	delete(d.Peers.Peers.Blacklist, addr)

	// d.mirrorConnections should retain a submap if there are other ports
	// inside
	reason = gnet.DisconnectWriteFailed
	setupTestOnDisconnect(d, c, mirror)
	d.mirrorConnections[mirror][strings.Split(addrb, ":")[0]] = addrPort
	assert.NotPanics(t, func() { d.onGnetDisconnect(c, reason) })
	// Should not be in blacklist
	assert.Equal(t, len(d.Peers.Peers.Blacklist), 0)
	// Should no longer be in OutgoingConnections
	assert.Equal(t, len(d.OutgoingConnections), 0)
	// Should no longer be in d.ExpectingIntroductions
	assert.Equal(t, len(d.ExpectingIntroductions), 0)
	// This ip's port should be removed from the mirror dict, but the dict
	// itself should remain since another port is still in it
	assert.Equal(t, len(d.mirrorConnections), 1)
	assert.Equal(t, len(d.mirrorConnections[mirror]), 1)
	assert.Equal(t, len(d.ConnectionMirrors), 0)

	shutdown(d)
}
func TestGetPeersMessage(t *testing.T) {
	d := newDefaultDaemon()
	defer shutdown(d)
	p := d.Pool
	m := NewGetPeersMessage()
	testSimpleMessageHandler(t, d, m)

	d.Peers.Peers.AddPeer(addr)
	q, err := d.Peers.Peers.AddPeer(addrb)
	assert.Nil(t, err)
	q.Private = true
	d.Peers.Config.ReplyCount = 100
	m.c = messageContext(addr)

	// Peers disabled
	d.Peers.Config.Disabled = true
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, m.c.Conn.LastSent.IsZero())

	// Peers enabled
	d.Peers.Config.Disabled = false
	m.c = messageContext(addr)
	defer m.c.Conn.Close()
	go p.Pool.ConnectionWriteLoop(m.c.Conn)
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Nil(t, sr.Error)
	assert.Equal(t, sr.Connection, m.c.Conn)
	msg, ok := sr.Message.(*GivePeersMessage)
	assert.True(t, ok)
	// Private peer should not be included
	ipAddr, err := NewIPAddr(addr)
	assert.Nil(t, err)
	assert.Equal(t, msg.Peers, []IPAddr{ipAddr})
	assert.False(t, m.c.Conn.LastSent.IsZero())

	// If no peers, nothing should happen
	m.c.Conn.LastSent = util.ZeroTime()
	delete(d.Peers.Peers.Peerlist, addr)
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, m.c.Conn.LastSent.IsZero())

	// Test serialization
	m = NewGetPeersMessage()
	b := encoder.Serialize(m)
	m2 := GetPeersMessage{}
	assert.Nil(t, encoder.DeserializeRaw(b, &m2))
	assert.Equal(t, *m, m2)

	gnet.EraseMessages()
}
func TestMustVersionByName(t *testing.T) {
	assert.Panics(t, func() { SetAddressVersion("bad") })
	for k := range addressVersions {
		assert.NotPanics(t, func() { SetAddressVersion(k) })
	}
	assert.NotPanics(t, func() { SetAddressVersion("main") })
	assert.NotPanics(t, func() { SetAddressVersion("test") })
}
func TestSingleUse(t *testing.T) {
	var s singleUse
	assert.NotPanics(t, func() { s.ensureUnused("foo") })
	assert.NotPanics(t, func() { s.use("foo") })
	assert.Panics(t, func() { s.ensureUnused("foo") })
	assert.Panics(t, func() { s.use("foo") })
}
func TestVisorSpend(t *testing.T) {
	defer cleanupVisor()
	defer gnet.EraseMessages()
	p, gc := setupPool()
	go p.Pool.ConnectionWriteLoop(gc)
	vc, mv := setupVisor()
	vc.Disabled = true
	v := NewVisor(vc)

	// Spending while disabled
	assert.NotPanics(t, func() {
		_, err := v.Spend(visor.Balance{10e6, 0}, 0, mv.Wallet.GetAddresses()[0], p)
		assert.NotNil(t, err)
		assert.Equal(t, err.Error(), "Visor disabled")
		wait()
		assert.Equal(t, len(p.Pool.SendResults), 0)
		assert.True(t, gc.LastSent.IsZero())
	})

	// Spending but spend fails (no money)
	vc.Disabled = false
	v = NewVisor(vc)
	assert.NotPanics(t, func() {
		_, err := v.Spend(visor.Balance{1000 * 10e6, 0}, 0, mv.Wallet.GetAddresses()[0], p)
		wait()
		assert.NotNil(t, err)
		assert.Equal(t, len(p.Pool.SendResults), 0)
		assert.Equal(t, len(v.Visor.Unconfirmed.Txns), 0)
		assert.True(t, gc.LastSent.IsZero())
	})

	// Spending succeeds, and announced
	vc, mv = setupVisor()
	vc.Disabled = false
	gc.Conn = NewDummyConn(addr)
	v = NewVisor(vc)
	assert.Nil(t, transferCoins(mv, v.Visor))
	assert.NotPanics(t, func() {
		_, err := v.Spend(visor.Balance{10e6, 0}, 0, mv.Wallet.GetAddresses()[0], p)
		wait()
		assert.Equal(t, len(p.Pool.SendResults), 1)
		if len(p.Pool.SendResults) == 0 {
			t.Fatal("SendResults empty, would block")
		}
		sr := <-p.Pool.SendResults
		assert.Equal(t, sr.Connection, gc)
		assert.Nil(t, sr.Error)
		_, ok := sr.Message.(*GiveTxnsMessage)
		assert.True(t, ok)
		assert.Nil(t, err)
		assert.Equal(t, len(v.Visor.Unconfirmed.Txns), 1)
		assert.False(t, gc.LastSent.IsZero())
	})
}
func TestRecover(t *testing.T) {
	var err error

	// Nil type, should recover
	assert.NotPanics(t, func() {
		defer Recover(&err)
		panic(nil)
	})
	assert.Equal(t, nil, err)

	// Error type, should recover
	assert.NotPanics(t, func() {
		defer Recover(&err)
		panic(io.EOF)
	})
	assert.Equal(t, io.EOF, err)

	// Non error type, should panic
	assert.Panics(t, func() {
		defer Recover(&err)
		panic(5)
	})

	// Runtime error, should panic
	assert.Panics(t, func() {
		defer Recover(&err)
		var s string = "abc"
		_ = s[100]
	})

	// Runtime error, should panic
	assert.Panics(t, func() {
		defer Recover(&err)
		var s interface{} = "abc"
		_ = s.(int)
	})

	// Runtime error, should panic
	assert.Panics(t, func() {
		defer Recover(&err)
		cnt := -1
		_ = make([]byte, cnt)
	})

	// Non-error, should panic
	assert.Panics(t, func() {
		defer Recover(&err)
		panic((*int)(nil))
	})

	// Non-error, should panic
	assert.Panics(t, func() {
		defer Recover(&err)
		panic(([]byte)(nil))
	})
}
func TestRecordBlockchainLength(t *testing.T) {
	defer cleanupVisor()
	vc, _ := setupVisor()
	v := NewVisor(vc)
	assert.NotPanics(t, func() { v.recordBlockchainLength(addr, uint64(6)) })
	assert.Equal(t, v.blockchainLengths[addr], uint64(6))
	v.blockchainLengths[addr] = uint64(7)
	assert.NotPanics(t, func() { v.recordBlockchainLength(addr, uint64(5)) })
	assert.Equal(t, v.blockchainLengths[addr], uint64(5))
}
func TestContextRenderRedirectAll(t *testing.T) {
	c, _, _ := CreateTestContext()
	c.Request, _ = http.NewRequest("POST", "http://example.com", nil)
	assert.Panics(t, func() { c.Redirect(200, "/resource") })
	assert.Panics(t, func() { c.Redirect(202, "/resource") })
	assert.Panics(t, func() { c.Redirect(299, "/resource") })
	assert.Panics(t, func() { c.Redirect(309, "/resource") })
	assert.NotPanics(t, func() { c.Redirect(300, "/resource") })
	assert.NotPanics(t, func() { c.Redirect(308, "/resource") })
}
func TestConfigPreprocess(t *testing.T) {
	c := NewConfig()
	a := "127.0.0.1"
	p := 7779

	// Test that addr, port are copied to subconfigs
	c.Daemon.Port = p
	c.Daemon.Address = a
	d := c.preprocess()
	assert.Equal(t, d.Pool.port, p)
	assert.Equal(t, d.Pool.address, a)
	assert.Equal(t, d.DHT.port, p)

	// Test localhost only with localhost addr
	c = NewConfig()
	c.Daemon.LocalhostOnly = true
	c.Daemon.Address = a
	assert.NotPanics(t, func() { c.preprocess() })
	d = c.preprocess()
	assert.True(t, d.DHT.Disabled)
	assert.Equal(t, d.Pool.address, a)
	assert.True(t, d.Peers.AllowLocalhost)

	// Test localhost only with unassigned addr
	c = NewConfig()
	c.Daemon.LocalhostOnly = true
	c.Daemon.Address = ""
	assert.NotPanics(t, func() { c.preprocess() })
	d = c.preprocess()
	assert.True(t, IsLocalhost(d.Daemon.Address))
	assert.True(t, IsLocalhost(d.Pool.address))
	assert.True(t, d.Peers.AllowLocalhost)

	// Test localhost only with nonlocal addr
	c = NewConfig()
	c.Daemon.LocalhostOnly = true
	c.Daemon.Address = "11.22.33.44"
	assert.Panics(t, func() { c.preprocess() })

	// Test disable networking disables all
	c = NewConfig()
	c.Daemon.DisableNetworking = true
	d = c.preprocess()
	assert.True(t, d.Daemon.DisableNetworking)
	assert.True(t, d.Daemon.DisableOutgoingConnections)
	assert.True(t, d.Daemon.DisableIncomingConnections)
	assert.True(t, d.DHT.Disabled)
	assert.True(t, d.Peers.Disabled)

	// Test coverage for logging statements
	c = NewConfig()
	c.Daemon.DisableNetworking = false
	c.Daemon.DisableIncomingConnections = true
	c.Daemon.DisableOutgoingConnections = true
	assert.NotPanics(t, func() { c.preprocess() })
}
// Should be able to close repeatedly without panic.
func TestTarFileIteratorClose(t *testing.T) {
	_, filename, _, _ := runtime.Caller(0)
	tarFilePath, _ := filepath.Abs(path.Join(filepath.Dir(filename),
		"..", "..", "testdata", "unit_test_bags", "example.edu.tagsample_good.tar"))
	tfi, _ := fileutil.NewTarFileIterator(tarFilePath)
	if tfi == nil {
		assert.Fail(t, "Could not get TarFileIterator")
	}
	assert.NotPanics(t, tfi.Close, "TarFileIterator.Close() freaked out")
	assert.NotPanics(t, tfi.Close, "TarFileIterator.Close() freaked out")
}
func TestRequestDHTPeers(t *testing.T) {
	d := NewDHT(NewDHTConfig())
	assert.Panics(t, d.RequestPeers)
	e := d.Init()
	assert.Nil(t, e)
	assert.NotPanics(t, d.RequestPeers)
	wait()
	d.Config.Disabled = true
	assert.NotPanics(t, d.RequestPeers)
	wait()
	d.Shutdown()
	wait()
}
func TestVisorBroadcastOurTransactions(t *testing.T) {
	defer cleanupVisor()
	defer gnet.EraseMessages()
	p, gc := setupPool()
	vc, _ := setupVisor()
	go p.Pool.ConnectionWriteLoop(gc)

	// Disabled
	vc.Disabled = true
	v := NewVisor(vc)
	assert.NotPanics(t, func() { v.BroadcastOurTransactions(p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	// With no transactions, nothing should be sent
	vc.Disabled = false
	vc.TransactionRebroadcastRate = time.Millisecond * 5
	v = NewVisor(vc)
	time.Sleep(time.Millisecond * 20)
	assert.NotPanics(t, func() { v.BroadcastOurTransactions(p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	// We have a stale owned unconfirmed txn, it should be sent
	gc.Conn = NewDummyConn(addr)
	vc.Disabled = false
	v = NewVisor(vc)
	tx := addUnconfirmedTxn(v)
	assert.Equal(t, len(v.Visor.Unconfirmed.Txns), 1)
	assert.NotPanics(t, func() { v.BroadcastOurTransactions(p) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Nil(t, sr.Error)
	assert.Equal(t, sr.Connection, gc)
	msg, ok := sr.Message.(*AnnounceTxnsMessage)
	assert.True(t, ok)
	assert.Equal(t, msg.Txns, coin.Transactions{tx.Txn}.Hashes())
	assert.False(t, gc.LastSent.IsZero())
}
func TestAnnounceBlocksMessageProcess(t *testing.T) {
	v, mv := setupVisor()
	d, _ := newVisorDaemon(v)
	defer shutdown(d)
	p := d.Pool
	gc := setupExistingPool(p)
	go p.Pool.ConnectionWriteLoop(gc)
	defer gc.Close()
	assert.Nil(t, transferCoins(mv, d.Visor.Visor))
	assert.Equal(t, d.Visor.Visor.MostRecentBkSeq(), uint64(1))

	// Disabled, nothing should happen
	d.Visor.Config.Disabled = true
	m := NewAnnounceBlocksMessage(uint64(2))
	m.c = messageContext(addr)
	defer m.c.Conn.Close()
	go p.Pool.ConnectionWriteLoop(m.c.Conn)
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, m.c.Conn.LastSent.IsZero())
	assert.True(t, gc.LastSent.IsZero())

	// We know all the blocks
	d.Visor.Config.Disabled = false
	m.MaxBkSeq = uint64(1)
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, m.c.Conn.LastSent.IsZero())
	assert.True(t, gc.LastSent.IsZero())

	// We send a GetBlocksMessage in response to a higher MaxBkSeq
	m.MaxBkSeq = uint64(7)
	assert.False(t, d.Visor.Visor.MostRecentBkSeq() >= m.MaxBkSeq)
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Nil(t, sr.Error)
	assert.Equal(t, sr.Connection, m.c.Conn)
	_, ok := sr.Message.(*GetBlocksMessage)
	assert.True(t, ok)
	assert.False(t, m.c.Conn.LastSent.IsZero())
	assert.True(t, gc.LastSent.IsZero())
}
func TestAnnounceTxnsMessageProcess(t *testing.T) {
	v, _ := setupVisor()
	d, _ := newVisorDaemon(v)
	defer shutdown(d)
	gc := setupExistingPool(d.Pool)
	go d.Pool.Pool.ConnectionWriteLoop(gc)
	tx := createUnconfirmedTxn()
	txns := []coin.SHA256{tx.Txn.Hash()}
	m := NewAnnounceTxnsMessage(txns)
	m.c = messageContext(addr)
	go d.Pool.Pool.ConnectionWriteLoop(m.c.Conn)
	defer m.c.Conn.Close()

	// Disabled, nothing should happen
	d.Visor.Config.Disabled = true
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 0)
	assert.True(t, m.c.Conn.LastSent.IsZero())
	assert.True(t, gc.LastSent.IsZero())

	// We don't know some, request them
	d.Visor.Config.Disabled = false
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 1)
	if len(d.Pool.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-d.Pool.Pool.SendResults
	assert.Equal(t, sr.Connection, m.c.Conn)
	assert.Nil(t, sr.Error)
	_, ok := sr.Message.(*GetTxnsMessage)
	assert.True(t, ok)
	assert.False(t, m.c.Conn.LastSent.IsZero())
	// Should not have been broadcast
	assert.True(t, gc.LastSent.IsZero())

	// We know all the reported txns, nothing should be sent
	d.Visor.Visor.Unconfirmed.Txns[tx.Txn.Hash()] = tx
	m.c.Conn.Conn = NewDummyConn(addr)
	m.c.Conn.LastSent = util.ZeroTime()
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 0)
	assert.True(t, m.c.Conn.LastSent.IsZero())
	assert.True(t, gc.LastSent.IsZero())
}
func TestRemoveConnectionMirror(t *testing.T) {
	d := newDefaultDaemon()

	// No recorded addr should be noop
	assert.Equal(t, len(d.ConnectionMirrors), 0)
	assert.Equal(t, len(d.mirrorConnections), 0)
	assert.NotPanics(t, func() { d.removeConnectionMirror(addr) })
	assert.Equal(t, len(d.ConnectionMirrors), 0)
	assert.Equal(t, len(d.mirrorConnections), 0)

	// Invalid addr should be rejected
	d.ConnectionMirrors["xcasca"] = d.Messages.Mirror
	d.mirrorConnections[d.Messages.Mirror] = make(map[string]uint16)
	d.mirrorConnections[d.Messages.Mirror]["xcasca"] = 0
	assert.NotPanics(t, func() { d.removeConnectionMirror("xcasca") })
	assert.Equal(t, len(d.ConnectionMirrors), 1)
	assert.Equal(t, len(d.mirrorConnections), 1)
	assert.Equal(t, len(d.mirrorConnections[d.Messages.Mirror]), 1)
	delete(d.mirrorConnections, d.Messages.Mirror)
	delete(d.ConnectionMirrors, "xcasca")

	// With no connectionMirror recorded, we can't clean up the
	// d.mirrorConnections
	d.mirrorConnections[d.Messages.Mirror] = make(map[string]uint16)
	d.mirrorConnections[d.Messages.Mirror][addrIP] = addrPort
	assert.NotPanics(t, func() { d.removeConnectionMirror(addr) })
	assert.Equal(t, len(d.ConnectionMirrors), 0)
	assert.Equal(t, len(d.mirrorConnections), 1)
	assert.Equal(t, len(d.mirrorConnections[d.Messages.Mirror]), 1)

	// Should clean up if all valid
	d.ConnectionMirrors[addr] = d.Messages.Mirror
	assert.NotPanics(t, func() { d.removeConnectionMirror(addr) })
	assert.Equal(t, len(d.ConnectionMirrors), 0)
	assert.Equal(t, len(d.mirrorConnections), 0)

	// Cleaning up should leave d.mirrorConnections[addr] intact if multiple
	d.ConnectionMirrors[addr] = d.Messages.Mirror
	d.mirrorConnections[d.Messages.Mirror] = make(map[string]uint16)
	d.mirrorConnections[d.Messages.Mirror][addrIP] = addrPort
	d.mirrorConnections[d.Messages.Mirror][addrbIP] = addrbPort
	assert.NotPanics(t, func() { d.removeConnectionMirror(addr) })
	assert.Equal(t, len(d.ConnectionMirrors), 0)
	assert.Equal(t, len(d.mirrorConnections), 1)
	assert.Equal(t, d.mirrorConnections[d.Messages.Mirror][addrbIP], addrbPort)
	delete(d.mirrorConnections, d.Messages.Mirror)

	shutdown(d)
}
func TestGiveTxnsMessageProcess(t *testing.T) {
	v, mv := setupVisor()
	d, _ := newVisorDaemon(v)
	defer shutdown(d)
	gc := setupExistingPool(d.Pool)
	go d.Pool.Pool.ConnectionWriteLoop(gc)
	utx := createUnconfirmedTxn()
	txns := coin.Transactions{utx.Txn}
	m := NewGiveTxnsMessage(txns)
	m.c = messageContext(addr)

	// No valid txns, nothing should be sent
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(mv.Unconfirmed.Txns), 0)
	assert.Equal(t, len(d.Pool.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	// Disabled, nothing should happen
	tx, err := makeValidTxn(mv)
	assert.Nil(t, err)
	m.Txns = coin.Transactions{tx}
	d.Visor.Config.Disabled = true
	assert.NotPanics(t, func() { m.Process(d) })
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 0)
	assert.Equal(t, len(mv.Unconfirmed.Txns), 0)
	assert.True(t, gc.LastSent.IsZero())

	// A valid txn, we should broadcast. Txn's announce state should be updated
	d.Visor.Config.Disabled = false
	assert.True(t, gc.LastSent.IsZero())
	assert.NotPanics(t, func() { m.Process(d) })
	assert.Equal(t, len(d.Visor.Visor.Unconfirmed.Txns), 1)
	wait()
	assert.Equal(t, len(d.Pool.Pool.SendResults), 1)
	if len(d.Pool.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-d.Pool.Pool.SendResults
	assert.Equal(t, sr.Connection, gc)
	_, ok := sr.Message.(*AnnounceTxnsMessage)
	assert.True(t, ok)
	assert.Nil(t, err)
	assert.False(t, gc.LastSent.IsZero())
	_, ok = d.Visor.Visor.Unconfirmed.Txns[tx.Hash()]
	assert.True(t, ok)
}
func TestVisorRequestBlocksFromAddr(t *testing.T) {
	defer cleanupVisor()
	defer gnet.EraseMessages()
	p, gc := setupPool()
	vc, _ := setupVisor()
	go p.Pool.ConnectionWriteLoop(gc)

	// Disabled
	vc.Disabled = true
	v := NewVisor(vc)
	assert.NotPanics(t, func() {
		err := v.RequestBlocksFromAddr(p, addr)
		assert.NotNil(t, err)
		assert.Equal(t, err.Error(), "Visor disabled")
	})
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())

	vc.Disabled = false
	v = NewVisor(vc)
	assert.NotPanics(t, func() {
		assert.Nil(t, v.RequestBlocksFromAddr(p, addr))
	})
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 1)
	if len(p.Pool.SendResults) == 0 {
		t.Fatal("SendResults empty, would block")
	}
	sr := <-p.Pool.SendResults
	assert.Nil(t, sr.Error)
	assert.Equal(t, sr.Connection, gc)
	_, ok := sr.Message.(*GetBlocksMessage)
	assert.True(t, ok)
	assert.False(t, gc.LastSent.IsZero())

	// No connection found for addr
	gc.LastSent = util.ZeroTime()
	gc.Conn = NewDummyConn(addr)
	delete(p.Pool.Pool, gc.Id)
	delete(p.Pool.Addresses, gc.Addr())
	assert.NotPanics(t, func() {
		assert.NotNil(t, v.RequestBlocksFromAddr(p, addr))
	})
	wait()
	assert.Equal(t, len(p.Pool.SendResults), 0)
	assert.True(t, gc.LastSent.IsZero())
}
func TestCullInvalidConnections(t *testing.T) {
	d := newDefaultDaemon()
	// Is fine
	d.ExpectingIntroductions[addr] = time.Now()
	// Is expired
	d.ExpectingIntroductions[addrb] = util.ZeroTime()
	// Is not in pool
	d.ExpectingIntroductions[addrc] = util.ZeroTime()
	d.Peers.Peers.AddPeer(addr)
	d.Peers.Peers.AddPeer(addrb)
	d.Peers.Peers.AddPeer(addrc)
	d.Pool.Pool.Addresses[addr] = gnetConnection(addr)
	d.Pool.Pool.Addresses[addrb] = gnetConnection(addrb)
	d.Pool.Pool.Addresses[addrb].Id = 2
	d.Pool.Pool.Pool[1] = d.Pool.Pool.Addresses[addr]
	d.Pool.Pool.Pool[2] = d.Pool.Pool.Addresses[addrb]

	assert.NotPanics(t, d.cullInvalidConnections)

	assert.Equal(t, len(d.ExpectingIntroductions), 1)
	assert.Equal(t, len(d.Peers.Peers.Peerlist), 2)
	assert.Equal(t, len(d.Pool.Pool.DisconnectQueue), 1)
	if len(d.Pool.Pool.DisconnectQueue) == 0 {
		t.Fatal("pool.Pool.DisconnectQueue empty, would block")
	}
	de := <-d.Pool.Pool.DisconnectQueue
	assert.Equal(t, de.ConnId, 2)
	assert.Equal(t, de.Reason, DisconnectIntroductionTimeout)

	shutdown(d)
}
func (s *ParseVcfLineSuite) TestAlternateFormatOptionalField() {
	var result []*Variant
	var err error
	assert.NotPanics(s.T(), func() {
		result, err = parseVcfLine("1\t847491\trs28407778\tG\tA\t745.77\tPASS\tSB=strong;AA\tGT:AD:DP:GQ:PL\t0/1:16,25:41:99:774,0,434", defaultHeader)
	})
	assert.NoError(s.T(), err, "Valid VCF line should not return error")
	assert.NotNil(s.T(), result, "Valid VCF line should not return nil")

	info := result[0].Info
	assert.NotNil(s.T(), info, "Valid VCF should contain info map")
	assert.Exactly(s.T(), len(info), 2, "Info should contain 2 keys")

	sb, ok := info["SB"]
	assert.True(s.T(), ok, "SB key must be found")
	assert.Equal(s.T(), sb, "strong")

	aa, ok := info["AA"]
	assert.True(s.T(), ok, "AA key must be found")
	boolaa, isbool := aa.(bool)
	assert.True(s.T(), isbool, "AA value must be a boolean")
	assert.True(s.T(), boolaa)
}
func TestVerifyMessages(t *testing.T) {
	// VerifyMessages either no-ops or panics. Make sure it doesn't panic.
	EraseMessages()
	RegisterMessage(DummyPrefix, DummyMessage{})
	RegisterMessage(ErrorPrefix, ErrorMessage{})
	assert.NotPanics(t, VerifyMessages)
}