func Test_MessagePartition_forConcurrentWriteAndReads(t *testing.T) {
	testutil.SkipIfShort(t)
	// testutil.PprofDebug()
	a := assert.New(t)

	dir, err := ioutil.TempDir("", "guble_partition_store_test")
	a.NoError(err)
	defer os.RemoveAll(dir)

	store, err := newMessagePartition(dir, "myMessages")
	a.NoError(err)

	n := 2000 * 100
	nReaders := 7

	writerDone := make(chan bool)
	go messagePartitionWriter(a, store, n, writerDone)

	readerDone := make(chan bool)
	for i := 1; i <= nReaders; i++ {
		go messagePartitionReader("reader"+strconv.Itoa(i), a, store, n, readerDone)
	}

	select {
	case <-writerDone:
	case <-time.After(time.Second * 30):
		a.Fail("writer timed out")
	}

	timeout := time.After(time.Second * 30)
	for i := 0; i < nReaders; i++ {
		select {
		case <-readerDone:
		case <-timeout:
			a.Fail("reader timed out")
		}
	}
}
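// messagePartitionWriter and messagePartitionReader are defined elsewhere in this
// package. As a minimal sketch of their assumed contract (hypothetical names and
// method signatures, shown here for illustration only): the writer stores n
// sequentially-numbered messages and then signals done, while each reader keeps
// fetching until it has seen the message with ID n, then signals done as well.
//
//	func exampleMessagePartitionWriter(a *assert.Assertions, store *messagePartition, n int, done chan bool) {
//		for i := 1; i <= n; i++ {
//			// assumption: the partition exposes Store(msgID uint64, data []byte) error
//			a.NoError(store.Store(uint64(i), []byte("Hello "+strconv.Itoa(i))))
//		}
//		done <- true
//	}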
func Test_NoReceiving_After_Unsubscribe(t *testing.T) {
	testutil.SkipIfDisabled(t)
	testutil.SkipIfShort(t)
	a := assert.New(t)

	node1 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8086",
		NodeID:     1,
		NodePort:   20006,
		Remotes:    "localhost:20006",
	})
	a.NotNil(node1)
	defer node1.cleanup(true)

	node2 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8087",
		NodeID:     2,
		NodePort:   20007,
		Remotes:    "localhost:20006",
	})
	a.NotNil(node2)
	defer node2.cleanup(true)

	node1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)
	node2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)

	// subscribe on the first node
	node1.Subscribe(testTopic, "1")
	time.Sleep(50 * time.Millisecond)

	// connect a client and send a message
	client1, err := node1.client("user1", 1000, true)
	a.NoError(err)
	err = client1.Send(testTopic, "body", "{jsonHeader:1}")
	a.NoError(err)

	// exactly one message should be received, and only on the first node:
	// every message should be delivered only once.
	node1.FCM.checkReceived(1)
	node2.FCM.checkReceived(0)

	// unsubscribe through the second node
	node2.Unsubscribe(testTopic, "1")
	time.Sleep(50 * time.Millisecond)

	// reset the counter
	node1.FCM.reset()

	// and send a message again: after the unsubscribe, no node should receive it
	err = client1.Send(testTopic, "body", "{jsonHeader:1}")
	a.NoError(err)
	node1.FCM.checkReceived(0)
	node2.FCM.checkReceived(0)
}
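// checkReceived and reset are defined on the FCM test mock elsewhere in this
// package. The assumed semantics (a sketch with hypothetical field names, not the
// actual implementation): checkReceived waits briefly so asynchronous delivery can
// settle, then asserts how many requests the mocked FCM round-tripper captured;
// reset clears that counter.
//
//	func (f *fcmMock) checkReceived(expected int) {
//		time.Sleep(100 * time.Millisecond) // allow in-flight deliveries to complete
//		f.a.Equal(expected, f.receivedCount())
//	}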
func Test_Subscribe_on_random_node(t *testing.T) {
	testutil.SkipIfShort(t)
	a := assert.New(t)

	node1 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8080",
		NodeID:     1,
		NodePort:   20000,
		Remotes:    "localhost:20000",
	})
	a.NotNil(node1)
	defer node1.cleanup(true)

	node2 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8081",
		NodeID:     2,
		NodePort:   20001,
		Remotes:    "localhost:20000",
	})
	a.NotNil(node2)
	defer node2.cleanup(true)

	node1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)
	node2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)

	// subscribe on the first node
	node1.Subscribe(testTopic, "1")

	// connect a client and send a message
	client1, err := node1.client("user1", 1000, true)
	a.NoError(err)
	err = client1.Send(testTopic, "body", "{jsonHeader:1}")
	a.NoError(err)

	// only one message should be received, and only on the first node:
	// every message should be delivered only once.
	node1.FCM.checkReceived(1)
	node2.FCM.checkReceived(0)
}
func Test_Cluster_Subscribe_To_Random_Node(t *testing.T) {
	testutil.SkipIfShort(t)
	a := assert.New(t)

	node1 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8090",
		NodeID:     1,
		NodePort:   11000,
		Remotes:    "localhost:11000",
	})
	a.NotNil(node1)
	defer node1.cleanup(true)

	node2 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8091",
		NodeID:     2,
		NodePort:   11001,
		Remotes:    "localhost:11000",
	})
	a.NotNil(node2)
	defer node2.cleanup(true)

	client1, err := node1.client("user1", 10, true)
	a.NoError(err)
	err = client1.Subscribe("/foo/bar")
	a.NoError(err, "Subscribe to first node should work")
	client1.Close()

	time.Sleep(50 * time.Millisecond)

	client1, err = node2.client("user1", 10, true)
	a.NoError(err, "Connection to second node should return no error")
	err = client1.Subscribe("/foo/bar")
	a.NoError(err, "Subscribe to second node should work")
	client1.Close()
}
func Test_Cluster_Integration(t *testing.T) {
	testutil.SkipIfShort(t)
	defer testutil.ResetDefaultRegistryHealthCheck()
	a := assert.New(t)

	node1 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8092",
		NodeID:     1,
		NodePort:   11002,
		Remotes:    "localhost:11002",
	})
	a.NotNil(node1)
	defer node1.cleanup(true)

	node2 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8093",
		NodeID:     2,
		NodePort:   11003,
		Remotes:    "localhost:11002",
	})
	a.NotNil(node2)
	defer node2.cleanup(true)

	client1, err := node1.client("user1", 10, false)
	a.NoError(err)

	client2, err := node2.client("user2", 10, false)
	a.NoError(err)

	err = client2.Subscribe("/testTopic/m")
	a.NoError(err)

	client3, err := node1.client("user3", 10, false)
	a.NoError(err)

	// each loop iteration sends two messages (one from client1, one from client3),
	// so 2*numSent messages are sent in total
	numSent := 3
	for i := 0; i < numSent; i++ {
		err := client1.Send("/testTopic/m", "body", "{jsonHeader:1}")
		a.NoError(err)
		err = client3.Send("/testTopic/m", "body", "{jsonHeader:4}")
		a.NoError(err)
	}

	breakTimer := time.After(3 * time.Second)
	numReceived := 0
	idReceived := make(map[uint64]bool)

	// see if the correct number of messages arrives at the other client, before the timeout is reached
WAIT:
	for {
		select {
		case incomingMessage := <-client2.Messages():
			numReceived++
			logger.WithFields(log.Fields{
				"nodeID":            incomingMessage.NodeID,
				"path":              incomingMessage.Path,
				"incomingMsgUserId": incomingMessage.UserID,
				"headerJson":        incomingMessage.HeaderJSON,
				"body":              incomingMessage.BodyAsString(),
				"numReceived":       numReceived,
			}).Info("Client2 received a message")

			a.Equal(protocol.Path("/testTopic/m"), incomingMessage.Path)
			a.Equal("body", incomingMessage.BodyAsString())
			a.True(incomingMessage.ID > 0)
			idReceived[incomingMessage.ID] = true

			if numReceived == 2*numSent {
				break WAIT
			}
		case <-breakTimer:
			break WAIT
		}
	}

	a.Equal(2*numSent, numReceived, "client2 should have received every sent message")
	a.Equal(2*numSent, len(idReceived), "every received message should have a distinct ID")
}
// Test synchronizing messages when a new node joins the cluster.
func TestSynchronizerIntegration(t *testing.T) {
	testutil.SkipIfShort(t)
	defer testutil.EnableDebugForMethod()()
	a := assert.New(t)

	node1 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8094",
		NodeID:     1,
		NodePort:   11004,
		Remotes:    "localhost:11004",
	})
	a.NotNil(node1)
	defer node1.cleanup(true)

	time.Sleep(2 * time.Second)

	client1, err := node1.client("client1", 10, true)
	a.NoError(err)

	client1.Send(syncTopic, "nobody", "")
	client1.Send(syncTopic, "nobody", "")
	client1.Send(syncTopic, "nobody", "")

	time.Sleep(2 * time.Second)

	node2 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8095",
		NodeID:     2,
		NodePort:   11005,
		Remotes:    "localhost:11004",
	})
	a.NotNil(node2)
	defer node2.cleanup(true)

	client2, err := node2.client("client2", 10, true)
	a.NoError(err)

	// receive the last 3 messages from the synchronized topic
	cmd := &protocol.Cmd{
		Name: protocol.CmdReceive,
		Arg:  syncTopic + " -3",
	}

	doneC := make(chan struct{})
	go func() {
		for {
			select {
			case m := <-client2.Messages():
				log.WithField("m", m).Error("Message received from first cluster node")
			case e := <-client2.Errors():
				log.WithField("clientError", e).Error("Client error")
			case status := <-client2.StatusMessages():
				log.WithField("status", status).Error("Client status message")
			case <-doneC:
				return
			}
		}
	}()

	log.Error(string(cmd.Bytes()))
	client2.WriteRawMessage(cmd.Bytes())
	time.Sleep(10 * time.Second)
	close(doneC)
}
func Test_Subscribe_working_After_Node_Restart(t *testing.T) {
	// defer testutil.EnableDebugForMethod()()
	testutil.SkipIfDisabled(t)
	testutil.SkipIfShort(t)
	a := assert.New(t)

	nodeConfig1 := testClusterNodeConfig{
		HttpListen: "localhost:8082",
		NodeID:     1,
		NodePort:   20002,
		Remotes:    "localhost:20002",
	}
	node1 := newTestClusterNode(t, nodeConfig1)
	a.NotNil(node1)

	node2 := newTestClusterNode(t, testClusterNodeConfig{
		HttpListen: "localhost:8083",
		NodeID:     2,
		NodePort:   20003,
		Remotes:    "localhost:20002",
	})
	a.NotNil(node2)
	defer node2.cleanup(true)

	node1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)
	node2.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)

	// subscribe on the first node
	node1.Subscribe(testTopic, "1")

	// connect a client and send a message
	client1, err := node1.client("user1", 1000, true)
	a.NoError(err)
	err = client1.Send(testTopic, "body", "{jsonHeader:1}")
	a.NoError(err)

	// one message should be received, but only on the first node:
	// every message should be delivered only once.
	node1.FCM.checkReceived(1)
	node2.FCM.checkReceived(0)

	// stop the first node; clean up without removing its directories
	node1.cleanup(false)
	time.Sleep(time.Millisecond * 150)

	// restart the service
	restartedNode1 := newTestClusterNode(t, nodeConfig1)
	a.NotNil(restartedNode1)
	defer restartedNode1.cleanup(true)
	restartedNode1.FCM.setupRoundTripper(20*time.Millisecond, 10, fcm.SuccessFCMResponse)

	// send a message to the former subscription
	client1, err = restartedNode1.client("user1", 1000, true)
	a.NoError(err)
	time.Sleep(time.Second)
	err = client1.Send(testTopic, "body", "{jsonHeader:1}")
	a.NoError(err, "Subscription should work even after node restart")

	// again only one message should be received, and only on the restarted node:
	// every message should be delivered only once.
	restartedNode1.FCM.checkReceived(1)
	node2.FCM.checkReceived(0)
}
func TestThroughput(t *testing.T) {
	// TODO: We disabled this test because the receiver implementation of fetching messages
	// should be reimplemented according to the new message store
	testutil.SkipIfDisabled(t)
	testutil.SkipIfShort(t)
	defer testutil.ResetDefaultRegistryHealthCheck()

	dir, err := ioutil.TempDir("", "guble_benchmarking_test")
	if err != nil {
		t.Fatal(err)
	}

	*Config.HttpListen = "localhost:0"
	*Config.KVS = "memory"
	*Config.MS = "file"
	*Config.StoragePath = dir

	service := StartService()

	testgroupCount := 4
	messagesPerGroup := 100
	log.Printf("init the %v testgroups", testgroupCount)
	testgroups := make([]*testgroup, testgroupCount)
	for i := range testgroups {
		testgroups[i] = newTestgroup(t, i, service.WebServer().GetAddr(), messagesPerGroup)
	}

	// init test
	log.Print("init the testgroups")
	for i := range testgroups {
		testgroups[i].Init()
	}

	defer func() {
		// cleanup tests
		log.Print("cleanup the testgroups")
		for i := range testgroups {
			testgroups[i].Clean()
		}
		service.Stop()
		os.RemoveAll(dir)
	}()

	// start test
	log.Print("start the testgroups")
	start := time.Now()
	for i := range testgroups {
		go testgroups[i].Start()
	}

	log.Print("wait for finishing")
	for i, test := range testgroups {
		select {
		case successFlag := <-test.done:
			if !successFlag {
				t.Logf("testgroup %v returned with error", i)
				t.FailNow()
				return
			}
		case <-time.After(time.Second * 20):
			t.Log("timeout. testgroups not ready before timeout")
			t.Fail()
			return
		}
	}

	end := time.Now()
	totalMessages := testgroupCount * messagesPerGroup
	throughput := float64(totalMessages) / end.Sub(start).Seconds()
	log.Printf("finished! Throughput: %v/sec (%v messages in %v)", int(throughput), totalMessages, end.Sub(start))

	time.Sleep(time.Second * 1)
}
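// newTestgroup and the testgroup type are defined elsewhere in this package. The
// assumed contract (a sketch with hypothetical fields, for illustration only): each
// testgroup connects a publisher and a subscriber to the given address, Init sets up
// the connections, Start publishes messagesPerGroup messages and verifies their
// receipt, and the boolean pushed onto done reports overall success.
//
//	type testgroup struct {
//		t    *testing.T
//		id   int
//		addr string
//		done chan bool // true on success, false on any failure
//	}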