func TestHTTPmpubEmpty(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_mpub_empty" + strconv.Itoa(int(time.Now().Unix())) nsqd.GetTopicIgnPart(topicName) msg := []byte("test message") msgs := make([][]byte, 4) for i := range msgs { msgs[i] = msg } buf := bytes.NewBuffer(bytes.Join(msgs, []byte("\n"))) _, err := buf.Write([]byte("\n")) test.Equal(t, err, nil) url := fmt.Sprintf("http://%s/mpub?topic=%s", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, string(body), "OK") time.Sleep(5 * time.Millisecond) }
func TestTLSRequireVerifyExceptHTTP(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) opts.LogLevel = 3 opts.TLSCert = "./test/certs/server.pem" opts.TLSKey = "./test/certs/server.key" opts.TLSRootCAFile = "./test/certs/ca.pem" opts.TLSClientAuthPolicy = "require-verify" opts.TLSRequired = TLSRequiredExceptHTTP _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_req_verf_except_http" + strconv.Itoa(int(time.Now().Unix())) nsqd.GetTopicIgnPart(topicName) // no cert buf := bytes.NewBuffer([]byte("test message")) url := fmt.Sprintf("http://%s/pub?topic=%s", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, string(body), "OK") time.Sleep(5 * time.Millisecond) }
func TestStats(t *testing.T) { opts := nsqdNs.NewOptions() opts.Logger = newTestLogger(t) tcpAddr, _, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_stats" + strconv.Itoa(int(time.Now().Unix())) topic := nsqd.GetTopicIgnPart(topicName) msg := nsqdNs.NewMessage(0, []byte("test body")) topic.PutMessage(msg) conn, err := mustConnectNSQD(tcpAddr) test.Equal(t, err, nil) defer conn.Close() identify(t, conn, nil, frameTypeResponse) sub(t, conn, topicName, "ch") stats := nsqd.GetStats(false) t.Logf("stats: %+v", stats) test.Equal(t, len(stats), 1) test.Equal(t, len(stats[0].Channels), 1) test.Equal(t, len(stats[0].Channels[0].Clients), 1) }
func TestHTTPmpubBinary(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_mpub_bin" + strconv.Itoa(int(time.Now().Unix())) nsqd.GetTopicIgnPart(topicName) mpub := make([][]byte, 5) for i := range mpub { mpub[i] = make([]byte, 100) } cmd, _ := nsq.MultiPublish(topicName, mpub) buf := bytes.NewBuffer(cmd.Body) url := fmt.Sprintf("http://%s/mpub?topic=%s&binary=true", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, string(body), "OK") time.Sleep(5 * time.Millisecond) }
// TestNsqdRPCClient exercises the nsqd coordinator RPC client against a
// locally started coordinator: unknown-method errors, coordinator error
// propagation, a normal round-trip, and a client-side call timeout.
func TestNsqdRPCClient(t *testing.T) {
	SetCoordLogger(newTestLogger(t), 2)
	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("nsq-test-%d", time.Now().UnixNano()))
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)

	nsqdCoord := startNsqdCoord(t, "0", tmpDir, "", nil, true)
	nsqdCoord.Start()
	defer nsqdCoord.Stop()
	// allow the coordinator's RPC server to come up before dialing
	time.Sleep(time.Second * 2)

	client, err := NewNsqdRpcClient(nsqdCoord.rpcServer.rpcServer.Listener.ListenAddr().String(), time.Second)
	test.Nil(t, err)

	// calling a method that does not exist should surface an error
	_, err = client.CallWithRetry("TestRpcCallNotExist", "req")
	test.NotNil(t, err)

	// a coordinator-level error propagates as a common (non-network) error
	// carrying the original message
	coordErr := client.CallRpcTestCoordErr("coorderr")
	test.NotNil(t, coordErr)
	test.NotEqual(t, coordErr.ErrType, CoordNetErr)
	test.Equal(t, coordErr.ErrMsg, "coorderr")
	test.Equal(t, coordErr.ErrCode, RpcCommonErr)
	test.Equal(t, coordErr.ErrType, CoordCommonErr)

	// a normal round-trip echoes the request data back in both the response
	// and the error payload (test handler behavior)
	rsp, rpcErr := client.CallRpcTest("reqdata")
	test.NotNil(t, rpcErr)
	test.Equal(t, rsp, "reqdata")
	test.Equal(t, rpcErr.ErrCode, RpcNoErr)
	test.Equal(t, rpcErr.ErrMsg, "reqdata")
	test.Equal(t, rpcErr.ErrType, CoordCommonErr)

	// a deliberately slow handler should trip the 1s client timeout
	timeoutErr := client.CallRpcTesttimeout("reqdata")
	test.NotNil(t, timeoutErr)
	test.Equal(t, timeoutErr.(*gorpc.ClientError).Timeout, true)
	time.Sleep(time.Second * 3)
	client.Close()
}
func TestHTTPgetStatusText(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, _, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() url := fmt.Sprintf("http://%s/stats?format=text", httpAddr) resp, err := http.Get(url) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 200) test.NotNil(t, body) }
func TestHTTPpub(t *testing.T) { opts := nsqd.NewOptions() opts.LogLevel = 2 opts.Logger = newTestLogger(t) //opts.Logger = &levellogger.GLogger{} tcpAddr, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_pub" + strconv.Itoa(int(time.Now().Unix())) _ = nsqd.GetTopicIgnPart(topicName) conn, err := mustConnectNSQD(tcpAddr) test.Equal(t, err, nil) identify(t, conn, nil, frameTypeResponse) sub(t, conn, topicName, "ch") buf := bytes.NewBuffer([]byte("test message")) url := fmt.Sprintf("http://%s/pub?topic=%s", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, string(body), "OK") time.Sleep(5 * time.Millisecond) _, err = nsq.Ready(1).WriteTo(conn) test.Equal(t, err, nil) // sleep to allow the RDY state to take effect time.Sleep(50 * time.Millisecond) for { resp, _ := nsq.ReadResponse(conn) frameType, data, err := nsq.UnpackResponse(resp) test.Nil(t, err) test.NotEqual(t, frameTypeError, frameType) if frameType == frameTypeResponse { t.Logf("got response data: %v", string(data)) continue } msgOut, err := nsq.DecodeMessage(data) test.Equal(t, []byte("test message"), msgOut.Body) _, err = nsq.Finish(msgOut.ID).WriteTo(conn) test.Nil(t, err) break } conn.Close() }
func TestHTTPSRequire(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) //opts.LogLevel = 2 //opts.Logger = &levellogger.GLogger{} opts.LogLevel = 3 opts.TLSCert = "./test/certs/server.pem" opts.TLSKey = "./test/certs/server.key" opts.TLSClientAuthPolicy = "require" _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_pub_req" + strconv.Itoa(int(time.Now().Unix())) nsqd.GetTopicIgnPart(topicName) buf := bytes.NewBuffer([]byte("test message")) url := fmt.Sprintf("http://%s/pub?topic=%s", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, resp.StatusCode, 403) httpsAddr := nsqdServer.httpsListener.Addr().(*net.TCPAddr) cert, err := tls.LoadX509KeyPair("./test/certs/cert.pem", "./test/certs/key.pem") test.Equal(t, err, nil) tlsConfig := &tls.Config{ Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true, MinVersion: 0, } transport := &http.Transport{ TLSClientConfig: tlsConfig, } client := &http.Client{Transport: transport} buf = bytes.NewBuffer([]byte("test message")) url = fmt.Sprintf("https://%s/pub?topic=%s", httpsAddr, topicName) resp, err = client.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, string(body), "OK") time.Sleep(5 * time.Millisecond) }
// TestReconfigure verifies that swapping NSQLookupdTCPAddresses at runtime
// via SwapOpts/TriggerOptsNotification updates the nsqd server's set of
// lookupd peers accordingly.
func TestReconfigure(t *testing.T) {
	lopts := nsqlookupd.NewOptions()
	lopts.Logger = newTestLogger(t)
	nsqlookupd.SetLogger(lopts)
	_, _, lookupd1 := mustStartNSQLookupd(lopts)
	defer lookupd1.Exit()
	_, _, lookupd2 := mustStartNSQLookupd(lopts)
	defer lookupd2.Exit()
	_, _, lookupd3 := mustStartNSQLookupd(lopts)
	defer lookupd3.Exit()

	opts := nsqdNs.NewOptions()
	opts.Logger = newTestLogger(t)
	_, _, nsqd, nsqdServer := mustStartNSQD(opts)
	defer os.RemoveAll(opts.DataPath)
	defer nsqdServer.Exit()
	time.Sleep(50 * time.Millisecond)

	// point nsqd at a single lookupd; the peer list should follow after the
	// notification is processed
	newOpts := *opts
	newOpts.NSQLookupdTCPAddresses = []string{lookupd1.RealTCPAddr().String()}
	nsqd.SwapOpts(&newOpts)
	nsqd.TriggerOptsNotification()
	test.Equal(t, len(nsqd.GetOpts().NSQLookupdTCPAddresses), 1)
	time.Sleep(50 * time.Millisecond)
	numLookupPeers := len(nsqdServer.lookupPeers.Load().([]*clusterinfo.LookupPeer))
	test.Equal(t, numLookupPeers, 1)

	// now swap to two different lookupds and expect the peer list to match
	newOpts = *opts
	newOpts.NSQLookupdTCPAddresses = []string{lookupd2.RealTCPAddr().String(), lookupd3.RealTCPAddr().String()}
	nsqd.SwapOpts(&newOpts)
	nsqd.TriggerOptsNotification()
	test.Equal(t, len(nsqd.GetOpts().NSQLookupdTCPAddresses), 2)
	time.Sleep(50 * time.Millisecond)

	var lookupPeers []string
	for _, lp := range nsqdServer.lookupPeers.Load().([]*clusterinfo.LookupPeer) {
		lookupPeers = append(lookupPeers, lp.String())
	}
	test.Equal(t, len(lookupPeers), 2)
	test.Equal(t, lookupPeers, newOpts.NSQLookupdTCPAddresses)
}
func TestHTTPconfig(t *testing.T) { lopts := nsqlookupd.NewOptions() lopts.Logger = newTestLogger(t) nsqlookupd.SetLogger(lopts) _, _, lookupd1 := mustStartNSQLookupd(lopts) defer lookupd1.Exit() _, _, lookupd2 := mustStartNSQLookupd(lopts) defer lookupd2.Exit() opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, _, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() url := fmt.Sprintf("http://%s/config/nsqlookupd_tcp_addresses", httpAddr) resp, err := http.Get(url) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 200) test.Equal(t, string(body), "[]") client := http.Client{} addrs := fmt.Sprintf(`["%s","%s"]`, lookupd1.RealTCPAddr().String(), lookupd2.RealTCPAddr().String()) url = fmt.Sprintf("http://%s/config/nsqlookupd_tcp_addresses", httpAddr) req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(addrs))) test.Equal(t, err, nil) resp, err = client.Do(req) test.Equal(t, err, nil) defer resp.Body.Close() body, _ = ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 200) test.Equal(t, string(body), addrs) }
func TestGetTopic(t *testing.T) { opts := NewOptions() opts.Logger = newTestLogger(t) _, _, nsqd := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqd.Exit() topic1 := nsqd.GetTopic("test", 0) test.NotNil(t, topic1) test.Equal(t, "test", topic1.GetTopicName()) topic2 := nsqd.GetTopic("test", 0) test.Equal(t, topic1, topic2) topic3 := nsqd.GetTopic("test2", 1) test.Equal(t, "test2", topic3.GetTopicName()) test.NotEqual(t, topic2, topic3) topic1_1 := nsqd.GetTopicIgnPart("test") test.Equal(t, "test", topic1_1.GetTopicName()) test.Equal(t, 0, topic1_1.GetTopicPart()) topic3_1 := nsqd.GetTopicIgnPart("test2") test.Equal(t, "test2", topic3_1.GetTopicName()) test.Equal(t, 1, topic3_1.GetTopicPart()) }
func TestGetChannel(t *testing.T) { opts := NewOptions() opts.Logger = newTestLogger(t) _, _, nsqd := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqd.Exit() topic := nsqd.GetTopic("test", 0) channel1 := topic.GetChannel("ch1") test.NotNil(t, channel1) test.Equal(t, "ch1", channel1.name) channel2 := topic.GetChannel("ch2") test.Equal(t, channel1, topic.channelMap["ch1"]) test.Equal(t, channel2, topic.channelMap["ch2"]) }
func TestHTTPgetStatusJSON(t *testing.T) { testTime := time.Now() opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() testTime = nsqd.GetStartTime() expectedJSON := fmt.Sprintf(`{"status_code":200,"status_txt":"OK","data":{"version":"%v","health":"OK","start_time":%v,"topics":[]}}`, version.Binary, testTime.Unix()) url := fmt.Sprintf("http://%s/stats?format=json", httpAddr) resp, err := http.Get(url) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 200) test.Equal(t, string(body), expectedJSON) }
func TestTopicBackendMaxMsgSize(t *testing.T) { opts := NewOptions() opts.Logger = newTestLogger(t) _, _, nsqd := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqd.Exit() topicName := "test_topic_backend_maxmsgsize" + strconv.Itoa(int(time.Now().Unix())) topic := nsqd.GetTopic(topicName, 0) test.Equal(t, topic.backend.maxMsgSize, int32(opts.MaxMsgSize+minValidMsgLength)) }
func TestDeletes(t *testing.T) { opts := NewOptions() opts.Logger = newTestLogger(t) _, _, nsqd := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqd.Exit() topic := nsqd.GetTopicIgnPart("test") oldMagicFile := path.Join(topic.dataPath, "magic"+strconv.Itoa(topic.partition)) channel1 := topic.GetChannel("ch1") test.NotNil(t, channel1) err := topic.SetMagicCode(time.Now().UnixNano()) _, err = os.Stat(oldMagicFile) test.Equal(t, nil, err) err = topic.DeleteExistingChannel("ch1") test.Equal(t, nil, err) test.Equal(t, 0, len(topic.channelMap)) channel2 := topic.GetChannel("ch2") test.NotNil(t, channel2) err = nsqd.DeleteExistingTopic("test", topic.GetTopicPart()) test.Equal(t, nil, err) test.Equal(t, 0, len(topic.channelMap)) test.Equal(t, 0, len(nsqd.topicMap)) _, err = os.Stat(oldMagicFile) test.NotNil(t, err) }
func TestHTTPpubEmpty(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_pub_empty" + strconv.Itoa(int(time.Now().Unix())) nsqd.GetTopicIgnPart(topicName) buf := bytes.NewBuffer([]byte("")) url := fmt.Sprintf("http://%s/pub?topic=%s", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 500) test.Equal(t, string(body), `{"status_code":500,"status_txt":"MSG_EMPTY","data":null}`) time.Sleep(5 * time.Millisecond) }
// TestChannelEmptyConsumer fills a client's in-flight queue and then
// verifies that moving the channel's consume offset to the channel end
// clears all in-flight messages for connected clients.
func TestChannelEmptyConsumer(t *testing.T) {
	opts := nsqdNs.NewOptions()
	opts.Logger = newTestLogger(t)
	tcpAddr, _, nsqd, nsqdServer := mustStartNSQD(opts)
	defer os.RemoveAll(opts.DataPath)
	defer nsqdServer.Exit()
	conn, _ := mustConnectNSQD(tcpAddr)
	defer conn.Close()

	topicName := "test_channel_empty" + strconv.Itoa(int(time.Now().Unix()))
	topic := nsqd.GetTopicIgnPart(topicName)
	channel := topic.GetChannel("channel")
	client := nsqdNs.NewClientV2(0, conn, opts, nil)
	client.SetReadyCount(25)
	channel.AddClient(client.ID, client)

	// start 25 messages in-flight for this client
	for i := 0; i < 25; i++ {
		msg := nsqdNs.NewMessage(nsqdNs.MessageID(i), []byte("test"))
		channel.StartInFlightTimeout(msg, 0, "", opts.MsgTimeout)
		client.SendingMessage()
	}
	for _, cl := range channel.GetClients() {
		stats := cl.Stats()
		test.Equal(t, stats.InFlightCount, int64(25))
	}

	// skip the consumer to the channel end; all in-flight counts should
	// drop to zero once the reset has propagated
	channel.SetConsumeOffset(channel.GetChannelEnd().Offset(), channel.GetChannelEnd().TotalMsgCnt(), true)
	time.Sleep(time.Second)
	for _, cl := range channel.GetClients() {
		stats := cl.Stats()
		test.Equal(t, stats.InFlightCount, int64(0))
	}
}
func TestHTTPpubtrace(t *testing.T) { opts := nsqd.NewOptions() opts.LogLevel = 2 opts.Logger = newTestLogger(t) //opts.Logger = &levellogger.GLogger{} _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_pub_trace" + strconv.Itoa(int(time.Now().Unix())) _ = nsqd.GetTopicIgnPart(topicName) buf := bytes.NewBuffer([]byte("test message")) rawurl := fmt.Sprintf("http://%s/pubtrace?topic=%s", httpAddr, topicName) resp, err := http.Post(rawurl, "application/octet-stream", buf) test.Equal(t, err, nil) body, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() test.Equal(t, resp.StatusCode, 400) test.Equal(t, string(body), `{"message":"INVALID_TRACE_ID"}`) time.Sleep(time.Second) // the buffer will be drained by the http post // so we need refill the buffer. buf = bytes.NewBuffer([]byte("test message 2")) rawurl = fmt.Sprintf("http://%s/pubtrace?topic=%s&partition=0&trace_id=11", httpAddr, topicName) resp, err = http.Post(rawurl, "application/octet-stream", buf) test.Equal(t, err, nil) body, _ = ioutil.ReadAll(resp.Body) resp.Body.Close() test.Equal(t, resp.StatusCode, 200) type tmpResp struct { Status string `json:"status"` ID uint64 `json:"id"` TraceID string `json:"trace_id"` QueueOffset uint64 `json:"queue_offset"` DataRawSize uint32 `json:"rawsize"` } var ret tmpResp json.Unmarshal(body, &ret) test.Equal(t, ret.Status, "OK") test.Equal(t, ret.TraceID, "11") time.Sleep(5 * time.Millisecond) }
func TestDiskQueueSnapshotReader(t *testing.T) { l := newTestLogger(t) nsqLog.Logger = l dqName := "test_disk_queue" + strconv.Itoa(int(time.Now().Unix())) tmpDir, err := ioutil.TempDir("", fmt.Sprintf("nsq-test-%d", time.Now().UnixNano())) test.Nil(t, err) defer os.RemoveAll(tmpDir) queue, _ := newDiskQueueWriter(dqName, tmpDir, 1024, 4, 1<<10, 1) dqWriter := queue.(*diskQueueWriter) defer dqWriter.Close() test.NotNil(t, dqWriter) msg := []byte("test") msgNum := 2000 var midEnd BackendQueueEnd var midEnd2 BackendQueueEnd for i := 0; i < msgNum; i++ { dqWriter.Put(msg) if i == msgNum/2 { midEnd = dqWriter.GetQueueWriteEnd() } if i == msgNum/4 { midEnd2 = dqWriter.GetQueueWriteEnd() } } dqWriter.Flush() end := dqWriter.GetQueueWriteEnd() test.Nil(t, err) dqReader := NewDiskQueueSnapshot(dqName, tmpDir, end) defer dqReader.Close() queueStart := dqReader.queueStart test.Equal(t, BackendOffset(0), queueStart.Offset()) dqReader.SetQueueStart(midEnd2) test.Equal(t, midEnd2.Offset(), dqReader.readPos.Offset()) result := dqReader.ReadOne() test.Nil(t, result.Err) test.Equal(t, midEnd2.Offset(), result.Offset) err = dqReader.SeekTo(midEnd.Offset()) test.Nil(t, err) test.Equal(t, midEnd.Offset(), dqReader.readPos.virtualEnd) result = dqReader.ReadOne() test.Nil(t, result.Err) test.Equal(t, midEnd.Offset(), result.Offset) data, err := dqReader.ReadRaw(100) test.Nil(t, err) test.Equal(t, 100, len(data)) // remove some begin of queue, and test queue start }
func TestDeleteLast(t *testing.T) { opts := NewOptions() opts.Logger = newTestLogger(t) _, _, nsqd := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqd.Exit() topic := nsqd.GetTopic("test", 0) channel1 := topic.GetChannel("ch1") test.NotNil(t, channel1) err := topic.DeleteExistingChannel("ch1") test.Nil(t, err) test.Equal(t, 0, len(topic.channelMap)) msg := NewMessage(0, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaa")) _, _, _, _, err = topic.PutMessage(msg) time.Sleep(100 * time.Millisecond) test.Nil(t, err) }
func TestHTTPpubpartition(t *testing.T) { opts := nsqd.NewOptions() opts.LogLevel = 2 opts.Logger = newTestLogger(t) //opts.Logger = &levellogger.GLogger{} _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_pub_partition" + strconv.Itoa(int(time.Now().Unix())) _ = nsqd.GetTopicIgnPart(topicName) buf := bytes.NewBuffer([]byte("test message")) // should failed pub to not exist partition url := fmt.Sprintf("http://%s/pub?topic=%s&partition=2", httpAddr, topicName) resp, err := http.Post(url, "application/octet-stream", buf) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.NotEqual(t, string(body), "OK") time.Sleep(5 * time.Millisecond) }
// TestClientAttributes connects with snappy compression and a custom user
// agent, then verifies both attributes are reported by the /stats endpoint.
func TestClientAttributes(t *testing.T) {
	userAgent := "Test User Agent"
	opts := nsqdNs.NewOptions()
	opts.Logger = newTestLogger(t)
	opts.LogLevel = 3
	opts.SnappyEnabled = true
	tcpAddr, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts)
	defer os.RemoveAll(opts.DataPath)
	defer nsqdServer.Exit()

	conn, err := mustConnectNSQD(tcpAddr)
	test.Equal(t, err, nil)
	defer conn.Close()

	// negotiate snappy and pass the custom user agent during IDENTIFY
	data := identify(t, conn, map[string]interface{}{
		"snappy":     true,
		"user_agent": userAgent,
	}, frameTypeResponse)
	resp := struct {
		Snappy    bool   `json:"snappy"`
		UserAgent string `json:"user_agent"`
	}{}
	err = json.Unmarshal(data, &resp)
	test.Equal(t, err, nil)
	test.Equal(t, resp.Snappy, true)

	// from here on all traffic on the connection is snappy-framed
	r := snappystream.NewReader(conn, snappystream.SkipVerifyChecksum)
	w := snappystream.NewWriter(conn)
	readValidate(t, r, frameTypeResponse, "OK")

	topicName := "test_client_attributes" + strconv.Itoa(int(time.Now().Unix()))
	topic := nsqd.GetTopicIgnPart(topicName)
	topic.GetChannel("ch")
	sub(t, readWriter{r, w}, topicName, "ch")

	// the stats endpoint should report the negotiated client attributes
	testURL := fmt.Sprintf("http://127.0.0.1:%d/stats?format=json", httpAddr.Port)
	statsData, err := API(testURL)
	test.Equal(t, err, nil)
	client := statsData.Get("topics").GetIndex(0).Get("channels").GetIndex(0).Get("clients").GetIndex(0)
	test.Equal(t, client.Get("user_agent").MustString(), userAgent)
	test.Equal(t, client.Get("snappy").MustBool(), true)
}
func TestHTTPerrors(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, _, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() url := fmt.Sprintf("http://%s/stats", httpAddr) resp, err := http.Post(url, "text/plain", nil) test.Equal(t, err, nil) defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 405) test.Equal(t, string(body), `{"message":"METHOD_NOT_ALLOWED"}`) url = fmt.Sprintf("http://%s/not_found", httpAddr) resp, err = http.Get(url) test.Equal(t, err, nil) defer resp.Body.Close() body, _ = ioutil.ReadAll(resp.Body) test.Equal(t, resp.StatusCode, 404) test.Equal(t, string(body), `{"message":"NOT_FOUND"}`) }
// testNsqLookupNsqdNodesChange drives a 5-node cluster through a long
// sequence of membership changes and asserts the lookup coordinator keeps
// topic ISR/leader metadata consistent: ISR-node loss and rejoin, leader
// loss and rejoin, double failure (leader + ISR), ISR-join timeouts for
// 2- and 3-replica topics, and finally graceful quit of every node.
// NOTE(review): heavily timing-dependent — waitClusterStable calls and the
// repeated GetTopicInfo re-reads are load-bearing; do not reorder.
func testNsqLookupNsqdNodesChange(t *testing.T, useFakeLeadership bool) {
	if testing.Verbose() {
		SetCoordLogger(&levellogger.GLogger{}, levellogger.LOG_INFO)
		glog.SetFlags(0, "", "", true, true, 1)
		glog.StartWorker(time.Second)
	} else {
		SetCoordLogger(newTestLogger(t), levellogger.LOG_DEBUG)
	}
	idList := []string{"id1", "id2", "id3", "id4", "id5"}
	lookupCoord1, nodeInfoList := prepareCluster(t, idList, useFakeLeadership)
	for _, n := range nodeInfoList {
		defer os.RemoveAll(n.dataPath)
		defer n.localNsqd.Exit()
		defer n.nsqdCoord.Stop()
	}
	topic := "test-nsqlookup-topic-unit-test"
	lookupLeadership := lookupCoord1.leadership
	// clean any leftover state from earlier runs
	lookupCoord1.DeleteTopic(topic, "**")
	topic3 := topic + topic
	lookupCoord1.DeleteTopic(topic3, "**")
	time.Sleep(time.Second)
	defer func() {
		lookupCoord1.DeleteTopic(topic, "**")
		lookupCoord1.DeleteTopic(topic3, "**")
		time.Sleep(time.Second * 3)
		lookupCoord1.Stop()
	}()
	// test new topic create
	err := lookupCoord1.CreateTopic(topic, TopicMetaInfo{2, 2, 0, 0, 0, 0})
	test.Nil(t, err)
	waitClusterStable(lookupCoord1, time.Second*3)
	pmeta, _, err := lookupLeadership.GetTopicMetaInfo(topic)
	pn := pmeta.PartitionNum
	test.Nil(t, err)
	test.Equal(t, pn, 2)
	t0, err := lookupLeadership.GetTopicInfo(topic, 0)
	test.Nil(t, err)
	t1, err := lookupLeadership.GetTopicInfo(topic, 1)
	test.Nil(t, err)
	test.Equal(t, len(t0.ISR), 2)
	test.Equal(t, len(t1.ISR), 2)
	t.Log(t0)
	t.Log(t1)
	test.NotEqual(t, t0.Leader, t1.Leader)
	// the per-node coordinators must agree with the lookup leadership view
	t0LeaderCoord := nodeInfoList[t0.Leader].nsqdCoord
	test.NotNil(t, t0LeaderCoord)
	tc0, coordErr := t0LeaderCoord.getTopicCoord(topic, 0)
	test.Nil(t, coordErr)
	test.Equal(t, tc0.topicInfo.Leader, t0.Leader)
	test.Equal(t, len(tc0.topicInfo.ISR), 2)
	t1LeaderCoord := nodeInfoList[t1.Leader].nsqdCoord
	test.NotNil(t, t1LeaderCoord)
	tc1, coordErr := t1LeaderCoord.getTopicCoord(topic, 1)
	test.Nil(t, coordErr)
	test.Equal(t, tc1.topicInfo.Leader, t1.Leader)
	test.Equal(t, len(tc1.topicInfo.ISR), 2)
	coordLog.Warningf("============= begin test isr node failed ====")
	// test isr node lost
	lostNodeID := t0.ISR[1]
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 1)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.UnregisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*3)
	t0, err = lookupLeadership.GetTopicInfo(topic, 0)
	if len(t0.ISR) < t0.Replica {
		waitClusterStable(lookupCoord1, time.Second*3)
	}
	t0, err = lookupLeadership.GetTopicInfo(topic, 0)
	test.Nil(t, err)
	// the failed node must have been removed from the ISR
	test.Equal(t, FindSlice(t0.ISR, lostNodeID) == -1, true)
	test.Equal(t, len(t0.ISR), t0.Replica)
	test.Equal(t, t0.Leader, t0.ISR[0])

	// clear topic info on failed node, test the reload for failed node
	nodeInfoList[lostNodeID].nsqdCoord.topicCoords = make(map[string]map[int]*TopicCoordinator)
	// test new catchup and new isr
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 0)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.RegisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*3)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	if len(t0.ISR) < t0.Replica {
		waitClusterStable(lookupCoord1, time.Second*3)
	}
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	test.Equal(t, len(t0.CatchupList), 0)
	test.Equal(t, len(t0.ISR) >= t0.Replica, true)
	test.Equal(t, len(tc0.topicInfo.ISR), len(t0.ISR))
	test.Equal(t, t0.Leader, t0.ISR[0])
	lookupCoord1.triggerCheckTopics("", 0, time.Second)
	time.Sleep(time.Second)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	// should remove the unnecessary node
	test.Equal(t, len(t0.ISR), t0.Replica)
	coordLog.Warningf("============= begin test leader failed ====")
	// test leader node lost
	lostNodeID = t0.Leader
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 1)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.UnregisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*3)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	if len(t0.ISR) < t0.Replica {
		waitClusterStable(lookupCoord1, time.Second*3)
	}
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	t.Log(t0)
	// a new leader must have been elected from the remaining ISR
	test.Equal(t, t0.Replica, len(t0.ISR))
	test.Equal(t, t0.Leader, t0.ISR[0])
	test.NotEqual(t, t0.Leader, lostNodeID)
	//test.Equal(t, len(t0.CatchupList), 1)
	test.Equal(t, FindSlice(t0.ISR, lostNodeID) == -1, true)
	t0LeaderCoord = nodeInfoList[t0.Leader].nsqdCoord
	test.NotNil(t, t0LeaderCoord)
	tc0, coordErr = t0LeaderCoord.getTopicCoord(topic, 0)
	test.Nil(t, coordErr)
	test.Equal(t, len(tc0.topicInfo.ISR), len(t0.ISR))
	test.Equal(t, tc0.topicInfo.Leader, t0.Leader)

	// test lost leader node rejoin
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 0)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.RegisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*3)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	if len(t0.ISR) < t0.Replica {
		waitClusterStable(lookupCoord1, time.Second*3)
	}
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	t.Log(t0)
	test.Equal(t, len(t0.CatchupList), 0)
	test.Equal(t, len(t0.ISR) >= t0.Replica, true)
	t0LeaderCoord = nodeInfoList[t0.Leader].nsqdCoord
	test.NotNil(t, t0LeaderCoord)
	tc0, coordErr = t0LeaderCoord.getTopicCoord(topic, 0)
	test.Nil(t, coordErr)
	test.Equal(t, len(tc0.topicInfo.ISR), len(t0.ISR))
	test.Equal(t, tc0.topicInfo.Leader, t0.Leader)
	waitClusterStable(lookupCoord1, time.Second*3)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	// should remove the unnecessary node
	test.Equal(t, len(t0.ISR), t0.Replica)

	// test old leader failed and begin elect new and then new leader failed
	coordLog.Warningf("============= begin test old leader failed and then new leader failed ====")
	lostNodeID = t0.Leader
	lostISRID := t0.ISR[1]
	if lostISRID == lostNodeID {
		lostISRID = t0.ISR[0]
	}
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 1)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.UnregisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	time.Sleep(time.Millisecond)
	atomic.StoreInt32(&nodeInfoList[lostISRID].nsqdCoord.stopping, 1)
	nodeInfoList[lostISRID].nsqdCoord.leadership.UnregisterNsqd(nodeInfoList[lostISRID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*3)
	// bring both nodes back and expect the cluster to re-stabilize
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 0)
	atomic.StoreInt32(&nodeInfoList[lostISRID].nsqdCoord.stopping, 0)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.RegisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	nodeInfoList[lostISRID].nsqdCoord.leadership.RegisterNsqd(nodeInfoList[lostISRID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*3)
	waitClusterStable(lookupCoord1, time.Second*5)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	test.Equal(t, true, len(t0.ISR) >= t0.Replica)
	test.Equal(t, t0.Leader == t0.ISR[0] || t0.Leader == t0.ISR[1], true)
	t0LeaderCoord = nodeInfoList[t0.Leader].nsqdCoord
	test.NotNil(t, t0LeaderCoord)
	tc0, coordErr = t0LeaderCoord.getTopicCoord(topic, 0)
	test.Nil(t, coordErr)
	test.Equal(t, len(tc0.topicInfo.ISR), len(t0.ISR))
	test.Equal(t, tc0.topicInfo.Leader, t0.Leader)
	waitClusterStable(lookupCoord1, time.Second*5)
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	// should remove the unnecessary node
	test.Equal(t, t0.Replica, len(t0.ISR))

	// test join isr timeout
	lostNodeID = t1.ISR[1]
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 1)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.UnregisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*5)
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 0)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.RegisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*5)
	// with only 2 replica, the isr join fail should not change the isr list
	nodeInfoList[lostNodeID].nsqdCoord.rpcServer.toggleDisableRpcTest(true)
	waitClusterStable(lookupCoord1, time.Second*10)
	t1, _ = lookupLeadership.GetTopicInfo(topic, 1)
	test.Equal(t, true, len(t1.ISR)+len(t1.CatchupList) >= t1.Replica)
	test.Equal(t, t1.Leader == t1.ISR[0] || t1.Leader == t1.ISR[1], true)
	nodeInfoList[lostNodeID].nsqdCoord.rpcServer.toggleDisableRpcTest(false)
	waitClusterStable(lookupCoord1, time.Second*5)

	// test new topic create
	coordLog.Warningf("============= begin test 3 replicas ====")
	err = lookupCoord1.CreateTopic(topic3, TopicMetaInfo{1, 3, 0, 0, 0, 0})
	test.Nil(t, err)
	waitClusterStable(lookupCoord1, time.Second*5)
	// with 3 replica, the isr join timeout will change the isr list if the isr has the quorum nodes
	t3, err := lookupLeadership.GetTopicInfo(topic3, 0)
	test.Nil(t, err)
	test.Equal(t, len(t3.ISR), t3.Replica)
	lostNodeID = t3.ISR[1]
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 1)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.UnregisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*5)
	atomic.StoreInt32(&nodeInfoList[lostNodeID].nsqdCoord.stopping, 0)
	nodeInfoList[lostNodeID].nsqdCoord.leadership.RegisterNsqd(nodeInfoList[lostNodeID].nodeInfo)
	waitClusterStable(lookupCoord1, time.Second*5)
	nodeInfoList[lostNodeID].nsqdCoord.rpcServer.toggleDisableRpcTest(true)
	waitClusterStable(lookupCoord1, time.Second*5)
	t3, _ = lookupLeadership.GetTopicInfo(topic3, 0)
	test.Equal(t, true, len(t3.ISR) >= t3.Replica-1)
	test.Equal(t, true, len(t3.ISR) <= t3.Replica)
	test.Equal(t, t3.Leader == t3.ISR[0] || t3.Leader == t3.ISR[1], true)
	nodeInfoList[lostNodeID].nsqdCoord.rpcServer.toggleDisableRpcTest(false)
	waitClusterStable(lookupCoord1, time.Second*5)
	glog.Flush()
	t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
	test.Equal(t, true, len(t0.ISR) >= t0.Replica)
	t1, _ = lookupLeadership.GetTopicInfo(topic, 1)
	test.Equal(t, true, len(t1.ISR) >= t0.Replica)
	// before migrate really start, the isr should not reach the replica factor
	// however, catch up may start early while check leadership or enable topic write
	t3, _ = lookupLeadership.GetTopicInfo(topic3, 0)
	test.Equal(t, true, len(t3.ISR)+len(t3.CatchupList) >= t3.Replica)

	t0IsrNum := 2
	t1IsrNum := 2
	coordLog.Warningf("========== begin test quit ====")
	// stop the nodes one by one, leaders first, and verify ISR membership
	// shrinks consistently at every step
	quitList := make([]*NsqdCoordinator, 0)
	quitList = append(quitList, nodeInfoList[t0.Leader].nsqdCoord)
	if t1.Leader != t0.Leader {
		quitList = append(quitList, nodeInfoList[t1.Leader].nsqdCoord)
	}
	if t3.Leader != t0.Leader && t3.Leader != t1.Leader {
		quitList = append(quitList, nodeInfoList[t3.Leader].nsqdCoord)
	}
	for id, n := range nodeInfoList {
		if id == t0.Leader || id == t1.Leader || id == t3.Leader {
			continue
		}
		quitList = append(quitList, n.nsqdCoord)
	}
	test.Equal(t, len(nodeInfoList), len(quitList))
	for _, nsqdCoord := range quitList {
		failedID := nsqdCoord.myNode.GetID()
		delete(nodeInfoList, failedID)
		nsqdCoord.Stop()
		if t0IsrNum > 1 {
			if FindSlice(t0.ISR, failedID) != -1 {
				t0IsrNum--
			}
		}
		if t1IsrNum > 1 {
			if FindSlice(t1.ISR, failedID) != -1 {
				t1IsrNum--
			}
		}
		waitClusterStable(lookupCoord1, time.Second*5)
		t0, _ = lookupLeadership.GetTopicInfo(topic, 0)
		// we have no failed node in isr or we got the last failed node leaving in isr.
		t.Log(t0)
		test.Equal(t, FindSlice(t0.ISR, failedID) == -1 || (len(t0.ISR) == 1 && t0.ISR[0] == failedID), true)
		test.Equal(t, true, len(t0.ISR) >= t0IsrNum)
		t1, _ = lookupLeadership.GetTopicInfo(topic, 1)
		t.Log(t1)
		test.Equal(t, FindSlice(t1.ISR, failedID) == -1 || (len(t1.ISR) == 1 && t1.ISR[0] == failedID), true)
		test.Equal(t, true, len(t1.ISR) >= t1IsrNum)
		t3, _ = lookupLeadership.GetTopicInfo(topic3, 0)
		t.Log(t3)
		test.Equal(t, FindSlice(t3.ISR, failedID) == -1 || (len(t3.ISR) == 1 && t3.ISR[0] == failedID), true)
	}
}
func TestCluster(t *testing.T) { lopts := nsqlookupd.NewOptions() lopts.Logger = newTestLogger(t) lopts.BroadcastAddress = "127.0.0.1" nsqlookupd.SetLogger(lopts) _, _, lookupd := mustStartNSQLookupd(lopts) opts := nsqdNs.NewOptions() opts.Logger = newTestLogger(t) opts.NSQLookupdTCPAddresses = []string{lookupd.RealTCPAddr().String()} opts.BroadcastAddress = "127.0.0.1" tcpAddr, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "cluster_test" + strconv.Itoa(int(time.Now().Unix())) partitionStr := "0" hostname, err := os.Hostname() test.Equal(t, err, nil) nsqd.GetTopicIgnPart(topicName) url := fmt.Sprintf("http://%s/channel/create?topic=%s&channel=ch", httpAddr, topicName) _, err = http_api.NewClient(nil).POSTV1(url) test.Equal(t, err, nil) // allow some time for nsqd to push info to nsqlookupd time.Sleep(350 * time.Millisecond) endpoint := fmt.Sprintf("http://%s/debug", lookupd.RealHTTPAddr()) data, err := API(endpoint) test.Equal(t, err, nil) t.Logf("debug data: %v", data) topicData := data.Get("topic:" + topicName) producers, _ := topicData.Array() test.Equal(t, len(producers), 1) producer := topicData.GetIndex(0) test.Equal(t, producer.Get("hostname").MustString(), hostname) test.Equal(t, producer.Get("broadcast_address").MustString(), "127.0.0.1") test.Equal(t, producer.Get("tcp_port").MustInt(), tcpAddr.Port) test.Equal(t, producer.Get("tombstoned").MustBool(), false) channelData := data.Get("channel:" + topicName + ":" + partitionStr) producers, _ = channelData.Array() test.Equal(t, len(producers), 1) producer = topicData.GetIndex(0) test.Equal(t, producer.Get("hostname").MustString(), hostname) test.Equal(t, producer.Get("broadcast_address").MustString(), "127.0.0.1") test.Equal(t, producer.Get("tcp_port").MustInt(), tcpAddr.Port) test.Equal(t, producer.Get("tombstoned").MustBool(), false) endpoint = fmt.Sprintf("http://%s/lookup?topic=%s", lookupd.RealHTTPAddr(), topicName) data, err = 
API(endpoint) producers, _ = data.Get("producers").Array() test.Equal(t, len(producers), 1) producer = data.Get("producers").GetIndex(0) test.Equal(t, producer.Get("hostname").MustString(), hostname) test.Equal(t, producer.Get("broadcast_address").MustString(), "127.0.0.1") test.Equal(t, producer.Get("tcp_port").MustInt(), tcpAddr.Port) channels, _ := data.Get("channels").Array() test.Equal(t, len(channels), 1) channel := channels[0].(string) test.Equal(t, channel, "ch") nsqd.DeleteExistingTopic(topicName, 0) // allow some time for nsqd to push info to nsqlookupd time.Sleep(350 * time.Millisecond) endpoint = fmt.Sprintf("http://%s/lookup?topic=%s", lookupd.RealHTTPAddr(), topicName) data, err = API(endpoint) test.Equal(t, err, nil) producers, _ = data.Get("producers").Array() test.Equal(t, len(producers), 0) endpoint = fmt.Sprintf("http://%s/debug", lookupd.RealHTTPAddr()) data, err = API(endpoint) test.Equal(t, err, nil) producers, _ = data.Get("topic:" + topicName).Array() test.Equal(t, len(producers), 0) producers, _ = data.Get("channel:" + topicName + ":" + partitionStr).Array() test.Equal(t, len(producers), 0) }
func TestHTTPV1TopicChannel(t *testing.T) { opts := nsqd.NewOptions() opts.Logger = newTestLogger(t) _, httpAddr, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_http_topic_channel2" + strconv.Itoa(int(time.Now().Unix())) topicPart := 0 channelName := "ch2" nsqd.GetTopicIgnPart(topicName) url := fmt.Sprintf("http://%s/channel/create?topic=%s&channel=%s", httpAddr, topicName, channelName) resp, err := http.Post(url, "application/json", nil) test.Equal(t, err, nil) test.Equal(t, resp.StatusCode, 200) body, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() test.Equal(t, string(body), "") test.Equal(t, resp.Header.Get("X-NSQ-Content-Type"), "nsq; version=1.0") topic, err := nsqd.GetExistingTopic(topicName, topicPart) test.Equal(t, err, nil) test.NotNil(t, topic) channel, err := topic.GetExistingChannel(channelName) test.Equal(t, err, nil) test.NotNil(t, channel) url = fmt.Sprintf("http://%s/channel/pause?topic=%s&channel=%s", httpAddr, topicName, channelName) resp, err = http.Post(url, "application/json", nil) test.Equal(t, err, nil) test.Equal(t, resp.StatusCode, 200) body, _ = ioutil.ReadAll(resp.Body) resp.Body.Close() test.Equal(t, string(body), "") test.Equal(t, resp.Header.Get("X-NSQ-Content-Type"), "nsq; version=1.0") test.Equal(t, channel.IsPaused(), true) url = fmt.Sprintf("http://%s/channel/unpause?topic=%s&channel=%s", httpAddr, topicName, channelName) resp, err = http.Post(url, "application/json", nil) test.Equal(t, err, nil) test.Equal(t, resp.StatusCode, 200) body, _ = ioutil.ReadAll(resp.Body) resp.Body.Close() test.Equal(t, string(body), "") test.Equal(t, resp.Header.Get("X-NSQ-Content-Type"), "nsq; version=1.0") test.Equal(t, channel.IsPaused(), false) url = fmt.Sprintf("http://%s/channel/delete?topic=%s&channel=%s", httpAddr, topicName, channelName) resp, err = http.Post(url, "application/json", nil) test.Equal(t, err, nil) test.Equal(t, resp.StatusCode, 200) body, _ = 
ioutil.ReadAll(resp.Body) resp.Body.Close() test.Equal(t, string(body), "") test.Equal(t, resp.Header.Get("X-NSQ-Content-Type"), "nsq; version=1.0") _, err = topic.GetExistingChannel(channelName) test.NotNil(t, err) nsqd.DeleteExistingTopic(topicName, topicPart) _, err = nsqd.GetExistingTopic(topicName, topicPart) test.NotNil(t, err) }
func TestNsqLookupUpdateTopicMeta(t *testing.T) { if testing.Verbose() { SetCoordLogger(&levellogger.GLogger{}, levellogger.LOG_WARN) glog.SetFlags(0, "", "", true, true, 1) glog.StartWorker(time.Second) } else { SetCoordLogger(newTestLogger(t), levellogger.LOG_DEBUG) } idList := []string{"id1", "id2", "id3", "id4"} lookupCoord, nodeInfoList := prepareCluster(t, idList, false) for _, n := range nodeInfoList { defer os.RemoveAll(n.dataPath) defer n.localNsqd.Exit() defer n.nsqdCoord.Stop() } topic_p1_r1 := "test-nsqlookup-topic-unit-test-updatemeta-p1-r1" topic_p2_r1 := "test-nsqlookup-topic-unit-test-updatemeta-p2-r1" lookupLeadership := lookupCoord.leadership checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p2_r1, "**")) time.Sleep(time.Second * 3) defer func() { checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p2_r1, "**")) time.Sleep(time.Second * 3) lookupCoord.Stop() }() err := lookupCoord.CreateTopic(topic_p1_r1, TopicMetaInfo{1, 1, 0, 0, 0, 0}) test.Nil(t, err) time.Sleep(time.Second) err = lookupCoord.CreateTopic(topic_p2_r1, TopicMetaInfo{2, 1, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*5) // test increase replicator and decrease the replicator err = lookupCoord.ChangeTopicMetaParam(topic_p1_r1, -1, -1, 3) lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second * 5) tmeta, _, _ := lookupLeadership.GetTopicMetaInfo(topic_p1_r1) test.Equal(t, 3, tmeta.Replica) for i := 0; i < tmeta.PartitionNum; i++ { info, err := lookupLeadership.GetTopicInfo(topic_p1_r1, i) test.Nil(t, err) test.Equal(t, tmeta.Replica, len(info.ISR)) } err = lookupCoord.ChangeTopicMetaParam(topic_p1_r1, -1, -1, 2) lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second * 3) tmeta, _, _ = lookupLeadership.GetTopicMetaInfo(topic_p1_r1) test.Equal(t, 2, tmeta.Replica) for i := 0; i < tmeta.PartitionNum; i++ { info, err := 
lookupLeadership.GetTopicInfo(topic_p1_r1, i) test.Nil(t, err) test.Equal(t, tmeta.Replica, len(info.ISR)) } err = lookupCoord.ChangeTopicMetaParam(topic_p2_r1, -1, -1, 2) lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second * 3) tmeta, _, _ = lookupLeadership.GetTopicMetaInfo(topic_p2_r1) test.Equal(t, 2, tmeta.Replica) for i := 0; i < tmeta.PartitionNum; i++ { info, err := lookupLeadership.GetTopicInfo(topic_p2_r1, i) test.Nil(t, err) test.Equal(t, tmeta.Replica, len(info.ISR)) } // should fail err = lookupCoord.ChangeTopicMetaParam(topic_p2_r1, -1, -1, 3) test.NotNil(t, err) err = lookupCoord.ChangeTopicMetaParam(topic_p2_r1, -1, -1, 1) lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second * 3) tmeta, _, _ = lookupLeadership.GetTopicMetaInfo(topic_p2_r1) test.Equal(t, 1, tmeta.Replica) for i := 0; i < tmeta.PartitionNum; i++ { info, err := lookupLeadership.GetTopicInfo(topic_p2_r1, i) test.Nil(t, err) test.Equal(t, tmeta.Replica, len(info.ISR)) } // test update the sync and retention , all partition and replica should be updated err = lookupCoord.ChangeTopicMetaParam(topic_p1_r1, 1234, 3, -1) time.Sleep(time.Second) tmeta, _, _ = lookupLeadership.GetTopicMetaInfo(topic_p1_r1) test.Equal(t, 1234, tmeta.SyncEvery) test.Equal(t, int32(3), tmeta.RetentionDay) for i := 0; i < tmeta.PartitionNum; i++ { info, err := lookupLeadership.GetTopicInfo(topic_p1_r1, i) test.Nil(t, err) for _, nid := range info.ISR { localNsqd := nodeInfoList[nid].localNsqd localTopic, err := localNsqd.GetExistingTopic(topic_p1_r1, i) test.Nil(t, err) dinfo := localTopic.GetDynamicInfo() test.Equal(t, int64(1234), dinfo.SyncEvery) test.Equal(t, int32(3), dinfo.RetentionDay) } } }
func TestNsqLookupExpandPartition(t *testing.T) { if testing.Verbose() { SetCoordLogger(&levellogger.GLogger{}, levellogger.LOG_WARN) glog.SetFlags(0, "", "", true, true, 1) glog.StartWorker(time.Second) } else { SetCoordLogger(newTestLogger(t), levellogger.LOG_DEBUG) } idList := []string{"id1", "id2", "id3", "id4", "id5", "id6"} lookupCoord, nodeInfoList := prepareCluster(t, idList, false) for _, n := range nodeInfoList { defer os.RemoveAll(n.dataPath) defer n.localNsqd.Exit() defer n.nsqdCoord.Stop() } topic_p1_r1 := "test-nsqlookup-topic-unit-test-expand-p1-r1" topic_p1_r2 := "test-nsqlookup-topic-unit-test-expand-p1-r2" topic_p1_r3 := "test-nsqlookup-topic-unit-test-expand-p1-r3" lookupLeadership := lookupCoord.leadership checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r2, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r3, "**")) time.Sleep(time.Second * 3) defer func() { checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r2, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r3, "**")) time.Sleep(time.Second * 3) lookupCoord.Stop() }() err := lookupCoord.CreateTopic(topic_p1_r1, TopicMetaInfo{1, 1, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second) err = lookupCoord.CreateTopic(topic_p1_r2, TopicMetaInfo{1, 2, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second) err = lookupCoord.CreateTopic(topic_p1_r3, TopicMetaInfo{1, 3, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second) waitClusterStable(lookupCoord, time.Second) waitClusterStable(lookupCoord, time.Second*3) err = lookupCoord.ExpandTopicPartition(topic_p1_r1, 3) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err := lookupLeadership.GetTopicInfo(topic_p1_r1, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 1) t1, err := lookupLeadership.GetTopicInfo(topic_p1_r1, 1) test.Nil(t, err) 
test.Equal(t, len(t1.ISR), 1) t2, err := lookupLeadership.GetTopicInfo(topic_p1_r1, 2) test.Nil(t, err) test.Equal(t, len(t2.ISR), 1) lookupCoord.triggerCheckTopics("", 0, 0) waitClusterStable(lookupCoord, time.Second*3) err = lookupCoord.ExpandTopicPartition(topic_p1_r2, 2) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p1_r2, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), t0.Replica) t1, err = lookupLeadership.GetTopicInfo(topic_p1_r2, 1) test.Nil(t, err) test.Equal(t, len(t1.ISR), t1.Replica) lookupCoord.triggerCheckTopics("", 0, 0) waitClusterStable(lookupCoord, time.Second*3) err = lookupCoord.ExpandTopicPartition(topic_p1_r2, 3) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p1_r2, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), t0.Replica) t1, err = lookupLeadership.GetTopicInfo(topic_p1_r2, 1) test.Nil(t, err) test.Equal(t, len(t1.ISR), t1.Replica) t2, err = lookupLeadership.GetTopicInfo(topic_p1_r2, 2) test.Nil(t, err) test.Equal(t, len(t2.ISR), t2.Replica) waitClusterStable(lookupCoord, time.Second*3) // should fail err = lookupCoord.ExpandTopicPartition(topic_p1_r2, 4) test.NotNil(t, err) err = lookupCoord.ExpandTopicPartition(topic_p1_r3, 2) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p1_r3, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), t0.Replica) t1, err = lookupLeadership.GetTopicInfo(topic_p1_r3, 1) test.Nil(t, err) test.Equal(t, len(t1.ISR), t1.Replica) waitClusterStable(lookupCoord, time.Second*3) // should fail err = lookupCoord.ExpandTopicPartition(topic_p1_r3, 3) test.NotNil(t, err) }
func TestNsqLookupMovePartition(t *testing.T) { if testing.Verbose() { SetCoordLogger(&levellogger.GLogger{}, levellogger.LOG_WARN) glog.SetFlags(0, "", "", true, true, 1) glog.StartWorker(time.Second) } else { SetCoordLogger(newTestLogger(t), levellogger.LOG_DEBUG) } idList := []string{"id1", "id2", "id3", "id4", "id5"} lookupCoord, nodeInfoList := prepareCluster(t, idList, false) for _, n := range nodeInfoList { defer os.RemoveAll(n.dataPath) defer n.localNsqd.Exit() defer n.nsqdCoord.Stop() } topic_p1_r1 := "test-nsqlookup-topic-unit-test-move-p1-r1" topic_p2_r2 := "test-nsqlookup-topic-unit-test-move-p2-r2" lookupLeadership := lookupCoord.leadership checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p2_r2, "**")) time.Sleep(time.Second * 3) defer func() { checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord.DeleteTopic(topic_p2_r2, "**")) time.Sleep(time.Second * 3) lookupCoord.Stop() }() // test new topic create err := lookupCoord.CreateTopic(topic_p1_r1, TopicMetaInfo{1, 1, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) err = lookupCoord.CreateTopic(topic_p2_r2, TopicMetaInfo{2, 2, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) lookupCoord.triggerCheckTopics("", 0, 0) waitClusterStable(lookupCoord, time.Second*3) // test move leader to other isr; // test move leader to other catchup; // test move non-leader to other node; t0, err := lookupLeadership.GetTopicInfo(topic_p1_r1, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 1) // move p1_r1 leader to other node toNode := "" for _, node := range nodeInfoList { if node.nodeInfo.GetID() == t0.Leader { continue } toNode = node.nodeInfo.GetID() break } lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second) err = lookupCoord.MoveTopicPartitionDataByManual(topic_p1_r1, 0, true, t0.Leader, toNode) test.Nil(t, err) waitClusterStable(lookupCoord, 
time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p1_r1, 0) test.Nil(t, err) // it may be two nodes in isr if the moved leader rejoin as isr test.Equal(t, len(t0.ISR) >= 1, true) test.Equal(t, t0.Leader, toNode) t0, err = lookupLeadership.GetTopicInfo(topic_p2_r2, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 2) toNode = "" for _, nid := range t0.ISR { if nid == t0.Leader { continue } toNode = nid break } waitClusterStable(lookupCoord, time.Second*3) // move leader to other isr node oldLeader := t0.Leader err = lookupCoord.MoveTopicPartitionDataByManual(topic_p2_r2, 0, true, t0.Leader, toNode) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p2_r2, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR) >= 2, true) test.NotEqual(t, t0.Leader, oldLeader) test.Equal(t, t0.Leader, toNode) // move leader to other non-isr node toNode = "" for _, node := range nodeInfoList { if FindSlice(t0.ISR, node.nodeInfo.GetID()) != -1 { continue } // check other partition t1, err := lookupLeadership.GetTopicInfo(topic_p2_r2, 1) if err == nil { if FindSlice(t1.ISR, node.nodeInfo.GetID()) != -1 { continue } } toNode = node.nodeInfo.GetID() break } lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second) err = lookupCoord.MoveTopicPartitionDataByManual(topic_p2_r2, 0, true, t0.Leader, toNode) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p2_r2, 0) test.Nil(t, err) test.Equal(t, t0.Leader, toNode) // move non-leader to other non-isr node toNode = "" toNodeInvalid := "" fromNode := "" for _, nid := range t0.ISR { if nid != t0.Leader { fromNode = nid } } for _, node := range nodeInfoList { if FindSlice(t0.ISR, node.nodeInfo.GetID()) != -1 { continue } // check other partition t1, err := lookupLeadership.GetTopicInfo(topic_p2_r2, 1) if err == nil { toNodeInvalid = t1.Leader if FindSlice(t1.ISR, node.nodeInfo.GetID()) != -1 { continue } } toNode = 
node.nodeInfo.GetID() break } lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second) err = lookupCoord.MoveTopicPartitionDataByManual(topic_p2_r2, 0, false, fromNode, toNodeInvalid) test.NotNil(t, err) test.Equal(t, ErrNodeIsExcludedForTopicData, err) lookupCoord.triggerCheckTopics("", 0, 0) time.Sleep(time.Second) err = lookupCoord.MoveTopicPartitionDataByManual(topic_p2_r2, 0, false, fromNode, toNode) test.Nil(t, err) waitClusterStable(lookupCoord, time.Second*3) t0, err = lookupLeadership.GetTopicInfo(topic_p2_r2, 0) test.Nil(t, err) test.Equal(t, FindSlice(t0.ISR, toNode) != -1, true) test.Equal(t, -1, FindSlice(t0.ISR, fromNode)) }
func TestNsqLookupNsqdCreateTopic(t *testing.T) { // on 4 nodes, we should test follow cases // 1 partition 1 replica // 1 partition 3 replica // 3 partition 1 replica // 2 partition 2 replica if testing.Verbose() { SetCoordLogger(&levellogger.GLogger{}, levellogger.LOG_WARN) glog.SetFlags(0, "", "", true, true, 1) glog.StartWorker(time.Second) } else { SetCoordLogger(newTestLogger(t), levellogger.LOG_DEBUG) } idList := []string{"id1", "id2", "id3", "id4"} lookupCoord1, nodeInfoList := prepareCluster(t, idList, false) for _, n := range nodeInfoList { defer os.RemoveAll(n.dataPath) defer n.localNsqd.Exit() defer n.nsqdCoord.Stop() } test.Equal(t, 4, len(nodeInfoList)) topic_p1_r1 := "test-nsqlookup-topic-unit-testcreate-p1-r1" topic_p1_r3 := "test-nsqlookup-topic-unit-testcreate-p1-r3" topic_p3_r1 := "test-nsqlookup-topic-unit-testcreate-p3-r1" topic_p2_r2 := "test-nsqlookup-topic-unit-testcreate-p2-r2" lookupLeadership := lookupCoord1.leadership time.Sleep(time.Second) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p1_r3, "**")) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p3_r1, "**")) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p2_r2, "**")) time.Sleep(time.Second * 3) defer func() { checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p1_r1, "**")) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p1_r3, "**")) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p3_r1, "**")) checkDeleteErr(t, lookupCoord1.DeleteTopic(topic_p2_r2, "**")) time.Sleep(time.Second * 3) lookupCoord1.Stop() }() // test new topic create err := lookupCoord1.CreateTopic(topic_p1_r1, TopicMetaInfo{1, 1, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord1, time.Second*3) pmeta, _, err := lookupLeadership.GetTopicMetaInfo(topic_p1_r1) pn := pmeta.PartitionNum test.Nil(t, err) test.Equal(t, pn, 1) t0, err := lookupLeadership.GetTopicInfo(topic_p1_r1, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 1) t.Logf("t0 leader 
is: %v", t0.Leader) if nodeInfoList[t0.Leader] == nil { t.Fatalf("no leader: %v, %v", t0, nodeInfoList) } t0LeaderCoord := nodeInfoList[t0.Leader].nsqdCoord test.NotNil(t, t0LeaderCoord) tc0, coordErr := t0LeaderCoord.getTopicCoord(topic_p1_r1, 0) test.Nil(t, coordErr) test.Equal(t, tc0.topicInfo.Leader, t0.Leader) test.Equal(t, len(tc0.topicInfo.ISR), 1) err = lookupCoord1.CreateTopic(topic_p1_r3, TopicMetaInfo{1, 3, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord1, time.Second*5) lookupCoord1.triggerCheckTopics("", 0, 0) waitClusterStable(lookupCoord1, time.Second*5) pmeta, _, err = lookupLeadership.GetTopicMetaInfo(topic_p1_r3) pn = pmeta.PartitionNum test.Nil(t, err) test.Equal(t, pn, 1) t0, err = lookupLeadership.GetTopicInfo(topic_p1_r3, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 3) t.Logf("t0 leader is: %v", t0.Leader) if nodeInfoList[t0.Leader] == nil { t.Fatalf("no leader: %v, %v", t0, nodeInfoList) } t0LeaderCoord = nodeInfoList[t0.Leader].nsqdCoord test.NotNil(t, t0LeaderCoord) tc0, coordErr = t0LeaderCoord.getTopicCoord(topic_p1_r3, 0) test.Nil(t, coordErr) test.Equal(t, tc0.topicInfo.Leader, t0.Leader) test.Equal(t, len(tc0.topicInfo.ISR), 3) err = lookupCoord1.CreateTopic(topic_p3_r1, TopicMetaInfo{3, 1, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord1, time.Second*2) waitClusterStable(lookupCoord1, time.Second*5) pmeta, _, err = lookupLeadership.GetTopicMetaInfo(topic_p3_r1) pn = pmeta.PartitionNum test.Nil(t, err) test.Equal(t, pn, 3) t0, err = lookupLeadership.GetTopicInfo(topic_p3_r1, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 1) t.Logf("t0 leader is: %v", t0.Leader) if nodeInfoList[t0.Leader] == nil { t.Fatalf("no leader: %v, %v", t0, nodeInfoList) } t0LeaderCoord = nodeInfoList[t0.Leader].nsqdCoord test.NotNil(t, t0LeaderCoord) tc0, coordErr = t0LeaderCoord.getTopicCoord(topic_p3_r1, 0) test.Nil(t, coordErr) test.Equal(t, tc0.topicInfo.Leader, t0.Leader) test.Equal(t, len(tc0.topicInfo.ISR), 1) t1, err := 
lookupLeadership.GetTopicInfo(topic_p3_r1, 1) t1LeaderCoord := nodeInfoList[t1.Leader].nsqdCoord test.NotNil(t, t1LeaderCoord) tc1, coordErr := t1LeaderCoord.getTopicCoord(topic_p3_r1, 1) test.Nil(t, coordErr) test.Equal(t, tc1.topicInfo.Leader, t1.Leader) test.Equal(t, len(tc1.topicInfo.ISR), 1) err = lookupCoord1.CreateTopic(topic_p2_r2, TopicMetaInfo{2, 2, 0, 0, 0, 0}) test.Nil(t, err) waitClusterStable(lookupCoord1, time.Second*3) waitClusterStable(lookupCoord1, time.Second*5) pmeta, _, err = lookupLeadership.GetTopicMetaInfo(topic_p2_r2) pn = pmeta.PartitionNum test.Nil(t, err) test.Equal(t, pn, 2) t0, err = lookupLeadership.GetTopicInfo(topic_p2_r2, 0) test.Nil(t, err) test.Equal(t, len(t0.ISR), 2) t.Logf("t0 leader is: %v", t0.Leader) if nodeInfoList[t0.Leader] == nil { t.Fatalf("no leader: %v, %v", t0, nodeInfoList) } t0LeaderCoord = nodeInfoList[t0.Leader].nsqdCoord test.NotNil(t, t0LeaderCoord) tc0, coordErr = t0LeaderCoord.getTopicCoord(topic_p2_r2, 0) test.Nil(t, coordErr) test.Equal(t, tc0.topicInfo.Leader, t0.Leader) test.Equal(t, len(tc0.topicInfo.ISR), 2) t1, err = lookupLeadership.GetTopicInfo(topic_p2_r2, 1) t1LeaderCoord = nodeInfoList[t1.Leader].nsqdCoord test.NotNil(t, t1LeaderCoord) tc1, coordErr = t1LeaderCoord.getTopicCoord(topic_p2_r2, 1) test.Nil(t, coordErr) test.Equal(t, tc1.topicInfo.Leader, t1.Leader) test.Equal(t, len(tc1.topicInfo.ISR), 2) // test create on exist topic, create on partial partition oldMeta, _, err := lookupCoord1.leadership.GetTopicMetaInfo(topic_p2_r2) test.Nil(t, err) err = lookupCoord1.CreateTopic(topic_p2_r2, TopicMetaInfo{2, 2, 0, 0, 1, 1}) test.NotNil(t, err) waitClusterStable(lookupCoord1, time.Second) waitClusterStable(lookupCoord1, time.Second*5) newMeta, _, err := lookupCoord1.leadership.GetTopicMetaInfo(topic_p2_r2) test.Nil(t, err) test.Equal(t, oldMeta, newMeta) }