// deleteTopicHandler removes all channel registrations for the given topic,
// then removes the topic registration itself.
func (s *httpServer) deleteTopicHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, err := reqParams.Get("topic")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_TOPIC", nil)
		return
	}

	registrations := s.context.nsqlookupd.DB.FindRegistrations("channel", topicName, "*")
	for _, registration := range registrations {
		log.Printf("DB: removing channel(%s) from topic(%s)", registration.SubKey, topicName)
		s.context.nsqlookupd.DB.RemoveRegistration(registration)
	}

	registrations = s.context.nsqlookupd.DB.FindRegistrations("topic", topicName, "")
	for _, registration := range registrations {
		log.Printf("DB: removing topic(%s)", topicName)
		s.context.nsqlookupd.DB.RemoveRegistration(registration)
	}

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) deleteChannelHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, channelName, err := util.GetTopicChannelArgs(reqParams)
	if err != nil {
		util.ApiResponse(w, 500, err.Error(), nil)
		return
	}

	topic, err := s.context.nsqd.GetExistingTopic(topicName)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_TOPIC", nil)
		return
	}

	err = topic.DeleteExistingChannel(channelName)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_CHANNEL", nil)
		return
	}

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) tombstoneTopicProducerHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, err := reqParams.Get("topic")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_TOPIC", nil)
		return
	}

	node, err := reqParams.Get("node")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_NODE", nil)
		return
	}

	log.Printf("DB: setting tombstone for producer@%s of topic(%s)", node, topicName)
	producers := s.context.nsqlookupd.DB.FindProducers("topic", topicName, "")
	for _, p := range producers {
		thisNode := fmt.Sprintf("%s:%d", p.peerInfo.BroadcastAddress, p.peerInfo.HttpPort)
		if thisNode == node {
			p.Tombstone()
		}
	}

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) deleteChannelHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, channelName, err := util.GetTopicChannelArgs(reqParams)
	if err != nil {
		util.ApiResponse(w, 500, err.Error(), nil)
		return
	}

	registrations := s.context.nsqlookupd.DB.FindRegistrations("channel", topicName, channelName)
	if len(registrations) == 0 {
		util.ApiResponse(w, 404, "NOT_FOUND", nil)
		return
	}

	log.Printf("DB: removing channel(%s) from topic(%s)", channelName, topicName)
	for _, registration := range registrations {
		s.context.nsqlookupd.DB.RemoveRegistration(registration)
	}

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) lookupHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, err := reqParams.Get("topic")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_TOPIC", nil)
		return
	}

	registration := s.context.nsqlookupd.DB.FindRegistrations("topic", topicName, "")
	if len(registration) == 0 {
		util.ApiResponse(w, 500, "INVALID_ARG_TOPIC", nil)
		return
	}

	channels := s.context.nsqlookupd.DB.FindRegistrations("channel", topicName, "*").SubKeys()
	producers := s.context.nsqlookupd.DB.FindProducers("topic", topicName, "")
	producers = producers.FilterByActive(s.context.nsqlookupd.inactiveProducerTimeout,
		s.context.nsqlookupd.tombstoneLifetime)

	data := make(map[string]interface{})
	data["channels"] = channels
	data["producers"] = producers.PeerInfo()

	util.ApiResponse(w, 200, "OK", data)
}
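// A minimal client-side sketch of querying /lookup (not part of this package).
// The nsqlookupd address (localhost:4161) and topic name are illustrative
// assumptions; the sketch decodes the standard envelope written by
// util.ApiResponse and prints the channel names for the topic.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

type lookupResponse struct {
	StatusCode int    `json:"status_code"`
	StatusTxt  string `json:"status_txt"`
	Data       struct {
		Channels  []string      `json:"channels"`
		Producers []interface{} `json:"producers"`
	} `json:"data"`
}

func main() {
	endpoint := "http://localhost:4161/lookup?topic=" + url.QueryEscape("example_topic")
	resp, err := http.Get(endpoint)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var lr lookupResponse
	if err := json.NewDecoder(resp.Body).Decode(&lr); err != nil {
		panic(err)
	}
	fmt.Println(lr.StatusTxt, lr.Data.Channels)
}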
// this endpoint works by giving out an ID that maps to a stats dictionary.
// The initial request returns the number of messages processed since each nsqd started up.
// Subsequent requests pass that ID and get an updated delta based on each individual channel/nsqd message count.
// That ID must be re-requested or it will expire.
func counterDataHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	statsID, _ := reqParams.Get("id")
	now := time.Now()
	if statsID == "" {
		// make a new one
		statsID = fmt.Sprintf("%d.%d", now.Unix(), now.UnixNano())
	}

	stats, ok := globalCounters[statsID]
	if !ok {
		stats = make(map[string]int64)
	}
	newStats := make(map[string]int64)
	newStats["time"] = now.Unix()

	producers, _ := lookupd.GetLookupdProducers(lookupdHTTPAddrs)
	addresses := make([]string, len(producers))
	for i, p := range producers {
		addresses[i] = p.HTTPAddress()
	}
	_, channelStats, _ := lookupd.GetNSQDStats(addresses, "")

	var newMessages int64
	var totalMessages int64
	for _, channelStats := range channelStats {
		for _, hostChannelStats := range channelStats.HostStats {
			key := fmt.Sprintf("%s:%s:%s", channelStats.TopicName,
				channelStats.ChannelName, hostChannelStats.HostAddress)
			d, ok := stats[key]
			if ok && d <= hostChannelStats.MessageCount {
				newMessages += (hostChannelStats.MessageCount - d)
			}
			totalMessages += hostChannelStats.MessageCount
			newStats[key] = hostChannelStats.MessageCount
		}
	}
	globalCounters[statsID] = newStats

	data := make(map[string]interface{})
	data["new_messages"] = newMessages
	data["total_messages"] = totalMessages
	data["id"] = statsID
	util.ApiResponse(w, 200, "OK", data)
}
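// A minimal polling sketch against this endpoint (not part of this package).
// The mount path (/counter/data) and address (localhost:4171) are assumptions
// for illustration; the important part is that the first request omits "id"
// and every later request echoes back the "id" returned previously to receive
// a delta in "new_messages".
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

type counterData struct {
	Data struct {
		ID            string `json:"id"`
		NewMessages   int64  `json:"new_messages"`
		TotalMessages int64  `json:"total_messages"`
	} `json:"data"`
}

func main() {
	id := ""
	for i := 0; i < 3; i++ {
		endpoint := "http://localhost:4171/counter/data?id=" + url.QueryEscape(id)
		resp, err := http.Get(endpoint)
		if err != nil {
			panic(err)
		}
		var cd counterData
		err = json.NewDecoder(resp.Body).Decode(&cd)
		resp.Body.Close()
		if err != nil {
			panic(err)
		}

		// reuse the ID so the next poll returns a delta instead of a fresh baseline
		id = cd.Data.ID
		fmt.Printf("new: %d total: %d\n", cd.Data.NewMessages, cd.Data.TotalMessages)
		time.Sleep(time.Second)
	}
}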
func (s *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch req.URL.Path {
	case "/ping":
		s.pingHandler(w, req)
	case "/info":
		s.infoHandler(w, req)
	case "/lookup":
		s.lookupHandler(w, req)
	case "/topics":
		s.topicsHandler(w, req)
	case "/channels":
		s.channelsHandler(w, req)
	case "/nodes":
		s.nodesHandler(w, req)
	case "/delete_topic":
		s.deleteTopicHandler(w, req)
	case "/delete_channel":
		s.deleteChannelHandler(w, req)
	case "/tombstone_topic_producer":
		s.tombstoneTopicProducerHandler(w, req)
	case "/create_topic":
		s.createTopicHandler(w, req)
	case "/create_channel":
		s.createChannelHandler(w, req)
	case "/debug":
		s.debugHandler(w, req)
	default:
		util.ApiResponse(w, 404, "NOT_FOUND", nil)
	}
}
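// Because httpServer implements ServeHTTP(http.ResponseWriter, *http.Request),
// it satisfies the standard library's http.Handler interface. A minimal sketch
// of how such a handler is typically served follows (not part of this package);
// the listener address and the stand-in handler type are illustrative
// assumptions, not taken from this file.
package main

import (
	"log"
	"net"
	"net/http"
)

// handler stands in for the real *httpServer; anything satisfying
// http.Handler can be served the same way.
type handler struct{}

func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch req.URL.Path {
	case "/ping":
		w.Write([]byte("OK"))
	default:
		http.NotFound(w, req)
	}
}

func main() {
	listener, err := net.Listen("tcp", "0.0.0.0:4161")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("HTTP: listening on %s", listener.Addr())
	// http.Serve blocks, dispatching every request to handler.ServeHTTP
	log.Fatal(http.Serve(listener, &handler{}))
}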
func (s *httpServer) debugHandler(w http.ResponseWriter, req *http.Request) {
	s.context.nsqlookupd.DB.RLock()
	defer s.context.nsqlookupd.DB.RUnlock()

	data := make(map[string][]map[string]interface{})
	for r, producers := range s.context.nsqlookupd.DB.registrationMap {
		key := r.Category + ":" + r.Key + ":" + r.SubKey
		data[key] = make([]map[string]interface{}, 0)
		for _, p := range producers {
			m := make(map[string]interface{})
			m["id"] = p.peerInfo.id
			m["address"] = p.peerInfo.Address //TODO: remove for 1.0
			m["hostname"] = p.peerInfo.Hostname
			m["broadcast_address"] = p.peerInfo.BroadcastAddress
			m["tcp_port"] = p.peerInfo.TcpPort
			m["http_port"] = p.peerInfo.HttpPort
			m["version"] = p.peerInfo.Version
			m["last_update"] = p.peerInfo.lastUpdate.UnixNano()
			m["tombstoned"] = p.tombstoned
			m["tombstoned_at"] = p.tombstonedAt.UnixNano()
			data[key] = append(data[key], m)
		}
	}

	util.ApiResponse(w, 200, "OK", data)
}
func (s *httpServer) infoHandler(w http.ResponseWriter, req *http.Request) {
	util.ApiResponse(w, 200, "OK", struct {
		Version string `json:"version"`
	}{
		Version: util.BINARY_VERSION,
	})
}
func (s *httpServer) channelsHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, err := reqParams.Get("topic")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_TOPIC", nil)
		return
	}

	channels := s.context.nsqlookupd.DB.FindRegistrations("channel", topicName, "*").SubKeys()
	data := make(map[string]interface{})
	data["channels"] = channels
	util.ApiResponse(w, 200, "OK", data)
}
func (s *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch req.URL.Path {
	case "/pub":
		fallthrough
	case "/put":
		s.putHandler(w, req)
	case "/mpub":
		fallthrough
	case "/mput":
		s.mputHandler(w, req)
	case "/stats":
		s.statsHandler(w, req)
	case "/ping":
		s.pingHandler(w, req)
	case "/info":
		s.infoHandler(w, req)
	case "/empty_topic":
		s.emptyTopicHandler(w, req)
	case "/delete_topic":
		s.deleteTopicHandler(w, req)
	case "/empty_channel":
		s.emptyChannelHandler(w, req)
	case "/delete_channel":
		s.deleteChannelHandler(w, req)
	case "/pause_channel":
		s.pauseChannelHandler(w, req)
	case "/unpause_channel":
		s.pauseChannelHandler(w, req)
	case "/create_topic":
		s.createTopicHandler(w, req)
	case "/create_channel":
		s.createChannelHandler(w, req)
	case "/debug/pprof":
		httpprof.Index(w, req)
	case "/debug/pprof/cmdline":
		httpprof.Cmdline(w, req)
	case "/debug/pprof/symbol":
		httpprof.Symbol(w, req)
	case "/debug/pprof/heap":
		httpprof.Handler("heap").ServeHTTP(w, req)
	case "/debug/pprof/goroutine":
		httpprof.Handler("goroutine").ServeHTTP(w, req)
	case "/debug/pprof/profile":
		httpprof.Profile(w, req)
	case "/debug/pprof/block":
		httpprof.Handler("block").ServeHTTP(w, req)
	case "/debug/pprof/threadcreate":
		httpprof.Handler("threadcreate").ServeHTTP(w, req)
	default:
		log.Printf("ERROR: 404 %s", req.URL.Path)
		util.ApiResponse(w, 404, "NOT_FOUND", nil)
	}
}
func (s *httpServer) emptyTopicHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, err := reqParams.Get("topic")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_TOPIC", nil)
		return
	}

	if !nsq.IsValidTopicName(topicName) {
		util.ApiResponse(w, 500, "INVALID_TOPIC", nil)
		return
	}

	topic, err := s.context.nsqd.GetExistingTopic(topicName)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_TOPIC", nil)
		return
	}

	err = topic.Empty()
	if err != nil {
		util.ApiResponse(w, 500, "INTERNAL_ERROR", nil)
		return
	}

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) createChannelHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, channelName, err := util.GetTopicChannelArgs(reqParams)
	if err != nil {
		util.ApiResponse(w, 500, err.Error(), nil)
		return
	}

	log.Printf("DB: adding channel(%s) in topic(%s)", channelName, topicName)
	key := Registration{"channel", topicName, channelName}
	s.context.nsqlookupd.DB.AddRegistration(key)

	log.Printf("DB: adding topic(%s)", topicName)
	key = Registration{"topic", topicName, ""}
	s.context.nsqlookupd.DB.AddRegistration(key)

	util.ApiResponse(w, 200, "OK", nil)
}
// pauseChannelHandler handles both /pause_channel and /unpause_channel,
// distinguishing between them by the request path prefix.
func (s *httpServer) pauseChannelHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, channelName, err := util.GetTopicChannelArgs(reqParams)
	if err != nil {
		util.ApiResponse(w, 500, err.Error(), nil)
		return
	}

	topic, err := s.context.nsqd.GetExistingTopic(topicName)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_TOPIC", nil)
		return
	}

	channel, err := topic.GetExistingChannel(channelName)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_CHANNEL", nil)
		return
	}

	if strings.HasPrefix(req.URL.Path, "/pause") {
		err = channel.Pause()
	} else {
		err = channel.UnPause()
	}
	if err != nil {
		log.Printf("ERROR: failure in %s - %s", req.URL.Path, err.Error())
	}

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) createTopicHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	topicName, err := reqParams.Get("topic")
	if err != nil {
		util.ApiResponse(w, 500, "MISSING_ARG_TOPIC", nil)
		return
	}

	if !nsq.IsValidTopicName(topicName) {
		util.ApiResponse(w, 500, "INVALID_TOPIC", nil)
		return
	}

	log.Printf("DB: adding topic(%s)", topicName)
	key := Registration{"topic", topicName, ""}
	s.context.nsqlookupd.DB.AddRegistration(key)

	util.ApiResponse(w, 200, "OK", nil)
}
func (s *httpServer) putHandler(w http.ResponseWriter, req *http.Request) {
	if req.Method != "POST" {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	// TODO: one day I'd really like to just error on chunked requests
	// to be able to fail "too big" requests before we even read
	if req.ContentLength > s.context.nsqd.options.maxMessageSize {
		util.ApiResponse(w, 500, "MSG_TOO_BIG", nil)
		return
	}

	// add 1 so that it's greater than our max when we test for it
	// (LimitReader returns a "fake" EOF)
	readMax := s.context.nsqd.options.maxMessageSize + 1
	body, err := ioutil.ReadAll(io.LimitReader(req.Body, readMax))
	if err != nil {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}
	if int64(len(body)) == readMax {
		log.Printf("ERROR: /put hit max message size")
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}
	if len(body) == 0 {
		util.ApiResponse(w, 500, "MSG_EMPTY", nil)
		return
	}

	_, topic, err := s.getTopicFromQuery(req)
	if err != nil {
		util.ApiResponse(w, 500, err.Error(), nil)
		return
	}

	msg := nsq.NewMessage(<-s.context.nsqd.idChan, body)
	err = topic.PutMessage(msg)
	if err != nil {
		util.ApiResponse(w, 500, "NOK", nil)
		return
	}

	w.Header().Set("Content-Length", "2")
	io.WriteString(w, "OK")
}
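// A minimal publisher sketch against /put (not part of this package), assuming
// nsqd's HTTP interface is listening on localhost:4151; the address and topic
// name are illustrative. The message is the raw POST body, and a 200 response
// whose body is "OK" indicates the message was queued.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	endpoint := "http://localhost:4151/put?topic=" + url.QueryEscape("example_topic")
	resp, err := http.Post(endpoint, "application/octet-stream", strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // expect: 200 OK
}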
func (s *httpServer) nodesHandler(w http.ResponseWriter, req *http.Request) {
	// don't filter out tombstoned nodes
	producers := s.context.nsqlookupd.DB.FindProducers("client", "", "").FilterByActive(
		s.context.nsqlookupd.inactiveProducerTimeout, 0)
	nodes := make([]*node, len(producers))
	for i, p := range producers {
		topics := s.context.nsqlookupd.DB.LookupRegistrations(p.peerInfo.id).Filter("topic", "*", "").Keys()

		// for each topic find the producer that matches this peer
		// to add tombstone information
		tombstones := make([]bool, len(topics))
		for j, t := range topics {
			topicProducers := s.context.nsqlookupd.DB.FindProducers("topic", t, "")
			for _, tp := range topicProducers {
				if tp.peerInfo == p.peerInfo {
					tombstones[j] = tp.IsTombstoned(s.context.nsqlookupd.tombstoneLifetime)
				}
			}
		}

		nodes[i] = &node{
			RemoteAddress:    p.peerInfo.RemoteAddress,
			Address:          p.peerInfo.Address, //TODO: drop for 1.0
			Hostname:         p.peerInfo.Hostname,
			BroadcastAddress: p.peerInfo.BroadcastAddress,
			TcpPort:          p.peerInfo.TcpPort,
			HttpPort:         p.peerInfo.HttpPort,
			Version:          p.peerInfo.Version,
			Tombstones:       tombstones,
			Topics:           topics,
		}
	}

	data := make(map[string]interface{})
	data["producers"] = nodes
	util.ApiResponse(w, 200, "OK", data)
}
func (s *httpServer) topicsHandler(w http.ResponseWriter, req *http.Request) {
	topics := s.context.nsqlookupd.DB.FindRegistrations("topic", "*", "").Keys()
	data := make(map[string]interface{})
	data["topics"] = topics
	util.ApiResponse(w, 200, "OK", data)
}
// mputHandler publishes multiple messages in a single request, either as
// newline-delimited bodies (the default) or in binary MPUB framing when the
// "binary" query parameter is present.
func (s *httpServer) mputHandler(w http.ResponseWriter, req *http.Request) {
	var msgs []*nsq.Message
	var exit bool

	if req.Method != "POST" {
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	// TODO: one day I'd really like to just error on chunked requests
	// to be able to fail "too big" requests before we even read
	if req.ContentLength > s.context.nsqd.options.maxBodySize {
		util.ApiResponse(w, 500, "BODY_TOO_BIG", nil)
		return
	}

	reqParams, topic, err := s.getTopicFromQuery(req)
	if err != nil {
		util.ApiResponse(w, 500, err.Error(), nil)
		return
	}

	_, ok := reqParams["binary"]
	if ok {
		tmp := make([]byte, 4)
		msgs, err = readMPUB(req.Body, tmp, s.context.nsqd.idChan, s.context.nsqd.options.maxMessageSize)
		if err != nil {
			util.ApiResponse(w, 500, err.(*util.FatalClientErr).Code[2:], nil)
			return
		}
	} else {
		// add 1 so that it's greater than our max when we test for it
		// (LimitReader returns a "fake" EOF)
		readMax := s.context.nsqd.options.maxBodySize + 1
		rdr := bufio.NewReader(io.LimitReader(req.Body, readMax))
		total := 0
		for !exit {
			block, err := rdr.ReadBytes('\n')
			if err != nil {
				if err != io.EOF {
					util.ApiResponse(w, 500, "INTERNAL_ERROR", nil)
					return
				}
				exit = true
			}
			total += len(block)
			if int64(total) == readMax {
				// the body was truncated by the LimitReader; fail the request
				// rather than silently publishing a partial batch
				log.Printf("ERROR: /mput hit max body size")
				util.ApiResponse(w, 500, "BODY_TOO_BIG", nil)
				return
			}

			if len(block) > 0 && block[len(block)-1] == '\n' {
				block = block[:len(block)-1]
			}

			// silently discard 0 length messages
			// this maintains the behavior pre 0.2.22
			if len(block) == 0 {
				continue
			}

			if int64(len(block)) > s.context.nsqd.options.maxMessageSize {
				util.ApiResponse(w, 500, "MSG_TOO_BIG", nil)
				return
			}

			msg := nsq.NewMessage(<-s.context.nsqd.idChan, block)
			msgs = append(msgs, msg)
		}
	}

	err = topic.PutMessages(msgs)
	if err != nil {
		util.ApiResponse(w, 500, "NOK", nil)
		return
	}

	w.Header().Set("Content-Length", "2")
	io.WriteString(w, "OK")
}
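// A minimal multi-publish sketch against /mput in the default newline-delimited
// mode (not part of this package); pass the "binary" query parameter to use the
// binary MPUB framing read by readMPUB instead. The address and topic name are
// illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	endpoint := "http://localhost:4151/mput?topic=" + url.QueryEscape("example_topic")
	// one message per line; empty lines are silently discarded by the handler
	body := strings.NewReader("message one\nmessage two\nmessage three\n")
	resp, err := http.Post(endpoint, "application/octet-stream", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	respBody, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(respBody)) // expect: 200 OK
}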
func (s *httpServer) statsHandler(w http.ResponseWriter, req *http.Request) {
	reqParams, err := util.NewReqParams(req)
	if err != nil {
		log.Printf("ERROR: failed to parse request params - %s", err.Error())
		util.ApiResponse(w, 500, "INVALID_REQUEST", nil)
		return
	}

	formatString, _ := reqParams.Get("format")
	jsonFormat := formatString == "json"
	now := time.Now()

	if !jsonFormat {
		io.WriteString(w, fmt.Sprintf("%s\n", util.Version("nsqd")))
	}

	stats := s.context.nsqd.getStats()

	if jsonFormat {
		util.ApiResponse(w, 200, "OK", struct {
			Topics []TopicStats `json:"topics"`
		}{stats})
	} else {
		if len(stats) == 0 {
			io.WriteString(w, "\nNO_TOPICS\n")
			return
		}
		for _, t := range stats {
			io.WriteString(w, fmt.Sprintf("\n[%-15s] depth: %-5d be-depth: %-5d msgs: %-8d e2e%%: %s\n",
				t.TopicName,
				t.Depth,
				t.BackendDepth,
				t.MessageCount,
				t.E2eProcessingLatency))
			for _, c := range t.Channels {
				var pausedPrefix string
				if c.Paused {
					pausedPrefix = " *P "
				} else {
					pausedPrefix = "    "
				}
				io.WriteString(w,
					fmt.Sprintf("%s[%-25s] depth: %-5d be-depth: %-5d inflt: %-4d def: %-4d re-q: %-5d timeout: %-5d msgs: %-8d e2e%%: %s\n",
						pausedPrefix,
						c.ChannelName,
						c.Depth,
						c.BackendDepth,
						c.InFlightCount,
						c.DeferredCount,
						c.RequeueCount,
						c.TimeoutCount,
						c.MessageCount,
						c.E2eProcessingLatency))
				for _, client := range c.Clients {
					connectTime := time.Unix(client.ConnectTime, 0)
					// truncate to the second
					duration := time.Duration(int64(now.Sub(connectTime).Seconds())) * time.Second
					_, port, _ := net.SplitHostPort(client.RemoteAddress)
					io.WriteString(w,
						fmt.Sprintf("        [%s %-21s] state: %d inflt: %-4d rdy: %-4d fin: %-8d re-q: %-8d msgs: %-8d connected: %s\n",
							client.Version,
							fmt.Sprintf("%s:%s", client.Name, port),
							client.State,
							client.InFlightCount,
							client.ReadyCount,
							client.FinishCount,
							client.RequeueCount,
							client.MessageCount,
							duration,
						))
				}
			}
		}
	}
}
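// A minimal sketch of fetching the JSON form of this endpoint (not part of this
// package), assuming nsqd's HTTP interface is listening on localhost:4151; the
// address is illustrative. Without format=json the same endpoint returns the
// human-readable text layout produced above.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:4151/stats?format=json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	// the response envelope carries a "topics" array under "data"
	fmt.Println(string(body))
}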