Example #1
// handleGetTopicConsumers is an HTTP request handler for `GET /topic/{topic}/consumers`
func (as *T) handleGetTopicConsumers(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	var err error

	topic := mux.Vars(r)[paramTopic]

	group := ""
	if err = r.ParseForm(); err != nil {
		respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{err.Error()})
		return
	}
	groups := r.Form[paramGroup]
	if len(groups) > 1 {
		err = fmt.Errorf("One consumer group is expected, but %d provided", len(groups))
		respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{err.Error()})
		return
	}
	if len(groups) == 1 {
		group = groups[0]
	}

	var consumers map[string]map[string][]int32
	if group == "" {
		consumers, err = as.admin.GetAllTopicConsumers(topic)
		if err != nil {
			respondWithJSON(w, http.StatusInternalServerError, errorHTTPResponse{err.Error()})
			return
		}
	} else {
		groupConsumers, err := as.admin.GetTopicConsumers(group, topic)
		if err != nil {
			if _, ok := err.(admin.ErrInvalidParam); ok {
				respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{err.Error()})
				return
			}
			respondWithJSON(w, http.StatusInternalServerError, errorHTTPResponse{err.Error()})
			return
		}
		consumers = make(map[string]map[string][]int32)
		if len(groupConsumers) != 0 {
			consumers[group] = groupConsumers
		}
	}

	encodedRes, err := json.MarshalIndent(consumers, "", "  ")
	if err != nil {
		log.Errorf("Failed to send HTTP reponse: status=%d, body=%v, reason=%v", http.StatusOK, encodedRes, err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	encodedRes = prettyfmt.CollapseJSON(encodedRes)

	w.Header().Add(headerContentType, "application/json")
	w.WriteHeader(http.StatusOK)
	if _, err := w.Write(encodedRes); err != nil {
		log.Errorf("Failed to send HTTP reponse: status=%d, body=%v, reason=%v", http.StatusOK, encodedRes, err)
	}
}
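For context, here is a minimal client-side sketch of calling this endpoint. The listen address and the literal "group" query parameter name are assumptions: the handler reads the parameter via the paramGroup constant, whose value is not shown in this listing.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical address; the service's actual listen address comes from
	// its configuration.
	u := url.URL{
		Scheme:   "http",
		Host:     "localhost:19092",
		Path:     "/topic/my-topic/consumers",
		RawQuery: url.Values{"group": {"my-group"}}.Encode(),
	}
	resp, err := http.Get(u.String())
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}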
Example #2
// respondWithJSON marshals `body` to a JSON string and sends it as an HTTP
// response body along with the specified `status` code.
func respondWithJSON(w http.ResponseWriter, status int, body interface{}) {
	encodedRes, err := json.MarshalIndent(body, "", "  ")
	if err != nil {
		log.Errorf("Failed to send HTTP reponse: status=%d, body=%v, reason=%v", status, body, err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Add(headerContentType, "application/json")
	w.WriteHeader(status)
	if _, err := w.Write(encodedRes); err != nil {
		log.Errorf("Failed to send HTTP reponse: status=%d, body=%v, reason=%v", status, body, err)
	}
}
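A minimal sketch of using this helper from another handler. Example #1 constructs errorHTTPResponse positionally with a single string, so the one-field definition below mirrors that shape but is an assumption, as are the JSON tag and the checkBackends function.

// Hypothetical definition mirroring how Example #1 constructs the type.
type errorHTTPResponse struct {
	Error string `json:"error"`
}

func handleHealth(w http.ResponseWriter, r *http.Request) {
	// Success and error paths both go through the same helper.
	if err := checkBackends(); err != nil { // checkBackends is hypothetical
		respondWithJSON(w, http.StatusServiceUnavailable, errorHTTPResponse{err.Error()})
		return
	}
	respondWithJSON(w, http.StatusOK, map[string]string{"status": "ok"})
}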
Example #3
// supervisor takes care of graceful shutdown of the service.
func (s *Service) supervisor() {
	defer sarama.RootCID.NewChild("supervisor").LogScope()()
	var unixServerErrorCh <-chan error

	s.tcpServer.Start()
	if s.unixServer != nil {
		s.unixServer.Start()
		unixServerErrorCh = s.unixServer.ErrorCh()
	}
	// Block to wait for quit signal or an API server crash.
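	// Receiving from a nil channel blocks forever, so when no unix server is
	// configured the unixServerErrorCh case below can never fire.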
	select {
	case <-s.quitCh:
	case err, ok := <-s.tcpServer.ErrorCh():
		if ok {
			log.Errorf("TCP socket based HTTP API crashed, err=(%s)", err)
		}
	case err, ok := <-unixServerErrorCh:
		if ok {
			log.Errorf("Unix socket based HTTP API crashed, err=(%s)", err)
		}
	}
	// Initiate stop of all API servers.
	s.tcpServer.AsyncStop()
	if s.unixServer != nil {
		s.unixServer.AsyncStop()
	}
	// Wait until all API servers are stopped.
	for range s.tcpServer.ErrorCh() {
		// Drain the errors channel until it is closed.
	}
	if s.unixServer != nil {
		for range s.unixServer.ErrorCh() {
			// Drain the errors channel until it is closed.
		}
	}
	// There are no more requests in flight at this point so it is safe to stop
	// all Kafka clients.
	var wg sync.WaitGroup
	spawn(&wg, s.producer.Stop)
	spawn(&wg, s.consumer.Stop)
	spawn(&wg, s.admin.Stop)
	wg.Wait()
}
Example #4
func main() {
	// Let the Go runtime execute goroutines in parallel on all available CPUs.
	runtime.GOMAXPROCS(runtime.NumCPU())

	if err := initLogging(); err != nil {
		fmt.Printf("Failed to initialize logger: err=(%s)\n", err)
		os.Exit(1)
	}

	if pidFile != "" {
		if err := writePID(pidFile); err != nil {
			log.Errorf("Failed to write PID file: err=(%s)", err)
			os.Exit(1)
		}
	}

	// Clean up the unix domain socket file in case we failed to clean up on
	// shutdown the last time. Otherwise the service won't be able to listen
	// on this address and as a result will fail to start up.
	if config.UnixAddr != "" {
		if err := os.Remove(config.UnixAddr); err != nil && !os.IsNotExist(err) {
			log.Errorf("Cannot remove %s: err=(%s)", config.UnixAddr, err)
		}
	}

	log.Infof("Starting with config: %+v", config)
	svc, err := pixy.SpawnService(config)
	if err != nil {
		log.Errorf("Failed to start service: err=(%s)", err)
		os.Exit(1)
	}

	// Subscribe to OS signals to ensure a graceful stop.
	osSigCh := make(chan os.Signal, 1)
	signal.Notify(osSigCh, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)

	// Wait for a quit signal and terminate the service when it is received.
	<-osSigCh
	svc.Stop()
}
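writePID is not part of this listing; the sketch below is a plausible minimal implementation, offered as an assumption rather than the project's actual code.

// writePID writes the current process ID to the given file so that service
// managers can locate the process. Hypothetical sketch; requires the os and
// strconv imports.
func writePID(path string) error {
	return os.WriteFile(path, []byte(strconv.Itoa(os.Getpid())), 0644)
}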
Example #5
// handleProduceResult inspects a produce result; if it carries an error,
// the error is logged and the message is flushed down `deadMessageCh`,
// if one has been configured.
func (gp *GracefulProducer) handleProduceResult(cid *sarama.ContextID, result produceResult) {
	if replyCh, ok := result.Msg.Metadata.(chan produceResult); ok {
		replyCh <- result
	}
	if result.Err == nil {
		return
	}
	prodMsgRepr := fmt.Sprintf(`{Topic: "%s", Key: "%s", Value: "%s"}`,
		result.Msg.Topic, encoderRepr(result.Msg.Key), encoderRepr(result.Msg.Value))
	log.Errorf("<%v> Failed to submit message: msg=%v, err=(%s)",
		cid, prodMsgRepr, result.Err)
	if gp.deadMessageCh != nil {
		gp.deadMessageCh <- result.Msg
	}
}
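The type assertion on Msg.Metadata implies a convention: a caller that wants a synchronous produce stashes a reply channel in the message's Metadata field. A hedged sketch of that caller side follows; the message construction and the way it is handed to the producer are assumptions about the surrounding API.

// Hypothetical synchronous produce built on the reply-channel convention.
// The buffer of 1 guarantees that handleProduceResult's send never blocks.
replyCh := make(chan produceResult, 1)
msg := &sarama.ProducerMessage{
	Topic:    "my-topic",
	Value:    sarama.StringEncoder("hello"),
	Metadata: replyCh,
}
// ... hand msg to the producer's input channel (not shown in this listing) ...
result := <-replyCh
if result.Err != nil {
	log.Errorf("produce failed: err=(%s)", result.Err)
}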
Example #6
// retry keeps calling the `f` function until it succeeds. `shouldRetry` is
// used to check the error returned by `f` to decide whether the call should
// be retried. If `shouldRetry` is not specified, then any non-nil error
// results in a retry.
func retry(f func() error, shouldRetry func(err error) bool, errorMsg string,
	delay time.Duration, cancelCh <-chan none) (canceled bool) {

	err := f()
	if shouldRetry == nil {
		shouldRetry = func(err error) bool { return err != nil }
	}
	for shouldRetry(err) {
		log.Errorf("%s: err=(%s), retryIn=%v", errorMsg, err, delay)
		select {
		case <-time.After(delay):
		case <-cancelCh:
			return true
		}
		err = f()
	}
	return false
}
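A short usage sketch for retry. The none type is presumably an empty-struct alias in the source; the dialKafka function here is hypothetical.

// Retry a hypothetical connect call every 5 seconds until it succeeds or
// cancelCh is closed. Passing nil for shouldRetry retries on any non-nil error.
cancelCh := make(chan none)
if canceled := retry(
	dialKafka, // hypothetical func() error
	nil,
	"Failed to connect to Kafka",
	5*time.Second,
	cancelCh,
); canceled {
	return
}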
Example #7
func (gc *groupConsumer) managePartitions() {
	cid := gc.baseCID.NewChild("managePartitions")
	defer cid.LogScope()()
	var (
		topicConsumers                = make(map[string]*topicConsumer)
		topics                        []string
		memberSubscriptions           map[string][]string
		ok                            = true
		nilOrRetryCh                  <-chan time.Time
		nilOrRegistryTopicsCh         chan<- []string
		shouldRebalance, canRebalance = false, true
		rebalanceResultCh             = make(chan error, 1)
	)
	for {
		select {
		case tc := <-gc.addTopicConsumerCh:
			topicConsumers[tc.topic] = tc
			topics = listTopics(topicConsumers)
			nilOrRegistryTopicsCh = gc.registry.topics()
			continue
		case tc := <-gc.deleteTopicConsumerCh:
			delete(topicConsumers, tc.topic)
			topics = listTopics(topicConsumers)
			nilOrRegistryTopicsCh = gc.registry.topics()
			continue
		case nilOrRegistryTopicsCh <- topics:
			nilOrRegistryTopicsCh = nil
			continue
		case memberSubscriptions, ok = <-gc.registry.membershipChanges():
			if !ok {
				goto done
			}
			nilOrRetryCh = nil
			shouldRebalance = true
		case err := <-rebalanceResultCh:
			canRebalance = true
			if err != nil {
				log.Errorf("<%s> rebalance failed: err=(%s)", cid, err)
				nilOrRetryCh = time.After(gc.config.Consumer.BackOffTimeout)
				continue
			}
		case <-nilOrRetryCh:
			shouldRebalance = true
		}

		if shouldRebalance && canRebalance {
			// Copy topicConsumers to make sure `rebalance` doesn't see any
			// changes we make while it is running.
			topicConsumerCopy := make(map[string]*topicConsumer, len(topicConsumers))
			for topic, tc := range topicConsumers {
				topicConsumerCopy[topic] = tc
			}
			go gc.rebalance(topicConsumerCopy, memberSubscriptions, rebalanceResultCh)
			shouldRebalance, canRebalance = false, false
		}
	}
done:
	var wg sync.WaitGroup
	for _, tg := range gc.topicGears {
		tg := tg
		spawn(&wg, func() { gc.rewireMultiplexer(tg, nil) })
	}
	wg.Wait()
}
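The nilOrRegistryTopicsCh and nilOrRetryCh variables exploit the rule that a select case on a nil channel never fires: assigning a real channel arms the case, and assigning nil back disarms it. A stripped-down, runnable illustration of the same one-shot send pattern (names are illustrative):

package main

import "fmt"

func main() {
	out := make(chan string, 1)
	var nilOrOutCh chan<- string // nil: the send case below is disabled

	trigger := make(chan struct{}, 1)
	trigger <- struct{}{}

	for i := 0; i < 2; i++ {
		select {
		case <-trigger:
			nilOrOutCh = out // arm the send case
		case nilOrOutCh <- "topics changed":
			nilOrOutCh = nil // disarm until the next trigger
		}
	}
	fmt.Println(<-out) // prints "topics changed"
}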
Example #8
func (gc *groupConsumer) managePartitions() {
	cid := gc.baseCID.NewChild("managePartitions")
	defer cid.LogScope()()
	var (
		topicConsumers                = make(map[string]*topicConsumer)
		topics                        []string
		subscriptions                 map[string][]string
		ok                            = true
		nilOrRetryCh                  <-chan time.Time
		nilOrRegistryTopicsCh         chan<- []string
		shouldRebalance, canRebalance = false, true
		rebalanceResultCh             = make(chan error, 1)
	)
	for {
		select {
		case tc := <-gc.topicConsumerLifespanCh:
			// It is assumed that only one topicConsumer can exist for a
			// particular topic at a time.
			if topicConsumers[tc.topic] == tc {
				delete(topicConsumers, tc.topic)
			} else {
				topicConsumers[tc.topic] = tc
			}
			topics = listTopics(topicConsumers)
			nilOrRegistryTopicsCh = gc.registry.topics()
			continue
		case nilOrRegistryTopicsCh <- topics:
			nilOrRegistryTopicsCh = nil
			continue
		case subscriptions, ok = <-gc.registry.membershipChanges():
			if !ok {
				goto done
			}
			nilOrRetryCh = nil
			shouldRebalance = true
		case err := <-rebalanceResultCh:
			canRebalance = true
			if err != nil {
				log.Errorf("<%s> rebalance failed: err=(%s)", cid, err)
				nilOrRetryCh = time.After(gc.cfg.Consumer.BackOffTimeout)
				continue
			}
		case <-nilOrRetryCh:
			shouldRebalance = true
		}

		if shouldRebalance && canRebalance {
			// Copy topicConsumers to make sure `rebalance` doesn't see any
			// changes we make while it is running.
			topicConsumerCopy := make(map[string]*topicConsumer, len(topicConsumers))
			for topic, tc := range topicConsumers {
				topicConsumerCopy[topic] = tc
			}
			go gc.rebalance(topicConsumerCopy, subscriptions, rebalanceResultCh)
			shouldRebalance, canRebalance = false, false
		}
	}
done:
	var wg sync.WaitGroup
	for _, tcg := range gc.topicConsumerGears {
		spawn(&wg, tcg.stop)
	}
	wg.Wait()
}