Code Example #1
File: handler_man_pub.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest GET /v1/raw/msgs/:topic/:ver
// tells client how to pub in raw mode: how to connect directly to kafka
func (this *manServer) pubRawHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	ver := params.ByName(UrlParamVersion)
	topic := params.ByName(UrlParamTopic)
	appid := r.Header.Get(HttpHeaderAppid)
	realIp := getHttpRemoteIp(r)

	if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {
		log.Error("pub raw[%s] %s(%s) {topic:%s, ver:%s}: %s",
			appid, r.RemoteAddr, realIp, topic, ver, err)

		writeAuthFailure(w, err)
		return
	}

	cluster, found := manager.Default.LookupCluster(appid)
	if !found {
		writeBadRequest(w, "invalid appid")
		return
	}

	log.Info("pub raw[%s] %s(%s) {topic:%s ver:%s}", appid, r.RemoteAddr, realIp, topic, ver)

	var out = map[string]string{
		"store":   store.DefaultPubStore.Name(),
		"topic":   manager.Default.KafkaTopic(appid, topic, ver),
		"brokers": strings.Join(meta.Default.ZkCluster(cluster).OnlyNamedBrokerList(), ","),
	}
	b, _ := json.Marshal(out)
	w.Write(b)
}
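
As a usage note, here is a minimal client sketch of how this endpoint could be called and its JSON response decoded. The handler's output map fixes the field names (store, topic, brokers); the gateway address and the wire names of the Appid/Pubkey headers are assumptions standing in for HttpHeaderAppid/HttpHeaderPubkey.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// rawPubMeta mirrors the JSON map written by pubRawHandler:
// {"store": ..., "topic": ..., "brokers": ...}.
type rawPubMeta struct {
	Store   string `json:"store"`
	Topic   string `json:"topic"`
	Brokers string `json:"brokers"` // comma-separated broker list
}

func main() {
	// Hypothetical gateway address and header names; the real values come
	// from the deployment and the HttpHeaderAppid/HttpHeaderPubkey constants.
	req, _ := http.NewRequest("GET", "http://localhost:9191/v1/raw/msgs/foobar/v1", nil)
	req.Header.Set("Appid", "app1")
	req.Header.Set("Pubkey", "mypubkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var meta rawPubMeta
	if err := json.NewDecoder(resp.Body).Decode(&meta); err != nil {
		panic(err)
	}
	fmt.Printf("connect to brokers %s, topic %s via %s\n", meta.Brokers, meta.Topic, meta.Store)
}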
Code Example #2
File: handler_job.go  Project: funkygao/gafka
// DELETE /v1/jobs/:topic/:ver?id=22323
func (this *pubServer) deleteJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	appid := r.Header.Get(HttpHeaderAppid)
	topic := params.ByName(UrlParamTopic)
	ver := params.ByName(UrlParamVersion)
	realIp := getHttpRemoteIp(r)
	if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {
		log.Error("-job[%s] %s(%s) {topic:%s, ver:%s} %s",
			appid, r.RemoteAddr, realIp, topic, ver, err)

		writeAuthFailure(w, err)
		return
	}

	_, found := manager.Default.LookupCluster(appid)
	if !found {
		log.Error("-job[%s] %s(%s) {topic:%s, ver:%s} cluster not found",
			appid, r.RemoteAddr, realIp, topic, ver)

		writeBadRequest(w, "invalid appid")
		return
	}

	jobId := r.URL.Query().Get("id")
	if len(jobId) < 18 { // jobId e.g. 341647700585877504
		writeBadRequest(w, "invalid job id")
		return
	}

	if err := job.Default.Delete(appid, manager.Default.KafkaTopic(appid, topic, ver), jobId); err != nil {
		if err == job.ErrNothingDeleted {
			// race failed, actor worker wins
			log.Warn("-job[%s] %s(%s) {topic:%s, ver:%s jid:%s} %v",
				appid, r.RemoteAddr, realIp, topic, ver, jobId, err)

			w.WriteHeader(http.StatusConflict)
			w.Write([]byte{})
			return
		}

		log.Error("-job[%s] %s(%s) {topic:%s, ver:%s jid:%s} %v",
			appid, r.RemoteAddr, realIp, topic, ver, jobId, err)

		writeServerError(w, err.Error())
		return
	}

	if Options.AuditPub {
		this.auditor.Trace("-job[%s] %s(%s) {topic:%s ver:%s UA:%s jid:%s}",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), jobId)
	}

	w.Write(ResponseOk)
}
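
A hedged sketch of the matching client call: the job id query parameter must be at least 18 characters, a 200 means the job was removed, and a 409 Conflict means the actor worker already fired it. The gateway address and header names are placeholders.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical gateway address and header names.
	url := "http://localhost:9191/v1/jobs/foobar/v1?id=341647700585877504"
	req, _ := http.NewRequest(http.MethodDelete, url, nil)
	req.Header.Set("Appid", "app1")
	req.Header.Set("Pubkey", "mypubkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		fmt.Println("job deleted")
	case http.StatusConflict:
		// race lost: the actor worker already fired the job
		fmt.Println("job already executed")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}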
Code Example #3
File: handler_man.go  Project: funkygao/gafka
// @rest POST /v1/jobs/:appid/:topic/:ver
func (this *manServer) createJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	topic := params.ByName(UrlParamTopic)
	if !manager.Default.ValidateTopicName(topic) {
		log.Warn("illegal topic: %s", topic)

		writeBadRequest(w, "illegal topic")
		return
	}

	realIp := getHttpRemoteIp(r)

	if !this.throttleAddTopic.Pour(realIp, 1) {
		writeQuotaExceeded(w)
		return
	}

	hisAppid := params.ByName(UrlParamAppid)
	appid := r.Header.Get(HttpHeaderAppid)
	pubkey := r.Header.Get(HttpHeaderPubkey)
	ver := params.ByName(UrlParamVersion)
	if !manager.Default.AuthAdmin(appid, pubkey) {
		log.Warn("suspicous create job %s(%s) {appid:%s pubkey:%s topic:%s ver:%s}",
			r.RemoteAddr, realIp, appid, pubkey, topic, ver)

		writeAuthFailure(w, manager.ErrAuthenticationFail)
		return
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("create job %s(%s) {appid:%s topic:%s ver:%s} invalid appid",
			r.RemoteAddr, realIp, hisAppid, topic, ver)

		writeBadRequest(w, "invalid appid")
		return
	}

	log.Info("create job[%s] %s(%s) {appid:%s topic:%s ver:%s}",
		appid, r.RemoteAddr, realIp, hisAppid, topic, ver)

	rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	if err := job.Default.CreateJobQueue(Options.AssignJobShardId, hisAppid, rawTopic); err != nil {
		log.Error("create job[%s] %s(%s) {shard:%d appid:%s topic:%s ver:%s} %v",
			appid, r.RemoteAddr, realIp, Options.AssignJobShardId, hisAppid, topic, ver, err)

		writeServerError(w, err.Error())
		return
	}

	if err := this.gw.zkzone.CreateJobQueue(rawTopic, cluster); err != nil {
		log.Error("app[%s] %s(%s) create job: {shard:%d appid:%s topic:%s ver:%s} %v",
			appid, r.RemoteAddr, realIp, Options.AssignJobShardId, hisAppid, topic, ver, err)

		writeServerError(w, err.Error())
		return
	}

	w.WriteHeader(http.StatusCreated)
	w.Write(ResponseOk)
}
Code Example #4
File: handler_man.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest GET /v1/schema/:appid/:topic/:ver
func (this *manServer) schemaHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	hisAppid := params.ByName(UrlParamAppid)
	myAppid := r.Header.Get(HttpHeaderAppid)
	topic := params.ByName(UrlParamTopic)
	ver := params.ByName(UrlParamVersion)
	realIp := getHttpRemoteIp(r)

	log.Info("schema[%s] %s(%s) {app:%s topic:%s ver:%s UA:%s}",
		myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

	// TODO authorization

	_, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		writeBadRequest(w, "invalid appid")
		return
	}

	// TODO lookup from manager and send response
	schema, err := manager.Default.TopicSchema(hisAppid, topic, ver)
	if err != nil {
		writeBadRequest(w, err.Error())
		return
	}

	w.Write([]byte(strings.TrimSpace(schema)))
}
Code Example #5
File: handler_man.go  Project: funkygao/gafka
// @rest GET /v1/partitions/:appid/:topic/:ver
func (this *manServer) partitionsHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	topic := params.ByName(UrlParamTopic)
	hisAppid := params.ByName(UrlParamAppid)
	appid := r.Header.Get(HttpHeaderAppid)
	pubkey := r.Header.Get(HttpHeaderPubkey)
	ver := params.ByName(UrlParamVersion)
	realIp := getHttpRemoteIp(r)

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("partitions[%s] %s(%s) {app:%s topic:%s ver:%s} invalid appid",
			appid, r.RemoteAddr, realIp, hisAppid, topic, ver)

		writeBadRequest(w, "invalid appid")
		return
	}

	if !manager.Default.AuthAdmin(appid, pubkey) {
		log.Warn("suspicous partitions call from %s(%s) {cluster:%s app:%s key:%s topic:%s ver:%s}",
			r.RemoteAddr, realIp, cluster, appid, pubkey, topic, ver)

		writeAuthFailure(w, manager.ErrAuthenticationFail)
		return
	}

	log.Info("partitions[%s] %s(%s) {cluster:%s app:%s topic:%s ver:%s}",
		appid, r.RemoteAddr, realIp, cluster, hisAppid, topic, ver)

	zkcluster := meta.Default.ZkCluster(cluster)
	if zkcluster == nil {
		log.Error("suspicous partitions call from %s(%s) {cluster:%s app:%s key:%s topic:%s ver:%s} undefined cluster",
			r.RemoteAddr, realIp, cluster, appid, pubkey, topic, ver)

		writeBadRequest(w, "undefined cluster")
		return
	}

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), sarama.NewConfig())
	if err != nil {
		log.Error("cluster[%s] %v", zkcluster.Name(), err)

		writeServerError(w, err.Error())
		return
	}
	defer kfk.Close()

	partitions, err := kfk.Partitions(manager.Default.KafkaTopic(hisAppid, topic, ver))
	if err != nil {
		log.Error("cluster[%s] from %s(%s) {app:%s topic:%s ver:%s} %v",
			zkcluster.Name(), r.RemoteAddr, realIp, hisAppid, topic, ver, err)

		writeServerError(w, err.Error())
		return
	}

	w.Write([]byte(fmt.Sprintf(`{"num": %d}`, len(partitions))))
}
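
A small admin-side sketch that reads the partition count; it relies only on the {"num": N} payload written above. The gateway address and credential header names are assumptions.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical gateway address; Appid/Pubkey header names are placeholders
	// for the admin credentials checked by AuthAdmin.
	req, _ := http.NewRequest("GET", "http://localhost:9191/v1/partitions/app1/foobar/v1", nil)
	req.Header.Set("Appid", "admin")
	req.Header.Set("Pubkey", "adminkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Num int `json:"num"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("partitions:", out.Num)
}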
Code Example #6
File: handler_man.go  Project: funkygao/gafka
// @rest DELETE /v1/jobs/:appid/:topic/:ver?group=xx
func (this *manServer) deleteWebhookHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	topic := params.ByName(UrlParamTopic)
	if !manager.Default.ValidateTopicName(topic) {
		log.Warn("illegal topic: %s", topic)

		writeBadRequest(w, "illegal topic")
		return
	}

	query := r.URL.Query()
	group := query.Get("group")
	realIp := getHttpRemoteIp(r)
	hisAppid := params.ByName(UrlParamAppid)
	myAppid := r.Header.Get(HttpHeaderAppid)
	ver := params.ByName(UrlParamVersion)

	if err := manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey),
		hisAppid, topic, group); err != nil {
		log.Error("+webhook[%s/%s] -(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		writeAuthFailure(w, err)
		return
	}

	/*
		cluster, found := manager.Default.LookupCluster(hisAppid)
		if !found {
			log.Error("+webhook[%s/%s] -(%s): {%s.%s.%s UA:%s} undefined cluster",
				myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

			writeBadRequest(w, "invalid appid")
			return
		}

		log.Info("+webhook[%s/%s] %s(%s): {%s.%s.%s UA:%s}",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

		rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	*/

}
Code Example #7
File: handler_man.go  Project: funkygao/gafka
// @rest PUT /v1/options/:option/:value
func (this *manServer) setOptionHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	option := params.ByName("option")
	value := params.ByName("value")
	boolVal := value == "true"

	// TODO auth

	switch option {
	case "debug":
		Options.Debug = boolVal

	case "nometrics":
		Options.DisableMetrics = boolVal

	case "gzip":
		Options.EnableGzip = boolVal

	case "badgroup_rater":
		Options.BadGroupRateLimit = boolVal

	case "badpub_rater":
		Options.BadPubAppRateLimit = boolVal

	case "refreshdb":
		manager.Default.ForceRefresh()

	case "ratelimit":
		Options.Ratelimit = boolVal

	case "resethh":
		hh.Default.ResetCounters()

	case "hh":
		Options.EnableHintedHandoff = boolVal
		if !boolVal {
			hh.Default.Stop()
			w.Write([]byte(fmt.Sprintf("id:%s hh[%s] stopped", Options.Id, hh.Default.Name())))
			return
		} else {
			if err := hh.Default.Start(); err != nil {
				writeServerError(w, err.Error())
				return
			} else {
				w.Write([]byte(fmt.Sprintf("id:%s hh[%s] started", Options.Id, hh.Default.Name())))
				return
			}
		}

	case "hhflush":
		if boolVal {
			if hh.Default == nil {
				writeServerError(w, "no underlying hinted handoff")
			} else if Options.EnableHintedHandoff {
				writeBadRequest(w, "turn off hinted handoff first")
			} else {
				hh.Default.FlushInflights()
				w.Write([]byte(fmt.Sprintf("id:%s hh[%s] inflights flushed", Options.Id, hh.Default.Name())))
			}
			return
		}

	case "jobshardid":
		shardId, err := strconv.Atoi(value)
		if err != nil {
			writeBadRequest(w, "invalid job shard id")
			return
		} else {
			Options.AssignJobShardId = shardId
		}

	case "punish":
		d, err := time.ParseDuration(value)
		if err != nil {
			writeBadRequest(w, err.Error())
			return
		} else {
			Options.BadClientPunishDuration = d
		}

	case "500backoff":
		d, err := time.ParseDuration(value)
		if err != nil {
			writeBadRequest(w, err.Error())
			return
		} else {
			Options.InternalServerErrorBackoff = d
		}

	case "auditpub":
		Options.AuditPub = boolVal

	case "auditsub":
		Options.AuditSub = boolVal

	case "allhh":
		Options.AllwaysHintedHandoff = boolVal

	case "standbysub":
		Options.PermitStandbySub = boolVal

	case "unregroup":
		Options.PermitUnregisteredGroup = boolVal
		manager.Default.AllowSubWithUnregisteredGroup(boolVal)

	case "loglevel":
		logLevel = toLogLevel(value)
		for _, filter := range log.Global {
			filter.Level = logLevel
		}

	case "maxreq":
		Options.MaxRequestPerConn, _ = strconv.Atoi(value)

	case "accesslog":
		if Options.EnableAccessLog != boolVal {
			// on/off switching
			if boolVal {
				this.gw.accessLogger.Start()
			} else {
				this.gw.accessLogger.Stop()
			}
		}
		Options.EnableAccessLog = boolVal

	default:
		log.Warn("invalid option:%s=%s", option, value)
		writeBadRequest(w, "invalid option")
		return
	}

	log.Info("option %s(%s) %s to %s, %#v", r.RemoteAddr, getHttpRemoteIp(r), option, value, Options)

	w.Write(ResponseOk)
}
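
To illustrate the switch above, a client sketch that flips a few of the runtime options via PUT /v1/options/:option/:value. The option names come from the cases shown; the gateway address is a placeholder.

package main

import (
	"fmt"
	"net/http"
)

// setOption flips one runtime option via PUT /v1/options/:option/:value.
func setOption(base, option, value string) error {
	req, _ := http.NewRequest(http.MethodPut,
		fmt.Sprintf("%s/v1/options/%s/%s", base, option, value), nil)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("set %s=%s: %s", option, value, resp.Status)
	}
	return nil
}

func main() {
	base := "http://localhost:9191" // hypothetical gateway address
	// boolean options take "true"/"false"; other options parse their value directly
	for _, opt := range [][2]string{
		{"debug", "true"},
		{"punish", "5s"},      // parsed with time.ParseDuration
		{"jobshardid", "2"},   // parsed with strconv.Atoi
		{"loglevel", "debug"}, // passed to toLogLevel
	} {
		if err := setOption(base, opt[0], opt[1]); err != nil {
			fmt.Println(err)
		}
	}
}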
Code Example #8
File: handler_man.go  Project: funkygao/gafka
// @rest PUT /v1/topics/:appid/:topic/:ver?partitions=1&retention.hours=72&retention.bytes=-1
func (this *manServer) alterTopicHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	topic := params.ByName(UrlParamTopic)
	if !manager.Default.ValidateTopicName(topic) {
		log.Warn("illegal topic: %s", topic)

		writeBadRequest(w, "illegal topic")
		return
	}

	realIp := getHttpRemoteIp(r)

	if !this.throttleAddTopic.Pour(realIp, 1) {
		writeQuotaExceeded(w)
		return
	}

	hisAppid := params.ByName(UrlParamAppid)
	appid := r.Header.Get(HttpHeaderAppid)
	pubkey := r.Header.Get(HttpHeaderPubkey)
	ver := params.ByName(UrlParamVersion)
	if !manager.Default.AuthAdmin(appid, pubkey) {
		log.Warn("suspicous alter topic from %s(%s) {appid:%s pubkey:%s topic:%s ver:%s}",
			r.RemoteAddr, realIp, appid, pubkey, topic, ver)

		writeAuthFailure(w, manager.ErrAuthenticationFail)
		return
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("alter topic[%s] %s(%s) {app:%s topic:%s ver:%s} invalid appid",
			appid, r.RemoteAddr, realIp, hisAppid, topic, ver)

		writeBadRequest(w, "invalid appid")
		return
	}

	zkcluster := meta.Default.ZkCluster(cluster)
	if zkcluster == nil {
		log.Error("alter topic from %s(%s) {appid:%s pubkey:%s cluster:%s topic:%s ver:%s} undefined cluster",
			r.RemoteAddr, realIp, appid, pubkey, cluster, topic, ver)

		writeBadRequest(w, "undefined cluster")
		return
	}

	info := zkcluster.RegisteredInfo()
	if !info.Public {
		log.Warn("app[%s] alter topic:%s in non-public cluster: %+v", hisAppid, topic, params)

		writeBadRequest(w, "invalid cluster")
		return
	}

	ts := sla.DefaultSla()
	query := r.URL.Query()
	if partitionsArg := query.Get(sla.SlaKeyPartitions); partitionsArg != "" {
		ts.Partitions, _ = strconv.Atoi(partitionsArg)
	}
	if retentionBytes := query.Get(sla.SlaKeyRetentionBytes); retentionBytes != "" {
		ts.RetentionBytes, _ = strconv.Atoi(retentionBytes)
	}
	ts.ParseRetentionHours(query.Get(sla.SlaKeyRetentionHours))

	// validate the sla
	if err := ts.Validate(); err != nil {
		log.Error("app[%s] alter topic:%s %s: %+v", hisAppid, topic, query.Encode(), err)

		writeBadRequest(w, err.Error())
		return
	}

	log.Info("app[%s] from %s(%s) alter topic: {appid:%s cluster:%s topic:%s ver:%s query:%s}",
		appid, r.RemoteAddr, realIp, hisAppid, cluster, topic, ver, query.Encode())

	rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	alterConfig := ts.DumpForAlterTopic()
	if len(alterConfig) == 0 {
		log.Warn("app[%s] from %s(%s) alter topic: {appid:%s cluster:%s topic:%s ver:%s query:%s} nothing updated",
			appid, r.RemoteAddr, realIp, hisAppid, cluster, topic, ver, query.Encode())

		writeBadRequest(w, "nothing updated")
		return
	}

	lines, err := zkcluster.AlterTopic(rawTopic, ts)
	if err != nil {
		log.Error("app[%s] from %s(%s) alter topic: {appid:%s cluster:%s topic:%s ver:%s query:%s} %v",
			appid, r.RemoteAddr, realIp, hisAppid, cluster, topic, ver, query.Encode(), err)

		writeServerError(w, err.Error())
		return
	}

	for _, l := range lines {
		log.Trace("app[%s] alter topic[%s] in cluster %s: %s", appid, rawTopic, cluster, l)
	}

	w.Write(ResponseOk)
}
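
A corresponding admin call, as a sketch. The query keys partitions, retention.hours and retention.bytes are taken from the route comment; the gateway address and admin header names are assumptions.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("partitions", "4")
	q.Set("retention.hours", "72")
	q.Set("retention.bytes", "-1")

	// Hypothetical gateway address and admin header names.
	u := "http://localhost:9191/v1/topics/app1/foobar/v1?" + q.Encode()
	req, _ := http.NewRequest(http.MethodPut, u, nil)
	req.Header.Set("Appid", "admin")
	req.Header.Set("Pubkey", "adminkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("alter topic:", resp.Status)
}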
Code Example #9
File: handler_man.go  Project: funkygao/gafka
// @rest PUT /v1/webhook/:appid/:topic/:ver?group=xx
func (this *manServer) createWebhookHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	topic := params.ByName(UrlParamTopic)
	if !manager.Default.ValidateTopicName(topic) {
		log.Warn("illegal topic: %s", topic)

		writeBadRequest(w, "illegal topic")
		return
	}

	query := r.URL.Query()
	group := query.Get("group")
	realIp := getHttpRemoteIp(r)
	hisAppid := params.ByName(UrlParamAppid)
	myAppid := r.Header.Get(HttpHeaderAppid)
	ver := params.ByName(UrlParamVersion)

	if err := manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey),
		hisAppid, topic, group); err != nil {
		log.Error("+webhook[%s/%s] -(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		writeAuthFailure(w, err)
		return
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("+webhook[%s/%s] -(%s): {%s.%s.%s UA:%s} undefined cluster",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

		writeBadRequest(w, "invalid appid")
		return
	}

	log.Info("+webhook[%s/%s] %s(%s): {%s.%s.%s UA:%s}",
		myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

	rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	var hook zk.WebhookMeta
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&hook); err != nil {
		log.Error("+webhook[%s/%s] %s(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		writeBadRequest(w, err.Error())
		return
	}
	r.Body.Close()

	// validate the url
	for _, ep := range hook.Endpoints {
		_, err := url.ParseRequestURI(ep)
		if err != nil {
			log.Error("+webhook[%s/%s] %s(%s): {%s.%s.%s UA:%s} %+v %v",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), hook.Endpoints, err)

			writeBadRequest(w, err.Error())
			return
		}
	}

	hook.Cluster = cluster // cluster is decided by server
	if err := this.gw.zkzone.CreateOrUpdateWebhook(rawTopic, hook); err != nil {
		log.Error("+webhook[%s/%s] %s(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		writeServerError(w, err.Error())
		return
	}

	w.Write(ResponseOk)
}
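
A hedged client sketch for registering a webhook. The handler decodes the body into zk.WebhookMeta, validates every entry of its Endpoints field with url.ParseRequestURI, and overwrites Cluster on the server side; the JSON field name below, the gateway address and the Appid/Subkey header names are assumptions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed JSON shape: the handler fills zk.WebhookMeta.Endpoints from the
	// body and decides the cluster itself.
	body, _ := json.Marshal(map[string]interface{}{
		"endpoints": []string{"http://callback.example.com/hook"},
	})

	// Hypothetical gateway address and Appid/Subkey header names.
	u := "http://localhost:9191/v1/webhook/app1/foobar/v1?group=mygroup"
	req, _ := http.NewRequest(http.MethodPut, u, bytes.NewReader(body))
	req.Header.Set("Appid", "app2")
	req.Header.Set("Subkey", "mysubkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create webhook:", resp.Status)
}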
Code Example #10
File: handler_sub_ack.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest PUT /v1/offsets/:appid/:topic/:ver/:group with json body
func (this *subServer) ackHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic    string
		ver      string
		myAppid  string
		hisAppid string
		group    string
		err      error
	)

	group = params.ByName(UrlParamGroup)
	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)
	myAppid = r.Header.Get(HttpHeaderAppid)

	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey),
		hisAppid, topic, group); err != nil {
		writeAuthFailure(w, err)
		return
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		writeBadRequest(w, "invalid appid")
		return
	}

	msgLen := int(r.ContentLength)
	switch {
	case int64(msgLen) > Options.MaxPubSize:
		writeBadRequest(w, ErrTooBigMessage.Error())
		return

	case msgLen < Options.MinPubSize:
		writeBadRequest(w, ErrTooSmallMessage.Error())
		return
	}

	var msg *mpool.Message
	msg = mpool.NewMessage(msgLen)
	msg.Body = msg.Body[0:msgLen]
	lbr := io.LimitReader(r.Body, Options.MaxPubSize+1)
	if _, err = io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()

		writeBadRequest(w, ErrTooBigMessage.Error())
		return
	}

	var acks ackOffsets
	if err = json.Unmarshal(msg.Body, &acks); err != nil {
		msg.Free()

		writeBadRequest(w, "invalid ack json body")
		return
	}

	msg.Free()

	realIp := getHttpRemoteIp(r)
	realGroup := myAppid + "." + group
	rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	for i := 0; i < len(acks); i++ {
		acks[i].cluster = cluster
		acks[i].topic = rawTopic
		acks[i].group = realGroup
	}

	log.Debug("ack[%s/%s] %s(%s) {%s.%s.%s UA:%s} %+v",
		myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), acks)

	if atomic.AddInt32(&this.ackShutdown, 1) == 0 {
		// kateway is shutting down, ackCh is already closed
		log.Warn("ack[%s/%s] %s(%s) {%s.%s.%s UA:%s} server is shutting down %+v ",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), acks)

		writeServerError(w, "server is shutting down")
		return
	}

	this.ackCh <- acks
	atomic.AddInt32(&this.ackShutdown, -1)

	w.Write(ResponseOk)
}
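
A client-side sketch of the explicit offset commit. The handler unmarshals the body into ackOffsets (a slice) and fills in cluster, topic and group itself, so the client only sends partition/offset pairs; the JSON field names, gateway address and header names below are assumptions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// ack is an assumed client-side view of one element of ackOffsets;
// the real field names are defined by the gateway.
type ack struct {
	Partition int32 `json:"partition"`
	Offset    int64 `json:"offset"`
}

func main() {
	body, _ := json.Marshal([]ack{{Partition: 0, Offset: 1234}})

	// Hypothetical gateway address and Appid/Subkey header names.
	u := "http://localhost:9191/v1/offsets/app1/foobar/v1/mygroup"
	req, _ := http.NewRequest(http.MethodPut, u, bytes.NewReader(body))
	req.Header.Set("Appid", "app2")
	req.Header.Set("Subkey", "mysubkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("ack:", resp.Status)
}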
Code Example #11
File: handler_sub_raw.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest GET /v1/raw/msgs/:cluster/:topic?group=xx&batch=10&reset=<newest|oldest>
func (this *subServer) subRawHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		cluster string
		topic   string
		myAppid string
		reset   string
		group   string
		limit   int // max messages to include in the message set
		err     error
	)

	if !Options.DisableMetrics {
		this.subMetrics.SubTryQps.Mark(1)
	}

	query := r.URL.Query()
	group = query.Get("group")
	reset = query.Get("reset")
	realIp := getHttpRemoteIp(r)

	if !manager.Default.ValidateGroupName(r.Header, group) {
		log.Error("sub raw -(%s): illegal group: %s", realIp, group)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal group")
		return
	}

	limit, err = getHttpQueryInt(&query, "batch", 1)
	if err != nil {
		log.Error("sub raw -(%s): illegal batch: %v", realIp, err)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal batch")
		return
	}
	if limit > Options.MaxSubBatchSize && Options.MaxSubBatchSize > 0 {
		limit = Options.MaxSubBatchSize
	}

	topic = params.ByName(UrlParamTopic)
	cluster = params.ByName("cluster")
	myAppid = r.Header.Get(HttpHeaderAppid)

	log.Debug("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s}",
		myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"))

	if !Options.DisableMetrics {
		this.subMetrics.SubQps.Mark(1)
	}

	fetcher, err := store.DefaultSubStore.Fetch(cluster, topic,
		myAppid+"."+group, r.RemoteAddr, realIp, reset, Options.PermitStandbySub)
	if err != nil {
		// e.g. kafka was totally shutdown
		// e.g. too many consumers for the same group
		log.Error("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"), err)

		if store.DefaultSubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			writeBadRequest(w, err.Error())
		}

		return
	}

	var gz *gzip.Writer
	w, gz = gzipWriter(w, r)
	err = this.pumpRawMessages(w, r, realIp, fetcher, limit, myAppid, topic, group)
	if err != nil {
		// e.g. broken pipe, io timeout, client gone
		// e.g. kafka: error while consuming app1.foobar.v1/0: EOF (kafka was shutdown)
		log.Error("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"), err)

		if err != ErrClientGone {
			if store.DefaultSubStore.IsSystemError(err) {
				writeServerError(w, err.Error())
			} else {
				writeBadRequest(w, err.Error())
			}
		}

		// fetch.Close might be called by subServer.closedConnCh
		if err = fetcher.Close(); err != nil {
			log.Error("sub raw[%s/%s] %s(%s) {%s/%s batch:%d UA:%s} %v",
				myAppid, group, r.RemoteAddr, realIp, cluster, topic, limit, r.Header.Get("User-Agent"), err)
		}
	}

	if gz != nil {
		gz.Close()
	}
}
Code Example #12
File: handler_sub.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest GET /v1/msgs/:appid/:topic/:ver?group=xx&batch=10&reset=<newest|oldest>&ack=1&q=<dead|retry>
func (this *subServer) subHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic      string
		ver        string
		myAppid    string
		hisAppid   string
		reset      string
		group      string
		realGroup  string
		shadow     string
		rawTopic   string
		partition  string
		partitionN int = -1
		offset     string
		offsetN    int64 = -1
		limit      int   // max messages to include in the message set
		delayedAck bool  // last acked partition/offset piggybacked on this request
		err        error
	)

	if !Options.DisableMetrics {
		this.subMetrics.SubTryQps.Mark(1)
	}

	query := r.URL.Query()
	group = query.Get("group")
	myAppid = r.Header.Get(HttpHeaderAppid)
	realGroup = myAppid + "." + group
	reset = query.Get("reset")
	realIp := getHttpRemoteIp(r)

	if !manager.Default.ValidateGroupName(r.Header, group) {
		log.Error("sub -(%s): illegal group: %s", realIp, group)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal group")
		return
	}

	if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 0) {
		this.goodGroupLock.RLock()
		_, good := this.goodGroupClients[r.RemoteAddr]
		this.goodGroupLock.RUnlock()

		if !good {
			// this bad group client is in confinement period
			log.Error("sub -(%s): group[%s] failure quota exceeded %s", realIp, realGroup, r.Header.Get("User-Agent"))
			writeQuotaExceeded(w)
			return
		}
	}

	limit, err = getHttpQueryInt(&query, "batch", 1)
	if err != nil {
		log.Error("sub -(%s): illegal batch: %v", realIp, err)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal batch")
		return
	}
	if limit > Options.MaxSubBatchSize && Options.MaxSubBatchSize > 0 {
		limit = Options.MaxSubBatchSize
	}

	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)

	// auth
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey),
		hisAppid, topic, group); err != nil {
		log.Error("sub[%s/%s] -(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		this.subMetrics.ClientError.Mark(1)
		writeAuthFailure(w, err)
		return
	}

	// fetch the client ack partition and offset
	delayedAck = query.Get("ack") == "1"
	if delayedAck {
		// consumers use explicit acknowledges in order to signal a message as processed successfully
		// if consumers fail to ACK, the message hangs and server will refuse to move ahead

		// get the partitionN and offsetN from client header
		// client will ack with partition=-1, offset=-1:
		// 1. handshake phase
		// 2. when 204 No Content
		partition = r.Header.Get(HttpHeaderPartition)
		offset = r.Header.Get(HttpHeaderOffset)
		if partition != "" && offset != "" {
			// convert partition and offset to int
			offsetN, err = strconv.ParseInt(offset, 10, 64)
			if err != nil {
				log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} offset:%s",
					myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), offset)

				this.subMetrics.ClientError.Mark(1)
				writeBadRequest(w, "ack with bad offset")
				return
			}
			partitionN, err = strconv.Atoi(partition)
			if err != nil {
				log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} partition:%s",
					myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), partition)

				this.subMetrics.ClientError.Mark(1)
				writeBadRequest(w, "ack with bad partition")
				return
			}
		} else if len(partition+offset) != 0 {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s P:%s O:%s UA:%s} partial ack",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, partition, offset, r.Header.Get("User-Agent"))

			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "partial ack not allowed")
			return
		}
	}

	shadow = query.Get("q")

	log.Debug("sub[%s/%s] %s(%s) {%s.%s.%s q:%s batch:%d ack:%s P:%s O:%s UA:%s}",
		myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow,
		limit, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"))

	if !Options.DisableMetrics {
		this.subMetrics.SubQps.Mark(1)
	}

	// calculate raw topic according to shadow
	if shadow != "" {
		if !sla.ValidateShadowName(shadow) {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} invalid shadow name",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))

			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "invalid shadow name")
			return
		}

		if !manager.Default.IsShadowedTopic(hisAppid, topic, ver, myAppid, group) {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} not a shadowed topic",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))

			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "register shadow first")
			return
		}

		rawTopic = manager.Default.ShadowTopic(shadow, myAppid, hisAppid, topic, ver, group)
	} else {
		rawTopic = manager.Default.KafkaTopic(hisAppid, topic, ver)
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} cluster not found",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "invalid appid")
		return
	}

	fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic,
		realGroup, r.RemoteAddr, realIp, reset, Options.PermitStandbySub)
	if err != nil {
		// e.g. kafka was totally shutdown
		// e.g. too many consumers for the same group
		log.Error("sub[%s/%s] -(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		if store.DefaultSubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			this.subMetrics.ClientError.Mark(1)
			if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 1) {
				writeQuotaExceeded(w)
			} else {
				writeBadRequest(w, err.Error())
			}
		}

		return
	}

	// commit the acked offset
	if delayedAck && partitionN >= 0 && offsetN >= 0 {
		if err = fetcher.CommitUpto(&sarama.ConsumerMessage{
			Topic:     rawTopic,
			Partition: int32(partitionN),
			Offset:    offsetN,
		}); err != nil {
			// during rebalance, this might happen, but with no bad effects
			log.Trace("sub land[%s/%s] %s(%s) {%s/%s ack:1 O:%s UA:%s} %v",
				myAppid, group, r.RemoteAddr, realIp, rawTopic, partition, offset, r.Header.Get("User-Agent"), err)
		} else {
			log.Debug("sub land[%s/%s] %s(%s) {T:%s/%s, O:%s}",
				myAppid, group, r.RemoteAddr, realIp, rawTopic, partition, offset)
		}
	}

	var gz *gzip.Writer
	w, gz = gzipWriter(w, r)
	err = this.pumpMessages(w, r, realIp, fetcher, limit, myAppid, hisAppid, topic, ver, group, delayedAck)
	if err != nil {
		// e.g. broken pipe, io timeout, client gone
		// e.g. kafka: error while consuming app1.foobar.v1/0: EOF (kafka was shutdown)
		log.Error("sub[%s/%s] %s(%s) {%s ack:%s P:%s O:%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, rawTopic, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"), err)

		if err != ErrClientGone {
			if store.DefaultSubStore.IsSystemError(err) {
				writeServerError(w, err.Error())
			} else {
				this.subMetrics.ClientError.Mark(1)
				if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 1) {
					writeQuotaExceeded(w)
				} else {
					writeBadRequest(w, err.Error())
				}
			}
		} else if Options.BadGroupRateLimit && !store.DefaultSubStore.IsSystemError(err) {
			this.throttleBadGroup.Pour(realGroup, 1)
		}

		// fetch.Close might be called by subServer.closedConnCh
		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)
		}
	} else if w.Header().Get("Connection") == "close" {
		// max req reached, synchronously close this connection
		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)
		}
	}

	if Options.BadGroupRateLimit {
		// record the good consumer group client
		this.goodGroupLock.Lock()
		this.goodGroupClients[r.RemoteAddr] = struct{}{}
		this.goodGroupLock.Unlock()
	}

	if gz != nil {
		gz.Close()
	}
}
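
To make the delayed-ack contract above concrete: with ack=1 the client remembers where it stopped processing and piggybacks that position on its next fetch, sending -1/-1 during handshake or after a 204 No Content, exactly as the comments above describe. A minimal polling sketch follows; the gateway address, credential header names and the partition/offset header names (stand-ins for HttpHeaderPartition/HttpHeaderOffset) are assumptions, as is the idea that the server reports the batch position via response headers.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical gateway address and header names.
	u := "http://localhost:9191/v1/msgs/app1/foobar/v1?group=mygroup&batch=10&ack=1"

	// handshake: nothing consumed yet, so ack -1/-1 as the handler expects
	lastPartition, lastOffset := "-1", "-1"

	for i := 0; i < 3; i++ { // a few polls for illustration
		req, _ := http.NewRequest("GET", u, nil)
		req.Header.Set("Appid", "app2")
		req.Header.Set("Subkey", "mysubkey")
		req.Header.Set("X-Partition", lastPartition) // piggyback last processed position
		req.Header.Set("X-Offset", lastOffset)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()

		if resp.StatusCode == http.StatusNoContent {
			// no new messages: keep acking -1/-1 on the next poll
			lastPartition, lastOffset = "-1", "-1"
			continue
		}

		// process body, then remember how far we got so the next request
		// commits it; reporting the position via response headers is an
		// assumption, not shown in the handler itself
		lastPartition = resp.Header.Get("X-Partition")
		lastOffset = resp.Header.Get("X-Offset")
		fmt.Printf("got %d bytes, will ack P:%s O:%s\n", len(body), lastPartition, lastOffset)
	}
}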
Code Example #13
File: handler_pub_raw.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest POST /v1/raw/msgs/:cluster/:topic?key=mykey&async=1&ack=all
func (this *pubServer) pubRawHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		cluster      string
		topic        string
		partitionKey string
		t1           = time.Now()
	)

	if !Options.DisableMetrics {
		this.pubMetrics.PubTryQps.Mark(1)
	}

	realIp := getHttpRemoteIp(r)
	topic = params.ByName(UrlParamTopic)
	cluster = params.ByName("cluster")

	buf := bytes.NewBuffer(make([]byte, 0, 1<<10))
	_, err := buf.ReadFrom(r.Body)
	if err != nil {
		log.Error("pub raw %s(%s) {C:%s T:%s UA:%s} %s",
			r.RemoteAddr, realIp, cluster, topic, r.Header.Get("User-Agent"), err)

		this.pubMetrics.ClientError.Inc(1)
		writeBadRequest(w, err.Error())
		return
	}

	body := buf.Bytes()

	if !Options.DisableMetrics {
		this.pubMetrics.PubQps.Mark(1)
		this.pubMetrics.PubMsgSize.Update(int64(len(body)))
	}

	query := r.URL.Query() // reuse the query will save 100ns
	partitionKey = query.Get("key")

	pubMethod := store.DefaultPubStore.SyncPub
	if query.Get("async") == "1" {
		pubMethod = store.DefaultPubStore.AsyncPub
	}
	if query.Get("ack") == "all" {
		pubMethod = store.DefaultPubStore.SyncAllPub
	}

	_, _, err = pubMethod(cluster, topic, []byte(partitionKey), body)
	if err != nil {
		log.Error("pub raw %s(%s) {C:%s T:%s UA:%s} %s",
			r.RemoteAddr, realIp, cluster, topic, r.Header.Get("User-Agent"), err)

		writeServerError(w, err.Error())
		return
	}

	w.WriteHeader(http.StatusCreated)

	if _, err = w.Write(ResponseOk); err != nil {
		log.Error("pub raw %s(%s) {C:%s T:%s UA:%s} %s",
			r.RemoteAddr, realIp, cluster, topic, r.Header.Get("User-Agent"), err)

		this.pubMetrics.ClientError.Inc(1)
	}

	if !Options.DisableMetrics {
		this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms
	}

}
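
A matching producer sketch for the raw pub path, which addresses a cluster and Kafka topic directly and supports the key, async=1 and ack=all switches from the route comment. Note that this handler performs no Appid/Pubkey check; the gateway address, cluster and topic below are placeholders.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical gateway address; cluster and topic are addressed directly,
	// bypassing the appid/topic/ver mapping used by the normal pub path.
	u := "http://localhost:9191/v1/raw/msgs/mycluster/app1.foobar.v1?key=user42&ack=all"

	resp, err := http.Post(u, "application/octet-stream",
		bytes.NewReader([]byte("hello raw pub")))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// the handler answers 201 Created on success
	fmt.Println("pub raw:", resp.Status)
}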
Code Example #14
File: handler_sub_bury.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest PUT /v1/msgs/:appid/:topic/:ver?group=xx&q=<dead|retry>
// q=retry&X-Bury=dead means bury from retry queue to dead queue
func (this *subServer) buryHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic      string
		ver        string
		myAppid    string
		hisAppid   string
		group      string
		rawTopic   string
		shadow     string
		bury       string
		partition  string
		partitionN int = -1
		offset     string
		offsetN    int64 = -1
		err        error
	)

	query := r.URL.Query()
	group = query.Get("group")
	if !manager.Default.ValidateGroupName(r.Header, group) {
		writeBadRequest(w, "illegal group")
		return
	}

	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)
	myAppid = r.Header.Get(HttpHeaderAppid)
	realIp := getHttpRemoteIp(r)

	bury = r.Header.Get(HttpHeaderMsgBury)
	if !sla.ValidateShadowName(bury) {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} illegal bury: %s",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), bury)

		writeBadRequest(w, "illegal bury")
		return
	}

	// auth
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey),
		hisAppid, topic, group); err != nil {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		writeAuthFailure(w, err)
		return
	}

	partition = r.Header.Get(HttpHeaderPartition)
	offset = r.Header.Get(HttpHeaderOffset)
	if partition == "" || offset == "" {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} empty offset or partition",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))

		writeBadRequest(w, "empty offset or partition")
		return
	}

	offsetN, err = strconv.ParseInt(offset, 10, 64)
	if err != nil || offsetN < 0 {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} illegal offset:%s",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), offset)

		writeBadRequest(w, "bad offset")
		return
	}
	partitionN, err = strconv.Atoi(partition)
	if err != nil || partitionN < 0 {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} illegal partition:%s",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), partition)

		writeBadRequest(w, "bad partition")
		return
	}

	shadow = query.Get("q")

	log.Debug("bury[%s/%s] %s(%s) {%s.%s.%s bury:%s shadow=%s partition:%s offset:%s UA:%s}",
		myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, bury, shadow, partition, offset, r.Header.Get("User-Agent"))

	msgLen := int(r.ContentLength)
	msg := make([]byte, msgLen)
	if _, err = io.ReadAtLeast(r.Body, msg, msgLen); err != nil {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		writeBadRequest(w, err.Error())
		return
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("bury[%s/%s] %s(%s) {%s.%s.%s UA:%s} invalid appid:%s",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), hisAppid)

		writeBadRequest(w, "invalid appid")
		return
	}

	// calculate raw topic according to shadow
	if shadow != "" {
		if !sla.ValidateShadowName(shadow) {
			log.Error("bury[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} invalid shadow name",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))

			writeBadRequest(w, "invalid shadow name")
			return
		}

		if !manager.Default.IsShadowedTopic(hisAppid, topic, ver, myAppid, group) {
			log.Error("bury[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} not a shadowed topic",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))

			writeBadRequest(w, "register shadow first")
			return
		}

		rawTopic = manager.Default.ShadowTopic(shadow, myAppid, hisAppid, topic, ver, group)
	} else {
		rawTopic = manager.Default.KafkaTopic(hisAppid, topic, ver)
	}

	fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic,
		myAppid+"."+group, r.RemoteAddr, realIp, "", Options.PermitStandbySub)
	if err != nil {
		log.Error("bury[%s/%s] %s(%s) {%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, rawTopic, r.Header.Get("User-Agent"), err)

		writeBadRequest(w, err.Error())
		return
	}

	// step1: pub
	shadowTopic := manager.Default.ShadowTopic(bury, myAppid, hisAppid, topic, ver, group)
	_, _, err = store.DefaultPubStore.SyncPub(cluster, shadowTopic, nil, msg)
	if err != nil {
		log.Error("bury[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, shadowTopic, err)

		writeServerError(w, err.Error())
		return
	}

	// step2: skip this message in the master topic TODO atomic with step1
	if err = fetcher.CommitUpto(&sarama.ConsumerMessage{
		Topic:     rawTopic,
		Partition: int32(partitionN),
		Offset:    offsetN,
	}); err != nil {
		log.Error("bury[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)

		writeServerError(w, err.Error())
		return
	}

	w.Write(ResponseOk)
}
Code Example #15
File: handler_pub.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest POST /v1/msgs/:topic/:ver?key=mykey&async=1&ack=all&hh=n
func (this *pubServer) pubHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		appid        string
		topic        string
		ver          string
		tag          string
		partitionKey string
		async        bool
		hhDisabled   bool // hh enabled by default
		t1           = time.Now()
	)

	if !Options.DisableMetrics {
		this.pubMetrics.PubTryQps.Mark(1)
	}

	realIp := getHttpRemoteIp(r)
	if Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) {
		log.Warn("pub[%s] %s(%s) rate limit reached: %d/s", appid, r.RemoteAddr, realIp, Options.PubQpsLimit)

		this.pubMetrics.ClientError.Inc(1)
		writeQuotaExceeded(w)
		return
	}

	appid = r.Header.Get(HttpHeaderAppid)
	topic = params.ByName(UrlParamTopic)
	ver = params.ByName(UrlParamVersion)

	if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {
		log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} %s",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), err)

		this.pubMetrics.ClientError.Inc(1)
		this.respond4XX(appid, w, err.Error(), http.StatusUnauthorized)
		return
	}

	msgLen := int(r.ContentLength)
	switch {
	case int64(msgLen) > Options.MaxPubSize:
		log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too big content length: %d",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), msgLen)

		this.pubMetrics.ClientError.Inc(1)
		this.respond4XX(appid, w, ErrTooBigMessage.Error(), http.StatusBadRequest)
		return

	case msgLen < Options.MinPubSize:
		log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too small content length: %d",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), msgLen)

		this.pubMetrics.ClientError.Inc(1)
		this.respond4XX(appid, w, ErrTooSmallMessage.Error(), http.StatusBadRequest)
		return
	}

	query := r.URL.Query() // reuse the query will save 100ns

	partitionKey = query.Get("key")
	if len(partitionKey) > MaxPartitionKeyLen {
		log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too big key: %s",
			appid, r.RemoteAddr, realIp, topic, ver,
			r.Header.Get("User-Agent"), partitionKey)

		this.pubMetrics.ClientError.Inc(1)
		this.respond4XX(appid, w, "too big key", http.StatusBadRequest)
		return
	}

	var msg *mpool.Message
	tag = r.Header.Get(HttpHeaderMsgTag)
	if tag != "" {
		if len(tag) > Options.MaxMsgTagLen {
			this.respond4XX(appid, w, "too big tag", http.StatusBadRequest)
			return
		}

		msgSz := tagLen(tag) + msgLen
		msg = mpool.NewMessage(msgSz)
		msg.Body = msg.Body[0:msgSz]
	} else {
		msg = mpool.NewMessage(msgLen)
		msg.Body = msg.Body[0:msgLen]
	}

	// get the raw POST message, if body more than content-length ignore the extra payload
	lbr := io.LimitReader(r.Body, Options.MaxPubSize+1)
	if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()

		log.Error("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} %s",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), err)

		this.pubMetrics.ClientError.Inc(1)
		this.respond4XX(appid, w, err.Error(), http.StatusBadRequest) // TODO http.StatusRequestEntityTooLarge
		return
	}

	if tag != "" {
		AddTagToMessage(msg, tag)
	}

	if !Options.DisableMetrics {
		this.pubMetrics.PubQps.Mark(1)
		this.pubMetrics.PubMsgSize.Update(int64(len(msg.Body)))
	}

	cluster, found := manager.Default.LookupCluster(appid)
	if !found {
		log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} cluster not found",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"))

		this.pubMetrics.ClientError.Inc(1)
		this.respond4XX(appid, w, "invalid appid", http.StatusBadRequest)
		return
	}

	var (
		partition int32
		offset    int64
		err       error
		rawTopic  = manager.Default.KafkaTopic(appid, topic, ver)
	)

	pubMethod := store.DefaultPubStore.SyncPub
	async = query.Get("async") == "1"
	if async {
		pubMethod = store.DefaultPubStore.AsyncPub
	}

	ackAll := query.Get("ack") == "all"
	if ackAll {
		pubMethod = store.DefaultPubStore.SyncAllPub
	}

	hhDisabled = query.Get("hh") == "n" // yes | no

	msgKey := []byte(partitionKey)
	if ackAll {
		// hh not applied
		partition, offset, err = pubMethod(cluster, rawTopic, msgKey, msg.Body)
	} else if Options.AllwaysHintedHandoff {
		err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body)
	} else if !hhDisabled && Options.EnableHintedHandoff && !hh.Default.Empty(cluster, rawTopic) {
		err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body)
	} else if async {
		if !hhDisabled && Options.EnableHintedHandoff {
			// async uses hinted handoff mechanism to save memory overhead
			err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body)
		} else {
			// message pool can't be applied on async pub because
			// we don't know when to recycle the memory
			body := make([]byte, len(msg.Body)) // full length, so the copy below actually copies the payload
			copy(body, msg.Body)
			partition, offset, err = pubMethod(cluster, rawTopic, msgKey, body)
		}
	} else {
		// hack byte string conv TODO
		partition, offset, err = pubMethod(cluster, rawTopic, msgKey, msg.Body)
		if err != nil && store.DefaultPubStore.IsSystemError(err) && !hhDisabled && Options.EnableHintedHandoff {
			log.Warn("pub[%s] %s(%s) {%s.%s.%s UA:%s} resort hh for: %v", appid, r.RemoteAddr, realIp,
				appid, topic, ver, r.Header.Get("User-Agent"), err)
			err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body)
		}
	}

	// in case of request panic, mem pool leakage
	msg.Free()

	if Options.AuditPub {
		this.auditor.Trace("pub[%s] %s(%s) {%s.%s.%s UA:%s} {P:%d O:%d}",
			appid, r.RemoteAddr, realIp, appid, topic, ver, r.Header.Get("User-Agent"), partition, offset)
	}

	if err != nil {
		log.Error("pub[%s] %s(%s) {topic:%s ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err)

		if !Options.DisableMetrics {
			this.pubMetrics.PubFail(appid, topic, ver)
		}

		if store.DefaultPubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			this.respond4XX(appid, w, err.Error(), http.StatusBadRequest)
		}
		return
	}

	w.Header().Set(HttpHeaderPartition, strconv.FormatInt(int64(partition), 10))
	w.Header().Set(HttpHeaderOffset, strconv.FormatInt(offset, 10))
	if async {
		w.WriteHeader(http.StatusAccepted)
	} else {
		w.WriteHeader(http.StatusCreated)
	}

	if _, err = w.Write(ResponseOk); err != nil {
		log.Error("%s: %v", r.RemoteAddr, err)
		this.pubMetrics.ClientError.Inc(1)
	}

	if !Options.DisableMetrics {
		this.pubMetrics.PubOk(appid, topic, ver)
		this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms
	}

}
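
A producer sketch against the normal pub path. On success the handler sets the partition and offset response headers and answers 201 Created (202 Accepted for async=1); the wire names of those headers, the gateway address and the credentials below are assumptions.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical gateway address and header names.
	u := "http://localhost:9191/v1/msgs/foobar/v1?key=user42"
	req, _ := http.NewRequest(http.MethodPost, u, bytes.NewReader([]byte(`{"hello":"world"}`)))
	req.Header.Set("Appid", "app1")
	req.Header.Set("Pubkey", "mypubkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusAccepted {
		fmt.Println("pub failed:", resp.Status)
		return
	}
	// assumed wire names for HttpHeaderPartition / HttpHeaderOffset
	fmt.Printf("stored at partition=%s offset=%s\n",
		resp.Header.Get("X-Partition"), resp.Header.Get("X-Offset"))
}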
Code Example #16
File: handler_job.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest POST /v1/jobs/:topic/:ver?delay=100|due=1471565204
// TODO tag, partitionKey
// TODO use dedicated metrics
func (this *pubServer) addJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	if !Options.DisableMetrics {
		this.pubMetrics.JobTryQps.Mark(1)
	}

	t1 := time.Now()
	realIp := getHttpRemoteIp(r)
	appid := r.Header.Get(HttpHeaderAppid)

	var due int64
	q := r.URL.Query()
	dueParam := q.Get("due") // due has higher priority than delay
	if dueParam != "" {
		d, err := strconv.ParseInt(dueParam, 10, 64)
		if err != nil {
			log.Error("+job[%s] %s(%s) due:%s %s", appid, r.RemoteAddr, realIp, dueParam, err)

			writeBadRequest(w, "invalid due param")
			return
		}

		due = d
	} else {
		delayParam := q.Get("delay") // in sec
		delay, err := strconv.ParseInt(delayParam, 10, 64)
		if err != nil {
			log.Error("+job[%s] %s(%s) delay:%s %s", appid, r.RemoteAddr, realIp, delayParam, err)

			writeBadRequest(w, "invalid delay param")
			return
		}

		due = t1.Unix() + delay
	}

	if due <= t1.Unix() {
		log.Error("+job[%s] %s(%s) due=%d before now?", appid, r.RemoteAddr, realIp, due)

		writeBadRequest(w, "invalid param")
		return
	}

	if Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) {
		log.Warn("+job[%s] %s(%s) rate limit reached", appid, r.RemoteAddr, realIp)

		writeQuotaExceeded(w)
		return
	}

	topic := params.ByName(UrlParamTopic)
	ver := params.ByName(UrlParamVersion)
	if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err)

		writeAuthFailure(w, err)
		return
	}

	// get the raw POST message
	msgLen := int(r.ContentLength)
	switch {
	case msgLen == -1:
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} invalid content length: %d",
			appid, r.RemoteAddr, realIp, topic, ver, msgLen)

		writeBadRequest(w, "invalid content length")
		return

	case int64(msgLen) > Options.MaxJobSize:
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} too big content length: %d",
			appid, r.RemoteAddr, realIp, topic, ver, msgLen)
		writeBadRequest(w, ErrTooBigMessage.Error())
		return

	case msgLen < Options.MinPubSize:
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} too small content length: %d",
			appid, r.RemoteAddr, realIp, topic, ver, msgLen)
		writeBadRequest(w, ErrTooSmallMessage.Error())
		return
	}

	lbr := io.LimitReader(r.Body, Options.MaxJobSize+1)
	msg := mpool.NewMessage(msgLen)
	msg.Body = msg.Body[0:msgLen]
	if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()

		log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} %s",
			appid, r.RemoteAddr, realIp, topic, ver, err)
		writeBadRequest(w, ErrTooBigMessage.Error()) // TODO http.StatusRequestEntityTooLarge
		return
	}

	log.Debug("+job[%s] %s(%s) {topic:%s, ver:%s} due:%d/%ds",
		appid, r.RemoteAddr, realIp, topic, ver, due, due-t1.Unix())

	if !Options.DisableMetrics {
		this.pubMetrics.JobQps.Mark(1)
		this.pubMetrics.JobMsgSize.Update(int64(len(msg.Body)))
	}

	_, found := manager.Default.LookupCluster(appid)
	if !found {
		msg.Free()

		log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} cluster not found",
			appid, r.RemoteAddr, realIp, topic, ver)

		writeBadRequest(w, "invalid appid")
		return
	}

	jobId, err := job.Default.Add(appid, manager.Default.KafkaTopic(appid, topic, ver), msg.Body, due)
	msg.Free()
	if err != nil {
		if !Options.DisableMetrics {
			this.pubMetrics.PubFail(appid, topic, ver)
		}

		log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} %s",
			appid, r.RemoteAddr, realIp, topic, ver, err)
		writeServerError(w, err.Error())
		return
	}

	if Options.AuditPub {
		this.auditor.Trace("+job[%s] %s(%s) {topic:%s ver:%s UA:%s} due:%d id:%s",
			appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), due, jobId)
	}

	w.Header().Set(HttpHeaderJobId, jobId)
	w.WriteHeader(http.StatusCreated)

	if _, err = w.Write(ResponseOk); err != nil {
		log.Error("%s: %v", r.RemoteAddr, err)
		this.pubMetrics.ClientError.Inc(1)
	}

	if !Options.DisableMetrics {
		this.pubMetrics.PubOk(appid, topic, ver)
		this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms
	}
}
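
A client sketch for scheduling a job: either delay=<seconds> or due=<unix timestamp> must place the job in the future, and on success the job id is returned in the HttpHeaderJobId response header. Its wire name, the gateway address and the credential header names are assumptions.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical gateway address and header names.
	u := "http://localhost:9191/v1/jobs/foobar/v1?delay=300" // fire in 5 minutes
	req, _ := http.NewRequest(http.MethodPost, u, bytes.NewReader([]byte(`{"order_id":42}`)))
	req.Header.Set("Appid", "app1")
	req.Header.Set("Pubkey", "mypubkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
		fmt.Println("add job failed:", resp.Status)
		return
	}
	// assumed wire name for HttpHeaderJobId
	fmt.Println("job id:", resp.Header.Get("X-Job-Id"))
}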
Code Example #17
File: handler_sub_ws.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest GET /v1/ws/msgs/:appid/:topic/:ver?group=xx
func (this *subServer) subWsHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Error("%s: %v", r.RemoteAddr, err)
		return
	}

	defer func() {
		ws.Close()

		this.gw.svrMetrics.ConcurrentSubWs.Dec(1)
		this.idleConnsWg.Done()
	}()

	var (
		topic       string
		ver         string
		myAppid     string
		hisAppid    string
		resetOffset string
		group       string
	)

	query := r.URL.Query()
	group = query.Get("group")
	resetOffset = query.Get("reset")
	limit, err := getHttpQueryInt(&query, "limit", 1)
	if err != nil {
		writeWsError(ws, err.Error())
		return
	}
	if !manager.Default.ValidateGroupName(r.Header, group) {
		log.Warn("consumer %s{topic:%s, ver:%s, group:%s, limit:%d} invalid group",
			r.RemoteAddr, topic, ver, group, limit)
		return
	}

	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)
	myAppid = r.Header.Get(HttpHeaderAppid)
	realIp := getHttpRemoteIp(r)
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey),
		hisAppid, topic, group); err != nil {
		log.Error("consumer[%s] %s {hisapp:%s, topic:%s, ver:%s, group:%s, limit:%d}: %s",
			myAppid, r.RemoteAddr, hisAppid, topic, ver, group, limit, err)

		writeWsError(ws, "auth fail")
		return
	}

	log.Debug("sub[%s] %s: %+v", myAppid, r.RemoteAddr, params)

	rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("cluster not found for subd app: %s", hisAppid)

		writeWsError(ws, "invalid subd appid")
		return
	}

	fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic,
		myAppid+"."+group, r.RemoteAddr, realIp, resetOffset, Options.PermitStandbySub)
	if err != nil {
		log.Error("sub[%s] %s: %+v %v", myAppid, r.RemoteAddr, params, err)

		writeWsError(ws, err.Error())
		return
	}

	// kateway             sub client
	//   |                    |
	//   | ping               |
	//   |------------------->|
	//   |                    |
	//   |               pong |
	//   |<-------------------|
	//   |                    |
	//

	clientGone := make(chan struct{})
	go this.wsWritePump(clientGone, ws, fetcher)
	this.wsReadPump(clientGone, ws)

	return
}
Code Example #18
File: handler_sub_ack.go  Project: funkygao/gafka
//go:generate goannotation $GOFILE
// @rest PUT /v1/raw/offsets/:cluster/:topic/:group with json body
func (this *subServer) ackRawHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic   string
		cluster string
		myAppid string
		group   string
		err     error
	)

	group = params.ByName(UrlParamGroup)
	cluster = params.ByName("cluster")
	topic = params.ByName(UrlParamTopic)
	myAppid = r.Header.Get(HttpHeaderAppid)

	msgLen := int(r.ContentLength)
	switch {
	case int64(msgLen) > Options.MaxPubSize:
		writeBadRequest(w, ErrTooBigMessage.Error())
		return

	case msgLen < Options.MinPubSize:
		writeBadRequest(w, ErrTooSmallMessage.Error())
		return
	}

	var msg *mpool.Message
	msg = mpool.NewMessage(msgLen)
	msg.Body = msg.Body[0:msgLen]
	lbr := io.LimitReader(r.Body, Options.MaxPubSize+1)
	if _, err = io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()

		writeBadRequest(w, ErrTooBigMessage.Error())
		return
	}

	var acks ackOffsets
	if err = json.Unmarshal(msg.Body, &acks); err != nil {
		msg.Free()

		writeBadRequest(w, "invalid ack json body")
		return
	}

	msg.Free()

	realIp := getHttpRemoteIp(r)
	realGroup := myAppid + "." + group
	for i := 0; i < len(acks); i++ {
		acks[i].cluster = cluster
		acks[i].topic = topic
		acks[i].group = realGroup
	}

	log.Debug("ack raw[%s/%s] %s(%s) {%s/%s UA:%s} %+v",
		myAppid, group, r.RemoteAddr, realIp, cluster, topic, r.Header.Get("User-Agent"), acks)

	if atomic.AddInt32(&this.ackShutdown, 1) == 0 {
		writeServerError(w, "server is shutting down")
		return
	}

	this.ackCh <- acks
	atomic.AddInt32(&this.ackShutdown, -1)

	w.Write(ResponseOk)
}