// BenchmarkAddTagToMessage measures the cost of stamping a tag ("a;c") onto a
// message whose body is 900 bytes. Throughput is reported as tag size plus
// body size per operation.
func BenchmarkAddTagToMessage(b *testing.B) {
	b.ReportAllocs()
	m := mpool.NewMessage(1024)
	m.Body = m.Body[:1024]
	tag := "a;c"
	// NOTE(review): this replaces the pooled 1024-byte buffer with a fresh
	// 900-byte slice, so the mpool capacity headroom above is discarded —
	// confirm whether AddTagToMessage relies on spare pool capacity.
	m.Body = []byte(strings.Repeat("X", 900))
	for i := 0; i < b.N; i++ {
		// NOTE(review): the same message is tagged on every iteration; if
		// AddTagToMessage prepends rather than replaces an existing tag,
		// later iterations operate on a longer body — verify intent.
		AddTagToMessage(m, tag)
	}
	b.SetBytes(int64(tagLen(tag)) + 900)
}
func BenchmarkExtractMessageTag(b *testing.B) { b.ReportAllocs() m := mpool.NewMessage(1024) m.Body = m.Body[:1024] tag := "a;c" m.Body = []byte(strings.Repeat("X", 900)) AddTagToMessage(m, tag) for i := 0; i < b.N; i++ { ExtractMessageTag(m.Body) } b.SetBytes(int64(len(m.Body))) }
func TestAddAndExtractMessageTag(t *testing.T) { tag := "a=b;c=d" body := "hello world" m := mpool.NewMessage(len(body) + tagLen(tag)) m.Body = m.Body[:len(body)+tagLen(tag)] for i := 0; i < len(body); i++ { m.Body[i] = body[i] } t.Logf("%s %+v %d/%d", string(m.Body), m.Body, len(body), len(m.Body)) AddTagToMessage(m, tag) t.Logf("%s %+v %d", string(m.Body), m.Body, len(m.Body)) assert.Equal(t, TagMarkStart, m.Body[0]) t.Logf("%s", string(m.Body)) // extract tag tags, i, err := ExtractMessageTag(m.Body) assert.Equal(t, nil, err) assert.Equal(t, body, string(m.Body[i:])) assert.Equal(t, 2, len(tags)) t.Logf("%+v", tags) }
//go:generate goannotation $GOFILE

// @rest POST /v1/jobs/:topic/:ver?delay=100|due=1471565204
// TODO tag, partitionKey
// TODO use dedicated metrics
//
// addJobHandler schedules the raw POST body as a delayed job. The firing time
// is taken from the "due" query param (unix seconds) or, failing that,
// computed from the "delay" param (seconds from now). The due time must be in
// the future. On success it responds 201 with the job id in a header.
func (this *pubServer) addJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	if !Options.DisableMetrics {
		this.pubMetrics.JobTryQps.Mark(1)
	}

	t1 := time.Now()
	realIp := getHttpRemoteIp(r)
	appid := r.Header.Get(HttpHeaderAppid)

	var due int64
	q := r.URL.Query()
	dueParam := q.Get("due")
	// due has higher priority than delay
	if dueParam != "" {
		d, err := strconv.ParseInt(dueParam, 10, 64)
		if err != nil {
			log.Error("+job[%s] %s(%s) due:%s %s", appid, r.RemoteAddr, realIp, dueParam, err)

			writeBadRequest(w, "invalid due param")
			return
		}

		due = d
	} else {
		delayParam := q.Get("delay") // in sec
		delay, err := strconv.ParseInt(delayParam, 10, 64)
		if err != nil {
			log.Error("+job[%s] %s(%s) delay:%s %s", appid, r.RemoteAddr, realIp, delayParam, err)

			writeBadRequest(w, "invalid delay param")
			return
		}

		due = t1.Unix() + delay
	}

	// reject jobs that would fire in the past (or right now)
	if due <= t1.Unix() {
		log.Error("+job[%s] %s(%s) due=%d before now?", appid, r.RemoteAddr, realIp, due)

		writeBadRequest(w, "invalid param")
		return
	}

	if Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) {
		log.Warn("+job[%s] %s(%s) rate limit reached", appid, r.RemoteAddr, realIp)

		writeQuotaExceeded(w)
		return
	}

	topic := params.ByName(UrlParamTopic)
	ver := params.ByName(UrlParamVersion)
	// the caller must own the topic it publishes jobs to
	if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err)

		writeAuthFailure(w, err)
		return
	}

	// get the raw POST message
	msgLen := int(r.ContentLength)
	switch {
	case msgLen == -1:
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} invalid content length: %d", appid, r.RemoteAddr, realIp, topic, ver, msgLen)
		writeBadRequest(w, "invalid content length")
		return

	case int64(msgLen) > Options.MaxJobSize:
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} too big content length: %d", appid, r.RemoteAddr, realIp, topic, ver, msgLen)
		writeBadRequest(w, ErrTooBigMessage.Error())
		return

	case msgLen < Options.MinPubSize:
		log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} too small content length: %d", appid, r.RemoteAddr, realIp, topic, ver, msgLen)
		writeBadRequest(w, ErrTooSmallMessage.Error())
		return
	}

	// read exactly Content-Length bytes into a pooled message; the limit
	// reader caps a lying client at MaxJobSize+1
	lbr := io.LimitReader(r.Body, Options.MaxJobSize+1)
	msg := mpool.NewMessage(msgLen)
	msg.Body = msg.Body[0:msgLen]
	if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()

		log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err)
		writeBadRequest(w, ErrTooBigMessage.Error()) // TODO http.StatusRequestEntityTooLarge
		return
	}

	log.Debug("+job[%s] %s(%s) {topic:%s, ver:%s} due:%d/%ds", appid, r.RemoteAddr, realIp, topic, ver, due, due-t1.Unix())

	if !Options.DisableMetrics {
		this.pubMetrics.JobQps.Mark(1)
		this.pubMetrics.JobMsgSize.Update(int64(len(msg.Body)))
	}

	_, found := manager.Default.LookupCluster(appid)
	if !found {
		msg.Free()

		log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} cluster not found", appid, r.RemoteAddr, realIp, topic, ver)
		writeBadRequest(w, "invalid appid")
		return
	}

	// msg is freed unconditionally right after Add: the job store has either
	// consumed the body or failed; either way the pooled buffer is done
	jobId, err := job.Default.Add(appid, manager.Default.KafkaTopic(appid, topic, ver), msg.Body, due)
	msg.Free()
	if err != nil {
		if !Options.DisableMetrics {
			this.pubMetrics.PubFail(appid, topic, ver)
		}

		log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err)
		writeServerError(w, err.Error())
		return
	}

	if Options.AuditPub {
		this.auditor.Trace("+job[%s] %s(%s) {topic:%s ver:%s UA:%s} due:%d id:%s", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), due, jobId)
	}

	w.Header().Set(HttpHeaderJobId, jobId)
	w.WriteHeader(http.StatusCreated)
	if _, err = w.Write(ResponseOk); err != nil {
		log.Error("%s: %v", r.RemoteAddr, err)
		this.pubMetrics.ClientError.Inc(1)
	}

	if !Options.DisableMetrics {
		this.pubMetrics.PubOk(appid, topic, ver)
		this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms
	}
}
// POST /v1/jobs/:topic/:ver?delay=100s func (this *pubServer) addJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) { t1 := time.Now() if Options.EnableClientStats { // TODO enable pub or sub client stats this.gw.clientStates.RegisterPubClient(r) } appid := r.Header.Get(HttpHeaderAppid) realIp := getHttpRemoteIp(r) if Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) { log.Warn("+job[%s] %s(%s) rate limit reached", appid, r.RemoteAddr, realIp) writeQuotaExceeded(w) return } topic := params.ByName(UrlParamTopic) ver := params.ByName(UrlParamVersion) if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil { log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err) writeAuthFailure(w, err) return } // get the raw POST message msgLen := int(r.ContentLength) switch { case msgLen == -1: log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} invalid content length: %d", appid, r.RemoteAddr, realIp, topic, ver, msgLen) writeBadRequest(w, "invalid content length") return case int64(msgLen) > Options.MaxPubSize: log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} too big content length: %d", appid, r.RemoteAddr, realIp, topic, ver, msgLen) writeBadRequest(w, ErrTooBigMessage.Error()) return case msgLen < Options.MinPubSize: log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} too small content length: %d", appid, r.RemoteAddr, realIp, topic, ver, msgLen) writeBadRequest(w, ErrTooSmallMessage.Error()) return } lbr := io.LimitReader(r.Body, Options.MaxPubSize+1) msg := mpool.NewMessage(msgLen) msg.Body = msg.Body[0:msgLen] if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil { msg.Free() log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err) writeBadRequest(w, ErrTooBigMessage.Error()) return } if Options.Debug { log.Debug("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, string(msg.Body)) } if 
!Options.DisableMetrics { this.pubMetrics.PubQps.Mark(1) this.pubMetrics.PubMsgSize.Update(int64(len(msg.Body))) } delay, err := time.ParseDuration(r.URL.Query().Get("delay")) if err != nil { writeBadRequest(w, "invalid delay format") return } cluster, found := manager.Default.LookupCluster(appid) if !found { log.Warn("+job[%s] %s(%s) {topic:%s, ver:%s} cluster not found", appid, r.RemoteAddr, realIp, topic, ver) writeBadRequest(w, "invalid appid") return } jobId, err := store.DefaultPubStore.AddJob(cluster, manager.Default.KafkaTopic(appid, topic, ver), msg.Body, delay) if err != nil { msg.Free() // defer is costly if !Options.DisableMetrics { this.pubMetrics.PubFail(appid, topic, ver) } log.Error("+job[%s] %s(%s) {topic:%s, ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err) writeServerError(w, err.Error()) return } msg.Free() w.Header().Set(HttpHeaderJobId, jobId) w.WriteHeader(http.StatusCreated) if _, err = w.Write(ResponseOk); err != nil { log.Error("%s: %v", r.RemoteAddr, err) this.pubMetrics.ClientError.Inc(1) } if !Options.DisableMetrics { this.pubMetrics.PubOk(appid, topic, ver) this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms } }
// POST /v1/msgs/:topic/:ver?key=mykey&async=1&ack=all&batch=1 func (this *pubServer) pubHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) { var ( appid string topic string ver string tag string partitionKey string t1 = time.Now() ) if Options.EnableClientStats { // TODO enable pub or sub client stats this.gw.clientStates.RegisterPubClient(r) } realIp := getHttpRemoteIp(r) if Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) { log.Warn("pub[%s] %s(%s) rate limit reached: %d/s", appid, r.RemoteAddr, realIp, Options.PubQpsLimit) this.pubMetrics.ClientError.Inc(1) writeQuotaExceeded(w) return } appid = r.Header.Get(HttpHeaderAppid) topic = params.ByName(UrlParamTopic) ver = params.ByName(UrlParamVersion) if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil { log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), err) this.pubMetrics.ClientError.Inc(1) writeAuthFailure(w, err) return } msgLen := int(r.ContentLength) switch { case int64(msgLen) > Options.MaxPubSize: log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too big content length: %d", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), msgLen) this.pubMetrics.ClientError.Inc(1) writeBadRequest(w, ErrTooBigMessage.Error()) return case msgLen < Options.MinPubSize: log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too small content length: %d", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), msgLen) this.pubMetrics.ClientError.Inc(1) writeBadRequest(w, ErrTooSmallMessage.Error()) return } var msg *mpool.Message tag = r.Header.Get(HttpHeaderMsgTag) if tag != "" { if len(tag) > Options.MaxMsgTagLen { writeBadRequest(w, "too big tag") return } msg = mpool.NewMessage(tagLen(tag) + msgLen) msg.Body = msg.Body[0 : tagLen(tag)+msgLen] } else { msg = mpool.NewMessage(msgLen) msg.Body = msg.Body[0:msgLen] } // get the raw POST message lbr := 
io.LimitReader(r.Body, Options.MaxPubSize+1) if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil { msg.Free() log.Error("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), err) this.pubMetrics.ClientError.Inc(1) writeBadRequest(w, ErrTooBigMessage.Error()) return } if tag != "" { AddTagToMessage(msg, tag) } if Options.AuditPub { this.auditor.Trace("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} k:%s vlen:%d h:%d", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), partitionKey, msgLen, adler32.Checksum(msg.Body)) } if !Options.DisableMetrics { this.pubMetrics.PubQps.Mark(1) this.pubMetrics.PubMsgSize.Update(int64(len(msg.Body))) } query := r.URL.Query() // reuse the query will save 100ns if query.Get("batch") == "1" { // TODO } partitionKey = query.Get("key") if len(partitionKey) > MaxPartitionKeyLen { log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too big key: %s", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), partitionKey) this.pubMetrics.ClientError.Inc(1) writeBadRequest(w, "too big key") return } pubMethod := store.DefaultPubStore.SyncPub if query.Get("async") == "1" { pubMethod = store.DefaultPubStore.AsyncPub } if query.Get("ack") == "all" { pubMethod = store.DefaultPubStore.SyncAllPub } cluster, found := manager.Default.LookupCluster(appid) if !found { log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} cluster not found", appid, r.RemoteAddr, realIp, topic, r.Header.Get("User-Agent"), ver) this.pubMetrics.ClientError.Inc(1) writeBadRequest(w, "invalid appid") return } partition, offset, err := pubMethod(cluster, manager.Default.KafkaTopic(appid, topic, ver), []byte(partitionKey), msg.Body) if err != nil { log.Error("pub[%s] %s(%s) {topic:%s ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err) msg.Free() // defer is costly if !Options.DisableMetrics { this.pubMetrics.PubFail(appid, topic, ver) } writeServerError(w, err.Error()) return } msg.Free() 
w.Header().Set(HttpHeaderPartition, strconv.FormatInt(int64(partition), 10)) w.Header().Set(HttpHeaderOffset, strconv.FormatInt(offset, 10)) w.WriteHeader(http.StatusCreated) if _, err = w.Write(ResponseOk); err != nil { log.Error("%s: %v", r.RemoteAddr, err) this.pubMetrics.ClientError.Inc(1) } if !Options.DisableMetrics { this.pubMetrics.PubOk(appid, topic, ver) this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms } }
// PUT /v1/offsets/:appid/:topic/:ver/:group with json body
//
// ackHandler accepts a JSON array of offset acknowledgements from a consumer
// group and forwards them to the async commit channel. The ackShutdown
// counter guards the send against the channel being closed during shutdown.
func (this *subServer) ackHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic    string
		ver      string
		myAppid  string
		hisAppid string
		group    string
		err      error
	)

	group = params.ByName(UrlParamGroup)
	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)
	myAppid = r.Header.Get(HttpHeaderAppid)

	// the caller must be authorized to subscribe hisAppid's topic
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey), hisAppid, topic, group); err != nil {
		writeAuthFailure(w, err)
		return
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		writeBadRequest(w, "invalid appid")
		return
	}

	msgLen := int(r.ContentLength)
	switch {
	case int64(msgLen) > Options.MaxPubSize:
		writeBadRequest(w, ErrTooBigMessage.Error())
		return

	case msgLen < Options.MinPubSize:
		writeBadRequest(w, ErrTooSmallMessage.Error())
		return
	}

	// read exactly Content-Length bytes of the JSON ack body into a pooled buffer
	var msg *mpool.Message
	msg = mpool.NewMessage(msgLen)
	msg.Body = msg.Body[0:msgLen]
	lbr := io.LimitReader(r.Body, Options.MaxPubSize+1)
	if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()
		writeBadRequest(w, ErrTooBigMessage.Error())
		return
	}

	var acks ackOffsets
	if err = json.Unmarshal(msg.Body, &acks); err != nil {
		msg.Free()
		writeBadRequest(w, "invalid ack json body")
		return
	}

	realIp := getHttpRemoteIp(r)
	log.Debug("ack[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} %+v", myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"), acks)

	// stamp each ack with the resolved cluster, raw kafka topic and the
	// fully-qualified group name before handing off
	realGroup := myAppid + "." + group
	rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
	for i := 0; i < len(acks); i++ {
		acks[i].cluster = cluster
		acks[i].topic = rawTopic
		acks[i].group = realGroup
	}

	// increment-then-check handshake: a zero result means the shutdown path
	// has already negated the counter and closed ackCh, so sending would panic
	if atomic.AddInt32(&this.ackShutdown, 1) == 0 {
		// kateway is shutting down, ackCh is already closed
		msg.Free()
		log.Warn("ack[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} server is shutting down %+v ", myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"), acks)
		writeServerError(w, "server is shutting down")
		return
	}
	this.ackCh <- acks
	atomic.AddInt32(&this.ackShutdown, -1)

	msg.Free()
	w.Write(ResponseOk)
}
//go:generate goannotation $GOFILE // @rest POST /v1/msgs/:topic/:ver?key=mykey&async=1&ack=all&hh=n func (this *pubServer) pubHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) { var ( appid string topic string ver string tag string partitionKey string async bool hhDisabled bool // hh enabled by default t1 = time.Now() ) if !Options.DisableMetrics { this.pubMetrics.PubTryQps.Mark(1) } realIp := getHttpRemoteIp(r) if Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) { log.Warn("pub[%s] %s(%s) rate limit reached: %d/s", appid, r.RemoteAddr, realIp, Options.PubQpsLimit) this.pubMetrics.ClientError.Inc(1) writeQuotaExceeded(w) return } appid = r.Header.Get(HttpHeaderAppid) topic = params.ByName(UrlParamTopic) ver = params.ByName(UrlParamVersion) if err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil { log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), err) this.pubMetrics.ClientError.Inc(1) this.respond4XX(appid, w, err.Error(), http.StatusUnauthorized) return } msgLen := int(r.ContentLength) switch { case int64(msgLen) > Options.MaxPubSize: log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too big content length: %d", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), msgLen) this.pubMetrics.ClientError.Inc(1) this.respond4XX(appid, w, ErrTooBigMessage.Error(), http.StatusBadRequest) return case msgLen < Options.MinPubSize: log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too small content length: %d", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), msgLen) this.pubMetrics.ClientError.Inc(1) this.respond4XX(appid, w, ErrTooSmallMessage.Error(), http.StatusBadRequest) return } query := r.URL.Query() // reuse the query will save 100ns partitionKey = query.Get("key") if len(partitionKey) > MaxPartitionKeyLen { log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} too big key: %s", appid, r.RemoteAddr, 
realIp, topic, ver, r.Header.Get("User-Agent"), partitionKey) this.pubMetrics.ClientError.Inc(1) this.respond4XX(appid, w, "too big key", http.StatusBadRequest) return } var msg *mpool.Message tag = r.Header.Get(HttpHeaderMsgTag) if tag != "" { if len(tag) > Options.MaxMsgTagLen { this.respond4XX(appid, w, "too big tag", http.StatusBadRequest) return } msgSz := tagLen(tag) + msgLen msg = mpool.NewMessage(msgSz) msg.Body = msg.Body[0:msgSz] } else { msg = mpool.NewMessage(msgLen) msg.Body = msg.Body[0:msgLen] } // get the raw POST message, if body more than content-length ignore the extra payload lbr := io.LimitReader(r.Body, Options.MaxPubSize+1) if _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil { msg.Free() log.Error("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, r.Header.Get("User-Agent"), err) this.pubMetrics.ClientError.Inc(1) this.respond4XX(appid, w, err.Error(), http.StatusBadRequest) // TODO http.StatusRequestEntityTooLarge return } if tag != "" { AddTagToMessage(msg, tag) } if !Options.DisableMetrics { this.pubMetrics.PubQps.Mark(1) this.pubMetrics.PubMsgSize.Update(int64(len(msg.Body))) } cluster, found := manager.Default.LookupCluster(appid) if !found { log.Warn("pub[%s] %s(%s) {topic:%s ver:%s UA:%s} cluster not found", appid, r.RemoteAddr, realIp, topic, r.Header.Get("User-Agent"), ver) this.pubMetrics.ClientError.Inc(1) this.respond4XX(appid, w, "invalid appid", http.StatusBadRequest) return } var ( partition int32 offset int64 err error rawTopic = manager.Default.KafkaTopic(appid, topic, ver) ) pubMethod := store.DefaultPubStore.SyncPub async = query.Get("async") == "1" if async { pubMethod = store.DefaultPubStore.AsyncPub } ackAll := query.Get("ack") == "all" if ackAll { pubMethod = store.DefaultPubStore.SyncAllPub } hhDisabled = query.Get("hh") == "n" // yes | no msgKey := []byte(partitionKey) if ackAll { // hh not applied partition, offset, err = pubMethod(cluster, rawTopic, msgKey, msg.Body) } 
else if Options.AllwaysHintedHandoff { err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body) } else if !hhDisabled && Options.EnableHintedHandoff && !hh.Default.Empty(cluster, rawTopic) { err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body) } else if async { if !hhDisabled && Options.EnableHintedHandoff { // async uses hinted handoff mechanism to save memory overhead err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body) } else { // message pool can't be applied on async pub because // we don't know when to recycle the memory body := make([]byte, 0, len(msg.Body)) copy(body, msg.Body) partition, offset, err = pubMethod(cluster, rawTopic, msgKey, body) } } else { // hack byte string conv TODO partition, offset, err = pubMethod(cluster, rawTopic, msgKey, msg.Body) if err != nil && store.DefaultPubStore.IsSystemError(err) && !hhDisabled && Options.EnableHintedHandoff { log.Warn("pub[%s] %s(%s) {%s.%s.%s UA:%s} resort hh for: %v", appid, r.RemoteAddr, realIp, appid, topic, ver, r.Header.Get("User-Agent"), err) err = hh.Default.Append(cluster, rawTopic, msgKey, msg.Body) } } // in case of request panic, mem pool leakage msg.Free() if Options.AuditPub { this.auditor.Trace("pub[%s] %s(%s) {%s.%s.%s UA:%s} {P:%d O:%d}", appid, r.RemoteAddr, realIp, appid, topic, ver, r.Header.Get("User-Agent"), partition, offset) } if err != nil { log.Error("pub[%s] %s(%s) {topic:%s ver:%s} %s", appid, r.RemoteAddr, realIp, topic, ver, err) if !Options.DisableMetrics { this.pubMetrics.PubFail(appid, topic, ver) } if store.DefaultPubStore.IsSystemError(err) { writeServerError(w, err.Error()) } else { this.respond4XX(appid, w, err.Error(), http.StatusBadRequest) } return } w.Header().Set(HttpHeaderPartition, strconv.FormatInt(int64(partition), 10)) w.Header().Set(HttpHeaderOffset, strconv.FormatInt(offset, 10)) if async { w.WriteHeader(http.StatusAccepted) } else { w.WriteHeader(http.StatusCreated) } if _, err = w.Write(ResponseOk); err != nil { log.Error("%s: %v", 
r.RemoteAddr, err) this.pubMetrics.ClientError.Inc(1) } if !Options.DisableMetrics { this.pubMetrics.PubOk(appid, topic, ver) this.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() / 1e6) // in ms } }
//go:generate goannotation $GOFILE

// @rest PUT /v1/raw/offsets/:cluster/:topic/:group with json body
//
// ackRawHandler accepts a JSON array of offset acknowledgements addressed
// directly at a raw cluster/topic (no appid topic mapping) and forwards them
// to the async commit channel.
//
// NOTE(review): unlike ackHandler, this endpoint performs no AuthSub check on
// the caller — confirm that raw offset acks are intentionally unauthenticated.
func (this *subServer) ackRawHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic   string
		cluster string
		myAppid string
		group   string
		err     error
	)

	group = params.ByName(UrlParamGroup)
	cluster = params.ByName("cluster")
	topic = params.ByName(UrlParamTopic)
	myAppid = r.Header.Get(HttpHeaderAppid)

	msgLen := int(r.ContentLength)
	switch {
	case int64(msgLen) > Options.MaxPubSize:
		writeBadRequest(w, ErrTooBigMessage.Error())
		return

	case msgLen < Options.MinPubSize:
		writeBadRequest(w, ErrTooSmallMessage.Error())
		return
	}

	// read exactly Content-Length bytes of the JSON ack body into a pooled buffer
	var msg *mpool.Message
	msg = mpool.NewMessage(msgLen)
	msg.Body = msg.Body[0:msgLen]
	lbr := io.LimitReader(r.Body, Options.MaxPubSize+1)
	if _, err = io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {
		msg.Free()
		writeBadRequest(w, ErrTooBigMessage.Error())
		return
	}

	var acks ackOffsets
	if err = json.Unmarshal(msg.Body, &acks); err != nil {
		msg.Free()
		writeBadRequest(w, "invalid ack json body")
		return
	}
	// the JSON has been decoded into acks; the pooled buffer is done
	msg.Free()

	realIp := getHttpRemoteIp(r)
	// stamp each ack with the raw cluster/topic and fully-qualified group
	realGroup := myAppid + "." + group
	for i := 0; i < len(acks); i++ {
		acks[i].cluster = cluster
		acks[i].topic = topic
		acks[i].group = realGroup
	}

	log.Debug("ack raw[%s/%s] %s(%s) {%s/%s UA:%s} %+v", myAppid, group, r.RemoteAddr, realIp, cluster, topic, r.Header.Get("User-Agent"), acks)

	// increment-then-check handshake: a zero result means the shutdown path
	// has already negated the counter and closed ackCh, so sending would panic
	if atomic.AddInt32(&this.ackShutdown, 1) == 0 {
		writeServerError(w, "server is shutting down")
		return
	}
	this.ackCh <- acks
	atomic.AddInt32(&this.ackShutdown, -1)

	w.Write(ResponseOk)
}