func (this *Client) Sub(opt SubOption, h SubHandler) error { var u url.URL u.Scheme = this.cf.Sub.Scheme u.Host = this.cf.Sub.Endpoint u.Path = fmt.Sprintf("/v1/msgs/%s/%s/%s", opt.AppId, opt.Topic, opt.Ver) q := u.Query() q.Set("group", opt.Group) if opt.Shadow != "" && sla.ValidateShadowName(opt.Shadow) { q.Set("use", opt.Shadow) } if opt.Reset != "" { q.Set("reset", opt.Reset) } if opt.Batch > 1 { q.Set("batch", strconv.Itoa(opt.Batch)) } if opt.Wait != "" { q.Set("wait", opt.Wait) } u.RawQuery = q.Encode() req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return err } req.Header.Set(gateway.HttpHeaderAppid, this.cf.AppId) req.Header.Set(gateway.HttpHeaderSubkey, this.cf.Secret) for { response, err := this.subConn.Do(req) if err != nil { return err } b, err := ioutil.ReadAll(response.Body) if err != nil { return err } if this.cf.Debug { log.Printf("--> [%s]", response.Status) log.Printf("Partition:%s Offset:%s", response.Header.Get("X-Partition"), response.Header.Get("X-Offset")) } // reuse the connection response.Body.Close() if err = h(response.StatusCode, b); err != nil { return err } } }
// SubX is advanced Sub with features of delayed ack and shadow bury. func (this *Client) SubX(opt SubOption, h SubXHandler) error { var u url.URL u.Scheme = this.cf.Sub.Scheme u.Host = this.cf.Sub.Endpoint u.Path = fmt.Sprintf("/v1/msgs/%s/%s/%s", opt.AppId, opt.Topic, opt.Ver) q := u.Query() q.Set("group", opt.Group) q.Set("ack", "1") if opt.Shadow != "" && sla.ValidateShadowName(opt.Shadow) { q.Set("q", opt.Shadow) } if opt.Reset != "" { q.Set("reset", opt.Reset) } if opt.Batch > 1 { q.Set("batch", strconv.Itoa(opt.Batch)) } if opt.Wait != "" { q.Set("wait", opt.Wait) } u.RawQuery = q.Encode() req := gorequest.New() req.Get(u.String()). Set(gateway.HttpHeaderAppid, this.cf.AppId). Set(gateway.HttpHeaderSubkey, this.cf.Secret). Set("User-Agent", UserAgent). Set(gateway.HttpHeaderPartition, "-1"). Set(gateway.HttpHeaderOffset, "-1") r := &SubXResult{} for { response, b, errs := req.EndBytes() if len(errs) > 0 { return errs[0] } // reset the request header req.Set(gateway.HttpHeaderPartition, "") req.Set(gateway.HttpHeaderOffset, "") req.Set(gateway.HttpHeaderMsgBury, "") if this.cf.Debug { log.Printf("--> [%s]", response.Status) log.Printf("Partition:%s Offset:%s", response.Header.Get(gateway.HttpHeaderPartition), response.Header.Get(gateway.HttpHeaderOffset)) } r.Reset() r.Partition = response.Header.Get(gateway.HttpHeaderPartition) r.Offset = response.Header.Get(gateway.HttpHeaderOffset) r.Tag = response.Header.Get(gateway.HttpHeaderMsgTag) if err := h(response.StatusCode, b, r); err != nil { return err } req.Set(gateway.HttpHeaderPartition, r.Partition) req.Set(gateway.HttpHeaderOffset, r.Offset) if r.Bury != "" { if r.Bury != ShadowRetry && r.Bury != ShadowDead { return ErrInvalidBury } req.Set("X-Bury", r.Bury) } } }
// GET /v1/msgs/:appid/:topic/:ver?group=xx&batch=10&reset=<newest|oldest>&ack=1&q=<dead|retry>
//
// subHandler serves one consumer fetch round trip: it validates the request,
// authenticates the subscriber, commits any ack piggybacked in the
// partition/offset headers, resolves the raw (possibly shadowed) kafka topic,
// and pumps up to `batch` messages back to the client.
func (this *subServer) subHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic      string
		ver        string
		myAppid    string
		hisAppid   string
		reset      string
		group      string
		shadow     string
		rawTopic   string
		partition  string
		partitionN int = -1
		offset     string
		offsetN    int64 = -1
		limit      int  // max messages to include in the message set
		delayedAck bool // explicit application level acknowledgement
		tagFilters []MsgTag = nil
		err        error
	)

	if Options.EnableClientStats {
		this.gw.clientStates.RegisterSubClient(r)
	}

	query := r.URL.Query()
	group = query.Get("group")
	reset = query.Get("reset")
	if !manager.Default.ValidateGroupName(r.Header, group) {
		writeBadRequest(w, "illegal group")
		return
	}

	limit, err = getHttpQueryInt(&query, "batch", 1)
	if err != nil {
		writeBadRequest(w, "illegal limit")
		return
	}
	// clamp the requested batch size to the configured ceiling
	if limit > Options.MaxSubBatchSize && Options.MaxSubBatchSize > 0 {
		limit = Options.MaxSubBatchSize
	}

	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)
	myAppid = r.Header.Get(HttpHeaderAppid)
	realIp := getHttpRemoteIp(r)

	// auth
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey), hisAppid, topic, group); err != nil {
		log.Error("sub[%s] -(%s): {app:%s topic:%s ver:%s group:%s UA:%s} %v",
			myAppid, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"), err)
		writeAuthFailure(w, err)
		return
	}

	// fetch the client ack partition and offset
	delayedAck = query.Get("ack") == "1"
	if delayedAck {
		// consumers use explicit acknowledges in order to signal a message as processed successfully
		// if consumers fail to ACK, the message hangs and server will refuse to move ahead
		// get the partitionN and offsetN from client header
		// client will ack with partition=-1, offset=-1:
		// 1. handshake phase
		// 2. when 204 No Content
		partition = r.Header.Get(HttpHeaderPartition)
		offset = r.Header.Get(HttpHeaderOffset)
		if partition != "" && offset != "" {
			// convert partition and offset to int
			offsetN, err = strconv.ParseInt(offset, 10, 64)
			if err != nil {
				log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} offset:%s",
					myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"), offset)
				writeBadRequest(w, "ack with bad offset")
				return
			}
			partitionN, err = strconv.Atoi(partition)
			if err != nil {
				log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} partition:%s",
					myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"), partition)
				writeBadRequest(w, "ack with bad partition")
				return
			}
		} else if len(partition+offset) != 0 {
			// only one of partition/offset was sent: reject the half-formed ack
			log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s partition:%s offset:%s UA:%s} partial ack",
				myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, partition, offset, r.Header.Get("User-Agent"))
			writeBadRequest(w, "partial ack not allowed")
			return
		}
	}

	shadow = query.Get("q")
	if Options.AuditSub {
		this.auditor.Trace("sub[%s] %s(%s): {app:%s q:%s topic:%s ver:%s group:%s batch:%d ack:%s partition:%s offset:%s UA:%s}",
			myAppid, r.RemoteAddr, realIp, hisAppid, shadow, topic, ver, group, limit, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"))
	}

	// calculate raw topic according to shadow
	if shadow != "" {
		if !sla.ValidateShadowName(shadow) {
			log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s q:%s UA:%s} invalid shadow name",
				myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, shadow, r.Header.Get("User-Agent"))
			writeBadRequest(w, "invalid shadow name")
			return
		}

		if !manager.Default.IsShadowedTopic(hisAppid, topic, ver, myAppid, group) {
			log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s q:%s UA:%s} not a shadowed topic",
				myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, shadow, r.Header.Get("User-Agent"))
			writeBadRequest(w, "register shadow first")
			return
		}

		rawTopic = manager.Default.ShadowTopic(shadow, myAppid, hisAppid, topic, ver, group)
	} else {
		rawTopic = manager.Default.KafkaTopic(hisAppid, topic, ver)
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} cluster not found",
			myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"))
		writeBadRequest(w, "invalid appid")
		return
	}

	fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic, myAppid+"."+group, r.RemoteAddr, reset, Options.PermitStandbySub)
	if err != nil {
		log.Error("sub[%s] -(%s): {app:%s topic:%s ver:%s group:%s UA:%s} %v",
			myAppid, realIp, hisAppid, topic, ver, group, r.Header.Get("User-Agent"), err)
		if store.DefaultSubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			writeBadRequest(w, err.Error())
		}
		return
	}

	// commit the acked offset
	if delayedAck && partitionN >= 0 && offsetN >= 0 {
		if err = fetcher.CommitUpto(&sarama.ConsumerMessage{
			Topic:     rawTopic,
			Partition: int32(partitionN),
			Offset:    offsetN,
		}); err != nil {
			// during rebalance, this might happen, but with no bad effects
			log.Trace("sub land[%s] %s(%s): {app:%s topic:%s/%s ver:%s group:%s ack:1 offset:%s UA:%s} %v",
				myAppid, r.RemoteAddr, realIp, hisAppid, topic, partition, ver, group, offset, r.Header.Get("User-Agent"), err)
		} else {
			log.Debug("sub land %s(%s): {G:%s, T:%s/%s, O:%s}",
				r.RemoteAddr, realIp, group, rawTopic, partition, offset)
		}
	}

	// optional server-side message tag filtering
	tag := r.Header.Get(HttpHeaderMsgTag)
	if tag != "" {
		tagFilters = parseMessageTag(tag)
	}

	err = this.pumpMessages(w, r, fetcher, limit, myAppid, hisAppid, topic, ver, group, delayedAck, tagFilters)
	if err != nil {
		// e,g. broken pipe, io timeout, client gone
		log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s ack:%s partition:%s offset:%s UA:%s} %v",
			myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"), err)

		writeServerError(w, err.Error())

		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s] %s(%s): {app:%s topic:%s ver:%s group:%s} %v",
				myAppid, r.RemoteAddr, realIp, hisAppid, topic, ver, group, err)
		}
	}
}
// PUT /v1/bury/:appid/:topic/:ver?group=xx&q=yy func (this *subServer) buryHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) { var ( topic string ver string myAppid string hisAppid string group string rawTopic string shadow string bury string partition string partitionN int = -1 offset string offsetN int64 = -1 err error ) query := r.URL.Query() group = query.Get("group") if !manager.Default.ValidateGroupName(r.Header, group) { writeBadRequest(w, "illegal group") return } ver = params.ByName(UrlParamVersion) topic = params.ByName(UrlParamTopic) hisAppid = params.ByName(UrlParamAppid) myAppid = r.Header.Get(HttpHeaderAppid) bury = r.Header.Get(HttpHeaderMsgBury) if !sla.ValidateShadowName(bury) { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} illegal bury: %s", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), bury) writeBadRequest(w, "illegal bury") return } // auth if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey), hisAppid, topic, group); err != nil { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} %v", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), err) writeAuthFailure(w, err) return } partition = r.Header.Get(HttpHeaderPartition) offset = r.Header.Get(HttpHeaderOffset) if partition == "" || offset == "" { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} empty offset or partition", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent")) writeBadRequest(w, "empty offset or partition") return } offsetN, err = strconv.ParseInt(offset, 10, 64) if err != nil || offsetN < 0 { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} illegal offset:%s", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), offset) writeBadRequest(w, "bad offset") return } 
partitionN, err = strconv.Atoi(partition) if err != nil || partitionN < 0 { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} illegal partition:%s", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), partition) writeBadRequest(w, "bad partition") return } shadow = query.Get("q") log.Debug("bury[%s] %s(%s): {app:%s bury:%s shadow=%s topic:%s ver:%s group:%s partition:%s offset:%s UA:%s}", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, bury, shadow, topic, ver, partition, offset, r.Header.Get("User-Agent")) msgLen := int(r.ContentLength) msg := make([]byte, 0, msgLen) if _, err := io.ReadAtLeast(r.Body, msg, msgLen); err != nil { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} %v", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), err) writeBadRequest(w, err.Error()) return } cluster, found := manager.Default.LookupCluster(hisAppid) if !found { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} invalid appid:%s", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), hisAppid) writeBadRequest(w, "invalid appid") return } // calculate raw topic according to shadow if shadow != "" { if !sla.ValidateShadowName(shadow) { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s q:%s UA:%s} invalid shadow name", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, shadow, r.Header.Get("User-Agent")) writeBadRequest(w, "invalid shadow name") return } if !manager.Default.IsShadowedTopic(hisAppid, topic, ver, myAppid, group) { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s q:%s UA:%s} not a shadowed topic", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, shadow, r.Header.Get("User-Agent")) writeBadRequest(w, "register shadow first") return } rawTopic = manager.Default.ShadowTopic(shadow, myAppid, hisAppid, topic, 
ver, group) } else { rawTopic = manager.Default.KafkaTopic(hisAppid, topic, ver) } fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic, myAppid+"."+group, r.RemoteAddr, "", Options.PermitStandbySub) if err != nil { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s UA:%s} %v", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, r.Header.Get("User-Agent"), err) writeBadRequest(w, err.Error()) return } // step1: pub shadowTopic := manager.Default.ShadowTopic(bury, myAppid, hisAppid, topic, ver, group) _, _, err = store.DefaultPubStore.SyncPub(cluster, shadowTopic, nil, msg) if err != nil { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s} %v", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, err) writeServerError(w, err.Error()) return } // step2: skip this message in the master topic TODO atomic with step1 if err = fetcher.CommitUpto(&sarama.ConsumerMessage{ Topic: rawTopic, // FIXME it's wrong!!! Partition: int32(partitionN), Offset: offsetN, }); err != nil { log.Error("bury[%s] %s(%s): {app:%s topic:%s ver:%s group:%s} %v", myAppid, r.RemoteAddr, getHttpRemoteIp(r), hisAppid, topic, ver, group, err) writeServerError(w, err.Error()) return } w.Write(ResponseOk) }
//go:generate goannotation $GOFILE
// @rest GET /v1/msgs/:appid/:topic/:ver?group=xx&batch=10&reset=<newest|oldest>&ack=1&q=<dead|retry>
//
// subHandler serves one consumer fetch round trip: it rate-limits
// misbehaving groups, authenticates the subscriber, commits any ack
// piggybacked in the partition/offset headers, resolves the raw (possibly
// shadowed) kafka topic, then pumps messages back to the client
// (gzip-wrapped via gzipWriter when applicable).
func (this *subServer) subHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	var (
		topic      string
		ver        string
		myAppid    string
		hisAppid   string
		reset      string
		group      string
		realGroup  string
		shadow     string
		rawTopic   string
		partition  string
		partitionN int = -1
		offset     string
		offsetN    int64 = -1
		limit      int  // max messages to include in the message set
		delayedAck bool // last acked partition/offset piggybacked on this request
		err        error
	)

	if !Options.DisableMetrics {
		this.subMetrics.SubTryQps.Mark(1)
	}

	query := r.URL.Query()
	group = query.Get("group")
	myAppid = r.Header.Get(HttpHeaderAppid)
	realGroup = myAppid + "." + group
	reset = query.Get("reset")
	realIp := getHttpRemoteIp(r)

	if !manager.Default.ValidateGroupName(r.Header, group) {
		log.Error("sub -(%s): illegal group: %s", realIp, group)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal group")
		return
	}

	if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 0) {
		// the group has exhausted its failure quota; only clients previously
		// recorded as "good" are let through
		this.goodGroupLock.RLock()
		_, good := this.goodGroupClients[r.RemoteAddr]
		this.goodGroupLock.RUnlock()

		if !good {
			// this bad group client is in confinement period
			log.Error("sub -(%s): group[%s] failure quota exceeded %s", realIp, realGroup, r.Header.Get("User-Agent"))
			writeQuotaExceeded(w)
			return
		}
	}

	limit, err = getHttpQueryInt(&query, "batch", 1)
	if err != nil {
		log.Error("sub -(%s): illegal batch: %v", realIp, err)
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "illegal batch")
		return
	}
	// clamp the requested batch size to the configured ceiling
	if limit > Options.MaxSubBatchSize && Options.MaxSubBatchSize > 0 {
		limit = Options.MaxSubBatchSize
	}

	ver = params.ByName(UrlParamVersion)
	topic = params.ByName(UrlParamTopic)
	hisAppid = params.ByName(UrlParamAppid)

	// auth
	if err = manager.Default.AuthSub(myAppid, r.Header.Get(HttpHeaderSubkey), hisAppid, topic, group); err != nil {
		log.Error("sub[%s/%s] -(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)
		this.subMetrics.ClientError.Mark(1)
		writeAuthFailure(w, err)
		return
	}

	// fetch the client ack partition and offset
	delayedAck = query.Get("ack") == "1"
	if delayedAck {
		// consumers use explicit acknowledges in order to signal a message as processed successfully
		// if consumers fail to ACK, the message hangs and server will refuse to move ahead
		// get the partitionN and offsetN from client header
		// client will ack with partition=-1, offset=-1:
		// 1. handshake phase
		// 2. when 204 No Content
		partition = r.Header.Get(HttpHeaderPartition)
		offset = r.Header.Get(HttpHeaderOffset)
		if partition != "" && offset != "" {
			// convert partition and offset to int
			offsetN, err = strconv.ParseInt(offset, 10, 64)
			if err != nil {
				log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} offset:%s",
					myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), offset)
				this.subMetrics.ClientError.Mark(1)
				writeBadRequest(w, "ack with bad offset")
				return
			}
			partitionN, err = strconv.Atoi(partition)
			if err != nil {
				log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} partition:%s",
					myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), partition)
				this.subMetrics.ClientError.Mark(1)
				writeBadRequest(w, "ack with bad partition")
				return
			}
		} else if len(partition+offset) != 0 {
			// only one of partition/offset was sent: reject the half-formed ack
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s P:%s O:%s UA:%s} partial ack",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, partition, offset, r.Header.Get("User-Agent"))
			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "partial ack not allowed")
			return
		}
	}

	shadow = query.Get("q")

	log.Debug("sub[%s/%s] %s(%s) {%s.%s.%s q:%s batch:%d ack:%s P:%s O:%s UA:%s}",
		myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, limit, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"))

	if !Options.DisableMetrics {
		this.subMetrics.SubQps.Mark(1)
	}

	// calculate raw topic according to shadow
	if shadow != "" {
		if !sla.ValidateShadowName(shadow) {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} invalid shadow name",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))
			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "invalid shadow name")
			return
		}

		if !manager.Default.IsShadowedTopic(hisAppid, topic, ver, myAppid, group) {
			log.Error("sub[%s/%s] %s(%s) {%s.%s.%s q:%s UA:%s} not a shadowed topic",
				myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, shadow, r.Header.Get("User-Agent"))
			this.subMetrics.ClientError.Mark(1)
			writeBadRequest(w, "register shadow first")
			return
		}

		rawTopic = manager.Default.ShadowTopic(shadow, myAppid, hisAppid, topic, ver, group)
	} else {
		rawTopic = manager.Default.KafkaTopic(hisAppid, topic, ver)
	}

	cluster, found := manager.Default.LookupCluster(hisAppid)
	if !found {
		log.Error("sub[%s/%s] %s(%s) {%s.%s.%s UA:%s} cluster not found",
			myAppid, group, r.RemoteAddr, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"))
		this.subMetrics.ClientError.Mark(1)
		writeBadRequest(w, "invalid appid")
		return
	}

	fetcher, err := store.DefaultSubStore.Fetch(cluster, rawTopic, realGroup, r.RemoteAddr, realIp, reset, Options.PermitStandbySub)
	if err != nil {
		// e,g. kafka was totally shutdown
		// e,g. too many consumers for the same group
		log.Error("sub[%s/%s] -(%s): {%s.%s.%s UA:%s} %v",
			myAppid, group, realIp, hisAppid, topic, ver, r.Header.Get("User-Agent"), err)

		if store.DefaultSubStore.IsSystemError(err) {
			writeServerError(w, err.Error())
		} else {
			this.subMetrics.ClientError.Mark(1)
			if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 1) {
				writeQuotaExceeded(w)
			} else {
				writeBadRequest(w, err.Error())
			}
		}

		return
	}

	// commit the acked offset
	if delayedAck && partitionN >= 0 && offsetN >= 0 {
		if err = fetcher.CommitUpto(&sarama.ConsumerMessage{
			Topic:     rawTopic,
			Partition: int32(partitionN),
			Offset:    offsetN,
		}); err != nil {
			// during rebalance, this might happen, but with no bad effects
			log.Trace("sub land[%s/%s] %s(%s) {%s/%s ack:1 O:%s UA:%s} %v",
				myAppid, group, r.RemoteAddr, realIp, rawTopic, partition, offset, r.Header.Get("User-Agent"), err)
		} else {
			log.Debug("sub land[%s/%s] %s(%s) {T:%s/%s, O:%s}",
				myAppid, group, r.RemoteAddr, realIp, rawTopic, partition, offset)
		}
	}

	var gz *gzip.Writer
	w, gz = gzipWriter(w, r)
	err = this.pumpMessages(w, r, realIp, fetcher, limit, myAppid, hisAppid, topic, ver, group, delayedAck)
	if err != nil {
		// e,g. broken pipe, io timeout, client gone
		// e,g. kafka: error while consuming app1.foobar.v1/0: EOF (kafka was shutdown)
		log.Error("sub[%s/%s] %s(%s) {%s ack:%s P:%s O:%s UA:%s} %v",
			myAppid, group, r.RemoteAddr, realIp, rawTopic, query.Get("ack"), partition, offset, r.Header.Get("User-Agent"), err)

		if err != ErrClientGone {
			if store.DefaultSubStore.IsSystemError(err) {
				writeServerError(w, err.Error())
			} else {
				this.subMetrics.ClientError.Mark(1)
				if Options.BadGroupRateLimit && !this.throttleBadGroup.Pour(realGroup, 1) {
					writeQuotaExceeded(w)
				} else {
					writeBadRequest(w, err.Error())
				}
			}
		} else if Options.BadGroupRateLimit && !store.DefaultSubStore.IsSystemError(err) {
			// client hung up on us: still counts against the group's quota
			this.throttleBadGroup.Pour(realGroup, 1)
		}

		// fetch.Close might be called by subServer.closedConnCh
		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)
		}
	} else if w.Header().Get("Connection") == "close" {
		// max req reached, synchronously close this connection
		if err = fetcher.Close(); err != nil {
			log.Error("sub[%s/%s] %s(%s) %s %v", myAppid, group, r.RemoteAddr, realIp, rawTopic, err)
		}
	}

	if Options.BadGroupRateLimit {
		// record the good consumer group client
		this.goodGroupLock.Lock()
		this.goodGroupClients[r.RemoteAddr] = struct{}{}
		this.goodGroupLock.Unlock()
	}

	if gz != nil {
		gz.Close()
	}
}