Example #1
func (this *subServer) wsWritePump(clientGone chan struct{}, ws *websocket.Conn, fetcher store.Fetcher) {
	defer fetcher.Close()

	var err error
	for {
		select {
		case msg := <-fetcher.Messages():
			ws.SetWriteDeadline(time.Now().Add(time.Second * 10))
			// FIXME because of buffering, kateway may have written 100 messages while the
			// client has only received 10 when it quits...
			if err = ws.WriteMessage(websocket.BinaryMessage, msg.Value); err != nil {
				log.Error("%s: %v", ws.RemoteAddr(), err)
				return
			}

			if err = fetcher.CommitUpto(msg); err != nil {
				log.Error(err) // TODO add more ctx
			}

		case err = <-fetcher.Errors():
			// TODO
			log.Error(err)

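		// periodically ping the client (every wsPongWait/3) to keep the websocket connection alive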
		case <-this.timer.After(this.wsPongWait / 3):
			ws.SetWriteDeadline(time.Now().Add(time.Second * 10))
			if err = ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
				log.Error("%s: %v", ws.RemoteAddr(), err)
				return
			}

		case <-this.gw.shutdownCh:
			return

		case <-clientGone:
			return
		}

	}

}
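
All of the pump loops in this section drive a store.Fetcher. Its definition is not part of this section, but from the calls used (Messages, Errors, CommitUpto, Close) it presumably looks roughly like the sketch below; the Message struct and the exact signatures are assumptions for illustration, and the real code most likely passes sarama consumer messages around directly.

// sketch only: a reconstruction of the store.Fetcher contract as used by the pumps,
// not the actual kateway definition
package sketch

// Message mirrors the fields the pumps read from a fetched message.
type Message struct {
	Topic      string
	Partition  int32
	Offset     int64
	Key, Value []byte
}

type Fetcher interface {
	Messages() <-chan *Message // fetched messages; the channel is closed when the consumer is killed
	Errors() <-chan error      // consumer-level errors, e.g. broker i/o timeouts
	CommitUpto(*Message) error // advance the group's offset cursor past the given message
	Close() error              // release the underlying consumer resources
}
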
Example #2
func (this *subServer) pumpRawMessages(w http.ResponseWriter, r *http.Request, realIp string,
	fetcher store.Fetcher, limit int, myAppid, topic, group string) error {
	cn, ok := w.(http.CloseNotifier)
	if !ok {
		return ErrBadResponseWriter
	}

	var (
		n                   = 0
		idleTimeout         = Options.SubTimeout
		chunkedEver         = false
		clientGoneCh        = cn.CloseNotify()
		metaBuf      []byte = nil
	)

	for {
		select {
		case <-clientGoneCh:
			// FIXME access log will not be able to record this behavior
			return ErrClientGone

		case <-this.gw.shutdownCh:
			// don't call me again
			w.Header().Set("Connection", "close")

			if !chunkedEver {
				w.WriteHeader(http.StatusNoContent)
				w.Write([]byte{})
			}

			return nil

		case err := <-fetcher.Errors():
			// e.g. consume a non-existent topic
			// e.g. conn with broker is broken
			// e.g. kafka: error while consuming foobar/0: EOF
			// e.g. kafka: error while consuming foobar/2: read tcp 10.1.1.1:60088->10.1.1.2:11005: i/o timeout
			return err

		case <-this.timer.After(idleTimeout):
			if chunkedEver {
				return nil
			}

			w.WriteHeader(http.StatusNoContent)
			w.Write([]byte{}) // without this, the client can't get a response
			return nil

		case msg, ok := <-fetcher.Messages():
			if !ok {
				return ErrClientKilled
			}

			if limit == 1 {
				partition := strconv.FormatInt(int64(msg.Partition), 10)

				w.Header().Set("Content-Type", "text/plain; charset=utf8") // override middleware header
				w.Header().Set(HttpHeaderMsgKey, string(msg.Key))
				w.Header().Set(HttpHeaderPartition, partition)
				w.Header().Set(HttpHeaderOffset, strconv.FormatInt(msg.Offset, 10))

				// non-batch mode, just the message itself without meta
				if _, err := w.Write(msg.Value); err != nil {
					// when the remote side closes silently, the write can still report success
					return err
				}

				fetcher.CommitUpto(msg)
			} else {
				// batch mode, write MessageSet
				// MessageSet => [Partition(int32) Offset(int64) MessageSize(int32) Message] BigEndian
				if metaBuf == nil {
					// initialize the reusable buffer
					metaBuf = make([]byte, 8)

					// override the header added by middleware
					w.Header().Set("Content-Type", "application/octet-stream")
				}

				if err := writeI32(w, metaBuf, msg.Partition); err != nil {
					return err
				}
				if err := writeI64(w, metaBuf, msg.Offset); err != nil {
					return err
				}
				if err := writeI32(w, metaBuf, int32(len(msg.Value))); err != nil {
					return err
				}
				if _, err := w.Write(msg.Value); err != nil {
					return err
				}
			}

			n++
			if n >= limit {
				return nil
			}

			// http chunked: len in hex
			// curl CURLOPT_HTTP_TRANSFER_DECODING will auto unchunk
			w.(http.Flusher).Flush()

			chunkedEver = true
		}
	}
}
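
The batch branch relies on two helpers, writeI32 and writeI64, that are not shown in this section. Given the documented MessageSet layout ([Partition(int32) Offset(int64) MessageSize(int32) Message], BigEndian) and the 8-byte reusable metaBuf, they are presumably thin wrappers around encoding/binary, roughly like the sketch below; this is an assumption, not the actual source.

// sketch only: plausible implementations of the writeI32/writeI64 helpers
package sketch

import (
	"encoding/binary"
	"io"
)

// writeI32 encodes v as a big-endian int32 into the first 4 bytes of buf and writes them to w.
func writeI32(w io.Writer, buf []byte, v int32) error {
	binary.BigEndian.PutUint32(buf[:4], uint32(v))
	_, err := w.Write(buf[:4])
	return err
}

// writeI64 encodes v as a big-endian int64 into the first 8 bytes of buf and writes them to w.
func writeI64(w io.Writer, buf []byte, v int64) error {
	binary.BigEndian.PutUint64(buf[:8], uint64(v))
	_, err := w.Write(buf[:8])
	return err
}
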
Example #3
func (this *subServer) pumpMessages(w http.ResponseWriter, r *http.Request,
	fetcher store.Fetcher, limit int, myAppid, hisAppid, topic, ver,
	group string, delayedAck bool, tagFilters []MsgTag) error {
	clientGoneCh := w.(http.CloseNotifier).CloseNotify()

	var (
		metaBuf     []byte = nil
		n                  = 0
		idleTimeout        = Options.SubTimeout
		realIp             = getHttpRemoteIp(r)
		chunkedEver        = false
	)
	for {
		select {
		case <-clientGoneCh:
			// FIXME access log will not be able to record this behavior
			return ErrClientGone

		case <-this.gw.shutdownCh:
			// don't call me again
			w.Header().Set("Connection", "close")

			if !chunkedEver {
				w.WriteHeader(http.StatusNoContent)
				w.Write([]byte{})
			}

			return nil

		case err := <-fetcher.Errors():
			// e.g. consume a non-existent topic
			// e.g. conn with broker is broken
			// e.g. kafka: error while consuming foobar/0: EOF
			// e.g. kafka: error while consuming foobar/2: read tcp 10.209.36.33:60088->10.209.18.16:11005: i/o timeout
			return err

		case <-this.gw.timer.After(idleTimeout):
			if chunkedEver {
				// response already sent in chunk
				log.Debug("chunked sub idle timeout %s {A:%s/G:%s->A:%s T:%s V:%s}",
					idleTimeout, myAppid, group, hisAppid, topic, ver)
				return nil
			}

			w.WriteHeader(http.StatusNoContent)
			w.Write([]byte{}) // without this, the client can't get a response
			return nil

		case msg, ok := <-fetcher.Messages():
			if !ok {
				return ErrClientKilled
			}

			if Options.Debug {
				log.Debug("sub[%s] %s(%s): {G:%s T:%s/%d O:%d}",
					myAppid, r.RemoteAddr, realIp, group, msg.Topic, msg.Partition, msg.Offset)
			}

			partition := strconv.FormatInt(int64(msg.Partition), 10)

			if limit == 1 {
				w.Header().Set(HttpHeaderMsgKey, string(msg.Key))
				w.Header().Set(HttpHeaderPartition, partition)
				w.Header().Set(HttpHeaderOffset, strconv.FormatInt(msg.Offset, 10))
			}

			var (
				tags    []MsgTag
				bodyIdx int
				err     error
			)
			if IsTaggedMessage(msg.Value) {
				// TagMarkStart + tag + TagMarkEnd + body
				tags, bodyIdx, err = ExtractMessageTag(msg.Value)
				if limit == 1 && err == nil {
					// needn't check 'index out of range' here
					w.Header().Set(HttpHeaderMsgTag, hack.String(msg.Value[1:bodyIdx-1]))
				} else {
					// if tag extraction failed, fall back to treating it as a non-tagged message
				}
			}

			if len(tags) > 0 {
				// TODO compare with tagFilters
			}

			if limit == 1 {
				// non-batch mode, just the message itself without meta
				if _, err = w.Write(msg.Value[bodyIdx:]); err != nil {
					// when the remote side closes silently, the write can still report success
					return err
				}
			} else {
				// batch mode, write MessageSet
				// MessageSet => [Partition(int32) Offset(int64) MessageSize(int32) Message] BigEndian
				if metaBuf == nil {
					// initialize the reusable buffer
					metaBuf = make([]byte, 8)

					// remove the header added by middleware
					w.Header().Del("Content-Type")
				}

				if err = writeI32(w, metaBuf, msg.Partition); err != nil {
					return err
				}
				if err = writeI64(w, metaBuf, msg.Offset); err != nil {
					return err
				}
				if err = writeI32(w, metaBuf, int32(len(msg.Value[bodyIdx:]))); err != nil {
					return err
				}
				// TODO add tag?
				if _, err = w.Write(msg.Value[bodyIdx:]); err != nil {
					return err
				}
			}

			if !delayedAck {
				log.Debug("sub auto commit offset %s(%s): {G:%s, T:%s/%d, O:%d}",
					r.RemoteAddr, realIp, group, msg.Topic, msg.Partition, msg.Offset)

				// ignore the offset commit err on purpose:
				// during a rebalance, offset commits often fail because the fetcher's
				// underlying partition offset tracker has changed
				// e.g.
				// topic has partitions: 0, 1
				// 1. got msg(p=0) from fetcher
				// 2. rebalanced, then start consuming p=1
				// 3. commit the msg offset, still msg(p=0) => error
				// BUT this has no fatal effects.
				// The worst case: kateway shuts down between steps 1-3 and the sub client
				// gets 1 duplicated msg.
				fetcher.CommitUpto(msg)
			} else {
				log.Debug("sub take off %s(%s): {G:%s, T:%s/%d, O:%d}",
					r.RemoteAddr, realIp, group, msg.Topic, msg.Partition, msg.Offset)
			}

			this.subMetrics.ConsumeOk(myAppid, topic, ver)
			this.subMetrics.ConsumedOk(hisAppid, topic, ver)

			n++
			if n >= limit {
				return nil
			}

			// http chunked: len in hex
			// curl CURLOPT_HTTP_TRANSFER_DECODING will auto unchunk
			w.(http.Flusher).Flush()

			chunkedEver = true

			if n == 1 {
				log.Debug("sub idle timeout %s->1s %s(%s): {G:%s, T:%s/%d, O:%d B:%d}",
					idleTimeout, r.RemoteAddr, realIp, group, msg.Topic, msg.Partition, msg.Offset, limit)
				idleTimeout = time.Second
			}

		}
	}
}
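
On the client side, the batch (limit > 1) response body can be decoded by walking the same MessageSet layout: [Partition(int32) Offset(int64) MessageSize(int32) Message] repeated, BigEndian. The decoder below is a minimal sketch based only on that format comment; BatchMessage and decodeMessageSet are illustrative names, not kateway APIs.

// sketch only: a minimal client-side decoder for the batch response body
package sketch

import (
	"encoding/binary"
	"errors"
	"io"
)

type BatchMessage struct {
	Partition int32
	Offset    int64
	Value     []byte
}

// decodeMessageSet reads [Partition Offset MessageSize Message] entries until EOF.
func decodeMessageSet(r io.Reader) ([]BatchMessage, error) {
	var out []BatchMessage
	var head [16]byte // 4+8+4 bytes of meta per message
	for {
		if _, err := io.ReadFull(r, head[:]); err != nil {
			if errors.Is(err, io.EOF) {
				return out, nil // clean end of the body
			}
			return out, err
		}
		size := binary.BigEndian.Uint32(head[12:16])
		value := make([]byte, size)
		if _, err := io.ReadFull(r, value); err != nil {
			return out, err
		}
		out = append(out, BatchMessage{
			Partition: int32(binary.BigEndian.Uint32(head[0:4])),
			Offset:    int64(binary.BigEndian.Uint64(head[4:12])),
			Value:     value,
		})
	}
}
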
Example #4
func (this *subServer) pumpMessages(w http.ResponseWriter, r *http.Request, realIp string,
	fetcher store.Fetcher, limit int, myAppid, hisAppid, topic, ver, group string, delayedAck bool) error {
	cn, ok := w.(http.CloseNotifier)
	if !ok {
		return ErrBadResponseWriter
	}

	var (
		metaBuf       []byte = nil
		n                    = 0
		idleTimeout          = Options.SubTimeout
		chunkedEver          = false
		tagConditions        = make(map[string]struct{})
		clientGoneCh         = cn.CloseNotify()
		startedAt            = time.Now()
	)

	// parse http tag header as filter condition
	if tagFilter := r.Header.Get(HttpHeaderMsgTag); tagFilter != "" {
		for _, t := range parseMessageTag(tagFilter) {
			if t != "" {
				tagConditions[t] = struct{}{}
			}
		}
	}

	for {
		if len(tagConditions) > 0 && time.Since(startedAt) > idleTimeout {
			// e.g. the tag filter scanned 1000 msgs but none matched before the timeout; return 204 in that case
			if chunkedEver {
				return nil
			}

			w.WriteHeader(http.StatusNoContent)
			w.Write([]byte{})
			return nil
		}

		select {
		case <-clientGoneCh:
			// FIXME access log will not be able to record this behavior
			return ErrClientGone

		case <-this.gw.shutdownCh:
			// don't call me again
			w.Header().Set("Connection", "close")

			if !chunkedEver {
				w.WriteHeader(http.StatusNoContent)
				w.Write([]byte{})
			}

			return nil

		case err := <-fetcher.Errors():
			// e.g. consume a non-existent topic
			// e.g. conn with broker is broken
			// e.g. kafka: error while consuming foobar/0: EOF
			// e.g. kafka: error while consuming foobar/2: read tcp 10.1.1.1:60088->10.1.1.2:11005: i/o timeout
			return err

		case <-this.timer.After(idleTimeout):
			if chunkedEver {
				// response already sent in chunk
				log.Debug("chunked sub idle timeout %s {A:%s/G:%s->A:%s T:%s V:%s}",
					idleTimeout, myAppid, group, hisAppid, topic, ver)
				return nil
			}

			w.WriteHeader(http.StatusNoContent)
			w.Write([]byte{}) // without this, the client can't get a response
			return nil

		case msg, ok := <-fetcher.Messages():
			if !ok {
				return ErrClientKilled
			}

			if Options.AuditSub {
				this.auditor.Trace("sub[%s/%s] %s(%s) {T:%s/%d O:%d}",
					myAppid, group, r.RemoteAddr, realIp, msg.Topic, msg.Partition, msg.Offset)
			}

			partition := strconv.FormatInt(int64(msg.Partition), 10)

			if limit == 1 {
				w.Header().Set("Content-Type", "text/plain; charset=utf8") // override middleware header
				w.Header().Set(HttpHeaderMsgKey, string(msg.Key))
				w.Header().Set(HttpHeaderPartition, partition)
				w.Header().Set(HttpHeaderOffset, strconv.FormatInt(msg.Offset, 10))
			}

			var (
				tags    []string
				bodyIdx int
				err     error
			)
			if IsTaggedMessage(msg.Value) {
				tags, bodyIdx, err = ExtractMessageTag(msg.Value)
				if err != nil {
					// always move the offset cursor ahead, otherwise consumption will block forever
					fetcher.CommitUpto(msg)

					return err
				}
			}

			// check that the tag conditions are satisfied; if the filter is empty, feed all messages
			if len(tagConditions) > 0 {
				tagSatisfied := false
				for _, t := range tags {
					if _, present := tagConditions[t]; present {
						tagSatisfied = true
						break
					}
				}

				if !tagSatisfied {
					if !delayedAck {
						log.Debug("sub auto commit offset with tag unmatched %s(%s) {G:%s, T:%s/%d, O:%d} %+v/%+v",
							r.RemoteAddr, realIp, group, msg.Topic, msg.Partition, msg.Offset, tagConditions, tags)

						fetcher.CommitUpto(msg)
					}

					continue
				}
			}

			if limit == 1 {
				// non-batch mode, just the message itself without meta
				if _, err = w.Write(msg.Value[bodyIdx:]); err != nil {
					// when the remote side closes silently, the write can still report success
					return err
				}
			} else {
				// batch mode, write MessageSet
				// MessageSet => [Partition(int32) Offset(int64) MessageSize(int32) Message] BigEndian
				if metaBuf == nil {
					// initialize the reusable buffer
					metaBuf = make([]byte, 8)

					// override the header added by middleware
					w.Header().Set("Content-Type", "application/octet-stream")
				}

				if err = writeI32(w, metaBuf, msg.Partition); err != nil {
					return err
				}
				if err = writeI64(w, metaBuf, msg.Offset); err != nil {
					return err
				}
				if err = writeI32(w, metaBuf, int32(len(msg.Value[bodyIdx:]))); err != nil {
					return err
				}
				if _, err = w.Write(msg.Value[bodyIdx:]); err != nil {
					return err
				}
			}

			if !delayedAck {
				log.Debug("sub[%s/%s] %s(%s) auto commit offset {%s/%d O:%d}",
					myAppid, group, r.RemoteAddr, realIp, msg.Topic, msg.Partition, msg.Offset)

				// ignore the offset commit err on purpose:
				// during a rebalance, offset commits often fail because the fetcher's
				// underlying partition offset tracker has changed
				// e.g.
				// topic has partitions: 0, 1
				// 1. got msg(p=0) from fetcher
				// 2. rebalanced, then start consuming p=1
				// 3. commit the msg offset, still msg(p=0) => error
				// BUT this has no fatal effects.
				// The worst case: kateway shuts down between steps 1-3 and the sub client
				// gets 1 duplicated msg.
				fetcher.CommitUpto(msg)
			} else {
				log.Debug("sub[%s/%s] %s(%s) take off {%s/%d O:%d}",
					myAppid, group, r.RemoteAddr, realIp, msg.Topic, msg.Partition, msg.Offset)
			}

			this.subMetrics.ConsumeOk(myAppid, topic, ver)
			this.subMetrics.ConsumedOk(hisAppid, topic, ver)

			n++
			if n >= limit {
				return nil
			}

			// http chunked: len in hex
			// curl CURLOPT_HTTP_TRANSFER_DECODING will auto unchunk
			w.(http.Flusher).Flush()

			chunkedEver = true

			if n == 1 {
				log.Debug("sub idle timeout %s->1s %s(%s) {G:%s, T:%s/%d, O:%d B:%d}",
					idleTimeout, r.RemoteAddr, realIp, group, msg.Topic, msg.Partition, msg.Offset, limit)
				idleTimeout = time.Second
			}

		}
	}
}
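
The tag filter in this last variant reduces to a set-membership test: a message is delivered if any of its tags appears in the set parsed from the client's tag header, and an empty filter matches everything. The helper below restates that check in isolation; it is a simplified extraction of the loop in the tagConditions branch, not code from the project.

// sketch only: the tag-matching rule used by pumpMessages, pulled out as a helper
package sketch

// tagMatched reports whether msgTags satisfies the filter.
// An empty filter matches every message, mirroring "if empty, feed all messages".
func tagMatched(filter map[string]struct{}, msgTags []string) bool {
	if len(filter) == 0 {
		return true
	}
	for _, t := range msgTags {
		if _, present := filter[t]; present {
			return true
		}
	}
	return false
}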