func (p *protocolV2) internalMPUBAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) { startPub := time.Now().UnixNano() _, topic, preErr := p.preparePub(client, params, p.ctx.getOpts().MaxBodySize) if preErr != nil { return nil, preErr } messages, buffers, preErr := readMPUB(client.Reader, client.LenSlice, topic, p.ctx.getOpts().MaxMsgSize, traceEnable) defer func() { for _, b := range buffers { topic.BufferPoolPut(b) } }() if preErr != nil { return nil, preErr } topicName := topic.GetTopicName() partition := topic.GetTopicPart() if p.ctx.checkForMasterWrite(topicName, partition) { id, offset, rawSize, err := p.ctx.PutMessages(topic, messages) //p.ctx.setHealth(err) if err != nil { topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), true) nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err) if clusterErr, ok := err.(*consistence.CommonCoordErr); ok { if !clusterErr.IsLocalErr() { return nil, protocol.NewClientErr(err, FailedOnNotWritable, "") } } return nil, protocol.NewFatalClientErr(err, "E_MPUB_FAILED", err.Error()) } topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), false) cost := time.Now().UnixNano() - startPub topic.GetDetailStats().UpdateTopicMsgStats(0, cost/1000/int64(len(messages))) if !traceEnable { return okBytes, nil } return getTracedReponse(buffers[0], id, 0, offset, rawSize) } else { topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), true) //forward to master of topic nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v", topic.GetFullName(), client.RemoteAddr) topic.DisableForSlave() return nil, protocol.NewClientErr(preErr, FailedOnNotLeader, "") } }
// internalSUB handles the SUB command: it validates the request, resolves the
// topic partition, registers the client on the channel and hands the channel
// to the client's message pump. Params layout: SUB <topic> <channel> [<partition>].
// enableTrace turns on per-message tracing for this consumer; ordered marks
// the channel as strictly ordered; startFrom, when non-nil, sets the initial
// consume offset (only honored for the first client on the channel).
func (p *protocolV2) internalSUB(client *nsqd.ClientV2, params [][]byte, enableTrace bool, ordered bool, startFrom *ConsumeOffset) ([]byte, error) {
	// A client may only subscribe once, from the initial state.
	state := atomic.LoadInt32(&client.State)
	if state != stateInit {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot SUB in current state")
	}
	if client.HeartbeatInterval <= 0 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot SUB with heartbeats disabled")
	}
	if len(params) < 3 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "SUB insufficient number of parameters")
	}
	topicName := string(params[1])
	if !protocol.IsValidTopicName(topicName) {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_TOPIC", fmt.Sprintf("SUB topic name %q is not valid", topicName))
	}
	// partition defaults to -1 meaning "not specified"; resolved below.
	partition := -1
	channelName := ""
	var err error
	channelName = string(params[2])
	if !protocol.IsValidChannelName(channelName) {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_CHANNEL", fmt.Sprintf("SUB channel name %q is not valid", channelName))
	}
	// Optional fourth parameter: explicit partition number.
	if len(params) == 4 {
		partition, err = strconv.Atoi(string(params[3]))
		if err != nil {
			return nil, protocol.NewFatalClientErr(nil, "E_BAD_PARTITION", fmt.Sprintf("topic partition is not valid: %v", err))
		}
	}
	if err = p.CheckAuth(client, "SUB", topicName, channelName); err != nil {
		return nil, err
	}
	if partition == -1 {
		partition = p.ctx.getDefaultPartition(topicName)
	}
	topic, err := p.ctx.getExistingTopic(topicName, partition)
	if err != nil {
		nsqd.NsqLogger().Logf("sub to not existing topic: %v, err:%v", topicName, err.Error())
		return nil, protocol.NewFatalClientErr(nil, E_TOPIC_NOT_EXIST, "")
	}
	// Consumers must talk to the partition leader only.
	if !p.ctx.checkForMasterWrite(topicName, partition) {
		nsqd.NsqLogger().Logf("sub failed on not leader: %v-%v, remote is : %v", topicName, partition, client.RemoteAddr())
		// we need disable topic here to trigger a notify, maybe we failed to notify lookup last time.
		topic.DisableForSlave()
		return nil, protocol.NewFatalClientErr(nil, FailedOnNotLeader, "")
	}
	channel := topic.GetChannel(channelName)
	err = channel.AddClient(client.ID, client)
	if err != nil {
		nsqd.NsqLogger().Logf("sub failed to add client: %v, %v", client, err)
		return nil, protocol.NewFatalClientErr(nil, FailedOnNotWritable, "")
	}
	// From here on the client is registered on the channel; the state change
	// and channel assignment must happen before the SubEventChan send below.
	atomic.StoreInt32(&client.State, stateSubscribed)
	client.Channel = channel
	if enableTrace {
		nsqd.NsqLogger().Logf("sub channel %v with trace enabled, remote is : %v", channelName, client.RemoteAddr())
	}
	if ordered {
		// Sampling is incompatible with strict ordering: dropped messages
		// would break the order guarantee.
		if atomic.LoadInt32(&client.SampleRate) != 0 {
			nsqd.NsqLogger().Errorf("%v", ErrOrderChannelOnSampleRate)
			return nil, protocol.NewFatalClientErr(nil, E_INVALID, ErrOrderChannelOnSampleRate.Error())
		}
		channel.SetOrdered(true)
	}
	if startFrom != nil {
		cnt := channel.GetClientsCount()
		if cnt > 1 {
			// Only the first consumer on a channel may reposition it;
			// later consumers just log and keep the current offset.
			nsqd.NsqLogger().LogDebugf("the consume offset: %v can only be set by the first client: %v", startFrom, cnt)
		} else {
			queueOffset, cnt, err := p.ctx.SetChannelOffset(channel, startFrom, false)
			if err != nil {
				return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error())
			}
			nsqd.NsqLogger().Logf("set the channel offset: %v (actual set : %v:%v), by client:%v, %v", startFrom, queueOffset, cnt, client.String(), client.UserAgent)
		}
	}
	client.EnableTrace = enableTrace
	// update message pump
	client.SubEventChan <- channel
	return okBytes, nil
}
func (p *protocolV2) internalPubAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) { startPub := time.Now().UnixNano() bodyLen, topic, err := p.preparePub(client, params, p.ctx.getOpts().MaxMsgSize) if err != nil { return nil, err } if traceEnable && bodyLen <= nsqd.MsgTraceIDLength { return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY", fmt.Sprintf("invalid body size %d with trace id enabled", bodyLen)) } messageBodyBuffer := topic.BufferPoolGet(int(bodyLen)) defer topic.BufferPoolPut(messageBodyBuffer) asyncAction := shouldHandleAsync(client, params) _, err = io.CopyN(messageBodyBuffer, client.Reader, int64(bodyLen)) if err != nil { return nil, protocol.NewFatalClientErr(err, "E_BAD_MESSAGE", "failed to read message body") } messageBody := messageBodyBuffer.Bytes()[:bodyLen] topicName := topic.GetTopicName() partition := topic.GetTopicPart() var traceID uint64 var realBody []byte if traceEnable { traceID = binary.BigEndian.Uint64(messageBody[:nsqd.MsgTraceIDLength]) realBody = messageBody[nsqd.MsgTraceIDLength:] } else { realBody = messageBody } if p.ctx.checkForMasterWrite(topicName, partition) { id := nsqd.MessageID(0) offset := nsqd.BackendOffset(0) rawSize := int32(0) if asyncAction { err = internalPubAsync(client.PubTimeout, messageBodyBuffer, topic) } else { id, offset, rawSize, _, err = p.ctx.PutMessage(topic, realBody, traceID) } //p.ctx.setHealth(err) if err != nil { topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true) nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err) if clusterErr, ok := err.(*consistence.CommonCoordErr); ok { if !clusterErr.IsLocalErr() { return nil, protocol.NewClientErr(err, FailedOnNotWritable, "") } } return nil, protocol.NewClientErr(err, "E_PUB_FAILED", err.Error()) } topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, false) cost := 
time.Now().UnixNano() - startPub topic.GetDetailStats().UpdateTopicMsgStats(int64(len(realBody)), cost/1000) if !traceEnable { return okBytes, nil } return getTracedReponse(messageBodyBuffer, id, traceID, offset, rawSize) } else { topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true) //forward to master of topic nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v", topic.GetFullName(), client.RemoteAddr) topic.DisableForSlave() return nil, protocol.NewClientErr(err, FailedOnNotLeader, "") } }