// internalMPUBAndTrace implements MPUB (and MPUB_TRACE when traceEnable is
// true): it reads a batch of messages from the client connection and writes
// them to the topic, but only when this node is the master writer for the
// topic/partition; otherwise the topic is disabled for slave writes and the
// client is told to retry against the leader.
func (p *protocolV2) internalMPUBAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) {
	startPub := time.Now().UnixNano()
	_, topic, preErr := p.preparePub(client, params, p.ctx.getOpts().MaxBodySize)
	if preErr != nil {
		return nil, preErr
	}
	messages, buffers, preErr := readMPUB(client.Reader, client.LenSlice, topic, p.ctx.getOpts().MaxMsgSize, traceEnable)
	// The buffers come from the topic's pool; the deferred loop returns them
	// on every exit path, including the readMPUB error return just below.
	defer func() {
		for _, b := range buffers {
			topic.BufferPoolPut(b)
		}
	}()
	if preErr != nil {
		return nil, preErr
	}
	topicName := topic.GetTopicName()
	partition := topic.GetTopicPart()
	if p.ctx.checkForMasterWrite(topicName, partition) {
		id, offset, rawSize, err := p.ctx.PutMessages(topic, messages)
		//p.ctx.setHealth(err)
		if err != nil {
			// Count the whole batch as failed for this publisher.
			topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), true)
			nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err)
			if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
				// Non-local cluster errors are retryable by the client, so do
				// not use a fatal (connection-closing) error for them.
				if !clusterErr.IsLocalErr() {
					return nil, protocol.NewClientErr(err, FailedOnNotWritable, "")
				}
			}
			return nil, protocol.NewFatalClientErr(err, "E_MPUB_FAILED", err.Error())
		}
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), false)
		cost := time.Now().UnixNano() - startPub
		// Record per-message latency (nanoseconds / 1000), averaged over the
		// batch; readMPUB returned successfully so messages is non-empty here
		// — presumably it rejects empty batches, TODO confirm.
		topic.GetDetailStats().UpdateTopicMsgStats(0, cost/1000/int64(len(messages)))
		if !traceEnable {
			return okBytes, nil
		}
		return getTracedReponse(buffers[0], id, 0, offset, rawSize)
	} else {
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), true)
		//forward to master of topic
		nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v", topic.GetFullName(), client.RemoteAddr)
		topic.DisableForSlave()
		// NOTE(review): preErr is always nil here (both checks above returned
		// early), so this wraps a nil error into the not-leader response.
		return nil, protocol.NewClientErr(preErr, FailedOnNotLeader, "")
	}
}
func (p *protocolV2) TOUCH(client *nsqd.ClientV2, params [][]byte) ([]byte, error) { state := atomic.LoadInt32(&client.State) if state != stateSubscribed && state != stateClosing { nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state) return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot TOUCH in current state") } if len(params) < 2 { return nil, protocol.NewFatalClientErr(nil, E_INVALID, "TOUCH insufficient number of params") } id, err := getFullMessageID(params[1]) if err != nil { return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error()) } client.LockRead() msgTimeout := client.MsgTimeout client.UnlockRead() if client.Channel == nil { return nil, protocol.NewFatalClientErr(nil, E_INVALID, "No channel") } err = client.Channel.TouchMessage(client.ID, nsqd.GetMessageIDFromFullMsgID(*id), msgTimeout) if err != nil { return nil, protocol.NewClientErr(err, "E_TOUCH_FAILED", fmt.Sprintf("TOUCH %v failed %s", *id, err.Error())) } return nil, nil }
func (p *protocolV2) internalCreateTopic(client *nsqd.ClientV2, params [][]byte) ([]byte, error) { var err error if len(params) < 3 { return nil, protocol.NewFatalClientErr(nil, E_INVALID, "CREATE_TOPIC insufficient number of parameters") } topicName := string(params[1]) if !protocol.IsValidTopicName(topicName) { return nil, protocol.NewFatalClientErr(nil, "E_BAD_TOPIC", fmt.Sprintf("topic name %q is not valid", topicName)) } partition, err := strconv.Atoi(string(params[2])) if err != nil { return nil, protocol.NewFatalClientErr(nil, "E_BAD_PARTITION", fmt.Sprintf("topic partition is not valid: %v", err)) } if err = p.CheckAuth(client, "CREATE_TOPIC", topicName, ""); err != nil { return nil, err } if partition < 0 { return nil, protocol.NewFatalClientErr(nil, "E_BAD_PARTITION", "partition should not less than 0") } if p.ctx.nsqdCoord != nil { return nil, protocol.NewClientErr(err, "E_CREATE_TOPIC_FAILED", fmt.Sprintf("CREATE_TOPIC is not allowed here while cluster feature enabled.")) } topic := p.ctx.getTopic(topicName, partition) if topic == nil { return nil, protocol.NewClientErr(err, "E_CREATE_TOPIC_FAILED", fmt.Sprintf("CREATE_TOPIC %v failed", topicName)) } return okBytes, nil }
func (p *protocolV2) FIN(client *nsqd.ClientV2, params [][]byte) ([]byte, error) { state := atomic.LoadInt32(&client.State) if state != stateSubscribed && state != stateClosing { nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state) return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot FIN in current state") } if len(params) < 2 { nsqd.NsqLogger().LogDebugf("FIN error params: %v", params) return nil, protocol.NewFatalClientErr(nil, E_INVALID, "FIN insufficient number of params") } id, err := getFullMessageID(params[1]) if err != nil { nsqd.NsqLogger().LogDebugf("FIN error: %v, %v", params[1], err) return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error()) } msgID := nsqd.GetMessageIDFromFullMsgID(*id) if int64(msgID) <= 0 { return nil, protocol.NewFatalClientErr(nil, E_INVALID, "Invalid Message ID") } if client.Channel == nil { nsqd.NsqLogger().LogDebugf("FIN error no channel: %v", msgID) return nil, protocol.NewFatalClientErr(nil, E_INVALID, "No channel") } if !p.ctx.checkForMasterWrite(client.Channel.GetTopicName(), client.Channel.GetTopicPart()) { nsqd.NsqLogger().Logf("topic %v fin message failed for not leader", client.Channel.GetTopicName()) return nil, protocol.NewFatalClientErr(nil, FailedOnNotLeader, "") } err = p.ctx.FinishMessage(client.Channel, client.ID, client.String(), msgID) if err != nil { client.IncrSubError(int64(1)) nsqd.NsqLogger().LogDebugf("FIN error : %v, err: %v, channel: %v, topic: %v", msgID, err, client.Channel.GetName(), client.Channel.GetTopicName()) if clusterErr, ok := err.(*consistence.CommonCoordErr); ok { if !clusterErr.IsLocalErr() { return nil, protocol.NewFatalClientErr(err, FailedOnNotWritable, "") } } return nil, protocol.NewClientErr(err, "E_FIN_FAILED", fmt.Sprintf("FIN %v failed %s", *id, err.Error())) } client.FinishedMessage() return nil, nil }
func (p *protocolV2) REQ(client *nsqd.ClientV2, params [][]byte) ([]byte, error) { state := atomic.LoadInt32(&client.State) if state != stateSubscribed && state != stateClosing { nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state) return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot REQ in current state") } if len(params) < 3 { return nil, protocol.NewFatalClientErr(nil, E_INVALID, "REQ insufficient number of params") } id, err := getFullMessageID(params[1]) if err != nil { return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error()) } timeoutMs, err := protocol.ByteToBase10(params[2]) if err != nil { return nil, protocol.NewFatalClientErr(err, E_INVALID, fmt.Sprintf("REQ could not parse timeout %s", params[2])) } timeoutDuration := time.Duration(timeoutMs) * time.Millisecond if timeoutDuration < 0 || timeoutDuration > p.ctx.getOpts().MaxReqTimeout { return nil, protocol.NewFatalClientErr(nil, E_INVALID, fmt.Sprintf("REQ timeout %v out of range 0-%v", timeoutDuration, p.ctx.getOpts().MaxReqTimeout)) } if client.Channel == nil { return nil, protocol.NewFatalClientErr(nil, E_INVALID, "No channel") } err = client.Channel.RequeueMessage(client.ID, client.String(), nsqd.GetMessageIDFromFullMsgID(*id), timeoutDuration, true) if err != nil { client.IncrSubError(int64(1)) return nil, protocol.NewClientErr(err, "E_REQ_FAILED", fmt.Sprintf("REQ %v failed %s", *id, err.Error())) } client.RequeuedMessage(timeoutDuration > 0) return nil, nil }
// internalPubAndTrace implements PUB (and PUB_TRACE when traceEnable is
// true): it reads a single message body from the client connection and writes
// it to the topic when this node is the master writer for the
// topic/partition; otherwise the topic is disabled for slave writes and the
// client is told to retry against the leader.
func (p *protocolV2) internalPubAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) {
	startPub := time.Now().UnixNano()
	bodyLen, topic, err := p.preparePub(client, params, p.ctx.getOpts().MaxMsgSize)
	if err != nil {
		return nil, err
	}
	// With tracing enabled the body must hold a trace id plus a non-empty
	// payload.
	if traceEnable && bodyLen <= nsqd.MsgTraceIDLength {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY", fmt.Sprintf("invalid body size %d with trace id enabled", bodyLen))
	}
	// Pooled buffer holds the raw body; returned to the pool on all paths.
	messageBodyBuffer := topic.BufferPoolGet(int(bodyLen))
	defer topic.BufferPoolPut(messageBodyBuffer)
	asyncAction := shouldHandleAsync(client, params)
	_, err = io.CopyN(messageBodyBuffer, client.Reader, int64(bodyLen))
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_MESSAGE", "failed to read message body")
	}
	messageBody := messageBodyBuffer.Bytes()[:bodyLen]
	topicName := topic.GetTopicName()
	partition := topic.GetTopicPart()
	var traceID uint64
	var realBody []byte
	if traceEnable {
		// The first MsgTraceIDLength bytes carry the big-endian trace id;
		// the rest is the actual payload.
		traceID = binary.BigEndian.Uint64(messageBody[:nsqd.MsgTraceIDLength])
		realBody = messageBody[nsqd.MsgTraceIDLength:]
	} else {
		realBody = messageBody
	}
	if p.ctx.checkForMasterWrite(topicName, partition) {
		id := nsqd.MessageID(0)
		offset := nsqd.BackendOffset(0)
		rawSize := int32(0)
		if asyncAction {
			// Async path hands the whole buffer off; id/offset/rawSize keep
			// their zero values.
			err = internalPubAsync(client.PubTimeout, messageBodyBuffer, topic)
		} else {
			id, offset, rawSize, _, err = p.ctx.PutMessage(topic, realBody, traceID)
		}
		//p.ctx.setHealth(err)
		if err != nil {
			topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true)
			nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err)
			if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
				// Non-local cluster errors are retryable by the client, so do
				// not use a fatal (connection-closing) error for them.
				if !clusterErr.IsLocalErr() {
					return nil, protocol.NewClientErr(err, FailedOnNotWritable, "")
				}
			}
			return nil, protocol.NewClientErr(err, "E_PUB_FAILED", err.Error())
		}
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, false)
		cost := time.Now().UnixNano() - startPub
		// Publish latency recorded as nanoseconds / 1000.
		topic.GetDetailStats().UpdateTopicMsgStats(int64(len(realBody)), cost/1000)
		if !traceEnable {
			return okBytes, nil
		}
		return getTracedReponse(messageBodyBuffer, id, traceID, offset, rawSize)
	} else {
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true)
		//forward to master of topic
		nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v", topic.GetFullName(), client.RemoteAddr)
		topic.DisableForSlave()
		// NOTE(review): err is always nil on this path (every earlier error
		// returned), so this wraps a nil error into the not-leader response.
		return nil, protocol.NewClientErr(err, FailedOnNotLeader, "")
	}
}