Example 1
func (p *protocolV2) TOUCH(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	state := atomic.LoadInt32(&client.State)
	if state != stateSubscribed && state != stateClosing {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot TOUCH in current state")
	}

	if len(params) < 2 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "TOUCH insufficient number of params")
	}

	id, err := getFullMessageID(params[1])
	if err != nil {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error())
	}

	client.LockRead()
	msgTimeout := client.MsgTimeout
	client.UnlockRead()

	if client.Channel == nil {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "No channel")
	}
	err = client.Channel.TouchMessage(client.ID, nsqd.GetMessageIDFromFullMsgID(*id), msgTimeout)
	if err != nil {
		return nil, protocol.NewClientErr(err, "E_TOUCH_FAILED",
			fmt.Sprintf("TOUCH %v failed %s", *id, err.Error()))
	}

	return nil, nil
}
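
A note on the ID handling above: getFullMessageID parses the 16-byte wire ID and GetMessageIDFromFullMsgID extracts the server-internal numeric ID from it. The sketch below illustrates the assumed layout (8-byte big-endian internal ID first, trace ID in the remaining 8 bytes, consistent with the trace handling in internalPubAndTrace further down); the type and helper names are hypothetical, not this fork's API.

import "encoding/binary"

// fullMessageID mirrors the assumed 16-byte wire layout:
// an 8-byte big-endian internal ID followed by an 8-byte trace ID.
type fullMessageID [16]byte

func internalID(id fullMessageID) uint64 { return binary.BigEndian.Uint64(id[:8]) }
func traceID(id fullMessageID) uint64    { return binary.BigEndian.Uint64(id[8:]) }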
Example 2
func (p *protocolV2) CLS(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	state := atomic.LoadInt32(&client.State)
	if state != stateSubscribed {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot CLS in current state")
	}

	client.StartClose()

	return []byte("CLOSE_WAIT"), nil
}
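
CLS only flips the client into a closing state; teardown happens once in-flight messages drain. For reference, a sketch of the state set these handlers compare against: the three names used in the code above are confirmed, while the ordering and the extra connected/disconnected states are an assumption carried over from upstream nsqd.

const (
	stateInit int32 = iota // fresh connection, pre-SUB
	stateDisconnected
	stateConnected
	stateSubscribed // after a successful SUB
	stateClosing    // after CLS; FIN/REQ/TOUCH still allowed
)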
Example 3
func (p *protocolV2) CheckAuth(client *nsqd.ClientV2, cmd, topicName, channelName string) error {
	// if auth is enabled, the client must have authorized already
	// compare topic/channel against cached authorization data (refetching if expired)
	if p.ctx.isAuthEnabled() {
		if !client.HasAuthorizations() {
			return protocol.NewFatalClientErr(nil, "E_AUTH_FIRST",
				fmt.Sprintf("AUTH required before %s", cmd))
		}
		ok, err := client.IsAuthorized(topicName, channelName)
		if err != nil {
			// we don't want to leak errors contacting the auth server to untrusted clients
			nsqd.NsqLogger().Logf("PROTOCOL(V2): [%s] Auth Failed %s", client, err)
			return protocol.NewFatalClientErr(nil, "E_AUTH_FAILED", "AUTH failed")
		}
		if !ok {
			return protocol.NewFatalClientErr(nil, "E_UNAUTHORIZED",
				fmt.Sprintf("AUTH failed for %s on %q %q", cmd, topicName, channelName))
		}
	}
	return nil
}
Example 4
func (p *protocolV2) RDY(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	state := atomic.LoadInt32(&client.State)

	if state == stateClosing {
		// just ignore ready changes on a closing channel
		nsqd.NsqLogger().Logf(
			"PROTOCOL(V2): [%s] ignoring RDY after CLS in state ClientStateV2Closing",
			client)
		return nil, nil
	}

	if state != stateSubscribed {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot RDY in current state")
	}

	count := int64(1)
	if len(params) > 1 {
		b10, err := protocol.ByteToBase10(params[1])
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, E_INVALID,
				fmt.Sprintf("RDY could not parse count %s", params[1]))
		}
		count = int64(b10)
	}

	if count < 0 || count > p.ctx.getOpts().MaxRdyCount {
		// this needs to be a fatal error otherwise clients would have
		// inconsistent state
		return nil, protocol.NewFatalClientErr(nil, E_INVALID,
			fmt.Sprintf("RDY count %d out of range 0-%d", count, p.ctx.getOpts().MaxRdyCount))
	}

	client.SetReadyCount(count)

	return nil, nil
}
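
protocol.ByteToBase10, used here and by REQ, parses an ASCII decimal directly from the raw parameter bytes without allocating a string. A minimal sketch of an equivalent parser (hypothetical name; the real helper's overflow behavior may differ):

import "errors"

var errBase10 = errors.New("failed to convert to base 10")

// byteToBase10 converts a slice of ASCII digits into a uint64,
// rejecting any non-digit byte.
func byteToBase10(b []byte) (uint64, error) {
	var n uint64
	for _, c := range b {
		if c < '0' || c > '9' {
			return 0, errBase10
		}
		n = n*10 + uint64(c-'0')
	}
	return n, nil
}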
Example 5
func internalSend(client *nsqd.ClientV2, frameType int32, data []byte, needFlush bool) error {
	client.LockWrite()
	defer client.UnlockWrite()
	if client.Writer == nil {
		return errors.New("client closed")
	}

	var zeroTime time.Time
	if client.HeartbeatInterval > 0 {
		client.SetWriteDeadline(time.Now().Add(client.HeartbeatInterval))
	} else {
		client.SetWriteDeadline(zeroTime)
	}

	_, err := protocol.SendFramedResponse(client.Writer, frameType, data)
	if err != nil {
		return err
	}

	if needFlush || frameType != frameTypeMessage {
		err = client.Flush()
	}
	return err
}
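
internalSend defers the wire format to protocol.SendFramedResponse. In the standard NSQ V2 framing a response is [size][frame type][data], where size and frame type are 4-byte big-endian integers and size counts the frame type plus the payload. A minimal sketch of that framing:

import (
	"encoding/binary"
	"io"
)

// sendFramedResponse writes a V2 frame: 4-byte size (frame type +
// payload), 4-byte frame type, then the payload itself.
func sendFramedResponse(w io.Writer, frameType int32, data []byte) (int, error) {
	beBuf := make([]byte, 4)
	size := uint32(len(data)) + 4

	binary.BigEndian.PutUint32(beBuf, size)
	n, err := w.Write(beBuf)
	if err != nil {
		return n, err
	}

	binary.BigEndian.PutUint32(beBuf, uint32(frameType))
	n, err = w.Write(beBuf)
	if err != nil {
		return n + 4, err
	}

	dn, err := w.Write(data)
	return n + dn + 4, err
}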
Example 6
func (p *protocolV2) internalMPUBAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) {
	startPub := time.Now().UnixNano()
	_, topic, preErr := p.preparePub(client, params, p.ctx.getOpts().MaxBodySize)
	if preErr != nil {
		return nil, preErr
	}

	messages, buffers, preErr := readMPUB(client.Reader, client.LenSlice, topic,
		p.ctx.getOpts().MaxMsgSize, traceEnable)

	defer func() {
		for _, b := range buffers {
			topic.BufferPoolPut(b)
		}
	}()
	if preErr != nil {
		return nil, preErr
	}

	topicName := topic.GetTopicName()
	partition := topic.GetTopicPart()
	if p.ctx.checkForMasterWrite(topicName, partition) {
		id, offset, rawSize, err := p.ctx.PutMessages(topic, messages)
		//p.ctx.setHealth(err)
		if err != nil {
			topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), true)
			nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err)

			if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
				if !clusterErr.IsLocalErr() {
					return nil, protocol.NewClientErr(err, FailedOnNotWritable, "")
				}
			}
			return nil, protocol.NewFatalClientErr(err, "E_MPUB_FAILED", err.Error())
		}
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), false)
		cost := time.Now().UnixNano() - startPub
		topic.GetDetailStats().UpdateTopicMsgStats(0, cost/1000/int64(len(messages)))
		if !traceEnable {
			return okBytes, nil
		}
		return getTracedReponse(buffers[0], id, 0, offset, rawSize)
	} else {
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", int64(len(messages)), true)
		//forward to master of topic
		nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v",
			topic.GetFullName(), client.RemoteAddr)
		topic.DisableForSlave()
		return nil, protocol.NewClientErr(preErr, FailedOnNotLeader, "")
	}
}
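
readMPUB consumes the MPUB body after preparePub has already read the 4-byte total body size. A hedged sketch of the standard MPUB body layout (a 4-byte big-endian message count, then for each message a 4-byte big-endian size followed by the message bytes); this fork's buffer pooling and trace-ID handling are omitted, and the helper name is hypothetical.

import (
	"encoding/binary"
	"fmt"
	"io"
)

// readMPUBBodies parses an MPUB body: a 4-byte message count followed
// by count length-prefixed message bodies.
func readMPUBBodies(r io.Reader, maxMsgSize int64) ([][]byte, error) {
	var lenBuf [4]byte
	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
		return nil, err
	}
	count := binary.BigEndian.Uint32(lenBuf[:])

	bodies := make([][]byte, 0, count)
	for i := uint32(0); i < count; i++ {
		if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
			return nil, err
		}
		size := int64(binary.BigEndian.Uint32(lenBuf[:]))
		if size <= 0 || size > maxMsgSize {
			return nil, fmt.Errorf("invalid message size %d", size)
		}
		body := make([]byte, int(size))
		if _, err := io.ReadFull(r, body); err != nil {
			return nil, err
		}
		bodies = append(bodies, body)
	}
	return bodies, nil
}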
Example 7
func (p *protocolV2) FIN(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	state := atomic.LoadInt32(&client.State)
	if state != stateSubscribed && state != stateClosing {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot FIN in current state")
	}

	if len(params) < 2 {
		nsqd.NsqLogger().LogDebugf("FIN error params: %v", params)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "FIN insufficient number of params")
	}

	id, err := getFullMessageID(params[1])
	if err != nil {
		nsqd.NsqLogger().LogDebugf("FIN error: %v, %v", params[1], err)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error())
	}
	msgID := nsqd.GetMessageIDFromFullMsgID(*id)
	if int64(msgID) <= 0 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "Invalid Message ID")
	}

	if client.Channel == nil {
		nsqd.NsqLogger().LogDebugf("FIN error no channel: %v", msgID)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "No channel")
	}

	if !p.ctx.checkForMasterWrite(client.Channel.GetTopicName(), client.Channel.GetTopicPart()) {
		nsqd.NsqLogger().Logf("topic %v fin message failed for not leader", client.Channel.GetTopicName())
		return nil, protocol.NewFatalClientErr(nil, FailedOnNotLeader, "")
	}

	err = p.ctx.FinishMessage(client.Channel, client.ID, client.String(), msgID)
	if err != nil {
		client.IncrSubError(int64(1))
		nsqd.NsqLogger().LogDebugf("FIN error : %v, err: %v, channel: %v, topic: %v", msgID,
			err, client.Channel.GetName(), client.Channel.GetTopicName())
		if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
			if !clusterErr.IsLocalErr() {
				return nil, protocol.NewFatalClientErr(err, FailedOnNotWritable, "")
			}
		}
		return nil, protocol.NewClientErr(err, "E_FIN_FAILED",
			fmt.Sprintf("FIN %v failed %s", *id, err.Error()))
	}
	client.FinishedMessage()

	return nil, nil
}
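
Client side, FIN (like TOUCH and REQ) is a plain ASCII command line: the verb, a space, the message ID exactly as the server delivered it, and a trailing newline. A minimal sketch of composing it (hypothetical helper, not this fork's client library):

// finCommand builds the raw bytes for a FIN command from the
// wire-format message ID echoed back by the server.
func finCommand(messageID []byte) []byte {
	cmd := make([]byte, 0, 4+len(messageID)+1)
	cmd = append(cmd, "FIN "...)
	cmd = append(cmd, messageID...)
	cmd = append(cmd, '\n')
	return cmd
}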
Example 8
func (p *protocolV2) REQ(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	state := atomic.LoadInt32(&client.State)
	if state != stateSubscribed && state != stateClosing {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot REQ in current state")
	}

	if len(params) < 3 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "REQ insufficient number of params")
	}

	id, err := getFullMessageID(params[1])
	if err != nil {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error())
	}

	timeoutMs, err := protocol.ByteToBase10(params[2])
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, E_INVALID,
			fmt.Sprintf("REQ could not parse timeout %s", params[2]))
	}
	timeoutDuration := time.Duration(timeoutMs) * time.Millisecond

	if timeoutDuration < 0 || timeoutDuration > p.ctx.getOpts().MaxReqTimeout {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID,
			fmt.Sprintf("REQ timeout %v out of range 0-%v", timeoutDuration, p.ctx.getOpts().MaxReqTimeout))
	}

	if client.Channel == nil {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "No channel")
	}
	err = client.Channel.RequeueMessage(client.ID, client.String(), nsqd.GetMessageIDFromFullMsgID(*id), timeoutDuration, true)
	if err != nil {
		client.IncrSubError(int64(1))
		return nil, protocol.NewClientErr(err, "E_REQ_FAILED",
			fmt.Sprintf("REQ %v failed %s", *id, err.Error()))
	}

	client.RequeuedMessage(timeoutDuration > 0)

	return nil, nil
}
Example 9
func (p *protocolV2) internalSUB(client *nsqd.ClientV2, params [][]byte, enableTrace bool,
	ordered bool, startFrom *ConsumeOffset) ([]byte, error) {

	state := atomic.LoadInt32(&client.State)
	if state != stateInit {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot SUB in current state")
	}

	if client.HeartbeatInterval <= 0 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot SUB with heartbeats disabled")
	}

	if len(params) < 3 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "SUB insufficient number of parameters")
	}

	topicName := string(params[1])
	if !protocol.IsValidTopicName(topicName) {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_TOPIC",
			fmt.Sprintf("SUB topic name %q is not valid", topicName))
	}

	partition := -1
	var err error
	channelName := string(params[2])
	if !protocol.IsValidChannelName(channelName) {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_CHANNEL",
			fmt.Sprintf("SUB channel name %q is not valid", channelName))
	}

	if len(params) == 4 {
		partition, err = strconv.Atoi(string(params[3]))
		if err != nil {
			return nil, protocol.NewFatalClientErr(nil, "E_BAD_PARTITION",
				fmt.Sprintf("topic partition is not valid: %v", err))
		}
	}

	if err = p.CheckAuth(client, "SUB", topicName, channelName); err != nil {
		return nil, err
	}

	if partition == -1 {
		partition = p.ctx.getDefaultPartition(topicName)
	}

	topic, err := p.ctx.getExistingTopic(topicName, partition)
	if err != nil {
		nsqd.NsqLogger().Logf("sub to not existing topic: %v, err:%v", topicName, err.Error())
		return nil, protocol.NewFatalClientErr(nil, E_TOPIC_NOT_EXIST, "")
	}
	if !p.ctx.checkForMasterWrite(topicName, partition) {
		nsqd.NsqLogger().Logf("sub failed on not leader: %v-%v, remote is : %v", topicName, partition, client.RemoteAddr())
		// we need to disable the topic here to trigger a notify,
		// in case we failed to notify the lookup last time.
		topic.DisableForSlave()
		return nil, protocol.NewFatalClientErr(nil, FailedOnNotLeader, "")
	}
	channel := topic.GetChannel(channelName)
	err = channel.AddClient(client.ID, client)
	if err != nil {
		nsqd.NsqLogger().Logf("sub failed to add client: %v, %v", client, err)
		return nil, protocol.NewFatalClientErr(nil, FailedOnNotWritable, "")
	}

	atomic.StoreInt32(&client.State, stateSubscribed)
	client.Channel = channel
	if enableTrace {
		nsqd.NsqLogger().Logf("sub channel %v with trace enabled, remote is : %v", channelName, client.RemoteAddr())
	}
	if ordered {
		if atomic.LoadInt32(&client.SampleRate) != 0 {
			nsqd.NsqLogger().Errorf("%v", ErrOrderChannelOnSampleRate)
			return nil, protocol.NewFatalClientErr(nil, E_INVALID, ErrOrderChannelOnSampleRate.Error())
		}
		channel.SetOrdered(true)
	}
	if startFrom != nil {
		cnt := channel.GetClientsCount()
		if cnt > 1 {
			nsqd.NsqLogger().LogDebugf("the consume offset: %v can only be set by the first client: %v", startFrom, cnt)
		} else {
			queueOffset, cnt, err := p.ctx.SetChannelOffset(channel, startFrom, false)
			if err != nil {
				return nil, protocol.NewFatalClientErr(nil, E_INVALID, err.Error())
			}
			nsqd.NsqLogger().Logf("set the channel offset: %v (actual set : %v:%v), by client:%v, %v",
				startFrom, queueOffset, cnt, client.String(), client.UserAgent)
		}
	}
	client.EnableTrace = enableTrace
	// update message pump
	client.SubEventChan <- channel

	return okBytes, nil
}
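
Unlike upstream nsqd, SUB here accepts an optional fourth parameter selecting a topic partition. A hedged sketch of the command line a client would send (ASCII, newline-terminated; hypothetical helper):

import "fmt"

// subCommand composes a SUB line; a negative partition means "let the
// server pick its default partition", matching the handler above.
func subCommand(topic, channel string, partition int) []byte {
	if partition < 0 {
		return []byte(fmt.Sprintf("SUB %s %s\n", topic, channel))
	}
	return []byte(fmt.Sprintf("SUB %s %s %d\n", topic, channel, partition))
}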
Example 10
func (p *protocolV2) AUTH(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	state := atomic.LoadInt32(&client.State)
	if state != stateInit {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot AUTH in current state")
	}

	if len(params) != 1 {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "AUTH invalid number of parameters")
	}

	bodyLen, err := readLen(client.Reader, client.LenSlice)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_BODY", "AUTH failed to read body size")
	}

	if int64(bodyLen) > p.ctx.getOpts().MaxBodySize {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY",
			fmt.Sprintf("AUTH body too big %d > %d", bodyLen, p.ctx.getOpts().MaxBodySize))
	}

	if bodyLen <= 0 {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY",
			fmt.Sprintf("AUTH invalid body size %d", bodyLen))
	}

	body := make([]byte, bodyLen)
	_, err = io.ReadFull(client.Reader, body)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_BODY", "AUTH failed to read body")
	}

	if client.HasAuthorizations() {
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "AUTH Already set")
	}

	if !p.ctx.isAuthEnabled() {
		return nil, protocol.NewFatalClientErr(nil, "E_AUTH_DISABLED", "AUTH Disabled")
	}

	if err = client.Auth(string(body)); err != nil {
		// we don't want to leak errors contacting the auth server to untrusted clients
		nsqd.NsqLogger().Logf("PROTOCOL(V2): [%s] Auth Failed %s", client, err)
		return nil, protocol.NewFatalClientErr(err, "E_AUTH_FAILED", "AUTH failed")
	}

	if !client.HasAuthorizations() {
		return nil, protocol.NewFatalClientErr(nil, "E_UNAUTHORIZED", "AUTH No authorizations found")
	}

	var resp []byte
	resp, err = json.Marshal(struct {
		Identity        string `json:"identity"`
		IdentityURL     string `json:"identity_url"`
		PermissionCount int    `json:"permission_count"`
	}{
		Identity:        client.AuthState.Identity,
		IdentityURL:     client.AuthState.IdentityURL,
		PermissionCount: len(client.AuthState.Authorizations),
	})
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_AUTH_ERROR", "AUTH error "+err.Error())
	}

	err = Send(client, frameTypeResponse, resp)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_AUTH_ERROR", "AUTH error "+err.Error())
	}

	return nil, nil
}
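
AUTH carries its secret in a length-prefixed body rather than on the command line: the client sends "AUTH\n", then a 4-byte big-endian body size, then the secret, which is what readLen and io.ReadFull consume above. A minimal client-side sketch of that framing (hypothetical helper):

import (
	"encoding/binary"
	"io"
)

// writeAuth sends AUTH followed by a 4-byte big-endian length prefix
// and the secret body.
func writeAuth(w io.Writer, secret []byte) error {
	if _, err := w.Write([]byte("AUTH\n")); err != nil {
		return err
	}
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(secret)))
	if _, err := w.Write(lenBuf[:]); err != nil {
		return err
	}
	_, err := w.Write(secret)
	return err
}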
Example 11
func (p *protocolV2) IDENTIFY(client *nsqd.ClientV2, params [][]byte) ([]byte, error) {
	var err error

	state := atomic.LoadInt32(&client.State)
	if state != stateInit {
		nsqd.NsqLogger().LogWarningf("[%s] command in wrong state: %v", client, state)
		return nil, protocol.NewFatalClientErr(nil, E_INVALID, "cannot IDENTIFY in current state")
	}

	bodyLen, err := readLen(client.Reader, client.LenSlice)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_BODY", "IDENTIFY failed to read body size")
	}

	if int64(bodyLen) > p.ctx.getOpts().MaxBodySize {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY",
			fmt.Sprintf("IDENTIFY body too big %d > %d", bodyLen, p.ctx.getOpts().MaxBodySize))
	}

	if bodyLen <= 0 {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY",
			fmt.Sprintf("IDENTIFY invalid body size %d", bodyLen))
	}

	body := make([]byte, bodyLen)
	_, err = io.ReadFull(client.Reader, body)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_BODY", "IDENTIFY failed to read body")
	}

	// body is a json structure with producer information
	var identifyData nsqd.IdentifyDataV2
	err = json.Unmarshal(body, &identifyData)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_BODY", "IDENTIFY failed to decode JSON body")
	}

	nsqd.NsqLogger().LogDebugf("PROTOCOL(V2): [%s] %+v", client, identifyData)

	err = client.Identify(identifyData)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_BODY", "IDENTIFY "+err.Error())
	}

	// bail out early if we're not negotiating features
	if !identifyData.FeatureNegotiation {
		return okBytes, nil
	}

	tlsv1 := p.ctx.GetTlsConfig() != nil && identifyData.TLSv1
	deflate := p.ctx.getOpts().DeflateEnabled && identifyData.Deflate
	deflateLevel := 0
	if deflate {
		// use the client's requested level, defaulting to 6,
		// clamped to the configured maximum
		deflateLevel = identifyData.DeflateLevel
		if deflateLevel <= 0 {
			deflateLevel = 6
		}
		deflateLevel = int(math.Min(float64(deflateLevel), float64(p.ctx.getOpts().MaxDeflateLevel)))
	}
	snappy := p.ctx.getOpts().SnappyEnabled && identifyData.Snappy

	if deflate && snappy {
		return nil, protocol.NewFatalClientErr(nil, "E_IDENTIFY_FAILED", "cannot enable both deflate and snappy compression")
	}

	resp, err := json.Marshal(struct {
		MaxRdyCount         int64  `json:"max_rdy_count"`
		Version             string `json:"version"`
		MaxMsgTimeout       int64  `json:"max_msg_timeout"`
		MsgTimeout          int64  `json:"msg_timeout"`
		TLSv1               bool   `json:"tls_v1"`
		Deflate             bool   `json:"deflate"`
		DeflateLevel        int    `json:"deflate_level"`
		MaxDeflateLevel     int    `json:"max_deflate_level"`
		Snappy              bool   `json:"snappy"`
		SampleRate          int32  `json:"sample_rate"`
		AuthRequired        bool   `json:"auth_required"`
		OutputBufferSize    int    `json:"output_buffer_size"`
		OutputBufferTimeout int64  `json:"output_buffer_timeout"`
	}{
		MaxRdyCount:         p.ctx.getOpts().MaxRdyCount,
		Version:             version.Binary,
		MaxMsgTimeout:       int64(p.ctx.getOpts().MaxMsgTimeout / time.Millisecond),
		MsgTimeout:          int64(client.MsgTimeout / time.Millisecond),
		TLSv1:               tlsv1,
		Deflate:             deflate,
		DeflateLevel:        deflateLevel,
		MaxDeflateLevel:     p.ctx.getOpts().MaxDeflateLevel,
		Snappy:              snappy,
		SampleRate:          client.SampleRate,
		AuthRequired:        p.ctx.isAuthEnabled(),
		OutputBufferSize:    client.OutputBufferSize,
		OutputBufferTimeout: int64(client.OutputBufferTimeout / time.Millisecond),
	})
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
	}

	err = Send(client, frameTypeResponse, resp)
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
	}

	if tlsv1 {
		nsqd.NsqLogger().Logf("PROTOCOL(V2): [%s] upgrading connection to TLS", client)
		err = client.UpgradeTLS()
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
		}

		err = Send(client, frameTypeResponse, okBytes)
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
		}
	}

	if snappy {
		nsqd.NsqLogger().Logf("PROTOCOL(V2): [%s] upgrading connection to snappy", client)
		err = client.UpgradeSnappy()
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
		}

		err = Send(client, frameTypeResponse, okBytes)
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
		}
	}

	if deflate {
		nsqd.NsqLogger().Logf("PROTOCOL(V2): [%s] upgrading connection to deflate", client)
		err = client.UpgradeDeflate(deflateLevel)
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
		}

		err = Send(client, frameTypeResponse, okBytes)
		if err != nil {
			return nil, protocol.NewFatalClientErr(err, "E_IDENTIFY_FAILED", "IDENTIFY failed "+err.Error())
		}
	}

	return nil, nil
}
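
The IDENTIFY body is JSON; with feature_negotiation set, the server replies with the negotiated-settings JSON built above. A hedged sketch of a client composing the request (same length-prefixed body framing as AUTH; the exact supported fields depend on nsqd.IdentifyDataV2, so the example body below is an assumption):

import (
	"encoding/binary"
	"encoding/json"
	"io"
)

// writeIdentify sends "IDENTIFY\n" + 4-byte big-endian length + JSON body.
func writeIdentify(w io.Writer, opts map[string]interface{}) error {
	body, err := json.Marshal(opts)
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("IDENTIFY\n")); err != nil {
		return err
	}
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(body)))
	if _, err := w.Write(lenBuf[:]); err != nil {
		return err
	}
	_, err = w.Write(body)
	return err
}

// Example body (assumed field names, following the struct tags above):
// {"feature_negotiation":true,"heartbeat_interval":30000,"deflate":true,"deflate_level":5}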
Example 12
func (p *protocolV2) messagePump(client *nsqd.ClientV2, startedChan chan bool,
	stoppedChan chan bool) {
	var err error
	var buf bytes.Buffer
	var clientMsgChan chan *nsqd.Message
	var subChannel *nsqd.Channel
	// NOTE: `flusherChan` is used to bound message latency for
	// the pathological case of a channel on a low volume topic
	// with >1 clients having >1 RDY counts
	var flusherChan <-chan time.Time
	var sampleRate int32

	subEventChan := client.SubEventChan
	identifyEventChan := client.IdentifyEventChan
	outputBufferTicker := time.NewTicker(client.OutputBufferTimeout)
	heartbeatTicker := time.NewTicker(client.HeartbeatInterval)
	heartbeatChan := heartbeatTicker.C
	heartbeatFailedCnt := 0
	msgTimeout := client.MsgTimeout

	// v2 opportunistically buffers data to clients to reduce write system calls
	// we force flush in two cases:
	//    1. when the client is not ready to receive messages
	//    2. we're buffered and the channel has nothing left to send us
	//       (ie. we would block in this loop anyway)
	//
	flushed := true

	// signal to the goroutine that started the messagePump
	// that we've started up
	close(startedChan)

	for {
		if subChannel == nil || !client.IsReadyForMessages() {
			// the client is not ready to receive messages...
			clientMsgChan = nil
			flusherChan = nil
			// force flush
			client.LockWrite()
			err = client.Flush()
			client.UnlockWrite()
			if err != nil {
				goto exit
			}
			flushed = true
		} else if flushed {
			// last iteration we flushed...
			// do not select on the flusher ticker channel
			clientMsgChan = subChannel.GetClientMsgChan()
			flusherChan = nil
		} else {
			// we're buffered (if there isn't any more data we should flush)...
			// select on the flusher ticker channel, too
			clientMsgChan = subChannel.GetClientMsgChan()
			flusherChan = outputBufferTicker.C
		}

		select {
		case <-client.ExitChan:
			goto exit
		case <-flusherChan:
			// if this case wins, we're either starved
			// or we won the race between other channels...
			// in either case, force flush
			client.LockWrite()
			err = client.Flush()
			client.UnlockWrite()
			if err != nil {
				goto exit
			}
			flushed = true
		case <-client.ReadyStateChan:
		case subChannel = <-subEventChan:
			// you can't SUB anymore
			nsqd.NsqLogger().Logf("client %v sub to channel: %v", client.ID,
				subChannel.GetName())
			subEventChan = nil
		case identifyData := <-identifyEventChan:
			// you can't IDENTIFY anymore
			identifyEventChan = nil

			outputBufferTicker.Stop()
			if identifyData.OutputBufferTimeout > 0 {
				outputBufferTicker = time.NewTicker(identifyData.OutputBufferTimeout)
			}

			heartbeatTicker.Stop()
			heartbeatChan = nil
			if identifyData.HeartbeatInterval > 0 {
				heartbeatTicker = time.NewTicker(identifyData.HeartbeatInterval)
				heartbeatChan = heartbeatTicker.C
			}

			if identifyData.SampleRate > 0 {
				sampleRate = identifyData.SampleRate
			}

			msgTimeout = identifyData.MsgTimeout
		case <-heartbeatChan:
			if subChannel != nil && client.IsReadyForMessages() {
				// try to wake up the channel
				subChannel.TryWakeupRead()
			}

			err = Send(client, frameTypeResponse, heartbeatBytes)
			nsqd.NsqLogger().LogDebugf("PROTOCOL(V2): [%s] send heartbeat", client)
			if err != nil {
				heartbeatFailedCnt++
				nsqd.NsqLogger().LogWarningf("PROTOCOL(V2): [%s] send heartbeat failed %v times, %v", client, heartbeatFailedCnt, err)
				if heartbeatFailedCnt > 2 {
					goto exit
				}
			} else {
				heartbeatFailedCnt = 0
			}
		case msg, ok := <-clientMsgChan:
			if !ok {
				goto exit
			}

			if sampleRate > 0 && rand.Int31n(100) > sampleRate {
				// FIN automatically: sampled-out messages are confirmed
				// without being sent, and the reader keeps moving forward.
				offset, _, _, _ := subChannel.ConfirmBackendQueue(msg)
				// TODO: sync to replica nodes.
				_ = offset
				continue
			}
			// avoid re-sending already-confirmed messages; this can happen
			// when the channel reader is reset to an old position after a
			// retry or a leader change.
			if subChannel.IsConfirmed(msg) {
				continue
			}

			subChannel.StartInFlightTimeout(msg, client.ID, client.String(), msgTimeout)
			client.SendingMessage()
			err = SendMessage(client, msg, &buf, subChannel.IsOrdered())
			if err != nil {
				goto exit
			}
			flushed = false
		}
	}

exit:
	nsqd.NsqLogger().LogDebugf("PROTOCOL(V2): [%s] exiting messagePump", client)
	heartbeatTicker.Stop()
	outputBufferTicker.Stop()
	if err != nil {
		nsqd.NsqLogger().Logf("PROTOCOL(V2): [%s] messagePump error - %s", client, err)
	}
	close(stoppedChan)
}
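
The pump's flushed flag implements a generic flush-on-idle pattern for buffered writers: flush immediately when there is nothing to send, otherwise arm a ticker so buffered data sits for at most OutputBufferTimeout. A stripped-down sketch of the same pattern outside nsqd (all names hypothetical):

import (
	"bufio"
	"time"
)

// pump drains msgs into a buffered writer, flushing either when the
// source goes idle or when the ticker fires, whichever comes first.
func pump(w *bufio.Writer, msgs <-chan []byte, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var flusherChan <-chan time.Time // nil (disarmed) while nothing is buffered
	for {
		select {
		case m, ok := <-msgs:
			if !ok {
				return w.Flush()
			}
			if _, err := w.Write(m); err != nil {
				return err
			}
			flusherChan = ticker.C // buffered: arm the flusher
		case <-flusherChan:
			if err := w.Flush(); err != nil {
				return err
			}
			flusherChan = nil // flushed: disarm until the next write
		}
	}
}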
Example 13
func (p *protocolV2) internalPubAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) {
	startPub := time.Now().UnixNano()
	bodyLen, topic, err := p.preparePub(client, params, p.ctx.getOpts().MaxMsgSize)
	if err != nil {
		return nil, err
	}
	if traceEnable && bodyLen <= nsqd.MsgTraceIDLength {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY",
			fmt.Sprintf("invalid body size %d with trace id enabled", bodyLen))
	}

	messageBodyBuffer := topic.BufferPoolGet(int(bodyLen))
	defer topic.BufferPoolPut(messageBodyBuffer)
	asyncAction := shouldHandleAsync(client, params)

	_, err = io.CopyN(messageBodyBuffer, client.Reader, int64(bodyLen))
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_MESSAGE", "failed to read message body")
	}
	messageBody := messageBodyBuffer.Bytes()[:bodyLen]

	topicName := topic.GetTopicName()
	partition := topic.GetTopicPart()
	var traceID uint64
	var realBody []byte
	if traceEnable {
		traceID = binary.BigEndian.Uint64(messageBody[:nsqd.MsgTraceIDLength])
		realBody = messageBody[nsqd.MsgTraceIDLength:]
	} else {
		realBody = messageBody
	}
	if p.ctx.checkForMasterWrite(topicName, partition) {
		id := nsqd.MessageID(0)
		offset := nsqd.BackendOffset(0)
		rawSize := int32(0)
		if asyncAction {
			err = internalPubAsync(client.PubTimeout, messageBodyBuffer, topic)
		} else {
			id, offset, rawSize, _, err = p.ctx.PutMessage(topic, realBody, traceID)
		}
		//p.ctx.setHealth(err)
		if err != nil {
			topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true)
			nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err)
			if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
				if !clusterErr.IsLocalErr() {
					return nil, protocol.NewClientErr(err, FailedOnNotWritable, "")
				}
			}
			return nil, protocol.NewClientErr(err, "E_PUB_FAILED", err.Error())
		}
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, false)
		cost := time.Now().UnixNano() - startPub
		topic.GetDetailStats().UpdateTopicMsgStats(int64(len(realBody)), cost/1000)
		if !traceEnable {
			return okBytes, nil
		}
		return getTracedReponse(messageBodyBuffer, id, traceID, offset, rawSize)
	} else {
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true)
		//forward to master of topic
		nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v",
			topic.GetFullName(), client.RemoteAddr)
		topic.DisableForSlave()
		return nil, protocol.NewClientErr(err, FailedOnNotLeader, "")
	}
}
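
With tracing enabled, the first MsgTraceIDLength bytes of the published body are the trace ID and the rest is the real payload; since the handler decodes the prefix with binary.BigEndian.Uint64, that length is evidently 8. A minimal sketch of composing such a body client-side (hypothetical helper):

import "encoding/binary"

// tracedBody prefixes the payload with an 8-byte big-endian trace ID,
// mirroring how internalPubAndTrace splits messageBody above.
func tracedBody(traceID uint64, payload []byte) []byte {
	body := make([]byte, 8+len(payload))
	binary.BigEndian.PutUint64(body[:8], traceID)
	copy(body[8:], payload)
	return body
}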