Code example #1
func (self *NsqdCoordinator) SetChannelConsumeOffsetToCluster(ch *nsqd.Channel, queueOffset int64, cnt int64, force bool) error {
	topicName := ch.GetTopicName()
	partition := ch.GetTopicPart()
	coord, checkErr := self.getTopicCoord(topicName, partition)
	if checkErr != nil {
		return checkErr.ToErrorType()
	}

	var syncOffset ChannelConsumerOffset
	syncOffset.AllowBackward = true
	syncOffset.VCnt = cnt
	syncOffset.VOffset = queueOffset

	doLocalWrite := func(d *coordData) *CoordErr {
		err := ch.SetConsumeOffset(nsqd.BackendOffset(queueOffset), cnt, force)
		if err != nil {
			if err != nsqd.ErrSetConsumeOffsetNotFirstClient {
				coordLog.Infof("failed to set the consume offset: %v, err:%v", queueOffset, err)
				return &CoordErr{err.Error(), RpcNoErr, CoordLocalErr}
			}
			coordLog.Debugf("the consume offset: %v can only be set by the first client", queueOffset)
			return ErrLocalSetChannelOffsetNotFirstClient
		}
		return nil
	}
	doLocalExit := func(err *CoordErr) {}
	doLocalCommit := func() error {
		return nil
	}
	doLocalRollback := func() {}
	doRefresh := func(d *coordData) *CoordErr {
		return nil
	}
	doSlaveSync := func(c *NsqdRpcClient, nodeID string, tcData *coordData) *CoordErr {
		if ch.IsEphemeral() {
			return nil
		}
		rpcErr := c.UpdateChannelOffset(&tcData.topicLeaderSession, &tcData.topicInfo, ch.GetName(), syncOffset)
		if rpcErr != nil {
			coordLog.Infof("sync channel(%v) offset to replica %v failed: %v, offset: %v", ch.GetName(),
				nodeID, rpcErr, syncOffset)
		}
		return rpcErr
	}
	handleSyncResult := func(successNum int, tcData *coordData) bool {
		// commit only if every ISR member acknowledged the write
		return successNum == len(tcData.topicInfo.ISR)
	}
	clusterErr := self.doSyncOpToCluster(false, coord, doLocalWrite, doLocalExit, doLocalCommit, doLocalRollback,
		doRefresh, doSlaveSync, handleSyncResult)
	if clusterErr != nil {
		return clusterErr.ToErrorType()
	}
	return nil
}
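
For orientation, here is a minimal standalone sketch of the write/replicate/commit callback pattern that doSyncOpToCluster appears to drive in the example above. The simplified signatures, types, and error handling are assumptions for illustration, not the project's actual API.

package main

import (
	"errors"
	"fmt"
)

// doSyncOp mirrors the overall shape of doSyncOpToCluster as used above:
// write locally first, replicate to every ISR node, and commit only when
// the sync-result check accepts the success count; otherwise roll back.
func doSyncOp(
	isr []string,
	localWrite func() error,
	slaveSync func(node string) error,
	commit func() error,
	rollback func(),
) error {
	if err := localWrite(); err != nil {
		return err
	}
	success := 0
	for _, node := range isr {
		if err := slaveSync(node); err == nil {
			success++
		}
	}
	if success != len(isr) { // handleSyncResult: every ISR member must ack
		rollback()
		return errors.New("replication incomplete")
	}
	return commit()
}

func main() {
	isr := []string{"node-a", "node-b"}
	err := doSyncOp(isr,
		func() error { fmt.Println("local write"); return nil },
		func(node string) error { fmt.Printf("sync to %v\n", node); return nil },
		func() error { fmt.Println("commit"); return nil },
		func() { fmt.Println("rollback") },
	)
	fmt.Println("result:", err)
}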
Code example #2
File: context.go  Project: absolute8511/nsq
func (c *context) SetChannelOffset(ch *nsqd.Channel, startFrom *ConsumeOffset, force bool) (int64, int64, error) {
	var l *consistence.CommitLogData
	var queueOffset int64
	cnt := int64(0)
	var err error
	if startFrom.OffsetType == offsetTimestampType {
		if c.nsqdCoord != nil {
			l, queueOffset, cnt, err = c.nsqdCoord.SearchLogByMsgTimestamp(ch.GetTopicName(), ch.GetTopicPart(), startFrom.OffsetValue)
		} else {
			err = errors.New("Not supported while coordinator disabled")
		}
	} else if startFrom.OffsetType == offsetSpecialType {
		if startFrom.OffsetValue == -1 {
			e := ch.GetChannelEnd()
			queueOffset = int64(e.Offset())
			cnt = e.TotalMsgCnt()
		} else {
			nsqd.NsqLogger().Logf("not known special offset :%v", startFrom)
			err = errors.New("not supported offset type")
		}
	} else if startFrom.OffsetType == offsetVirtualQueueType {
		queueOffset = startFrom.OffsetValue
		cnt = 0
		if c.nsqdCoord != nil {
			l, queueOffset, cnt, err = c.nsqdCoord.SearchLogByMsgOffset(ch.GetTopicName(), ch.GetTopicPart(), queueOffset)
		} else {
			err = errors.New("Not supported while coordinator disabled")
		}
	} else if startFrom.OffsetType == offsetMsgCountType {
		if c.nsqdCoord != nil {
			l, queueOffset, cnt, err = c.nsqdCoord.SearchLogByMsgCnt(ch.GetTopicName(), ch.GetTopicPart(), startFrom.OffsetValue)
		} else {
			err = errors.New("Not supported while coordinator disabled")
		}
	} else {
		nsqd.NsqLogger().Logf("not supported offset type:%v", startFrom)
		err = errors.New("not supported offset type")
	}
	if err != nil {
		nsqd.NsqLogger().Logf("failed to search the consume offset: %v, err:%v", startFrom, err)
		return 0, 0, err
	}
	nsqd.NsqLogger().Logf("%v searched log : %v, offset: %v:%v", startFrom, l, queueOffset, cnt)
	if c.nsqdCoord == nil {
		err = ch.SetConsumeOffset(nsqd.BackendOffset(queueOffset), cnt, force)
		if err != nil {
			if err != nsqd.ErrSetConsumeOffsetNotFirstClient {
				nsqd.NsqLogger().Logf("failed to set the consume offset: %v, err:%v", startFrom, err)
				return 0, 0, err
			}
			nsqd.NsqLogger().Logf("the consume offset: %v can only be set by the first client", startFrom)
		}
	} else {
		err = c.nsqdCoord.SetChannelConsumeOffsetToCluster(ch, queueOffset, cnt, force)
		if err != nil {
			if coordErr, ok := err.(*consistence.CommonCoordErr); ok {
				if coordErr.IsEqual(consistence.ErrLocalSetChannelOffsetNotFirstClient) {
					nsqd.NsqLogger().Logf("the consume offset: %v can only be set by the first client", startFrom)
					return queueOffset, cnt, nil
				}
			}
			nsqd.NsqLogger().Logf("failed to set the consume offset: %v (%v:%v), err: %v ", startFrom, queueOffset, cnt, err)
			return 0, 0, err
		}
	}
	return queueOffset, cnt, nil
}
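
The ConsumeOffset argument dispatched on above pairs an offset type with a value. As a purely hypothetical illustration (the real type and constants live in this nsq fork; the string values and the parseConsumeOffset helper here are invented for the sketch), a "type:value" parser might look like:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// Hypothetical stand-ins for the offset types the dispatch above compares
// against; the real definitions live alongside ConsumeOffset in the fork.
const (
	offsetTimestampType    = "timestamp"
	offsetVirtualQueueType = "virtual_queue"
	offsetMsgCountType     = "msg_count"
	offsetSpecialType      = "special"
)

type ConsumeOffset struct {
	OffsetType  string
	OffsetValue int64
}

// parseConsumeOffset is an illustrative helper (not a project function): it
// splits a "type:value" string into the pair SetChannelOffset consumes.
func parseConsumeOffset(s string) (*ConsumeOffset, error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return nil, errors.New("expected type:value")
	}
	v, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return nil, err
	}
	switch parts[0] {
	case offsetTimestampType, offsetVirtualQueueType, offsetMsgCountType, offsetSpecialType:
		return &ConsumeOffset{OffsetType: parts[0], OffsetValue: v}, nil
	}
	return nil, errors.New("not supported offset type")
}

func main() {
	off, err := parseConsumeOffset("timestamp:1500000000")
	fmt.Println(off, err)
}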
Code example #3
File: protocol_v2.go  Project: absolute8511/nsq
func (p *protocolV2) internalPubAndTrace(client *nsqd.ClientV2, params [][]byte, traceEnable bool) ([]byte, error) {
	startPub := time.Now().UnixNano()
	bodyLen, topic, err := p.preparePub(client, params, p.ctx.getOpts().MaxMsgSize)
	if err != nil {
		return nil, err
	}
	if traceEnable && bodyLen <= nsqd.MsgTraceIDLength {
		return nil, protocol.NewFatalClientErr(nil, "E_BAD_BODY",
			fmt.Sprintf("invalid body size %d with trace id enabled", bodyLen))
	}

	messageBodyBuffer := topic.BufferPoolGet(int(bodyLen))
	defer topic.BufferPoolPut(messageBodyBuffer)
	asyncAction := shouldHandleAsync(client, params)

	_, err = io.CopyN(messageBodyBuffer, client.Reader, int64(bodyLen))
	if err != nil {
		return nil, protocol.NewFatalClientErr(err, "E_BAD_MESSAGE", "failed to read message body")
	}
	messageBody := messageBodyBuffer.Bytes()[:bodyLen]

	topicName := topic.GetTopicName()
	partition := topic.GetTopicPart()
	var traceID uint64
	var realBody []byte
	if traceEnable {
		traceID = binary.BigEndian.Uint64(messageBody[:nsqd.MsgTraceIDLength])
		realBody = messageBody[nsqd.MsgTraceIDLength:]
	} else {
		realBody = messageBody
	}
	if p.ctx.checkForMasterWrite(topicName, partition) {
		id := nsqd.MessageID(0)
		offset := nsqd.BackendOffset(0)
		rawSize := int32(0)
		if asyncAction {
			err = internalPubAsync(client.PubTimeout, messageBodyBuffer, topic)
		} else {
			id, offset, rawSize, _, err = p.ctx.PutMessage(topic, realBody, traceID)
		}
		//p.ctx.setHealth(err)
		if err != nil {
			topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true)
			nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err)
			if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
				if !clusterErr.IsLocalErr() {
					return nil, protocol.NewClientErr(err, FailedOnNotWritable, "")
				}
			}
			return nil, protocol.NewClientErr(err, "E_PUB_FAILED", err.Error())
		}
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, false)
		cost := time.Now().UnixNano() - startPub
		topic.GetDetailStats().UpdateTopicMsgStats(int64(len(realBody)), cost/1000)
		if !traceEnable {
			return okBytes, nil
		}
		return getTracedReponse(messageBodyBuffer, id, traceID, offset, rawSize)
	} else {
		topic.GetDetailStats().UpdatePubClientStats(client.RemoteAddr().String(), client.UserAgent, "tcp", 1, true)
		//forward to master of topic
		nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v",
			topic.GetFullName(), client.RemoteAddr)
		topic.DisableForSlave()
		return nil, protocol.NewClientErr(err, FailedOnNotLeader, "")
	}
}
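
When tracing is enabled, the handler above expects the message body to start with an 8-byte big-endian trace ID. A small self-contained sketch of that framing (frameTracedBody is an illustrative helper, not a project function):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// MsgTraceIDLength mirrors nsqd.MsgTraceIDLength: 8 bytes for a uint64.
const MsgTraceIDLength = 8

// frameTracedBody builds the wire layout internalPubAndTrace decodes:
// an 8-byte big-endian trace ID followed by the real message body.
func frameTracedBody(traceID uint64, body []byte) []byte {
	buf := make([]byte, MsgTraceIDLength+len(body))
	binary.BigEndian.PutUint64(buf[:MsgTraceIDLength], traceID)
	copy(buf[MsgTraceIDLength:], body)
	return buf
}

func main() {
	framed := frameTracedBody(42, []byte("hello"))
	// Decode it back the same way the handler does.
	traceID := binary.BigEndian.Uint64(framed[:MsgTraceIDLength])
	realBody := framed[MsgTraceIDLength:]
	fmt.Println(traceID, string(realBody), bytes.Equal(realBody, []byte("hello")))
}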
Code example #4
func (self *NsqdCoordinator) updateChannelOffsetOnSlave(tc *coordData, channelName string, offset ChannelConsumerOffset) *CoordErr {
	topicName := tc.topicInfo.Name
	partition := tc.topicInfo.Partition

	if !tc.IsMineISR(self.myNode.GetID()) {
		return ErrTopicWriteOnNonISR
	}

	if coordLog.Level() >= levellogger.LOG_DETAIL {
		coordLog.Debugf("got update channel(%v) offset on slave : %v", channelName, offset)
	}
	coord, coordErr := self.getTopicCoord(topicName, partition)
	if coordErr != nil {
		return ErrMissingTopicCoord
	}

	topic, localErr := self.localNsqd.GetExistingTopic(topicName, partition)
	if localErr != nil {
		coordLog.Warningf("slave missing topic : %v", topicName)
		// TODO: leave the isr and try re-sync with leader
		return &CoordErr{localErr.Error(), RpcCommonErr, CoordSlaveErr}
	}

	if topic.GetTopicPart() != partition {
		coordLog.Errorf("topic on slave has different partition : %v vs %v", topic.GetTopicPart(), partition)
		return ErrLocalMissingTopic
	}
	var ch *nsqd.Channel
	ch, localErr = topic.GetExistingChannel(channelName)
	// if a new channel on slave, we should set the consume offset by force
	if localErr != nil {
		offset.AllowBackward = true
		ch = topic.GetChannel(channelName)
		coordLog.Infof("slave init the channel : %v, %v, offset: %v", topic.GetTopicName(), channelName, ch.GetConfirmed())
	}
	if ch.IsEphemeral() {
		coordLog.Errorf("ephemeral channel %v should not be synced on slave", channelName)
	}
	currentEnd := ch.GetChannelEnd()
	if nsqd.BackendOffset(offset.VOffset) > currentEnd.Offset() {
		coordLog.Debugf("update channel(%v) consume offset exceed end %v on slave : %v", channelName, offset, currentEnd)
		// cache the offset to reduce flushes on the slave channel
		coord.consumeMgr.Lock()
		cur, ok := coord.consumeMgr.channelConsumeOffset[channelName]
		if !ok || cur.VOffset < offset.VOffset {
			coord.consumeMgr.channelConsumeOffset[channelName] = offset
		}
		coord.consumeMgr.Unlock()

		if offset.Flush {
			topic.ForceFlush()
			currentEnd = ch.GetChannelEnd()
			if nsqd.BackendOffset(offset.VOffset) > currentEnd.Offset() {
				offset.VOffset = int64(currentEnd.Offset())
				offset.VCnt = currentEnd.TotalMsgCnt()
			}
		} else {
			return nil
		}
	}
	err := ch.ConfirmBackendQueueOnSlave(nsqd.BackendOffset(offset.VOffset), offset.VCnt, offset.AllowBackward)
	if err != nil {
		coordLog.Warningf("update local channel(%v) offset %v failed: %v, current channel end: %v, topic end: %v",
			channelName, offset, err, currentEnd, topic.TotalDataSize())
		if err == nsqd.ErrExiting {
			return &CoordErr{err.Error(), RpcNoErr, CoordTmpErr}
		}
		return &CoordErr{err.Error(), RpcCommonErr, CoordSlaveErr}
	}
	return nil
}
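
The caching step above keeps only the highest pending offset per channel when a replica receives an offset beyond its local queue end. A standalone sketch of that idea, with simplified stand-in types for the consumeMgr fields:

package main

import (
	"fmt"
	"sync"
)

// offsetCache is a simplified stand-in for coord.consumeMgr: it remembers
// the largest VOffset seen per channel instead of flushing immediately.
type offsetCache struct {
	sync.Mutex
	pending map[string]int64 // channel name -> highest VOffset seen
}

func (c *offsetCache) remember(channel string, vOffset int64) {
	c.Lock()
	defer c.Unlock()
	if cur, ok := c.pending[channel]; !ok || cur < vOffset {
		c.pending[channel] = vOffset
	}
}

func main() {
	c := &offsetCache{pending: make(map[string]int64)}
	c.remember("orders", 100)
	c.remember("orders", 80) // older offset is ignored
	c.remember("orders", 150)
	fmt.Println(c.pending["orders"]) // 150
}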
Code example #5
func (self *NsqdCoordinator) putMessagesOnSlave(coord *TopicCoordinator, logData CommitLogData, msgs []*nsqd.Message) *CoordErr {
	if len(msgs) == 0 {
		return ErrPubArgError
	}
	if logData.LogID != int64(msgs[0].ID) {
		return ErrPubArgError
	}
	var logMgr *TopicCommitLogMgr
	// The last log id must be used on the slave so that a slave later chosen
	// as leader does not override the previous leader's mpub message ids:
	// the mpub may already be committed when the leader goes down, so the
	// new leader has to start the next message at the last message id + 1.
	lastMsgLogID := int64(msgs[len(msgs)-1].ID)
	if logData.LastMsgLogID != lastMsgLogID {
		return ErrPubArgError
	}

	var queueEnd nsqd.BackendQueueEnd
	var topic *nsqd.Topic
	checkDupOnSlave := func(tc *coordData) bool {
		if coordLog.Level() >= levellogger.LOG_DETAIL {
			topicName := tc.topicInfo.Name
			coordLog.Debugf("pub on slave : %v, msg count: %v", topicName, len(msgs))
		}
		logMgr = tc.logMgr
		if logMgr.IsCommitted(logData.LogID) {
			coordLog.Infof("put the already committed log id : %v", logData.LogID)
			return true
		}
		return false
	}

	doLocalWriteOnSlave := func(tc *coordData) *CoordErr {
		var localErr error
		var start time.Time
		checkCost := coordLog.Level() >= levellogger.LOG_DEBUG
		if self.enableBenchCost {
			checkCost = true
		}
		if checkCost {
			start = time.Now()
		}
		topicName := tc.topicInfo.Name
		partition := tc.topicInfo.Partition
		topic, localErr = self.localNsqd.GetExistingTopic(topicName, partition)
		if localErr != nil {
			coordLog.Infof("pub on slave missing topic : %v", topicName)
			// leave the isr and try re-sync with leader
			return &CoordErr{localErr.Error(), RpcErrTopicNotExist, CoordSlaveErr}
		}

		topic.Lock()
		var cost time.Duration
		if checkCost {
			cost = time.Since(start)
			if cost > time.Millisecond {
				coordLog.Infof("prepare write on slave local cost :%v", cost)
			}
		}

		queueEnd, localErr = topic.PutMessagesOnReplica(msgs, nsqd.BackendOffset(logData.MsgOffset))
		if checkCost {
			cost2 := time.Since(start)
			if cost2 > time.Millisecond {
				coordLog.Infof("write local on slave cost :%v, %v", cost, cost2)
			}
		}

		topic.Unlock()
		if localErr != nil {
			logIndex, lastLogOffset, lastLog, _ := logMgr.GetLastCommitLogOffsetV2()
			coordLog.Errorf("put messages on slave failed: %v, slave last logid: %v, data: %v:%v, %v",
				localErr, logMgr.GetLastCommitLogID(), logIndex, lastLogOffset, lastLog)
			return &CoordErr{localErr.Error(), RpcCommonErr, CoordSlaveErr}
		}
		return nil
	}

	doLocalCommit := func() error {
		localErr := logMgr.AppendCommitLog(&logData, true)
		if localErr != nil {
			coordLog.Errorf("write commit log on slave failed: %v", localErr)
			return localErr
		}
		topic.Lock()
		topic.UpdateCommittedOffset(queueEnd)
		topic.Unlock()
		return nil
	}

	doLocalExit := func(err *CoordErr) {
		if err != nil {
			coordLog.Warningf("failed to batch put messages on slave: %v", err)
		}
	}
	return self.doWriteOpOnSlave(coord, checkDupOnSlave, doLocalWriteOnSlave, doLocalCommit,
		doLocalExit)
}
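
The checkDupOnSlave callback makes leader retries idempotent: a write whose log ID is already committed is skipped. A simplified standalone sketch of that guard (the less-or-equal comparison and the types are assumptions; the real check is logMgr.IsCommitted):

package main

import "fmt"

// replicaLog is a stand-in for the slave's commit log manager: it tracks the
// last committed log ID and rejects replays at or below it.
type replicaLog struct {
	lastCommitted int64
}

func (r *replicaLog) IsCommitted(logID int64) bool {
	return logID <= r.lastCommitted
}

func (r *replicaLog) apply(logID int64, body string) {
	if r.IsCommitted(logID) {
		fmt.Printf("skip already committed log id: %v\n", logID)
		return
	}
	// ... write the message data, then append the commit log ...
	r.lastCommitted = logID
	fmt.Printf("applied %v (%v)\n", logID, body)
}

func main() {
	r := &replicaLog{}
	r.apply(1, "msg-1")
	r.apply(1, "msg-1") // retried by the leader: detected as duplicate
	r.apply(2, "msg-2")
}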
Code example #6
func (self *NsqdCoordinator) putMessageOnSlave(coord *TopicCoordinator, logData CommitLogData, msg *nsqd.Message) *CoordErr {
	var logMgr *TopicCommitLogMgr
	var topic *nsqd.Topic
	var queueEnd nsqd.BackendQueueEnd

	checkDupOnSlave := func(tc *coordData) bool {
		if coordLog.Level() >= levellogger.LOG_DETAIL {
			topicName := tc.topicInfo.Name
			coordLog.Debugf("pub on slave : %v, msg %v", topicName, msg.ID)
		}
		logMgr = tc.logMgr
		if logMgr.IsCommitted(logData.LogID) {
			coordLog.Infof("pub the already committed log id : %v", logData.LogID)
			return true
		}
		return false
	}

	doLocalWriteOnSlave := func(tc *coordData) *CoordErr {
		var localErr error
		topicName := tc.topicInfo.Name
		partition := tc.topicInfo.Partition
		topic, localErr = self.localNsqd.GetExistingTopic(topicName, partition)
		if localErr != nil {
			coordLog.Infof("pub on slave missing topic : %v", topicName)
			// leave the isr and try re-sync with leader
			return &CoordErr{localErr.Error(), RpcErrTopicNotExist, CoordSlaveErr}
		}

		if topic.GetTopicPart() != partition {
			coordLog.Errorf("topic on slave has different partition : %v vs %v", topic.GetTopicPart(), partition)
			return &CoordErr{ErrLocalTopicPartitionMismatch.String(), RpcErrTopicNotExist, CoordSlaveErr}
		}

		topic.Lock()
		queueEnd, localErr = topic.PutMessageOnReplica(msg, nsqd.BackendOffset(logData.MsgOffset))
		topic.Unlock()
		if localErr != nil {
			coordLog.Errorf("put message on slave failed: %v", localErr)
			return &CoordErr{localErr.Error(), RpcCommonErr, CoordSlaveErr}
		}
		return nil
	}

	doLocalCommit := func() error {
		localErr := logMgr.AppendCommitLog(&logData, true)
		if localErr != nil {
			coordLog.Errorf("write commit log on slave failed: %v", localErr)
			return localErr
		}
		topic.Lock()
		topic.UpdateCommittedOffset(queueEnd)
		topic.Unlock()
		return nil
	}
	doLocalExit := func(err *CoordErr) {
		if err != nil {
			coordLog.Infof("slave put message %v error: %v", logData, err)
		}
	}

	return self.doWriteOpOnSlave(coord, checkDupOnSlave, doLocalWriteOnSlave, doLocalCommit, doLocalExit)
}
Code example #7
func (self *NsqdCoordinator) PutMessageToCluster(topic *nsqd.Topic,
	body []byte, traceID uint64) (nsqd.MessageID, nsqd.BackendOffset, int32, nsqd.BackendQueueEnd, error) {
	var commitLog CommitLogData
	var queueEnd nsqd.BackendQueueEnd
	msg := nsqd.NewMessage(0, body)
	msg.TraceID = traceID

	topicName := topic.GetTopicName()
	partition := topic.GetTopicPart()
	coord, checkErr := self.getTopicCoord(topicName, partition)
	if checkErr != nil {
		return msg.ID, nsqd.BackendOffset(commitLog.MsgOffset), commitLog.MsgSize, queueEnd, checkErr.ToErrorType()
	}

	var logMgr *TopicCommitLogMgr

	doLocalWrite := func(d *coordData) *CoordErr {
		logMgr = d.logMgr
		topic.Lock()
		id, offset, writeBytes, qe, localErr := topic.PutMessageNoLock(msg)
		queueEnd = qe
		topic.Unlock()
		if localErr != nil {
			coordLog.Warningf("put message to local failed: %v", localErr)
			return &CoordErr{localErr.Error(), RpcNoErr, CoordLocalErr}
		}
		commitLog.LogID = int64(id)
		// The epoch must not change during the write: an epoch change means a
		// leadership change, and leadership changes disable writes under the
		// write lock, which we are already holding for this replication.
		commitLog.Epoch = d.GetTopicEpochForWrite()
		commitLog.LastMsgLogID = commitLog.LogID
		commitLog.MsgOffset = int64(offset)
		commitLog.MsgSize = writeBytes
		commitLog.MsgCnt = queueEnd.TotalMsgCnt()
		commitLog.MsgNum = 1

		return nil
	}
	doLocalExit := func(err *CoordErr) {
		if err != nil {
			coordLog.Infof("topic %v PutMessageToCluster msg %v error: %v", topic.GetFullName(), msg, err)
			if coord.IsWriteDisabled() {
				topic.DisableForSlave()
			}
		}
	}
	doLocalCommit := func() error {
		localErr := logMgr.AppendCommitLog(&commitLog, false)
		if localErr != nil {
			coordLog.Errorf("topic : %v, Generator %v failed write commit log : %v, logmgr: %v, %v",
				topic.GetFullName(), topic.GetMsgGenerator(), localErr, logMgr.pLogID, logMgr.nLogID)
		}
		topic.Lock()
		topic.UpdateCommittedOffset(queueEnd)
		topic.Unlock()
		return localErr
	}
	doLocalRollback := func() {
		coordLog.Warningf("failed write begin rollback : %v, %v", topic.GetFullName(), commitLog)
		topic.Lock()
		topic.RollbackNoLock(nsqd.BackendOffset(commitLog.MsgOffset), 1)
		topic.Unlock()
	}
	doRefresh := func(d *coordData) *CoordErr {
		logMgr = d.logMgr
		if d.GetTopicEpochForWrite() != commitLog.Epoch {
			coordLog.Warningf("write epoch changed during write: %v, %v", d.GetTopicEpochForWrite(), commitLog)
			return ErrEpochMismatch
		}
		self.requestNotifyNewTopicInfo(d.topicInfo.Name, d.topicInfo.Partition)
		return nil
	}
	doSlaveSync := func(c *NsqdRpcClient, nodeID string, tcData *coordData) *CoordErr {
		// retry on failure; the slave keeps its last successful write so that
		// retries do not duplicate messages
		putErr := c.PutMessage(&tcData.topicLeaderSession, &tcData.topicInfo, commitLog, msg)
		if putErr != nil {
			coordLog.Infof("sync write to replica %v failed: %v. put offset:%v, logmgr: %v, %v",
				nodeID, putErr, commitLog, logMgr.pLogID, logMgr.nLogID)
		}
		return putErr
	}
	handleSyncResult := func(successNum int, tcData *coordData) bool {
		// commit only if every ISR member acknowledged the write
		return successNum == len(tcData.topicInfo.ISR)
	}

	clusterErr := self.doSyncOpToCluster(true, coord, doLocalWrite, doLocalExit, doLocalCommit, doLocalRollback,
		doRefresh, doSlaveSync, handleSyncResult)

	var err error
	if clusterErr != nil {
		err = clusterErr.ToErrorType()
	}
	return msg.ID, nsqd.BackendOffset(commitLog.MsgOffset), commitLog.MsgSize, queueEnd, err
}
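
doLocalWrite above fills a CommitLogData record from the result of the local write. A sketch of how those fields relate (field names follow the snippet; the struct definition and the exact field types are assumed, since the real one lives in the consistence package):

package main

import "fmt"

// CommitLogData as reconstructed from the fields the snippet populates.
type CommitLogData struct {
	LogID        int64 // message ID of the first message in this write
	Epoch        int64 // topic write epoch; must not change mid-write
	LastMsgLogID int64 // last message ID (== LogID for a single PUB)
	MsgOffset    int64 // backend queue offset where the write begins
	MsgSize      int32 // raw bytes written
	MsgCnt       int64 // total message count after this write
	MsgNum       int32 // messages in this write (1 for PUB, >1 for MPUB)
}

func main() {
	// Values a single-message local write might return.
	id, offset, writeBytes, totalCnt := int64(1001), int64(4096), int32(512), int64(88)
	l := CommitLogData{
		LogID:        id,
		Epoch:        3,
		LastMsgLogID: id,
		MsgOffset:    offset,
		MsgSize:      writeBytes,
		MsgCnt:       totalCnt,
		MsgNum:       1,
	}
	fmt.Printf("%+v\n", l)
}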
Code example #8
File: tool.go  Project: absolute8511/nsq
func main() {
	flag.Parse()

	if *showVersion {
		fmt.Printf("nsq_data_tool v%s\n", version.Binary)
		return
	}

	if *topic == "" {
		log.Fatal("--topic is required\n")
	}
	if *partition == -1 {
		log.Fatal("--partition is required")
	}
	if *dataPath == "" {
		log.Fatal("--data_path is required")
	}

	if *viewCnt > 1000000 {
		log.Fatal("--view_cnt is too large")
	}

	topicDataPath := path.Join(*dataPath, *topic)
	topicCommitLogPath := consistence.GetTopicPartitionBasePath(*dataPath, *topic, *partition)
	tpLogMgr, err := consistence.InitTopicCommitLogMgr(*topic, *partition, topicCommitLogPath, 0)
	if err != nil {
		log.Fatalf("loading commit log %v failed: %v\n", topicCommitLogPath, err)
	}
	logIndex, lastOffset, lastLogData, err := tpLogMgr.GetLastCommitLogOffsetV2()
	if err != nil {
		log.Fatalf("loading last commit log failed: %v\n", err)
	}
	log.Printf("topic last commit log at %v:%v is : %v\n", logIndex, lastOffset, lastLogData)

	// note: group commits may exist, so it is not possible to seek directly
	// to a log position; we have to search the ordered log data instead.
	searchOffset := int64(0)
	searchLogIndexStart := int64(0)
	if *searchMode == "count" {
		searchLogIndexStart, searchOffset, _, err = tpLogMgr.SearchLogDataByMsgCnt(*viewStart)
		if err != nil {
			log.Fatalln(err)
		}
	} else if *searchMode == "id" {
		searchLogIndexStart, searchOffset, _, err = tpLogMgr.SearchLogDataByMsgID(*viewStartID)
		if err != nil {
			log.Fatalln(err)
		}
	} else if *searchMode == "virtual_offset" {
		searchLogIndexStart, searchOffset, _, err = tpLogMgr.SearchLogDataByMsgOffset(*viewStartID)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		log.Fatalln("not supported search mode")
	}

	logData, err := tpLogMgr.GetCommitLogFromOffsetV2(searchLogIndexStart, searchOffset)
	if err != nil {
		log.Fatalf("topic read at: %v:%v failed: %v\n", searchLogIndexStart, searchOffset, err)
	}
	log.Printf("topic read at: %v:%v, %v\n", searchLogIndexStart, searchOffset, logData)

	if *view == "commitlog" {
		logs, err := tpLogMgr.GetCommitLogsV2(searchLogIndexStart, searchOffset, int(*viewCnt))
		if err != nil {
			if err != consistence.ErrCommitLogEOF {
				log.Fatalf("get logs failed: %v", err)
				return
			}
		}
		for _, l := range logs {
			fmt.Println(l)
		}
	} else if *view == "topicdata" {
		queueOffset := logData.MsgOffset
		if *searchMode == "virtual_offset" {
			if queueOffset != *viewOffset {
				queueOffset = *viewOffset
				log.Printf("search virtual offset not the same : %v, %v\n", logData.MsgOffset, *viewOffset)
			}
		}
		backendName := getBackendName(*topic, *partition)
		backendWriter, err := nsqd.NewDiskQueueWriter(backendName, topicDataPath, 1024*1024*1024, 1, 1024*1024*100, 1)
		if err != nil {
			log.Fatal("init disk writer failed: %v", err)
			return
		}
		backendReader := nsqd.NewDiskQueueSnapshot(backendName, topicDataPath, backendWriter.GetQueueReadEnd())
		backendReader.SeekTo(nsqd.BackendOffset(queueOffset))
		cnt := *viewCnt
		for cnt > 0 {
			cnt--
			ret := backendReader.ReadOne()
			if ret.Err != nil {
				log.Fatalf("read data error: %v", ret)
				return
			}
			fmt.Printf("%v:%v:%v\n", ret.Offset, ret.MovedSize, ret.Data)
		}
	}
}
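
The tool dereferences flag pointers (*topic, *partition, *searchMode, ...) that are declared elsewhere in tool.go. A plausible reconstruction of those declarations, with defaults and help strings assumed from how main validates them:

package main

import (
	"flag"
	"fmt"
)

var (
	showVersion = flag.Bool("version", false, "print version string")
	topic       = flag.String("topic", "", "NSQ topic (required)")
	partition   = flag.Int("partition", -1, "NSQ topic partition (required)")
	dataPath    = flag.String("data_path", "", "data path of nsqd (required)")
	view        = flag.String("view", "commitlog", "commitlog or topicdata")
	viewStart   = flag.Int64("view_start", 0, "start count for search_mode=count")
	viewStartID = flag.Int64("view_start_id", 0, "start id for search_mode=id or virtual_offset")
	viewOffset  = flag.Int64("view_offset", 0, "expected virtual offset for the topicdata view")
	viewCnt     = flag.Int64("view_cnt", 1, "how many items to view")
	searchMode  = flag.String("search_mode", "count", "count, id or virtual_offset")
)

func main() {
	flag.Parse()
	fmt.Printf("topic=%v partition=%v mode=%v\n", *topic, *partition, *searchMode)
}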
Code example #9
File: http.go  Project: absolute8511/nsq
func (s *httpServer) internalPUB(w http.ResponseWriter, req *http.Request, ps httprouter.Params, enableTrace bool) (interface{}, error) {
	startPub := time.Now().UnixNano()
	// do not support chunked for http pub, use tcp pub instead.
	if req.ContentLength > s.ctx.getOpts().MaxMsgSize {
		return nil, http_api.Err{413, "MSG_TOO_BIG"}
	} else if req.ContentLength <= 0 {
		return nil, http_api.Err{406, "MSG_EMPTY"}
	}

	params, topic, err := s.getExistingTopicFromQuery(req)
	if err != nil {
		nsqd.NsqLogger().Logf("get topic err: %v", err)
		return nil, http_api.Err{404, E_TOPIC_NOT_EXIST}
	}

	// add 1 so that it's greater than our max when we test for it
	// (LimitReader returns a "fake" EOF)
	readMax := req.ContentLength + 1
	b := topic.BufferPoolGet(int(req.ContentLength))
	defer topic.BufferPoolPut(b)
	asyncAction := !enableTrace
	n, err := io.CopyN(b, io.LimitReader(req.Body, readMax), int64(req.ContentLength))
	body := b.Bytes()[:req.ContentLength]

	if err != nil {
		nsqd.NsqLogger().Logf("read request body error: %v", err)
		body = body[:n]
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			// we ignore EOF; maybe the ContentLength does not match?
			nsqd.NsqLogger().LogWarningf("read request body EOF: %v, ContentLength: %v, read length %v.",
				err, req.ContentLength, n)
		} else {
			return nil, http_api.Err{500, "INTERNAL_ERROR"}
		}
	}
	if len(body) == 0 {
		return nil, http_api.Err{406, "MSG_EMPTY"}
	}

	if s.ctx.checkForMasterWrite(topic.GetTopicName(), topic.GetTopicPart()) {
		var err error
		traceIDStr := params.Get("trace_id")
		traceID, err := strconv.ParseUint(traceIDStr, 10, 0)
		if enableTrace && err != nil {
			nsqd.NsqLogger().Logf("trace id invalid %v, %v",
				traceIDStr, err)
			return nil, http_api.Err{400, "INVALID_TRACE_ID"}
		}

		id := nsqd.MessageID(0)
		offset := nsqd.BackendOffset(0)
		rawSize := int32(0)
		if asyncAction {
			err = internalPubAsync(nil, b, topic)
		} else {
			id, offset, rawSize, _, err = s.ctx.PutMessage(topic, body, traceID)
		}
		if err != nil {
			nsqd.NsqLogger().LogErrorf("topic %v put message failed: %v", topic.GetFullName(), err)
			if clusterErr, ok := err.(*consistence.CommonCoordErr); ok {
				if !clusterErr.IsLocalErr() {
					return nil, http_api.Err{400, FailedOnNotWritable}
				}
			}
			return nil, http_api.Err{503, err.Error()}
		}

		cost := time.Now().UnixNano() - startPub
		topic.GetDetailStats().UpdateTopicMsgStats(int64(len(body)), cost/1000)
		if enableTrace {
			return struct {
				Status      string `json:"status"`
				ID          uint64 `json:"id"`
				TraceID     string `json:"trace_id"`
				QueueOffset uint64 `json:"queue_offset"`
				DataRawSize uint32 `json:"rawsize"`
			}{"OK", uint64(id), traceIDStr, uint64(offset), uint32(rawSize)}, nil
		} else {
			return "OK", nil
		}
	} else {
		nsqd.NsqLogger().LogDebugf("should put to master: %v, from %v",
			topic.GetFullName(), req.RemoteAddr)
		topic.DisableForSlave()
		return nil, http_api.Err{400, FailedOnNotLeader}
	}
}
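
A minimal client-side sketch for exercising the handler above. The /pub path and the topic and trace_id query parameters follow the upstream nsqd HTTP API and the code shown; the partition parameter and the host/port are assumptions:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Publish one message; a traced publish would add &trace_id=<uint64>.
	url := "http://127.0.0.1:4151/pub?topic=test&partition=0"
	resp, err := http.Post(url, "application/octet-stream", bytes.NewBufferString("hello nsq"))
	if err != nil {
		fmt.Println("pub failed:", err)
		return
	}
	defer resp.Body.Close()
	data, _ := io.ReadAll(resp.Body)
	// A non-leader node answers 400 FailedOnNotLeader; the leader answers "OK".
	fmt.Println(resp.StatusCode, string(data))
}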