Example #1
/*
AppendFirstMessages writes the given messages into a segment and presents them to the cache.

The following steps are performed:

	1 - Confirm that only one segment is open
	2 - Check whether the firstIndex of the segment matches leaderFirstIndex
	3 - If required, delete the current segment and create a new one
	4 - Append the messages and populate the cache
*/
func (dlog *DiskLogStorage) AppendFirstMessages(msgs model.Messages, leaderFirstIndex int64) (lastIndex int64, err error) {
	dlog.lock.Lock()
	defer dlog.lock.Unlock()
	//dlog.node_log("AppendMessages called with message count %v\n", len(msgs))
	if msgs.GetCount() == 0 {
		return 0, APPEND_WITH_NO_CONTENT
	}

	// Check we only have one segment.
	if len(dlog.segments) != 1 {
		return 0, FIRST_APPEND_MULTIPLE_SEGMENTS
	}

	// Does our segment's first index match the leader's?
	seg := dlog.segments[0]

	// Check we have no messages.
	if seg.getMessageCount() != 0 {
		return 0, errors.New("Attempt to add first messages, but segment already has content!")
	}

	if seg.GetFirstIndex() != leaderFirstIndex {
		dlog.node_log("First index available from leader is %v, re-creating first segment.\n", leaderFirstIndex)
		err = seg.Delete()
		if err != nil {
			dlog.node_log("Error deleting existing segment: %v", err)
		}
		// Create the new one.
		seg, err = CreateNewSegment(dlog.topicName, dlog.pathName, leaderFirstIndex, dlog.target_max_segment_size)
		if err != nil {
			dlog.node_log("Error trying to create a new disk log storage at location %v: %v\n", dlog.pathName, err)
			return 0, err
		}
		// Replace the existing segment.
		dlog.segments[0] = seg
	}
	// First append is never full.
	localAppendIndex, localLastIndex, _, err := seg.AppendMessages(msgs)
	if err != nil {
		dlog.node_log("Error in first append: %v\n", err)
		return 0, err
	}
	// Give the messages to the cache
	dlog.cache.AppendMessages(localAppendIndex, msgs)

	return localLastIndex, nil
}
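
AppendFirstMessages exists because a follower's log does not necessarily start at index 1 (the code itself notes "first message isn't always 1"), so the single empty segment is recreated at the leader's first index when the two disagree. Below is a minimal standalone sketch of that decision and the resulting index arithmetic; the sketchSegment type and bootstrapFirstAppend function are hypothetical stand-ins, not the repo's own types.

package main

import "fmt"

// sketchSegment is a hypothetical stand-in for an empty on-disk segment.
type sketchSegment struct {
	firstIndex int64
	msgCount   int64
}

// bootstrapFirstAppend mirrors the decision in AppendFirstMessages: if the
// empty segment's first index differs from the leader's, start a fresh
// segment at leaderFirstIndex, then append and report the last index held.
func bootstrapFirstAppend(seg *sketchSegment, leaderFirstIndex, incoming int64) int64 {
	if seg.firstIndex != leaderFirstIndex {
		// In the real code the old segment file is deleted and a new one is
		// created on disk; here we just reset the bookkeeping.
		*seg = sketchSegment{firstIndex: leaderFirstIndex}
	}
	seg.msgCount += incoming
	return seg.firstIndex + seg.msgCount - 1 // last index now held locally
}

func main() {
	seg := &sketchSegment{firstIndex: 1}
	// The leader's log starts at index 500 and it sends 3 messages.
	fmt.Println(bootstrapFirstAppend(seg, 500, 3)) // 502
}
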
Example #2
/*
AppendMessages writes the given messages into a segment and presents them to the cache.

The following steps are performed:

	1 - Attempt to append to the current last open segment.
	2 - If the segment is full, open a new segment.
	3 - If more than TARGET_OPEN_SEGMENTS (2) segments are open, close the older ones until just the previous and new segments are open.
	4 - Present the new messages to the cache
*/
func (dlog *DiskLogStorage) AppendMessages(msgs model.Messages) (lastIndex int64, err error) {
	dlog.lock.Lock()
	defer dlog.lock.Unlock()
	//dlog.node_log("AppendMessages called with message count %v\n", len(msgs))
	if msgs.GetCount() == 0 {
		return 0, APPEND_WITH_NO_CONTENT
	}
	// Attempt to append to the last segment in the list
	//dlog.node_log("Segments: %v\n", dlog)
	seg := dlog.segments[len(dlog.segments)-1]
	localAppendIndex, localLastIndex, full, err := seg.AppendMessages(msgs)
	if err != nil {
		return 0, err
	}
	if full {
		dlog.node_log("Creating a new segment with start index %v\n", localLastIndex+1)
		// Open a new segment.
		newSegment, err := CreateNewSegment(dlog.topicName, dlog.pathName, localLastIndex+1, dlog.target_max_segment_size)
		if err != nil {
			dlog.node_log("Error trying to create a new disk log storage at location %v: %v\n", dlog.pathName, err)
			return 0, err
		}
		dlog.segments = append(dlog.segments, newSegment)

		// Re-attempt the append
		dlog.node_log("Re-attempting the append after creating segment.\n")
		localAppendIndex, localLastIndex, full, err = newSegment.AppendMessages(msgs)
		if err != nil {
			dlog.node_log("Error in append: %v\n", err)
			return 0, err
		} else {
			dlog.node_log("Re-attempt at append worked.  Checking whether to close segments.\n")
		}

		// Trigger a scan for segments to close
		select {
		case dlog.closeSegementsChannel <- 1:
			dlog.node_log("Sent message to close segment loop to scan for segments to close.\n")
		default:
			dlog.node_log("Unable to send message to close segment loop as channel is full\n")
		}
	}
	// Give the messages to the cache
	dlog.cache.AppendMessages(localAppendIndex, msgs)

	return localLastIndex, nil
}
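
When a segment fills up, the append path only nudges a background loop (the reader of dlog.closeSegementsChannel) with a non-blocking send, so appends never stall waiting for segment cleanup. Here is a self-contained sketch of that select/default pattern with hypothetical names; the capacity-1 buffer is an assumption chosen to show the "already notified" branch.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Buffered channel of capacity 1: at most one pending "please scan" signal.
	closeSignal := make(chan int, 1)

	// Hypothetical background loop standing in for the segment-closing goroutine.
	go func() {
		for range closeSignal {
			fmt.Println("scanning for segments to close")
			time.Sleep(10 * time.Millisecond) // pretend the scan takes a while
		}
	}()

	for i := 0; i < 3; i++ {
		// Non-blocking send: if a signal is already queued, drop this one.
		select {
		case closeSignal <- 1:
			fmt.Println("notified close loop")
		default:
			fmt.Println("close loop already notified, skipping")
		}
	}

	time.Sleep(50 * time.Millisecond) // give the goroutine time to run before exit
}
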
Example #3
func (mlog *MemoryLogStorage) AppendFirstMessages(msgs model.Messages, leaderFirstIndex int64) (lastIndex int64, err error) {
	if len(mlog.offsets) != 0 {
		return 0, errors.New("Attempt to add first messages, but segment already has content!")
	}
	mlog.firstIndex = leaderFirstIndex
	_, err = msgs.Write(mlog)
	if err != nil {
		return 0, err
	}
	// Populate the offsets
	msgsOffsets, err := msgs.Offsets()
	if err != nil {
		return 0, err
	}

	for _, msgOffset := range msgsOffsets {
		mlog.offsets = append(mlog.offsets, msgOffset)
	}
	return int64(len(mlog.offsets)) + mlog.firstIndex - 1, nil
}
Example #4
func (mlog *MemoryLogStorage) AppendMessages(msgs model.Messages) (lastIndex int64, err error) {
	if mlog.firstIndex == 0 {
		mlog.firstIndex = 1
	}
	firstOffset := len(mlog.data)
	_, err = msgs.Write(mlog)
	if err != nil {
		return 0, err
	}
	// Populate the offsets
	msgsOffsets, err := msgs.Offsets()
	if err != nil {
		return 0, err
	}

	for _, msgOffset := range msgsOffsets {
		mlog.offsets = append(mlog.offsets, firstOffset+msgOffset)
	}

	return int64(len(mlog.offsets)) + mlog.firstIndex - 1, nil
}
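
Both memory-log variants keep a flat byte buffer plus one byte offset per message, so message firstIndex+i starts at offsets[i] and the last index is firstIndex + len(offsets) - 1; in AppendMessages the batch's own offsets are shifted by the buffer's current length (firstOffset) before being recorded. A standalone sketch of that bookkeeping follows; the memLog type and its methods are hypothetical miniatures, not the repo's MemoryLogStorage.

package main

import "fmt"

// memLog is a hypothetical miniature of the memory log's bookkeeping.
type memLog struct {
	firstIndex int64
	data       []byte
	offsets    []int // byte offset of each message within data
}

// appendMsgs records each payload's start offset and returns the last index held.
func (m *memLog) appendMsgs(payloads ...[]byte) int64 {
	for _, p := range payloads {
		m.offsets = append(m.offsets, len(m.data)) // message starts at current end
		m.data = append(m.data, p...)
	}
	return m.firstIndex + int64(len(m.offsets)) - 1
}

// messageAt returns the bytes of the message with the given absolute index.
func (m *memLog) messageAt(index int64) []byte {
	i := int(index - m.firstIndex)
	end := len(m.data)
	if i+1 < len(m.offsets) {
		end = m.offsets[i+1]
	}
	return m.data[m.offsets[i]:end]
}

func main() {
	ml := &memLog{firstIndex: 10}
	last := ml.appendMsgs([]byte("aaa"), []byte("bb"), []byte("c"))
	fmt.Println(last)                     // 12
	fmt.Println(string(ml.messageAt(11))) // bb
}
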
Example #5
/*
AppendMessages checks whether the segment is full and, if not, appends the messages to it.
*/
func (seg *Segment) AppendMessages(msgs model.Messages) (appendIndex, lastIndex int64, segmentFull bool, err error) {
	seg.lock.Lock()
	defer seg.lock.Unlock()
	seg.statsAppendMessageCalls++
	seg.lastAccessTime = time.Now()
	seg.lastModifiedTime = seg.lastAccessTime

	if seg.fileSize >= int64(seg.target_max_segment_size)*1024*1024 {
		seg.node_log("Segment %v at or beyond target size of %v MB\n", seg.filename, seg.target_max_segment_size)
		return 0, seg.firstIndex + int64(seg.msgCount) - 1, true, nil
	}

	if !seg.segmentOpen {
		err := seg.openWhileHoldingLock(false)
		if err != nil {
			return 0, 0, false, err
		}
	}

	// Go to the end of the file
	if seg.filePosition != seg.fileSize {
		seg.statsSeekCount++
		seg.fileSize, err = seg.file.Seek(0, 2) // whence 2 == io.SeekEnd
		if err != nil {
			seg.node_log("Error seeking to end of file in segment file %v: %v\n", seg.filename, err)
			return 0, 0, false, err
		}
		seg.filePosition = seg.fileSize
	} else {
		seg.statsNoSeekAppends++
	}
	appendIndex = seg.firstIndex + int64(seg.msgCount)
	//writer := bufio.NewWriter(seg.file)
	seg.writesPendingSync = true

	// Try and write out the messages
	written, err := msgs.Write(seg.file)
	if err != nil {
		seg.node_log("Error writing to segment %v: %v\n", seg.filename, err)
		return appendIndex, seg.firstIndex + int64(seg.msgCount) - 1, false, err
	}

	// Populate the offset index list
	msgsOffsets, err := msgs.Offsets()
	if err != nil {
		seg.node_log("Error getting offsets for messages written: %v\n", err)
		return appendIndex, seg.firstIndex + int64(seg.msgCount) - 1, false, err
	}

	for msgIndex, msgOffset := range msgsOffsets {
		seg.appendOffsetToIndexHoldingLock(appendIndex+int64(msgIndex), seg.filePosition+int64(msgOffset))
	}

	seg.msgCount += msgs.GetCount()
	seg.fileSize += int64(written)
	seg.filePosition = seg.fileSize

	return appendIndex, seg.firstIndex + int64(seg.msgCount) - 1, false, nil
}
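
For every appended message the segment also records the absolute index together with the byte position it was written at (appendOffsetToIndexHoldingLock), presumably so later reads can seek straight to a message rather than scan the file. The following self-contained sketch illustrates that idea with a temporary file; the offsets slice and read logic here are hypothetical, not the segment's real index structure or read path.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical miniature of a segment file plus its in-memory offset index.
	f, err := os.CreateTemp("", "segment-*.log")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	firstIndex := int64(100)
	var offsets []int64 // offsets[i] = byte position of message firstIndex+i
	var fileSize int64

	for _, payload := range []string{"first", "second", "third"} {
		offsets = append(offsets, fileSize) // record the position before writing
		n, err := f.WriteAt([]byte(payload), fileSize)
		if err != nil {
			panic(err)
		}
		fileSize += int64(n)
	}

	// Read back message 101 by seeking straight to its recorded offset.
	i := int64(101) - firstIndex
	end := fileSize
	if int(i)+1 < len(offsets) {
		end = offsets[i+1]
	}
	buf := make([]byte, end-offsets[i])
	if _, err := f.ReadAt(buf, offsets[i]); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // second
}
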
Example #6
/*
getAtLeastMessages keeps calling clog.log.GetMessages, joining the results, until at least count messages have been retrieved.

Callers must be certain that index + count < lastIndex, otherwise the loop may never terminate.
*/
func (clog *CommitLog) getAtLeastMessages(index, count int64) (model.Messages, error) {
	var readMessageCount int64
	indexToRead := index
	var results model.Messages
	var err error
	for readMessageCount < count && err == nil {
		var msgs model.Messages
		msgs, err = clog.log.GetMessages(indexToRead, count-readMessageCount)
		if err != nil {
			return model.EMPTY_MESSAGES, err
		}
		if msgs.GetCount() > 0 {
			readMessageCount += int64(msgs.GetCount())
			indexToRead += int64(msgs.GetCount())
			results, err = results.Join(msgs)
			if err != nil {
				return model.EMPTY_MESSAGES, err
			}
		}
	}
	return results, err
}
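
A single GetMessages call can return fewer messages than requested, so the loop advances the read index by however many came back and joins the batches until it has at least count. A generic standalone sketch of that accumulate-until-count pattern is below; fetchPage is a hypothetical source standing in for a log read that can come back short.

package main

import "fmt"

// fetchPage returns at most two items per call, standing in for a short read.
func fetchPage(start, count int64) []int64 {
	if count > 2 {
		count = 2
	}
	items := make([]int64, 0, count)
	for i := int64(0); i < count; i++ {
		items = append(items, start+i)
	}
	return items
}

// getAtLeast mirrors the accumulate-until-count loop in getAtLeastMessages.
func getAtLeast(index, count int64) []int64 {
	var results []int64
	var read int64
	for read < count {
		batch := fetchPage(index+read, count-read)
		if len(batch) == 0 {
			break // a real caller must guarantee enough items exist
		}
		read += int64(len(batch))
		results = append(results, batch...)
	}
	return results
}

func main() {
	fmt.Println(getAtLeast(5, 5)) // [5 6 7 8 9]
}
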
Example #7
/*
Queue is used to append client messages when the node is a leader.

NOTE: The lock is not held in this method as we do not touch commitIndex or waitingReaders
*/
func (clog *CommitLog) Queue(term int64, modelMsgs model.Messages) (IDs []int64, err error) {
	//clog.node_log("Queueing messages: %v\n", msgs)

	//modelMsgs := model.MessagesFromClientData(msgs)

	if modelMsgs.GetCount() == 0 {
		clog.node_log("No messages found from client for Queue.")
		return nil, errors.New("No messages found to be Queued.")
	}

	err = modelMsgs.SetMessageTerm(term)
	if err != nil {
		clog.node_log("Error setting terms for messages: %v\n", err)
		return nil, err
	}

	// Stick them on the end of the log
	lastIndex, err := clog.log.AppendMessages(modelMsgs)
	if err != nil {
		clog.node_log("Error writing message to log in Queue: %v\n", err)
		return nil, err
	} else if clog.syncPolicy == WRITE_SYNC {
		err = clog.log.Sync()
		if err != nil {
			clog.node_log("Error syncing written message to log in Queue: %v\n", err)
			return nil, err
		}
	}

	//time.Sleep(time.Millisecond * 500)

	numMessages := modelMsgs.GetCount()
	results := make([]int64, numMessages)

	// If log has one message, and two are added: lastIndex (3) - len (modelMsgs (2)) + 1 = 2
	index := lastIndex - int64(numMessages) + 1
	for i := 0; i < numMessages; i++ {
		results[i] = index
		index++
	}

	return results, nil
}
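
The IDs handed back to the client are derived purely from the returned lastIndex: the first new message landed at lastIndex - count + 1 and the rest follow consecutively. A short standalone check of that arithmetic (the function name and values here are illustrative only):

package main

import "fmt"

// assignedIDs reproduces the index arithmetic at the end of Queue.
func assignedIDs(lastIndex int64, numMessages int) []int64 {
	ids := make([]int64, numMessages)
	index := lastIndex - int64(numMessages) + 1
	for i := range ids {
		ids[i] = index
		index++
	}
	return ids
}

func main() {
	// The log already held one message; two more were appended, so lastIndex is 3.
	fmt.Println(assignedIDs(3, 2)) // [2 3]
}
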
Example #8
/*
Append logs the given messages and returns the index of the next slot, or an error if the messages could not be logged.
If the end of the log doesn't match previousIndex and previousTerm, the append must fail and previousMatch should be false.
If there is an existing entry at msg.Index with a different msg.Term then this entry and all subsequent entries must be deleted prior to the append.

NOTE: The lock is not held in this method as we do not touch commitIndex or waitingReaders
*/
func (clog *CommitLog) Append(msgs model.Messages, previousIndex int64, previousTerm int64, leaderFirstIndex int64) (nextIndex int64, previousMatch bool, err error) {
	overlapMessageCount := 0
	//clog.node_log("Recieved messages to append %v\n", msgs)

	lastIndex, err := clog.log.GetLastIndex()
	if err != nil {
		clog.node_log("Error getting last message details during append: %v\n", err)
		return 0, false, err
	}
	nextIndex = lastIndex + 1

	//clog.node_log("LastIndex is %v, previousIndex passed in is %v\n", lastIndex, previousIndex)
	if lastIndex > 0 {
		// Get our copy of the message at the previousIndex
		previousMessageList, err := clog.log.GetMessages(previousIndex, 1)

		if err != nil {
			return nextIndex, false, err
		}
		if previousMessageList.GetCount() < 1 {
			clog.node_log("Previous index given by leader (%v) is greater than or smaller than our last index (%v), returning no match\n", previousIndex, lastIndex)

			return nextIndex, false, nil
		}

		previousMessageTerm, err := previousMessageList.GetMessageTerm(0)
		if err != nil {
			clog.node_log("Error getting previous term: %v\n", err)
			return nextIndex, false, err
		}

		if previousMessageTerm != previousTerm {
			// Term of the message at this index doesn't match.
			clog.node_log("Previous term of %v doesn't match previousMessageTerm of %v\n", previousTerm, previousMessageTerm)
			return nextIndex, false, nil
		}

		//clog.node_log("Our last index is %v.  Leader thinks our last index is %v and we are trying to append %v messages\n", lastIndex, previousIndex, msgs.GetCount())
		// How many of the new messages overlap with entries we already have?
		// E.g. if our lastIndex is 10 and previousIndex is 8, there are two entries left to check (9 & 10);
		// if len(msgs) is smaller than that gap, only len(msgs) entries need checking.
		msgsToCheck := msgs.GetCount()
		// We have already verified the entry at previousIndex; does the rest push us beyond the lastIndex?
		// E.g. len (msgs) = 2, previousIndex = 5, lastIndex = 7.
		if (int64(msgsToCheck) + previousIndex) >= lastIndex {
			msgsToCheck = int(lastIndex - previousIndex)
		}

		// Next question - do all messages beyond the previousMessage match the contents of the new messages?
		checkMessagesIndex := previousIndex

		if msgsToCheck > 0 {
			overlappingMessages, err := clog.getAtLeastMessages(checkMessagesIndex+1, int64(msgsToCheck))
			if err != nil {
				return nextIndex, false, err
			}
			clog.node_log("Retrieved %v messages from index %v to check for overlaps (needed at least %v)\n", overlappingMessages.GetCount(), checkMessagesIndex+1, msgsToCheck)

			for i := 0; i < overlappingMessages.GetCount(); i++ {
				// Check terms match for the given index
				// TODO: Check payload and CRC as well?
				checkMessagesIndex++
				//clog.node_log("Loop index %v, overlapMessageCount: %v\n", i, overlapMessageCount)
				overlappingMessagesTerm, err := overlappingMessages.GetMessageTerm(i)
				if err != nil {
					return nextIndex, false, err
				}
				msgsTerm, err := msgs.GetMessageTerm(overlapMessageCount)
				if err != nil {
					return nextIndex, false, err
				}
				if overlappingMessagesTerm != msgsTerm {
					// We need to truncate from here to the end of the log.
					clog.node_log("Truncating log due to mismatching terms to index %v.\n", checkMessagesIndex)
					clog.log.TruncateMessages(int64(checkMessagesIndex))
					break
				}
				// If we get this far then we have an overlapping message
				overlapMessageCount++
			}
		}
	} else {
		if previousIndex != 0 {
			clog.node_log("Empty log, but previousIndex of %v given by leader\n", previousIndex)
			return nextIndex, false, nil
		}
		// We have our first messages - use AppendFirstMessages to set the index correctly (first message isn't always 1)
		if msgs.GetCount() > 0 {
			//clog.node_log("Attempting to append %v messages\n", len(msgsToAppend))
			lastID, err := clog.log.AppendFirstMessages(msgs, leaderFirstIndex)
			if err != nil {
				clog.node_log("Error attempting to Append messages to the log: %v\n", err)
				return nextIndex, false, nil
			} else if clog.syncPolicy == WRITE_SYNC {
				err = clog.log.Sync()
				if err != nil {
					clog.node_log("Error attempting to sync Append messages to the log: %v\n", err)
					return nextIndex, false, nil
				}
			}

			nextIndex = lastID + 1
			return nextIndex, true, nil
		}
	}

	// All messages beyond overlapMessageCount should now be appended.
	msgsToAppend, err := msgs.Slice(overlapMessageCount, msgs.GetCount())
	if err != nil {
		clog.node_log("Error slicing overlapping messages: %v\n", err)
		return nextIndex, false, nil
	}
	if msgsToAppend.GetCount() > 0 {
		//clog.node_log("Attempting to append %v messages\n", len(msgsToAppend))
		lastID, err := clog.log.AppendMessages(msgsToAppend)
		if err != nil {
			clog.node_log("Error attempting to Append messages to the log: %v\n", err)
			return nextIndex, false, nil
		} else if clog.syncPolicy == WRITE_SYNC {
			err = clog.log.Sync()
			if err != nil {
				clog.node_log("Error attempting to sync Append messages to the log: %v\n", err)
				return nextIndex, false, nil
			}
		}

		nextIndex = lastID + 1
	} else {
		//clog.node_log("Call to append, but not message to add.\n")
	}

	return nextIndex, true, nil
}
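
Append implements the follower side of a Raft-style AppendEntries consistency check: verify that the entry at previousIndex carries previousTerm, skip entries that already match, truncate from the first term conflict, and append whatever remains. The standalone sketch below works over a simplified log holding only terms; payloads, persistence, the cache, and the leaderFirstIndex bootstrap are all omitted, and the names are hypothetical rather than the repo's implementation.

package main

import "fmt"

// appendEntries applies the AppendEntries consistency rules to a follower log
// that stores only terms; index 1 lives at have[0]. It returns the updated
// log, the next index the leader should send, and whether the check matched.
func appendEntries(have []int64, prevIndex, prevTerm int64, entries []int64) ([]int64, int64, bool) {
	lastIndex := int64(len(have))
	// The entry at prevIndex must exist and carry prevTerm.
	if prevIndex > lastIndex {
		return have, lastIndex + 1, false
	}
	if prevIndex > 0 && have[prevIndex-1] != prevTerm {
		return have, lastIndex + 1, false
	}
	// Skip entries we already hold; truncate at the first term conflict.
	i := 0
	for ; i < len(entries) && prevIndex+int64(i) < lastIndex; i++ {
		if have[prevIndex+int64(i)] != entries[i] {
			have = have[:prevIndex+int64(i)] // drop the conflicting suffix
			break
		}
	}
	have = append(have, entries[i:]...)
	return have, int64(len(have)) + 1, true
}

func main() {
	have := []int64{1, 1, 2, 3} // terms of entries 1..4
	// Leader says entry 2 has term 1 and sends terms {2, 4} for entries 3..4.
	have, next, ok := appendEntries(have, 2, 1, []int64{2, 4})
	fmt.Println(have, next, ok) // [1 1 2 4] 5 true
}
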