// CreateNewSegment generates a new Segment file in the given directory, starting at the given index.
// The Segment is returned in an open state.
func CreateNewSegment(topicName string, directory string, firstIndex int64, targetSize int) (*Segment, error) {
    seg := &Segment{target_max_segment_size: targetSize}
    seg.lock.Lock()
    defer seg.lock.Unlock()
    seg.node_log = utils.GetTopicLogger(topicName, "Segment")

    // Work out the filename.
    fileNamePrefix := fmt.Sprintf("%019d", firstIndex)
    seg.filename = path.Join(directory, fileNamePrefix+LOG_NAME_FORMAT_SUFFIX)
    seg.firstIndex = firstIndex
    seg.node_log("Creating new segment file %v for index %v\n", seg.filename, firstIndex)

    var err error
    seg.file, err = os.Create(seg.filename)
    if err != nil {
        seg.node_log("Error creating file %v: %v\n", seg.filename, err)
        return nil, err
    }

    seg.segmentOpen = true
    seg.filePosition = 0
    seg.discardBuffer = make([]byte, 0, default_discard_buffer_size)
    seg.lastAccessTime = time.Now()
    seg.lastModifiedTime = seg.lastAccessTime
    return seg, nil
}
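
// exampleCreateSegment is an illustrative sketch, not part of the package API: it
// shows how a caller might create the first segment for a new topic. The topic name
// and directory below are assumed values; DEFAULT_TARGET_MAX_SEGMENT_SIZE comes from
// this package.
func exampleCreateSegment() (*Segment, error) {
    seg, err := CreateNewSegment("orders", "/tmp/forest/orders", 1, DEFAULT_TARGET_MAX_SEGMENT_SIZE)
    if err != nil {
        // The segment file could not be created, e.g. the directory does not exist.
        return nil, err
    }
    // The returned segment is already open; on disk it is named with a zero-padded
    // 19-digit first index (0000000000000000001) plus LOG_NAME_FORMAT_SUFFIX.
    return seg, nil
}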

/*
SetupNode starts up the Raft node implementation for a given topic.
The connections are managed by the ServerNode. The Node provides methods that are used
by the RPCHandler for interaction with the commit log.
*/
func (node *Node) SetupNode(topic string, server *ServerNode, ourName string, ourPeers ConfigPeers, ourLog *commitlog.CommitLog, topicStore TopicPersistentStore) error {
    // Create the shutdown channel
    node.shutdownServer = make(chan string)
    node.node_log = utils.GetTopicLogger(topic, "Raft")

    // Apply our configuration
    node.topic = topic
    node.server = server
    node.log = ourLog
    node.store = topicStore
    node.node_log("Applying configuration\n")
    node.name = ourName
    node.peers = make(map[string]*Peer)
    node.ChangePeerConfiguration(ourPeers)
    node.commitIndex = NewCommitIndex()
    node.writeAggregator = NewQueueWriteAggregator(node)
    node.electionTimer = NewElectionTimer(node)

    // Load the log from disk
    node.node_log("Loading log info.\n")
    var err error
    node.currentTerm, node.votedFor, err = node.store.Load()
    if err != nil {
        node.node_log("Error loading persistent store: %v\n", err)
        return err
    }

    // Get the last term recorded in the log
    _, lastLogTerm, err := node.log.LastLogEntryInfo()
    if err != nil {
        node.node_log("Error determining the last log entry: %v\n", err)
        return err
    }
    if lastLogTerm > node.currentTerm {
        node.node_log("WARNING: Last term found in the log file is greater than the term stored for the topic. Defaulting to the last value in the log.\n")
        node.currentTerm = lastLogTerm
    }

    node.node_log("Log store - creating RPC handlers.\n")
    node.node_log("Node initialisation complete.\n")

    // If we are standalone then recalculate the commit index.
    if len(node.peers) == 0 && node.state != ORPHAN_NODE {
        node.node_log("Standalone peer configuration\n")
        node.commitIndex.UpdateCommitIndex(node)
        node.log.Commit(node.commitIndex.GetCommitIndex())
    }

    node.state = FOLLOWER_NODE
    return nil
}
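
// exampleSetupNode is an illustrative sketch, not part of the package API: it shows
// the shape of a SetupNode call, assuming the caller has already constructed the
// ServerNode, peer configuration, commit log and persistent store. The topic and
// node names are assumed values.
func exampleSetupNode(node *Node, server *ServerNode, peers ConfigPeers, clog *commitlog.CommitLog, store TopicPersistentStore) error {
    // SetupNode loads the persisted term and vote, reconciles them with the last
    // term recorded in the log, and leaves the node in the FOLLOWER_NODE state.
    return node.SetupNode("orders", server, "node-1", peers, clog, store)
}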

/*
LoadLog will open an existing DiskLogStorage structure from the given path or create a new one.
If existing files are found matching the LOG_NAME_FORMAT file format, then:
1 - Get a list of all files matching the pattern [0-9]*-forest-log
2 - Sort these and determine the highest starting index value
3 - Validate all message CRCs in the highest-index Logfile in case there was previously a crash
4 - Open the highest-index Logfile and generate the index / offset slice
If no existing log files are found then a new segment is created for first Index 1.
*/
func LoadLog(topicName string, loadpath string, configFuncs ...DiskLogConfigFunction) (*DiskLogStorage, error) {
    dlog := &DiskLogStorage{
        pathName:                loadpath,
        target_max_segment_size: DEFAULT_TARGET_MAX_SEGMENT_SIZE,
        cache_slot_size:         DEFAULT_CACHE_SLOT_SIZE,
    }
    for _, conf := range configFuncs {
        conf(dlog)
    }
    dlog.node_log = utils.GetTopicLogger(topicName, "DiskLogStorage")
    dlog.topicName = topicName
    dlog.segments = make([]*Segment, 0)
    dlog.cache = CreateCache(dlog.cache_slot_size)

    matches, err := filepath.Glob(path.Join(loadpath, LOG_NAME_FORMAT))
    if err != nil {
        dlog.node_log("Error opening disk log: %v\n", err)
        return nil, err
    }
    dlog.node_log("Found %v segment filename matches\n", len(matches))

    if len(matches) == 0 {
        dlog.node_log("Creating new disk log in %v\n", loadpath)
        firstSegment, err := CreateNewSegment(dlog.topicName, loadpath, 1, dlog.target_max_segment_size)
        if err != nil {
            dlog.node_log("Error trying to create a new disk log storage at location %v: %v\n", loadpath, err)
            return nil, err
        }
        dlog.segments = append(dlog.segments, firstSegment)
    } else {
        dlog.node_log("Loading existing disk log from %v\n", loadpath)
        sort.Strings(matches)
        for _, filename := range matches {
            dlog.segments = append(dlog.segments, ExistingSegment(topicName, filename, dlog.target_max_segment_size))
        }
        // Open and validate the last segment as this is what we'll write to
        err = dlog.segments[len(dlog.segments)-1].Open(true)
        if err != nil {
            dlog.node_log("Error opening latest segment.\n")
            return nil, err
        }
    }

    // Set up a goroutine to handle the closing of open segments that are not in use
    dlog.closeSegementsChannel = make(chan int)
    dlog.closeSegmentsShutdownChannel = make(chan *utils.ShutdownNotifier, 1)
    go dlog.closeSegmentsLoop()

    // Set up a goroutine to periodically check for segments to clean up
    dlog.segmentCleanupShutdownChannel = make(chan *utils.ShutdownNotifier, 1)
    go dlog.cleanupSegmentsLoop()

    return dlog, nil
}
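
// exampleLoadLog is an illustrative sketch, not part of the package API: it loads
// (or creates) the on-disk log for a topic using the package defaults. The topic
// name and path are assumed values.
func exampleLoadLog() (*DiskLogStorage, error) {
    dlog, err := LoadLog("orders", "/tmp/forest/orders")
    if err != nil {
        // Either the directory could not be scanned or the latest segment failed to open.
        return nil, err
    }
    // dlog now holds one Segment per existing file (or a fresh segment starting at
    // index 1) and has background goroutines closing and cleaning up idle segments.
    return dlog, nil
}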

// ExistingSegment creates a Segment for a segment file that already exists on disk,
// deriving the first index from the numeric filename prefix. The file itself is not
// opened here.
func ExistingSegment(topicName string, fullfilename string, targetSize int) *Segment {
    _, filename := path.Split(fullfilename)
    prefixIndex := strings.Index(filename, "-")
    firstIndex, _ := strconv.ParseInt(filename[:prefixIndex], 10, 64)
    segmentFileInfo, err := os.Stat(fullfilename)

    seg := &Segment{filename: fullfilename, firstIndex: firstIndex, target_max_segment_size: targetSize}
    seg.node_log = utils.GetTopicLogger(topicName, "Segment")
    seg.node_log("Existing segment, first index: %v for filename: %v\n", firstIndex, filename)
    seg.lastAccessTime = time.Now()
    if err != nil {
        seg.node_log("Warning: unable to stat segment file %v: %v\n", fullfilename, err)
        seg.lastModifiedTime = time.Now()
    } else {
        seg.lastModifiedTime = segmentFileInfo.ModTime()
    }
    return seg
}
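
// exampleExistingSegment is an illustrative sketch, not part of the package API: it
// attaches to a segment file already on disk and opens it, mirroring the
// open-and-validate call LoadLog makes on the newest segment. The path is an assumed
// value; the 19-digit prefix encodes the segment's first index.
func exampleExistingSegment() (*Segment, error) {
    seg := ExistingSegment("orders", "/tmp/forest/orders/0000000000000000042-forest-log", DEFAULT_TARGET_MAX_SEGMENT_SIZE)
    // ExistingSegment does not open the file; open it explicitly before reading or writing.
    if err := seg.Open(true); err != nil {
        return nil, err
    }
    return seg, nil
}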

/*
SetupLog is used at startup by the Node to read in the persisted state.
PersistentLogs are expected at this point to validate their storage and determine the current state.
commitIndex is the last committed index ID.
nextIndex is the index ID for the next message to be appended to this log.
error is populated if there are any unrecoverable issues loading the persisted state.
*/
func (clog *CommitLog) SetupLog(topicName string, store LogStorage) (err error) {
    clog.waitingReaders = sync.NewCond(clog.lock.RLocker())
    clog.node_log = utils.GetTopicLogger(topicName, "CommitLog")
    clog.inShutdown = false
    clog.log = store
    clog.commitIndex = 0
    clog.syncPolicy = DEFAULT_SYNC_POLICY

    if clog.syncPolicy == PERIODIC_SYNC {
        // Periodically flush the underlying storage to disk.
        go func() {
            timer := time.NewTimer(DEFAULT_PERIODIC_SYNC_PERIOD)
            for {
                <-timer.C
                clog.log.Sync()
                timer.Reset(DEFAULT_PERIODIC_SYNC_PERIOD)
            }
        }()
    }
    return nil
}
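
// exampleSetupCommitLog is an illustrative sketch, not part of the package API: it
// wires a CommitLog to a storage backend. It assumes the zero value of CommitLog is
// usable before SetupLog and that the supplied LogStorage (for example a loaded
// DiskLogStorage) has already validated its on-disk state; the topic name is an
// assumed value.
func exampleSetupCommitLog(store LogStorage) (*CommitLog, error) {
    clog := &CommitLog{}
    if err := clog.SetupLog("orders", store); err != nil {
        return nil, err
    }
    // If DEFAULT_SYNC_POLICY is PERIODIC_SYNC, a background goroutine now calls
    // store.Sync() every DEFAULT_PERIODIC_SYNC_PERIOD.
    return clog, nil
}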