// VectorMultiChangesFeed returns the (ordered) union of all of the changes made to multiple
// channels, as a buffered channel of *ChangeEntry that the caller drains.
//
// The feed runs in a background goroutine that repeatedly merges per-channel feeds, hashes
// sequence clocks for _changes responses, and (in Wait mode) blocks on a changeWaiter between
// iterations. The goroutine exits — closing the output channel via defer — when:
//   - reading the stable clock or building the channel feeds fails,
//   - options.Terminator is closed,
//   - options.Limit is exhausted,
//   - a non-continuous feed has sent its results (or has no waiter), or
//   - the user document disappears during reload (an error entry is sent first).
//
// NOTE(review): callers appear to rely on the nil sentinel sent on output before each wait
// (see "notify the reader that we're waiting") — confirm against the HTTP changes handler.
func (db *Database) VectorMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error) {
	to := "" // log suffix identifying the requesting user, e.g. " (to bob)"
	var userVbNo uint16
	if db.user != nil && db.user.Name() != "" {
		to = fmt.Sprintf(" (to %s)", db.user.Name())
		// vbucket of the user doc — used to detect user-doc changes in the feed machinery.
		userVbNo = uint16(db.Bucket.VBHash(db.user.DocID()))
	}

	base.LogTo("Changes+", "Vector MultiChangesFeed(%s, %+v) ... %s", chans, options, to)
	output := make(chan *ChangeEntry, 50)
	go func() {
		var cumulativeClock *base.SyncSequenceClock // running max clock over all sent entries
		var lastHashedValue string                  // most recent clock hash handed to calculateHashWhenNeeded
		hashedEntryCount := 0
		defer func() {
			base.LogTo("Changes+", "MultiChangesFeed done %s", to)
			close(output)
		}()

		var changeWaiter *changeWaiter
		var userCounter uint64     // Wait counter used to identify changes to the user document
		var addedChannels base.Set // Tracks channels added to the user during changes processing.
		var userChanged bool       // Whether the user document has changed

		// Restrict to available channels, expand wild-card, and find since when these channels
		// have been available to the user:
		var channelsSince channels.TimedSet
		if db.user != nil {
			channelsSince = db.user.FilterToAvailableChannels(chans)
		} else {
			// No user: treat all requested channels as available since sequence 0.
			channelsSince = channels.AtSequence(chans, 0)
		}

		if options.Wait {
			// Wait mode: set up notification plumbing before the first fetch so no
			// change between fetch and wait is missed.
			changeWaiter = db.startChangeWaiter(channelsSince.AsSet())
			userCounter = changeWaiter.CurrentUserCount()
			db.initializePrincipalPolling(changeWaiter.GetUserKeys())
		}

		// Seed the cumulative clock from the client's since value.
		cumulativeClock = base.NewSyncSequenceClock()
		cumulativeClock.SetTo(getChangesClock(options.Since))

		// This loop is used to re-run the fetch after every database change, in Wait mode
	outer:
		for {
			// Get the last polled stable sequence. We don't return anything later than the
			// stable sequence in each iteration.
			stableClock, err := db.changeCache.GetStableClock(true)
			if err != nil {
				base.Warn("MultiChangesFeed got error reading stable sequence: %v", err)
				return
			}

			// Update the changeWaiter to the current set of available channels (the set can
			// grow when the user gains channel access mid-feed).
			if changeWaiter != nil {
				changeWaiter.UpdateChannels(channelsSince)
			}
			base.LogTo("Changes+", "MultiChangesFeed: channels expand to %#v ... %s", channelsSince.String(), to)

			// Build the channel feeds.
			feeds, err := db.initializeChannelFeeds(channelsSince, options, addedChannels, userVbNo)
			if err != nil {
				return
			}

			// This loop reads the available entries from all the feeds in parallel, merges them,
			// and writes them to the output channel:
			current := make([]*ChangeEntry, len(feeds)) // per-feed cursor used by getNextSequenceFromFeeds
			var sentSomething bool
			nextEntry := getNextSequenceFromFeeds(current, feeds)
			for {
				minEntry := nextEntry
				if minEntry == nil {
					break // Exit the loop when there are no more entries
				}

				// Calculate the next entry here, to help identify whether minEntry is the last
				// entry we're sending, to guarantee hashing. (Done before any `continue` below,
				// so skipped entries still advance the merge.)
				nextEntry = getNextSequenceFromFeeds(current, feeds)

				// ActiveOnly: suppress deletions and entries removed from all channels.
				if options.ActiveOnly {
					if minEntry.Deleted || minEntry.allRemoved {
						continue
					}
				}

				// Don't send any entries later than the stable sequence.
				if stableClock.GetSequence(minEntry.Seq.vbNo) < minEntry.Seq.Seq {
					continue
				}

				// Add the doc body or the conflicting rev IDs, if those options are set:
				if options.IncludeDocs || options.Conflicts {
					db.addDocToChangeEntry(minEntry, options)
				}

				// Clock handling
				if minEntry.Seq.TriggeredBy == 0 {
					// Normal entry: update the cumulative clock, and stick it on the entry.
					cumulativeClock.SetMaxSequence(minEntry.Seq.vbNo, minEntry.Seq.Seq)

					// Force new hash generation for non-continuous changes feeds if this is the
					// last entry to be sent - either because there are no more entries in the
					// channel feeds, or we're going to hit the limit.
					// (Limit == 1 is checked BEFORE the decrement further down.)
					forceHash := false
					if options.Continuous == false && (nextEntry == nil || options.Limit == 1) {
						forceHash = true
					}
					lastHashedValue = db.calculateHashWhenNeeded(
						options,
						minEntry,
						cumulativeClock,
						&hashedEntryCount,
						lastHashedValue,
						forceHash,
					)
				} else {
					// For backfill (triggered by), we don't want to update the cumulative clock.
					// All entries triggered by the same sequence reference the same triggered-by
					// clock, so it should only need to get hashed once.
					// If this is the first entry for this triggered-by, initialize the
					// triggered-by clock's hash value.
					if minEntry.Seq.TriggeredByClock.GetHashedValue() == "" {
						cumulativeClock.SetMaxSequence(minEntry.Seq.TriggeredByVbNo, minEntry.Seq.TriggeredBy)
						clockHash, err := db.SequenceHasher.GetHash(cumulativeClock)
						if err != nil {
							// Hash failure is non-fatal: the entry goes out without a hashed clock.
							base.Warn("Error calculating hash for triggered by clock:%v", base.PrintClock(cumulativeClock))
						} else {
							minEntry.Seq.TriggeredByClock.SetHashedValue(clockHash)
						}
					}
				}

				// Send the entry, and repeat the loop:
				base.LogTo("Changes+", "MultiChangesFeed sending %+v %s", minEntry, to)
				select {
				case <-options.Terminator:
					return
				case output <- minEntry:
				}
				sentSomething = true

				// Stop when we hit the limit (if any). `break outer` ends the whole feed,
				// not just this merge pass.
				if options.Limit > 0 {
					options.Limit--
					if options.Limit == 0 {
						break outer
					}
				}
			}

			// One-shot feeds end after a pass that produced output (or when there is no
			// waiter to block on).
			if !options.Continuous && (sentSomething || changeWaiter == nil) {
				break
			}

			// Update options.Since for use in the next outer loop iteration.
			options.Since.Clock = cumulativeClock

			// If nothing found, and in wait mode: wait for the db to change, then run again.
			// First notify the reader that we're waiting by sending a nil.
			base.LogTo("Changes+", "MultiChangesFeed waiting... %s", to)
			output <- nil
		waitForChanges:
			for {
				waitResponse := changeWaiter.Wait()
				if waitResponse == WaiterClosed {
					break outer
				} else if waitResponse == WaiterHasChanges {
					select {
					case <-options.Terminator:
						return
					default:
						// Changes arrived and we weren't terminated: re-run the fetch.
						break waitForChanges
					}
				} else if waitResponse == WaiterCheckTerminated {
					// Check whether I was terminated while waiting for a change. If not, resume wait.
					select {
					case <-options.Terminator:
						return
					default:
					}
				}
			}

			// Before checking again, update the User object in case its channel access has
			// changed while waiting:
			userChanged, userCounter, addedChannels, err = db.checkForUserUpdates(userCounter, changeWaiter)
			if userChanged && db.user != nil {
				channelsSince = db.user.FilterToAvailableChannels(chans)
			}
			// NOTE(review): err is inspected after userChanged is consumed above — presumably
			// checkForUserUpdates returns userChanged=false on error; confirm at its definition.
			if err != nil {
				change := makeErrorEntry("User not found during reload - terminating changes feed")
				base.LogTo("Changes+", "User not found during reload - terminating changes feed with entry %+v", change)
				output <- &change
				return
			}
		}
	}()

	return output, nil
}