Example #1
// Cleans up the Value property, and removes rows that aren't visible to the current user
func filterViewResult(input walrus.ViewResult, user auth.User) (result walrus.ViewResult) {
	checkChannels := false
	var visibleChannels ch.TimedSet
	if user != nil {
		visibleChannels = user.InheritedChannels()
		checkChannels = !visibleChannels.Contains("*")
	}
	result.TotalRows = input.TotalRows
	result.Rows = make([]*walrus.ViewRow, 0, len(input.Rows)/2)
	for _, row := range input.Rows {
		value := row.Value.([]interface{})
		// value[0] is the array of channels; value[1] is the actual value
		if !checkChannels || channelsIntersect(visibleChannels, value[0].([]interface{})) {
			// Add this row:
			stripSyncProperty(row)
			result.Rows = append(result.Rows, &walrus.ViewRow{
				Key:   row.Key,
				Value: value[1],
				ID:    row.ID,
				Doc:   row.Doc,
			})
		}
	}
	return
}
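A minimal, self-contained sketch of the same filtering pattern, with the walrus and auth types replaced by hypothetical local stand-ins (inRow, outRow, and a plain map for the visible-channel set); the [channels, value] pair layout of row.Value mirrors the example above.

// filterRows keeps only rows whose channel list (Value[0]) intersects the
// visible set, and unwraps the actual payload (Value[1]), as in the example.
type inRow struct {
	Key   string
	Value []interface{} // Value[0]: channel list, Value[1]: actual value
}

type outRow struct {
	Key   string
	Value interface{}
}

func filterRows(rows []inRow, visible map[string]bool) []outRow {
	out := make([]outRow, 0, len(rows))
	for _, r := range rows {
		channels, _ := r.Value[0].([]interface{})
		for _, c := range channels {
			if name, ok := c.(string); ok && visible[name] {
				out = append(out, outRow{Key: r.Key, Value: r.Value[1]})
				break
			}
		}
	}
	return out
}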
Example #2
// Is any item of channels found in visibleChannels?
func channelsIntersect(visibleChannels ch.TimedSet, channels []interface{}) bool {
	for _, channel := range channels {
		if visibleChannels.Contains(channel.(string)) || channel == "*" {
			return true
		}
	}
	return false
}
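For illustration, here is the same intersection test expressed against a plain map-backed set (ch.TimedSet isn't defined in this listing, so the map type is an assumption); the "*" wildcard short-circuit matches the example above.

func intersects(visible map[string]bool, channels []interface{}) bool {
	for _, c := range channels {
		if name, ok := c.(string); ok && (name == "*" || visible[name]) {
			return true
		}
	}
	return false
}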
Example #3
func (user *userImpl) FilterToAvailableChannels(channels base.Set) ch.TimedSet {
	output := ch.TimedSet{}
	for channel := range channels {
		if channel == ch.AllChannelWildcard {
			return user.InheritedChannels().Copy()
		}
		output.AddChannel(channel, user.CanSeeChannelSince(channel))
	}
	return output
}
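A self-contained sketch of the same wildcard expansion, with ch.TimedSet approximated by a hypothetical map of channel name to the sequence at which access was granted. "*" short-circuits to a copy of the full inherited set, mirroring ch.AllChannelWildcard above; keeping only channels present in the inherited set approximates CanSeeChannelSince.

type timedSet map[string]uint64 // channel -> sequence when access was granted

func filterToAvailable(requested []string, inherited timedSet) timedSet {
	out := timedSet{}
	for _, ch := range requested {
		if ch == "*" {
			cp := timedSet{} // copy, so callers can't mutate the inherited set
			for k, v := range inherited {
				cp[k] = v
			}
			return cp
		}
		if since, ok := inherited[ch]; ok {
			out[ch] = since
		}
	}
	return out
}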
Example #4
// Cleans up the Value property, and removes rows that aren't visible to the current user
func filterViewResult(input sgbucket.ViewResult, user auth.User, applyChannelFiltering bool) (result sgbucket.ViewResult) {
	checkChannels := false
	var visibleChannels ch.TimedSet
	if user != nil {
		visibleChannels = user.InheritedChannels()
		hasStarChannel = !visibleChannels.Contains("*")
		if !applyChannelFiltering {
			return // this is an error
		}
	}
	result.Rows = make([]*sgbucket.ViewRow, 0, len(input.Rows)/2)
	for _, row := range input.Rows {
		if applyChannelFiltering {
			value, ok := row.Value.([]interface{})
			if ok {
				// value[0] is the array of channels; value[1] is the actual value
				if !checkChannels || channelsIntersect(visibleChannels, value[0].([]interface{})) {
					// Add this row:
					stripSyncProperty(row)
					result.Rows = append(result.Rows, &sgbucket.ViewRow{
						Key:   row.Key,
						Value: value[1],
						ID:    row.ID,
						Doc:   row.Doc,
					})
				}
			}
		} else {
			// Add the raw row:
			stripSyncProperty(row)
			result.Rows = append(result.Rows, &sgbucket.ViewRow{
				Key:   row.Key,
				Value: row.Value,
				ID:    row.ID,
				Doc:   row.Doc,
			})
		}
	}
	result.TotalRows = len(result.Rows)
	return
}
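One detail worth noting in this variant: row.Value is unpacked with the comma-ok assertion, so a malformed row is skipped rather than panicking the way a bare row.Value.([]interface{}) would. A hypothetical helper showing the same guard in isolation:

// channelsOf extracts the channel list from a [channels, value] pair,
// reporting false for anything malformed instead of panicking.
func channelsOf(value interface{}) ([]interface{}, bool) {
	pair, ok := value.([]interface{})
	if !ok || len(pair) < 2 {
		return nil, false
	}
	channels, ok := pair[0].([]interface{})
	return channels, ok
}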
Example #5
// Recomputes the set of roles a User has been granted access to by sync() functions.
// This is part of the ChannelComputer interface defined by the Authenticator.
func (context *DatabaseContext) ComputeRolesForUser(user auth.User) (channels.TimedSet, error) {
	var vres struct {
		Rows []struct {
			Value channels.TimedSet
		}
	}

	opts := map[string]interface{}{"stale": false, "key": user.Name()}
	if verr := context.Bucket.ViewCustom(DesignDocSyncGateway, ViewRoleAccess, opts, &vres); verr != nil {
		return nil, verr
	}
	// Merge the TimedSets from the view result:
	var result channels.TimedSet
	for _, row := range vres.Rows {
		if result == nil {
			result = row.Value
		} else {
			result.Add(row.Value)
		}
	}
	return result, nil
}
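The merge step can be sketched standalone, reusing the timedSet stand-in from the earlier sketch; keeping the first sequence seen per channel is an assumption about TimedSet.Add's semantics, made here for illustration only.

func mergeRows(rows []timedSet) timedSet {
	result := timedSet{}
	for _, row := range rows {
		for ch, seq := range row {
			if _, exists := result[ch]; !exists {
				result[ch] = seq // first grant wins (assumed Add semantics)
			}
		}
	}
	return result
}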
Example #6
// Recomputes the set of channels a User/Role has been granted access to by sync() functions.
// This is part of the ChannelComputer interface defined by the Authenticator.
func (context *DatabaseContext) ComputeChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error) {
	key := princ.Name()
	if _, ok := princ.(auth.User); !ok {
		key = "role:" + key // Roles are identified in access view by a "role:" prefix
	}

	var vres struct {
		Rows []struct {
			Value channels.TimedSet
		}
	}

	opts := map[string]interface{}{"stale": false, "key": key}
	if verr := context.Bucket.ViewCustom(DesignDocSyncGateway, ViewAccess, opts, &vres); verr != nil {
		return nil, verr
	}
	channelSet := channels.TimedSet{}
	for _, row := range vres.Rows {
		channelSet.Add(row.Value)
	}
	return channelSet, nil
}
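The key convention above — users looked up by bare name, roles by a "role:" prefix — lets both principal kinds share one access view. As a trivial sketch:

func accessViewKey(name string, isUser bool) string {
	if isUser {
		return name
	}
	return "role:" + name // matches the prefix used in the example
}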
Example #7
func (auth *Authenticator) rebuildRoles(user User) error {
	var roles ch.TimedSet
	if auth.channelComputer != nil {
		var err error
		roles, err = auth.channelComputer.ComputeRolesForUser(user)
		if err != nil {
			base.Warn("channelComputer.ComputeRolesForUser failed on user %s: %v", user.Name(), err)
			return err
		}
	}
	if roles == nil {
		roles = ch.TimedSet{} // it mustn't be nil; nil means it's unknown
	}

	if explicit := user.ExplicitRoles(); explicit != nil {
		roles.Add(explicit)
	}

	base.LogTo("Access", "Computed roles for %q: %s", user.Name(), roles)
	user.setRolesSince(roles)
	return nil
}
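The rebuild sequence above follows a compute / default / merge shape: compute from the channel computer, substitute an empty (non-nil) set when the result is unknown, then fold in explicit grants. A sketch using the timedSet stand-in from earlier, with the same assumed Add semantics:

func rebuild(computed, explicit timedSet) timedSet {
	roles := computed
	if roles == nil {
		roles = timedSet{} // empty means "no roles"; nil would mean "unknown"
	}
	for name, seq := range explicit {
		if _, ok := roles[name]; !ok {
			roles[name] = seq // assumed Add semantics: existing entries win
		}
	}
	return roles
}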
Example #8
// HTTP handler for _all_docs
func (h *handler) handleAllDocs() error {
	// http://wiki.apache.org/couchdb/HTTP_Bulk_Document_API
	includeDocs := h.getBoolQuery("include_docs")
	includeChannels := h.getBoolQuery("channels")
	includeAccess := h.getBoolQuery("access") && h.user == nil
	includeRevs := h.getBoolQuery("revs")
	includeSeqs := h.getBoolQuery("update_seq")

	// Get the doc IDs if this is a POST request:
	var explicitDocIDs []string
	if h.rq.Method == "POST" {
		input, err := h.readJSON()
		if err != nil {
			return err
		}
		keys, ok := input["keys"].([]interface{})
		explicitDocIDs = make([]string, len(keys))
		for i := 0; i < len(keys); i++ {
			if explicitDocIDs[i], ok = keys[i].(string); !ok {
				break
			}
		}
		if !ok {
			return base.HTTPErrorf(http.StatusBadRequest, "Bad/missing keys")
		}
	} else if h.rq.Method == "GET" {
		keys := h.getQuery("keys")
		if len(keys) > 0 {
			var queryKeys []string
			err := json.Unmarshal([]byte(keys), &queryKeys)
			if err != nil {
				err = base.HTTPErrorf(http.StatusBadRequest, "Bad keys")
			}
			if len(queryKeys) > 0 {
				explicitDocIDs = queryKeys
			}
		}
	}

	// Get the set of channels the user has access to; nil if user is admin or has access to channel "*"
	var availableChannels channels.TimedSet
	if h.user != nil {
		availableChannels = h.user.InheritedChannels()
		if availableChannels == nil {
			panic("no channels for user?")
		}
		if availableChannels.Contains(channels.UserStarChannel) {
			availableChannels = nil
		}
	}

	// Subroutines that filter a channel list down to the ones that the user has access to:
	filterChannels := func(channels []string) []string {
		if availableChannels == nil {
			return channels
		}
		dst := 0
		for _, ch := range channels {
			if availableChannels.Contains(ch) {
				channels[dst] = ch
				dst++
			}
		}
		if dst == 0 {
			return nil
		}
		return channels[0:dst]
	}
	filterChannelSet := func(channelMap channels.ChannelMap) []string {
		var result []string
		if availableChannels == nil {
			result = []string{}
		}
		for ch, rm := range channelMap {
			if availableChannels == nil || availableChannels.Contains(ch) {
				//Do not include channels doc removed from in this rev
				if rm == nil {
					result = append(result, ch)
				}
			}
		}
		return result
	}

	type allDocsRowValue struct {
		Rev      string              `json:"rev"`
		Channels []string            `json:"channels,omitempty"`
		Access   map[string]base.Set `json:"access,omitempty"` // for admins only
	}
	type allDocsRow struct {
		Key       string           `json:"key"`
		ID        string           `json:"id,omitempty"`
		Value     *allDocsRowValue `json:"value,omitempty"`
		Doc       db.Body          `json:"doc,omitempty"`
		UpdateSeq uint64           `json:"update_seq,omitempty"`
		Error     string           `json:"error,omitempty"`
		Status    int              `json:"status,omitempty"`
	}

	// Subroutine that creates a response row for a document:
	totalRows := 0
	createRow := func(doc db.IDAndRev, channels []string) *allDocsRow {
		row := &allDocsRow{Key: doc.DocID}
		value := allDocsRowValue{}

		// Filter channels to ones available to user, and bail out if inaccessible:
		if explicitDocIDs == nil {
			if channels = filterChannels(channels); channels == nil {
				return nil // silently skip this doc
			}
		}

		if explicitDocIDs != nil || includeDocs || includeAccess {
			// Fetch the document body and other metadata that lives with it:
			body, channelSet, access, roleAccess, _, _, err := h.db.GetRevAndChannels(doc.DocID, doc.RevID, includeRevs)
			if err != nil {
				row.Status, _ = base.ErrorAsHTTPStatus(err)
				return row
			} else if body["_removed"] != nil {
				row.Status = http.StatusForbidden
				return row
			}
			if explicitDocIDs != nil {
				if channels = filterChannelSet(channelSet); channels == nil {
					row.Status = http.StatusForbidden
					return row
				}
				doc.RevID = body["_rev"].(string)
			}
			if includeDocs {
				row.Doc = body
			}
			if includeAccess && (access != nil || roleAccess != nil) {
				value.Access = map[string]base.Set{}
				for userName, channels := range access {
					value.Access[userName] = channels.AsSet()
				}
				for roleName, channels := range roleAccess {
					value.Access["role:"+roleName] = channels.AsSet()
				}
			}
		}

		row.Value = &value
		row.ID = doc.DocID
		value.Rev = doc.RevID
		if includeSeqs {
			row.UpdateSeq = doc.Sequence
		}
		if includeChannels {
			row.Value.Channels = channels
		}
		return row
	}

	// Subroutine that writes a response entry for a document:
	writeDoc := func(doc db.IDAndRev, channels []string) bool {
		row := createRow(doc, channels)
		if row != nil {
			if row.Status >= 300 {
				row.Error = base.CouchHTTPErrorName(row.Status)
			}
			if totalRows > 0 {
				h.response.Write([]byte(","))
			}
			totalRows++
			h.addJSON(row)
			return true
		}
		return false
	}

	var options db.ForEachDocIDOptions
	options.Startkey = h.getQuery("startkey")
	options.Endkey = h.getQuery("endkey")
	options.Limit = h.getIntQuery("limit", 0)

	// Now it's time to actually write the response!
	lastSeq, _ := h.db.LastSequence()
	h.setHeader("Content-Type", "application/json")
	h.response.Write([]byte(`{"rows":[` + "\n"))

	if explicitDocIDs != nil {
		count := uint64(0)
		for _, docID := range explicitDocIDs {
			writeDoc(db.IDAndRev{DocID: docID, RevID: "", Sequence: 0}, nil)
			count++
			if options.Limit > 0 && count == options.Limit {
				break
			}
		}
	} else {
		if err := h.db.ForEachDocID(writeDoc, options); err != nil {
			return err
		}
	}

	h.response.Write([]byte(fmt.Sprintf("],\n"+`"total_rows":%d,"update_seq":%d}`,
		totalRows, lastSeq)))
	return nil
}
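The filterChannels closure above uses the classic in-place filter: a write index trails the read loop, so no second slice is allocated and relative order is preserved. Standalone form of the same pattern:

func keepVisible(channels []string, visible map[string]bool) []string {
	dst := 0
	for _, ch := range channels {
		if visible[ch] {
			channels[dst] = ch // overwrite in place; dst never passes the read index
			dst++
		}
	}
	if dst == 0 {
		return nil // same "nil means inaccessible" convention as the example
	}
	return channels[:dst]
}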
Example #9
// Returns the (ordered) union of all of the changes made to multiple channels.
func (db *Database) VectorMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error) {
	to := ""
	var userVbNo uint16
	if db.user != nil && db.user.Name() != "" {
		to = fmt.Sprintf("  (to %s)", db.user.Name())
		userVbNo = uint16(db.Bucket.VBHash(db.user.DocID()))
	}

	base.LogTo("Changes+", "Vector MultiChangesFeed(%s, %+v) ... %s", chans, options, to)
	output := make(chan *ChangeEntry, 50)

	go func() {
		var cumulativeClock *base.SyncSequenceClock
		var lastHashedValue string
		hashedEntryCount := 0
		defer func() {
			base.LogTo("Changes+", "MultiChangesFeed done %s", to)
			close(output)
		}()

		var changeWaiter *changeWaiter
		var userCounter uint64     // Wait counter used to identify changes to the user document
		var addedChannels base.Set // Tracks channels added to the user during changes processing.
		var userChanged bool       // Whether the user document has changed

		// Restrict to available channels, expand wild-card, and find since when these channels
		// have been available to the user:
		var channelsSince channels.TimedSet
		if db.user != nil {
			channelsSince = db.user.FilterToAvailableChannels(chans)
		} else {
			channelsSince = channels.AtSequence(chans, 0)
		}

		if options.Wait {
			changeWaiter = db.startChangeWaiter(channelsSince.AsSet())
			userCounter = changeWaiter.CurrentUserCount()
			db.initializePrincipalPolling(changeWaiter.GetUserKeys())
		}

		cumulativeClock = base.NewSyncSequenceClock()
		cumulativeClock.SetTo(getChangesClock(options.Since))

		// This loop is used to re-run the fetch after every database change, in Wait mode
	outer:
		for {
			// Get the last polled stable sequence.  We don't return anything later than stable sequence in each iteration
			stableClock, err := db.changeCache.GetStableClock(true)
			if err != nil {
				base.Warn("MultiChangesFeed got error reading stable sequence: %v", err)
				return
			}

			// Updates the changeWaiter to the current set of available channels.
			if changeWaiter != nil {
				changeWaiter.UpdateChannels(channelsSince)
			}
			base.LogTo("Changes+", "MultiChangesFeed: channels expand to %#v ... %s", channelsSince.String(), to)

			// Build the channel feeds.
			feeds, err := db.initializeChannelFeeds(channelsSince, options, addedChannels, userVbNo)
			if err != nil {
				return
			}

			// This loop reads the available entries from all the feeds in parallel, merges them,
			// and writes them to the output channel:
			current := make([]*ChangeEntry, len(feeds))
			var sentSomething bool
			nextEntry := getNextSequenceFromFeeds(current, feeds)
			for {
				minEntry := nextEntry

				if minEntry == nil {
					break // Exit the loop when there are no more entries
				}

				// Calculate next entry here, to help identify whether minEntry is the last entry we're sending,
				// to guarantee hashing
				nextEntry = getNextSequenceFromFeeds(current, feeds)

				if options.ActiveOnly {
					if minEntry.Deleted || minEntry.allRemoved {
						continue
					}
				}

				// Don't send any entries later than the stable sequence
				if stableClock.GetSequence(minEntry.Seq.vbNo) < minEntry.Seq.Seq {
					continue
				}

				// Add the doc body or the conflicting rev IDs, if those options are set:
				if options.IncludeDocs || options.Conflicts {
					db.addDocToChangeEntry(minEntry, options)
				}

				// Clock handling
				if minEntry.Seq.TriggeredBy == 0 {
					// Update the cumulative clock, and stick it on the entry.
					cumulativeClock.SetMaxSequence(minEntry.Seq.vbNo, minEntry.Seq.Seq)
					// Force new hash generation for non-continuous changes feeds if this is the last entry to be sent - either
					// because there are no more entries in the channel feeds, or we're going to hit the limit.
					forceHash := !options.Continuous && (nextEntry == nil || options.Limit == 1)
					lastHashedValue = db.calculateHashWhenNeeded(
						options,
						minEntry,
						cumulativeClock,
						&hashedEntryCount,
						lastHashedValue,
						forceHash,
					)

				} else {
					// For backfill (triggered by), we don't want to update the cumulative clock.  All entries triggered by the
					// same sequence reference the same triggered by clock, so it should only need to get hashed once.
					// If this is the first entry for this triggered by, initialize the triggered by clock's
					// hash value.
					if minEntry.Seq.TriggeredByClock.GetHashedValue() == "" {
						cumulativeClock.SetMaxSequence(minEntry.Seq.TriggeredByVbNo, minEntry.Seq.TriggeredBy)
						clockHash, err := db.SequenceHasher.GetHash(cumulativeClock)
						if err != nil {
							base.Warn("Error calculating hash for triggered by clock:%v", base.PrintClock(cumulativeClock))
						} else {
							minEntry.Seq.TriggeredByClock.SetHashedValue(clockHash)
						}
					}
				}

				// Send the entry, and repeat the loop:

				base.LogTo("Changes+", "MultiChangesFeed sending %+v %s", minEntry, to)
				select {
				case <-options.Terminator:
					return
				case output <- minEntry:
				}
				sentSomething = true

				// Stop when we hit the limit (if any):
				if options.Limit > 0 {
					options.Limit--
					if options.Limit == 0 {
						break outer
					}
				}
			}

			if !options.Continuous && (sentSomething || changeWaiter == nil) {
				break
			}

			// Update options.Since for use in the next outer loop iteration.
			options.Since.Clock = cumulativeClock

			// If nothing found, and in wait mode: wait for the db to change, then run again.
			// First notify the reader that we're waiting by sending a nil.
			base.LogTo("Changes+", "MultiChangesFeed waiting... %s", to)
			output <- nil

		waitForChanges:
			for {
				waitResponse := changeWaiter.Wait()
				if waitResponse == WaiterClosed {
					break outer
				} else if waitResponse == WaiterHasChanges {
					select {
					case <-options.Terminator:
						return
					default:
						break waitForChanges
					}
				} else if waitResponse == WaiterCheckTerminated {
					// Check whether I was terminated while waiting for a change.  If not, resume wait.
					select {
					case <-options.Terminator:
						return
					default:
					}
				}
			}

			// Before checking again, update the User object in case its channel access has
			// changed while waiting:
			userChanged, userCounter, addedChannels, err = db.checkForUserUpdates(userCounter, changeWaiter)
			if userChanged && db.user != nil {
				channelsSince = db.user.FilterToAvailableChannels(chans)
			}
			if err != nil {
				change := makeErrorEntry("User not found during reload - terminating changes feed")
				base.LogTo("Changes+", "User not found during reload - terminating changes feed with entry %+v", change)
				output <- &change
				return
			}
		}
	}()

	return output, nil
}
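Every send above is wrapped in a select against options.Terminator, so a reader that goes away can't leave the producer goroutine blocked on the output channel forever. The pattern in isolation, with hypothetical channel types:

// send delivers v unless the consumer has signalled termination first.
func send(output chan<- int, terminator <-chan struct{}, v int) bool {
	select {
	case <-terminator:
		return false // consumer gone; caller should stop producing
	case output <- v:
		return true
	}
}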
Example #10
// Updates or creates a principal from a PrincipalConfig structure.
func (dbc *DatabaseContext) UpdatePrincipal(newInfo PrincipalConfig, isUser bool, allowReplace bool) (replaced bool, err error) {
	// Get the existing principal, or if this is a POST make sure there isn't one:
	var princ auth.Principal
	var user auth.User
	authenticator := dbc.Authenticator()
	if isUser {
		isValid, reason := newInfo.IsPasswordValid(dbc.AllowEmptyPassword)
		if !isValid {
			err = base.HTTPErrorf(http.StatusBadRequest, reason)
			return
		}
		user, err = authenticator.GetUser(*newInfo.Name)
		princ = user
	} else {
		princ, err = authenticator.GetRole(*newInfo.Name)
	}
	if err != nil {
		return
	}

	changed := false
	replaced = (princ != nil)
	if !replaced {
		// If user/role didn't exist already, instantiate a new one:
		if isUser {
			user, err = authenticator.NewUser(*newInfo.Name, "", nil)
			princ = user
		} else {
			princ, err = authenticator.NewRole(*newInfo.Name, nil)
		}
		if err != nil {
			return
		}
		changed = true
	} else if !allowReplace {
		err = base.HTTPErrorf(http.StatusConflict, "Already exists")
		return
	}

	updatedChannels := princ.ExplicitChannels()
	if updatedChannels == nil {
		updatedChannels = ch.TimedSet{}
	}
	if !updatedChannels.Equals(newInfo.ExplicitChannels) {
		changed = true
	}

	var updatedRoles ch.TimedSet

	// Then the user-specific fields like roles:
	if isUser {
		if newInfo.Email != user.Email() {
			user.SetEmail(newInfo.Email)
			changed = true
		}
		if newInfo.Password != nil {
			user.SetPassword(*newInfo.Password)
			changed = true
		}
		if newInfo.Disabled != user.Disabled() {
			user.SetDisabled(newInfo.Disabled)
			changed = true
		}

		updatedRoles = user.ExplicitRoles()
		if updatedRoles == nil {
			updatedRoles = ch.TimedSet{}
		}
		if !updatedRoles.Equals(base.SetFromArray(newInfo.ExplicitRoleNames)) {
			changed = true
		}

	}

	// And finally save the Principal:
	if changed {
		// Update the persistent sequence number of this principal (only allocate a sequence when needed - issue #673):
		nextSeq := uint64(0)
		if dbc.writeSequences() {
			var err error
			nextSeq, err = dbc.sequences.nextSequence()
			if err != nil {
				return replaced, err
			}
			princ.SetSequence(nextSeq)
		}

		// Now update the Principal object from the properties in the request, first the channels:
		if updatedChannels.UpdateAtSequence(newInfo.ExplicitChannels, nextSeq) {
			princ.SetExplicitChannels(updatedChannels)
		}

		if isUser {
			if updatedRoles.UpdateAtSequence(base.SetFromArray(newInfo.ExplicitRoleNames), nextSeq) {
				user.SetExplicitRoles(updatedRoles)
			}
		}
		err = authenticator.Save(princ)
	}
	return
}
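The sequence-stamping step relies on UpdateAtSequence. A plausible sketch of its behavior, using the timedSet stand-in from earlier, keeps the original grant sequence for channels that survive and stamps newly granted ones with the fresh sequence; this is an assumption about the real ch.TimedSet method, which isn't shown here.

func updateAtSequence(old timedSet, names []string, seq uint64) (timedSet, bool) {
	updated := timedSet{}
	changed := len(names) != len(old)
	for _, name := range names {
		if since, ok := old[name]; ok {
			updated[name] = since // existing grant keeps its sequence
		} else {
			updated[name] = seq // new grant gets the just-allocated sequence
			changed = true
		}
	}
	return updated, changed
}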
Example #11
// Returns the (ordered) union of all of the changes made to multiple channels.
func (db *Database) SimpleMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error) {
	to := ""
	if db.user != nil && db.user.Name() != "" {
		to = fmt.Sprintf("  (to %s)", db.user.Name())
	}

	base.LogTo("Changes", "MultiChangesFeed(%s, %+v) ... %s", chans, options, to)
	output := make(chan *ChangeEntry, 50)

	go func() {
		defer func() {
			base.LogTo("Changes", "MultiChangesFeed done %s", to)
			close(output)
		}()

		var changeWaiter *changeWaiter
		var lowSequence uint64
		var lateSequenceFeeds map[string]*lateSequenceFeed
		var userCounter uint64     // Wait counter used to identify changes to the user document
		var addedChannels base.Set // Tracks channels added to the user during changes processing.
		var userChanged bool       // Whether the user document has changed in a given iteration loop

		// lowSequence is used to send composite keys to clients, so that they can obtain any currently
		// skipped sequences in a future iteration or request.
		oldestSkipped := db.changeCache.getOldestSkippedSequence()
		if oldestSkipped > 0 {
			lowSequence = oldestSkipped - 1
		} else {
			lowSequence = 0
		}

		// Restrict to available channels, expand wild-card, and find since when these channels
		// have been available to the user:
		var channelsSince channels.TimedSet
		if db.user != nil {
			channelsSince = db.user.FilterToAvailableChannels(chans)
		} else {
			channelsSince = channels.AtSequence(chans, 0)
		}

		if options.Wait {
			options.Wait = false
			changeWaiter = db.startChangeWaiter(channelsSince.AsSet())
			userCounter = changeWaiter.CurrentUserCount()
		}

		// If a request has a low sequence that matches the current lowSequence,
		// ignore the low sequence.  This avoids infinite looping of the records between
		// low::high.  It also means any additional skipped sequences between low::high won't
		// be sent until low arrives or is abandoned.
		if options.Since.LowSeq != 0 && options.Since.LowSeq == lowSequence {
			options.Since.LowSeq = 0
		}

		// For a continuous feed, initialise the lateSequenceFeeds that track late-arriving sequences
		// to the channel caches.
		if options.Continuous {
			lateSequenceFeeds = make(map[string]*lateSequenceFeed)
		}

		// This loop is used to re-run the fetch after every database change, in Wait mode
	outer:
		for {

			// Updates the changeWaiter to the current set of available channels
			if changeWaiter != nil {
				changeWaiter.UpdateChannels(channelsSince)
			}
			base.LogTo("Changes+", "MultiChangesFeed: channels expand to %#v ... %s", channelsSince, to)

			// lowSequence is used to send composite keys to clients, so that they can obtain any currently
			// skipped sequences in a future iteration or request.
			oldestSkipped = db.changeCache.getOldestSkippedSequence()
			if oldestSkipped > 0 {
				lowSequence = oldestSkipped - 1
			} else {
				lowSequence = 0
			}

			// Populate the parallel arrays of channels and names:
			feeds := make([]<-chan *ChangeEntry, 0, len(channelsSince))
			names := make([]string, 0, len(channelsSince))

			// Get read lock for late-arriving sequences, to avoid sending the same late arrival in
			// two different changes iterations.  e.g. without the RLock, a late-arriving sequence
			// could be written to channel X during one iteration, and channel Y during another.  Users
			// with access to both channels would see two versions on the feed.
			for name, vbSeqAddedAt := range channelsSince {
				chanOpts := options
				seqAddedAt := vbSeqAddedAt.Sequence

				// Check whether requires backfill based on addedChannels in this _changes feed
				isNewChannel := false
				if addedChannels != nil {
					_, isNewChannel = addedChannels[name]
				}

				// Check whether requires backfill based on current sequence, seqAddedAt
				// Triggered by handling:
				//   1. options.Since.TriggeredBy == seqAddedAt : We're in the middle of backfill for this channel, based
				//    on the access grant in sequence options.Since.TriggeredBy.  Normally the entire backfill would be done in one
				//    changes feed iteration, but this can be split over multiple iterations when 'limit' is used.
				//   2. options.Since.TriggeredBy == 0 : Not currently doing a backfill
				//   3. options.Since.TriggeredBy != 0 and <= seqAddedAt: We're in the middle of a backfill for another channel, but the backfill for
				//     this channel is still pending.  Initiate the backfill for this channel - will be ordered below in the usual way (iterating over all channels)

				// Backfill required when seqAddedAt is before current sequence
				backfillRequired := seqAddedAt > 1 && options.Since.Before(SequenceID{Seq: seqAddedAt})

				// Ensure backfill isn't already in progress for this seqAddedAt
				backfillPending := options.Since.TriggeredBy == 0 || options.Since.TriggeredBy < seqAddedAt

				if isNewChannel || (backfillRequired && backfillPending) {
					// Newly added channel so initiate backfill:
					chanOpts.Since = SequenceID{Seq: 0, TriggeredBy: seqAddedAt}
				}
				feed, err := db.changesFeed(name, chanOpts)
				if err != nil {
					base.Warn("MultiChangesFeed got error reading changes feed %q: %v", name, err)
					return
				}
				feeds = append(feeds, feed)
				names = append(names, name)

				// Late sequence handling - for out-of-order sequences prior to options.Since that
				// have arrived in the channel cache since this changes request started.  Only need for
				// continuous feeds - one-off changes requests only need the standard channel cache.
				if options.Continuous {
					lateSequenceFeedHandler := lateSequenceFeeds[name]
					if lateSequenceFeedHandler != nil {
						latefeed, err := db.getLateFeed(lateSequenceFeedHandler)
						if err != nil {
							base.Warn("MultiChangesFeed got error reading late sequence feed %q: %v", name, err)
						} else {
							// Mark feed as actively used in this iteration.  Used to remove lateSequenceFeeds
							// when the user loses channel access
							lateSequenceFeedHandler.active = true
							feeds = append(feeds, latefeed)
							names = append(names, fmt.Sprintf("late_%s", name))
						}

					} else {
						// Initialize lateSequenceFeeds[name] for next iteration
						lateSequenceFeeds[name] = db.newLateSequenceFeed(name)
					}
				}
			}
			// If the user object has changed, create a special pseudo-feed for it:
			if db.user != nil {
				feeds, names = db.appendUserFeed(feeds, names, options)
			}

			current := make([]*ChangeEntry, len(feeds))

			// This loop reads the available entries from all the feeds in parallel, merges them,
			// and writes them to the output channel:
			var sentSomething bool
			for {
				// Read more entries to fill up the current[] array:
				for i, cur := range current {
					if cur == nil && feeds[i] != nil {
						var ok bool
						current[i], ok = <-feeds[i]
						if !ok {
							feeds[i] = nil
						}
					}
				}

				// Find the current entry with the minimum sequence:
				minSeq := MaxSequenceID
				var minEntry *ChangeEntry
				for _, cur := range current {
					if cur != nil && cur.Seq.Before(minSeq) {
						minSeq = cur.Seq
						minEntry = cur
					}
				}

				if minEntry == nil {
					break // Exit the loop when there are no more entries
				}

				// Clear the current entries for the sequence just sent:
				if minEntry.Removed != nil {
					minEntry.allRemoved = true
				}
				for i, cur := range current {
					if cur != nil && cur.Seq == minSeq {
						current[i] = nil
						// Track whether this is a removal from all user's channels
						if cur.Removed == nil && minEntry.allRemoved == true {
							minEntry.allRemoved = false
						}
						// Also concatenate the matching entries' Removed arrays:
						if cur != minEntry && cur.Removed != nil {
							if minEntry.Removed == nil {
								minEntry.Removed = cur.Removed
							} else {
								minEntry.Removed = minEntry.Removed.Union(cur.Removed)
							}
						}
					}
				}

				if options.ActiveOnly {
					if minEntry.Deleted || minEntry.allRemoved {
						continue
					}
				}

				// Update options.Since for use in the next outer loop iteration.  Only update
				// when minSeq is greater than the previous options.Since value - we don't want
				// to roll back the Since value when a late-arriving sequence is processed.
				if options.Since.Before(minSeq) {
					options.Since = minSeq
				}

				// Add the doc body or the conflicting rev IDs, if those options are set:
				if options.IncludeDocs || options.Conflicts {
					db.addDocToChangeEntry(minEntry, options)
				}

				// Update the low sequence on the entry we're going to send
				minEntry.Seq.LowSeq = lowSequence

				// Send the entry, and repeat the loop:
				base.LogTo("Changes+", "MultiChangesFeed sending %+v %s", minEntry, to)
				select {
				case <-options.Terminator:
					return
				case output <- minEntry:
				}
				sentSomething = true

				// Stop when we hit the limit (if any):
				if options.Limit > 0 {
					options.Limit--
					if options.Limit == 0 {
						break outer
					}
				}
			}

			if !options.Continuous && (sentSomething || changeWaiter == nil) {
				break
			}

			// If nothing found, and in wait mode: wait for the db to change, then run again.
			// First notify the reader that we're waiting by sending a nil.
			base.LogTo("Changes+", "MultiChangesFeed waiting... %s", to)
			output <- nil
		waitForChanges:
			for {
				waitResponse := changeWaiter.Wait()
				if waitResponse == WaiterClosed {
					break outer
				} else if waitResponse == WaiterHasChanges {
					select {
					case <-options.Terminator:
						return
					default:
						break waitForChanges
					}
				} else if waitResponse == WaiterCheckTerminated {
					// Check whether I was terminated while waiting for a change.  If not, resume wait.
					select {
					case <-options.Terminator:
						return
					default:
					}
				}
			}

			// Check whether user channel access has changed while waiting:
			var err error
			userChanged, userCounter, addedChannels, err = db.checkForUserUpdates(userCounter, changeWaiter)
			if err != nil {
				change := makeErrorEntry("User not found during reload - terminating changes feed")
				base.LogTo("Changes+", "User not found during reload - terminating changes feed with entry %+v", change)
				output <- &change
				return
			}
			if userChanged && db.user != nil {
				channelsSince = db.user.FilterToAvailableChannels(chans)
			}

			// Clean up inactive lateSequenceFeeds (because user has lost access to the channel)
			for channel, lateFeed := range lateSequenceFeeds {
				if !lateFeed.active {
					db.closeLateFeed(lateFeed)
					delete(lateSequenceFeeds, channel)
				} else {
					lateFeed.active = false
				}
			}
		}
	}()

	return output, nil
}
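The inner loop above is a k-way merge: keep one candidate entry per feed, repeatedly emit the minimum sequence, and refill only the slot that was consumed. A self-contained sketch of that merge over plain int feeds:

func mergeFeeds(feeds []<-chan int) []int {
	current := make([]int, len(feeds))
	filled := make([]bool, len(feeds))
	var merged []int
	for {
		// Refill any empty slot from its feed; nil out feeds that are drained.
		for i := range feeds {
			if !filled[i] && feeds[i] != nil {
				if v, ok := <-feeds[i]; ok {
					current[i], filled[i] = v, true
				} else {
					feeds[i] = nil
				}
			}
		}
		// Emit the minimum of the filled slots, consuming that slot.
		min := -1
		for i := range current {
			if filled[i] && (min == -1 || current[i] < current[min]) {
				min = i
			}
		}
		if min == -1 {
			return merged // all feeds drained
		}
		merged = append(merged, current[min])
		filled[min] = false
	}
}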