Example 1
func (db *DatabaseContext) singleChannelStats(kvIndex *kvChangeIndex, channelName string) (*ChannelStats, error) {

	channelStats := &ChannelStats{
		Name: channelName,
	}

	// Retrieve the index partition definitions needed to build a clean channel index reader
	indexPartitions, err := kvIndex.getIndexPartitions()
	if err != nil {
		return nil, err
	}

	// Retrieve index stats from bucket
	channelIndex := NewKvChannelIndex(channelName, kvIndex.reader.indexReadBucket, indexPartitions, nil)
	indexClock, err := channelIndex.loadChannelClock()
	if err == nil {
		channelStats.IndexStats = ChannelIndexStats{}
		channelStats.IndexStats.Clock = base.PrintClock(indexClock)
		channelStats.IndexStats.ClockHash = db.SequenceHasher.calculateHash(indexClock)
	}

	// Retrieve polling stats from kvIndex
	pollingChannelIndex := kvIndex.reader.getChannelReader(channelName)
	if pollingChannelIndex != nil {
		lastPolledClock := pollingChannelIndex.lastPolledChannelClock
		if lastPolledClock != nil {
			channelStats.PollingStats = ChannelPollingStats{}
			channelStats.PollingStats.Clock = base.PrintClock(lastPolledClock)
			channelStats.PollingStats.ClockHash = db.SequenceHasher.calculateHash(lastPolledClock)
		}
	}
	return channelStats, nil
}
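
singleChannelStats treats the two stat groups as independent and best-effort: a failure loading the index clock, or the absence of a polling channel reader, simply leaves that group empty rather than failing the whole call. A minimal standalone sketch of that pattern, with hypothetical loader functions standing in for the bucket and polling reads:

package main

import "fmt"

// Hypothetical stand-ins for the Sync Gateway types: only the fields needed
// to show the control flow.
type ChannelStats struct {
	Name         string
	IndexClock   string // populated only if the index clock loads cleanly
	PollingClock string // populated only if a reader has polled the channel
}

// channelStatsBestEffort mirrors the shape of singleChannelStats: each stat
// group is optional, and a failed load leaves its fields empty instead of
// failing the whole call.
func channelStatsBestEffort(name string,
	loadIndexClock func(string) (string, error),
	loadPolledClock func(string) (string, bool)) *ChannelStats {
	stats := &ChannelStats{Name: name}
	if clock, err := loadIndexClock(name); err == nil {
		stats.IndexClock = clock
	}
	if clock, ok := loadPolledClock(name); ok {
		stats.PollingClock = clock
	}
	return stats
}

func main() {
	stats := channelStatsBestEffort("ABC",
		func(string) (string, error) { return "[vb0:42]", nil }, // fake index clock read
		func(string) (string, bool) { return "", false })        // no polling reader for this channel
	fmt.Printf("%+v\n", stats)
}
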
Example 2
func (k *kvChannelIndex) writeClockCas(updateClock base.SequenceClock) error {
	// Apply the update once up front, for the first CAS write attempt
	k.clock.UpdateWithClock(updateClock)
	value, err := k.clock.Marshal()
	if err != nil {
		base.Warn("Error marshalling clock [%s] for update:%+v", base.PrintClock(k.clock), err)
		return err
	}
	casOut, err := base.WriteCasRaw(k.indexBucket, getChannelClockKey(k.channelName), value, k.clock.Cas(), 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: the following is invoked on CAS failure, and may be called multiple times
		writeErr := k.clock.Unmarshal(value)
		if writeErr != nil {
			base.Warn("Error unmarshalling clock during update", writeErr)
			return nil, writeErr
		}
		k.clock.UpdateWithClock(updateClock)
		return k.clock.Marshal()
	})

	if err != nil {
		return err
	}

	k.clock.SetCas(casOut)
	return nil
}
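
writeClockCas delegates retries to base.WriteCasRaw: when the CAS write loses a race, the callback re-reads the latest stored clock, merges the update into it, and re-marshals for another attempt. A self-contained sketch of that optimistic-concurrency loop against a hypothetical in-memory bucket (the names and signatures here are illustrative, not the real bucket API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// memBucket is a hypothetical in-memory stand-in for a CAS-capable bucket.
type memBucket struct {
	mu  sync.Mutex
	val []byte
	cas uint64
}

var errCasMismatch = errors.New("cas mismatch")

// writeCas stores val only if the caller's CAS token matches the current one.
func (b *memBucket) writeCas(val []byte, cas uint64) (uint64, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if cas != b.cas {
		return 0, errCasMismatch
	}
	b.val, b.cas = val, b.cas+1
	return b.cas, nil
}

// get returns the current value and CAS token.
func (b *memBucket) get() ([]byte, uint64) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.val, b.cas
}

// writeCasWithRetry mirrors the shape of base.WriteCasRaw: on a CAS mismatch
// it re-reads the stored value and asks the callback to rebuild the update,
// like the Unmarshal/UpdateWithClock/Marshal callback above.
func writeCasWithRetry(b *memBucket, value []byte, cas uint64,
	update func(current []byte) ([]byte, error)) (uint64, error) {
	for {
		casOut, err := b.writeCas(value, cas)
		if err == nil {
			return casOut, nil
		}
		if !errors.Is(err, errCasMismatch) {
			return 0, err
		}
		current, currentCas := b.get()
		if value, err = update(current); err != nil {
			return 0, err
		}
		cas = currentCas
	}
}

func main() {
	b := &memBucket{}
	casOut, err := writeCasWithRetry(b, []byte("clock-v1"), 0, func(current []byte) ([]byte, error) {
		// Merge step: the real code unmarshals, applies UpdateWithClock, and re-marshals.
		return append(current, []byte("+merged")...), nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(casOut, string(b.val))
}
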
Example 3
// Determines whether the clock hash should be calculated for the entry. For non-continuous changes feeds, hash is only calculated for
// the last entry sent (for use in last_seq), and is done in the defer for the main VectorMultiChangesFeed.
// For continuous changes feeds, we want to calculate the hash for every nth entry, where n=kContinuousHashFrequency.  To ensure that
// clients can restart a new changes feed based on any sequence in the continuous feed, we set the last hash calculated as the LowHash
// value on the sequence.
func (db *Database) calculateHashWhenNeeded(options ChangesOptions, entry *ChangeEntry, cumulativeClock base.SequenceClock, hashedEntryCount *int, continuousLastHash string) string {

	if !options.Continuous {
		// For non-continuous, only need to calculate hash for lastSent entry.  Initialize empty clock and return
		entry.Seq.Clock = base.NewSyncSequenceClock()
	} else {
		// When hashedEntryCount == 0, recalculate hash
		if *hashedEntryCount == 0 {
			clockHash, err := db.SequenceHasher.GetHash(cumulativeClock)
			if err != nil {
				base.Warn("Error calculating hash for clock:%v", base.PrintClock(cumulativeClock))
				return continuousLastHash
			} else {
				entry.Seq.Clock = base.NewSyncSequenceClock()
				entry.Seq.Clock.SetHashedValue(clockHash)
				continuousLastHash = clockHash
			}
			*hashedEntryCount = kContinuousHashFrequency
		} else {
			entry.Seq.LowHash = continuousLastHash
			*hashedEntryCount--
		}
	}
	return continuousLastHash
}
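
Example 4 is a later revision of the same function: the continuous-only gate moves out to the caller, the hash interval comes from db.SequenceHasher.getHashFrequency() rather than the kContinuousHashFrequency constant, and a new forceHash parameter lets the caller guarantee a hash on the final entry of a one-shot feed.
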
Example 4
// Determines whether the clock hash should be calculated for the entry. For non-continuous changes feeds, hash is only calculated for
// the last entry sent (for use in last_seq), and is done in the defer for the main VectorMultiChangesFeed.
// For continuous changes feeds, we want to calculate the hash for every nth entry, where n is the configured hash frequency (db.SequenceHasher.getHashFrequency()).  To ensure that
// clients can restart a new changes feed based on any sequence in the continuous feed, we set the last hash calculated as the LowHash
// value on the sequence.
func (db *Database) calculateHashWhenNeeded(options ChangesOptions, entry *ChangeEntry, cumulativeClock base.SequenceClock, hashedEntryCount *int, lastHashedValue string, forceHash bool) string {

	// When hashedEntryCount == 0 or forceHash is set, recalculate the hash
	if *hashedEntryCount == 0 || forceHash {
		clockHash, err := db.SequenceHasher.GetHash(cumulativeClock)
		if err != nil {
			base.Warn("Error calculating hash for clock:%v", base.PrintClock(cumulativeClock))
			return lastHashedValue
		} else {
			entry.Seq.Clock = base.NewSyncSequenceClock()
			entry.Seq.Clock.SetHashedValue(clockHash)
			lastHashedValue = clockHash
		}
		*hashedEntryCount = db.SequenceHasher.getHashFrequency()
	} else {
		entry.Seq.LowHash = lastHashedValue
		*hashedEntryCount--
	}
	return lastHashedValue
}
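
The counter cadence is easier to see in isolation: a hash is computed whenever the counter runs out (or forceHash is set), the counter is then reset to the configured frequency, and the entries in between carry the last hash as their LowHash. A hypothetical standalone sketch with a fake hash in place of db.SequenceHasher.GetHash:

package main

import "fmt"

func main() {
	const hashFrequency = 3 // stand-in for db.SequenceHasher.getHashFrequency()
	hashedEntryCount := 0
	lastHashedValue := ""
	for seq := 1; seq <= 9; seq++ {
		forceHash := seq == 9 // e.g. the last entry of a one-shot feed
		if hashedEntryCount == 0 || forceHash {
			lastHashedValue = fmt.Sprintf("hash(<=%d)", seq) // fake GetHash over the cumulative clock
			hashedEntryCount = hashFrequency
			fmt.Printf("seq %d: hashed -> %s\n", seq, lastHashedValue)
		} else {
			// In-between entries inherit the last hash as LowHash, as above.
			hashedEntryCount--
			fmt.Printf("seq %d: LowHash = %s\n", seq, lastHashedValue)
		}
	}
}
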
Example 5
// Returns the (ordered) union of all of the changes made to multiple channels.
func (db *Database) VectorMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error) {
	to := ""
	var userVbNo uint16
	if db.user != nil && db.user.Name() != "" {
		to = fmt.Sprintf("  (to %s)", db.user.Name())
		userVbNo = uint16(db.Bucket.VBHash(db.user.DocID()))
	}

	base.LogTo("Changes+", "Vector MultiChangesFeed(%s, %+v) ... %s", chans, options, to)
	output := make(chan *ChangeEntry, 50)

	go func() {
		var cumulativeClock *base.SyncSequenceClock
		var lastHashedValue string
		hashedEntryCount := 0
		defer func() {
			base.LogTo("Changes+", "MultiChangesFeed done %s", to)
			close(output)
		}()

		var changeWaiter *changeWaiter
		var userChangeCount uint64
		var addedChannels base.Set // Tracks channels added to the user during changes processing.

		if options.Wait {
			// Note (Adam): I don't think there's a reason to set this to false here.  We're outside the
			// main iteration loop (so the if check above should only happen once), and I don't believe
			// options.Wait is referenced elsewhere once MultiChangesFeed is called.  Leaving it as-is
			// makes it possible for channels to identify whether a getChanges call has options.Wait set to true,
			// which is useful to identify active change listeners.  However, it's possible there's a subtlety of
			// longpoll or continuous processing I'm missing here - leaving this note instead of just deleting for now.
			//options.Wait = false
			changeWaiter = db.startChangeWaiter(chans)
			userChangeCount = changeWaiter.CurrentUserCount()
		}

		cumulativeClock = base.NewSyncSequenceClock()
		cumulativeClock.SetTo(getChangesClock(options.Since))

		// This loop is used to re-run the fetch after every database change, in Wait mode
	outer:
		for {
			// Get the last polled stable sequence. We don't return anything later than the stable sequence in each iteration
			stableClock, err := db.changeCache.GetStableClock(true)
			if err != nil {
				base.Warn("MultiChangesFeed got error reading stable sequence: %v", err)
				return
			}

			// Restrict to available channels, expand wild-card, and find since when these channels
			// have been available to the user:
			var channelsSince channels.TimedSet
			if db.user != nil {
				channelsSince = db.user.FilterToAvailableChannels(chans)
			} else {
				channelsSince = channels.AtSequence(chans, 0)
			}

			// Updates the changeWaiter to the current set of available channels.
			if changeWaiter != nil {
				changeWaiter.UpdateChannels(channelsSince)
			}
			base.LogTo("Changes+", "MultiChangesFeed: channels expand to %#v ... %s", channelsSince, to)

			// Build the channel feeds.
			feeds, err := db.initializeChannelFeeds(channelsSince, options, addedChannels, userVbNo)
			if err != nil {
				return
			}

			// This loop reads the available entries from all the feeds in parallel, merges them,
			// and writes them to the output channel:
			current := make([]*ChangeEntry, len(feeds))
			var sentSomething bool
			nextEntry := getNextSequenceFromFeeds(current, feeds)
			for {
				minEntry := nextEntry

				if minEntry == nil {
					break // Exit the loop when there are no more entries
				}

				// Calculate the next entry here, so we can tell whether minEntry is the last entry
				// we'll send (the last entry must be hashed)
				nextEntry = getNextSequenceFromFeeds(current, feeds)

				// Don't send any entries later than the stable sequence
				if stableClock.GetSequence(minEntry.Seq.vbNo) < minEntry.Seq.Seq {
					continue
				}

				// Add the doc body or the conflicting rev IDs, if those options are set:
				if options.IncludeDocs || options.Conflicts {
					db.addDocToChangeEntry(minEntry, options)
				}

				// Clock handling
				if minEntry.Seq.TriggeredBy == 0 {
					// Update the cumulative clock, and stick it on the entry.
					cumulativeClock.SetMaxSequence(minEntry.Seq.vbNo, minEntry.Seq.Seq)
					// Force new hash generation for non-continuous changes feeds if this is the last entry to be sent - either
					// because there are no more entries in the channel feeds, or we're going to hit the limit.
					forceHash := !options.Continuous && (nextEntry == nil || options.Limit == 1)
					lastHashedValue = db.calculateHashWhenNeeded(
						options,
						minEntry,
						cumulativeClock,
						&hashedEntryCount,
						lastHashedValue,
						forceHash,
					)

				} else {
					// For backfill (triggered by), we don't want to update the cumulative clock.  All entries triggered by the
					// same sequence reference the same triggered by clock, so it should only need to get hashed once.
					// If this is the first entry for this triggered by, initialize the triggered by clock's
					// hash value.
					if minEntry.Seq.TriggeredByClock.GetHashedValue() == "" {
						cumulativeClock.SetMaxSequence(minEntry.Seq.TriggeredByVbNo, minEntry.Seq.TriggeredBy)
						clockHash, err := db.SequenceHasher.GetHash(cumulativeClock)
						if err != nil {
							base.Warn("Error calculating hash for triggered by clock:%v", base.PrintClock(cumulativeClock))
						} else {
							minEntry.Seq.TriggeredByClock.SetHashedValue(clockHash)
						}
					}
				}

				// Send the entry, and repeat the loop:
				select {
				case <-options.Terminator:
					return
				case output <- minEntry:
				}
				sentSomething = true

				// Stop when we hit the limit (if any):
				if options.Limit > 0 {
					options.Limit--
					if options.Limit == 0 {
						break outer
					}
				}
			}

			if !options.Continuous && (sentSomething || changeWaiter == nil) {
				break
			}

			// Update options.Since for use in the next outer loop iteration.
			options.Since.Clock = cumulativeClock

			// If nothing found, and in wait mode: wait for the db to change, then run again.
			// First notify the reader that we're waiting by sending a nil.
			base.LogTo("Changes+", "MultiChangesFeed waiting... %s", to)
			output <- nil

		waitForChanges:
			for {
				waitResponse := changeWaiter.Wait()
				if waitResponse == WaiterClosed {
					break outer
				} else if waitResponse == WaiterHasChanges {
					select {
					case <-options.Terminator:
						return
					default:
						break waitForChanges
					}
				} else if waitResponse == WaiterCheckTerminated {
					// Check whether I was terminated while waiting for a change.  If not, resume wait.
					select {
					case <-options.Terminator:
						return
					default:
					}
				}
			}

			// Before checking again, update the User object in case its channel access has
			// changed while waiting:
			userChangeCount, addedChannels, err = db.checkForUserUpdates(userChangeCount, changeWaiter)
			if err != nil {
				change := makeErrorEntry("User not found during reload - terminating changes feed")
				base.LogTo("Changes+", "User not found during reload - terminating changes feed with entry %+v", change)
				output <- &change
				return
			}
		}
	}()

	return output, nil
}
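
The inner loop of VectorMultiChangesFeed is a k-way merge: getNextSequenceFromFeeds repeatedly yields the lowest-sequence entry across the per-channel feeds, so the output channel carries a single ordered stream. A self-contained sketch of that merge over plain slices (the types and helper here are illustrative, not the actual feed machinery):

package main

import "fmt"

// entry is an illustrative stand-in for a changes-feed entry.
type entry struct {
	Seq     uint64
	Channel string
}

// nextFromFeeds plays the role of getNextSequenceFromFeeds: refill the head
// slot for each feed, then pop and return the lowest-sequence entry.
func nextFromFeeds(current []*entry, feeds [][]entry, pos []int) *entry {
	minIdx := -1
	for i := range feeds {
		if current[i] == nil && pos[i] < len(feeds[i]) {
			current[i] = &feeds[i][pos[i]]
			pos[i]++
		}
		if current[i] != nil && (minIdx == -1 || current[i].Seq < current[minIdx].Seq) {
			minIdx = i
		}
	}
	if minIdx == -1 {
		return nil // all feeds are drained
	}
	next := current[minIdx]
	current[minIdx] = nil
	return next
}

func main() {
	feeds := [][]entry{
		{{1, "A"}, {4, "A"}, {7, "A"}},
		{{2, "B"}, {3, "B"}, {9, "B"}},
	}
	current := make([]*entry, len(feeds))
	pos := make([]int, len(feeds))
	// Drains the feeds in global sequence order: 1 2 3 4 7 9
	for e := nextFromFeeds(current, feeds, pos); e != nil; e = nextFromFeeds(current, feeds, pos) {
		fmt.Printf("seq %d from channel %s\n", e.Seq, e.Channel)
	}
}
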