func (k *kvChangeIndexReader) GetStableClock() (clock base.SequenceClock, err error) {

	// Validate that the partition map is available.
	_, err = k.indexPartitionsCallback()
	if err != nil {
		// Unable to load partitions.  Check whether the index has data (stable counter is non-zero)
		count, err := base.LoadClockCounter(base.KStableSequenceKey, k.indexReadBucket)
		// Index has data, but we can't get partition map.  Return error
		if err == nil && count > 0 {
			return nil, errors.New("Error: Unable to retrieve index partition map, but index counter exists")
		} else {
			// Index doesn't have data.  Return zero clock as stable clock
			return base.NewSequenceClockImpl(), nil
		}
	}

	stableShardedClock, err := k.loadStableSequence()
	if err != nil {
		base.Warn("Stable sequence and clock not found in index - returning err")
		return nil, err
	}
	clock = stableShardedClock.AsClock()

	return clock, nil
}
Example #2
func parseClockSequenceID(str string, sequenceHasher *sequenceHasher) (s SequenceID, err error) {

	if str == "" {
		return SequenceID{
			SeqType: ClockSequenceType,
			Clock:   base.NewSequenceClockImpl(),
		}, nil
	}

	s.SeqType = ClockSequenceType
	components := strings.Split(str, ":")
	if len(components) == 1 {
		// Convert simple zero to empty clock, to handle clients sending zero to mean 'no previous since'
		if components[0] == "0" {
			s.Clock = base.NewSequenceClockImpl()
		} else {
			// Standard clock hash
			if s.Clock, err = sequenceHasher.GetClock(components[0]); err != nil {
				return SequenceID{}, err
			}
		}
	} else if len(components) == 2 {
		// TriggeredBy Clock Hash, and vb.seq sequence
		if s.TriggeredByClock, err = sequenceHasher.GetClock(components[0]); err != nil {
			return SequenceID{}, err
		}
		sequenceComponents := strings.Split(components[1], ".")
		if len(sequenceComponents) != 2 {
			base.Warn("Unexpected sequence format - ignoring and relying on triggered by")
			return
		} else {
			vb64, parseErr := strconv.ParseUint(sequenceComponents[0], 10, 16)
			if parseErr != nil {
				base.Warn("Unable to convert vbucket %v to int.", sequenceComponents[0])
			} else {
				s.vbNo = uint16(vb64)
				s.Seq, err = strconv.ParseUint(sequenceComponents[1], 10, 64)
			}
		}

	} else if len(components) == 3 {
		// Low hash, and vb.seq sequence.  Use low hash as clock, ignore vb.seq
		if s.Clock, err = sequenceHasher.GetClock(components[0]); err != nil {
			return SequenceID{}, err
		}

	} else {
		err = base.HTTPErrorf(400, "Invalid sequence")
	}

	if err != nil {
		err = base.HTTPErrorf(400, "Invalid sequence")
	}
	return s, err
}
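For reference, the since formats handled above are: one component holding a clock hash (or a literal "0"), two components holding a triggered-by clock hash plus a "vb.seq" pair, and three components where the first (low) hash is used as the clock and the rest is ignored. A minimal sketch of the "vb.seq" split used in the two-component branch; parseVbSeq is a hypothetical helper for illustration, not part of Sync Gateway:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseVbSeq splits a "vb.seq" component (e.g. "5.100") into its vbucket
// number and sequence value, mirroring the two-component branch above.
func parseVbSeq(component string) (vbNo uint16, seq uint64, err error) {
	parts := strings.Split(component, ".")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("expected vb.seq, got %q", component)
	}
	vb64, err := strconv.ParseUint(parts[0], 10, 16)
	if err != nil {
		return 0, 0, err
	}
	seq, err = strconv.ParseUint(parts[1], 10, 64)
	return uint16(vb64), seq, err
}

func main() {
	vb, seq, err := parseVbSeq("5.100")
	fmt.Println(vb, seq, err) // 5 100 <nil>
}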
// Tests hash expiry.  Requires a real couchbase server bucket - walrus doesn't support expiry yet
func CouchbaseOnlyTestHashExpiry(t *testing.T) {
	// Create a hasher with a small range (0-256) and short expiry for testing
	seqHasher, err := testSequenceHasher(8, 5)
	assertNoError(t, err, "Error creating new sequence hasher")
	defer seqHasher.bucket.Close()

	// Add first hash entry
	clock := base.NewSequenceClockImpl()
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)
	clock.SetSequence(150, 150)
	hashValue, err := seqHasher.GetHash(clock)
	assertNoError(t, err, "Error creating hash")
	// Validate that expiry is reset every time sequence for hash is requested.
	for i := 0; i < 20; i++ {
		clockBack, err := seqHasher.GetClock(hashValue)
		assertNoError(t, err, "Error getting clock")
		assert.Equals(t, clockBack.GetSequence(50), uint64(100))
		time.Sleep(2 * time.Second)
	}

	// Validate it disappears after expiry time when no active requests
	time.Sleep(10 * time.Second)
	clockBack, err := seqHasher.GetClock(hashValue)
	assertNoError(t, err, "Error getting clock")
	log.Println("Got clockback:", clockBack)
	assert.Equals(t, clockBack.GetSequence(50), uint64(0))

}
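The behaviour this test checks - each read of a hash pushes its expiry out again, so an entry only disappears after going unread for the full TTL - can be illustrated with a small in-memory sketch (illustrative only; the real hasher delegates expiry to the Couchbase bucket):

package main

import (
	"fmt"
	"time"
)

type ttlEntry struct {
	value    string
	deadline time.Time
}

// ttlCache is a toy cache where every successful read resets the entry's
// deadline to the full TTL - the same behaviour the test above validates.
type ttlCache struct {
	ttl     time.Duration
	entries map[string]ttlEntry
}

func (c *ttlCache) set(key, value string) {
	c.entries[key] = ttlEntry{value, time.Now().Add(c.ttl)}
}

func (c *ttlCache) get(key string) (string, bool) {
	e, ok := c.entries[key]
	if !ok || time.Now().After(e.deadline) {
		delete(c.entries, key)
		return "", false
	}
	e.deadline = time.Now().Add(c.ttl) // reading resets expiry
	c.entries[key] = e
	return e.value, true
}

func main() {
	c := &ttlCache{ttl: 50 * time.Millisecond, entries: make(map[string]ttlEntry)}
	c.set("14-0", "clock")
	time.Sleep(30 * time.Millisecond)
	fmt.Println(c.get("14-0")) // clock true - the read resets the deadline
	time.Sleep(60 * time.Millisecond)
	fmt.Println(c.get("14-0")) // "" false - expired after going unread
}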
func TestConcurrentHashStorage(t *testing.T) {
	// Create a hasher with a small range (0-256) for testing
	seqHasher, err := testSequenceHasher(8, 0)
	assertNoError(t, err, "Error creating new sequence hasher")
	defer seqHasher.bucket.Close()

	// Simulate multiple processes writing hashes for different clocks concurrently - ensure cache is still valid
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			clock := base.NewSequenceClockImpl()
			clock.SetSequence(uint16(i), uint64(i))
			value, err := seqHasher.GetHash(clock)
			assertNoError(t, err, "Error getting hash")
			assert.Equals(t, value, fmt.Sprintf("%d-0", i))
		}(i)
	}
	wg.Wait()

	// Retrieve values
	for i := 0; i < 20; i++ {
		loadedClock, err := seqHasher.GetClock(fmt.Sprintf("%d-0", i))
		assertTrue(t, err == nil, "Shouldn't return error")
		assert.Equals(t, loadedClock.GetSequence(uint16(i)), uint64(i))
	}
}
Example #5
// Adds a set of log entries to the channel index
func (b *BitFlagStorage) AddEntrySet(entries []*LogEntry) (clockUpdates base.SequenceClock, err error) {

	// Update the sequences in the appropriate cache block
	if len(entries) == 0 {
		return clockUpdates, nil
	}

	// The set of updates may be distributed over multiple partitions and blocks.
	// To support this, iterate over the set, and define groups of sequences by block
	// TODO: this approach feels like it's generating a lot of GC work.  Considered an iterative
	//       approach where a set update returned a list of entries that weren't targeted at the
	//       same block as the first entry in the list, but this would force sequential
	//       processing of the blocks.  Might be worth revisiting if we see high GC overhead.
	blockSets := make(BlockSet)
	clockUpdates = base.NewSequenceClockImpl()
	for _, entry := range entries {
		// Update the sequence in the appropriate cache block
		base.LogTo("DIndex+", "Add to channel index [%s], vbNo=%d, isRemoval:%v", b.channelName, entry.VbNo, entry.isRemoved())
		blockKey := GenerateBlockKey(b.channelName, entry.Sequence, b.partitions.VbMap[entry.VbNo])
		if _, ok := blockSets[blockKey]; !ok {
			blockSets[blockKey] = make([]*LogEntry, 0)
		}
		blockSets[blockKey] = append(blockSets[blockKey], entry)
		clockUpdates.SetMaxSequence(entry.VbNo, entry.Sequence)
	}

	err = b.writeBlockSetsWithCas(blockSets)
	if err != nil {
		base.Warn("Error writing blockSets with cas for block %s: %+v", blockSets, err)
	}

	return clockUpdates, err
}
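The grouping step in AddEntrySet is a plain group-by: derive a block key for each entry, then append into a map of slices so each block is written once. A stripped-down sketch, where Entry and blockKeyFor are hypothetical stand-ins for LogEntry and GenerateBlockKey (assuming 16 partitions and 10000-sequence blocks):

package main

import "fmt"

type Entry struct {
	VbNo     uint16
	Sequence uint64
}

// blockKeyFor derives a storage key from the entry's partition and sequence
// block (assumed layout, for illustration only).
func blockKeyFor(e Entry) string {
	return fmt.Sprintf("p%d:b%d", e.VbNo%16, e.Sequence/10000)
}

func main() {
	entries := []Entry{{VbNo: 5, Sequence: 100}, {VbNo: 5, Sequence: 20000}, {VbNo: 3, Sequence: 150}}
	blockSets := make(map[string][]Entry)
	for _, e := range entries {
		key := blockKeyFor(e)
		blockSets[key] = append(blockSets[key], e) // one slice per block
	}
	fmt.Println(len(blockSets)) // 3 groups: p5:b0, p5:b2, p3:b0
}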
func getClockForMap(values map[uint16]uint64) base.SequenceClock {
	clock := base.NewSequenceClockImpl()
	for vb, seq := range values {
		clock.SetSequence(vb, seq)
	}
	return clock
}
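A convenience for building a clock from a literal: for example, getClockForMap(map[uint16]uint64{5: 100, 21: 150}) returns a clock with vbucket 5 at sequence 100 and vbucket 21 at sequence 150.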
Example #7
func (h *handler) readChangesOptionsFromJSON(jsonData []byte) (feed string, options db.ChangesOptions, filter string, channelsArray []string, docIdsArray []string, compress bool, err error) {
	var input struct {
		Feed           string        `json:"feed"`
		Since          db.SequenceID `json:"since"`
		Limit          int           `json:"limit"`
		Style          string        `json:"style"`
		IncludeDocs    bool          `json:"include_docs"`
		Filter         string        `json:"filter"`
		Channels       string        `json:"channels"` // a filter query param, so it has to be a string
		DocIds         []string      `json:"doc_ids"`
		HeartbeatMs    *uint64       `json:"heartbeat"`
		TimeoutMs      *uint64       `json:"timeout"`
		AcceptEncoding string        `json:"accept_encoding"`
		ActiveOnly     bool          `json:"active_only"` // Return active revisions only
	}
	// Initialize since clock and hasher ahead of unmarshalling sequence
	if h.db != nil && h.db.SequenceType == db.ClockSequenceType {
		input.Since.Clock = base.NewSequenceClockImpl()
		input.Since.SeqType = h.db.SequenceType
		input.Since.SequenceHasher = h.db.SequenceHasher
	}
	if err = json.Unmarshal(jsonData, &input); err != nil {
		return
	}
	feed = input.Feed
	options.Since = input.Since
	options.Limit = input.Limit

	options.Conflicts = input.Style == "all_docs"
	options.ActiveOnly = input.ActiveOnly

	options.IncludeDocs = input.IncludeDocs
	filter = input.Filter

	if input.Channels != "" {
		channelsArray = strings.Split(input.Channels, ",")
	}

	docIdsArray = input.DocIds

	options.HeartbeatMs = getRestrictedInt(
		input.HeartbeatMs,
		kDefaultHeartbeatMS,
		kMinHeartbeatMS,
		h.server.config.MaxHeartbeat*1000,
		true,
	)

	options.TimeoutMs = getRestrictedInt(
		input.TimeoutMs,
		kDefaultTimeoutMS,
		0,
		kMaxTimeoutMS,
		true,
	)

	compress = (input.AcceptEncoding == "gzip")

	return
}
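A request body this parser accepts might look like the following (field values are illustrative; the since value takes the clock-hash form when the database runs with clock sequences):

{
  "feed": "longpoll",
  "since": "14-0",
  "limit": 50,
  "style": "all_docs",
  "include_docs": true,
  "filter": "sync_gateway/bychannel",
  "channels": "ABC,CBS",
  "heartbeat": 30000,
  "timeout": 60000,
  "accept_encoding": "gzip"
}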
Example #8
func getZeroSequence(db *Database) ChangesOptions {
	if db.SequenceType == IntSequenceType {
		return ChangesOptions{Since: SequenceID{Seq: 0}}
	} else {
		return ChangesOptions{Since: SequenceID{Clock: base.NewSequenceClockImpl()}}
	}
}
func (k *kvChangeIndexReader) GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error) {

	var sinceClock base.SequenceClock
	if options.Since.Clock == nil {
		// If there's no since clock, we may be in backfill for another channel - revert to the triggered by clock.
		if options.Since.TriggeredByClock != nil {
			sinceClock = options.Since.TriggeredByClock
		} else {
			sinceClock = base.NewSequenceClockImpl()
		}
	} else {
		sinceClock = options.Since.Clock
	}

	reader, err := k.getOrCreateReader(channelName, options)
	if err != nil {
		base.Warn("Error obtaining channel reader (need partition index?) for channel %s", channelName)
		return nil, err
	}
	changes, err := reader.getChanges(sinceClock)
	if err != nil {
		base.LogTo("DIndex+", "No clock found for channel %s, assuming no entries in index", channelName)
		return nil, nil
	}

	// Limit handling
	if options.Limit > 0 && len(changes) > options.Limit {
		limitResult := make([]*LogEntry, options.Limit)
		copy(limitResult, changes)
		return limitResult, nil
	}

	return changes, nil
}
Example #10
// Returns a clock-based SequenceID with all 1024 vbucket sequence values set to seq
func simpleClockSequence(seq uint64) SequenceID {
	result := SequenceID{
		SeqType: ClockSequenceType,
		Clock:   base.NewSequenceClockImpl(),
	}
	for i := 0; i < 1024; i++ {
		result.Clock.SetSequence(uint16(i), seq)
	}
	return result
}
Example #11
func (k *kvChannelIndex) loadClock() {

	if k.clock == nil {
		k.clock = base.NewSequenceClockImpl()
	}
	data, cas, err := k.indexBucket.GetRaw(getChannelClockKey(k.channelName))
	if err != nil {
		base.LogTo("DIndex+", "Unable to find existing channel clock for channel %s - treating as new", k.channelName)
		return
	}
	k.clock.Unmarshal(data)
	k.clock.SetCas(cas)
}
Example #12
func (s *sequenceHasher) GetClock(sequence string) (base.SequenceClock, error) {

	clock := base.NewSequenceClockImpl()
	var err error
	var seqHash sequenceHash

	components := strings.Split(sequence, "-")
	if len(components) == 1 {
		seqHash.hashValue, err = strconv.ParseUint(sequence, 10, 64)
		if err != nil {
			return clock, fmt.Errorf("Error converting hash sequence %s to uint64: %v", sequence, err)
		}
	} else if len(components) == 2 {
		seqHash.hashValue, err = strconv.ParseUint(components[0], 10, 64)
		if err != nil {
			return clock, fmt.Errorf("Error converting hash sequence %s to uint64: %v", sequence, err)
		}
		index, err := strconv.ParseUint(components[1], 10, 16)
		if err != nil {
			return clock, fmt.Errorf("Error converting collision index %s to uint16: %v", components[1], err)
		}
		seqHash.collisionIndex = uint16(index)
	}

	cachedValue := s.getCacheValue(seqHash.hashValue)
	storedClocks, loadErr := cachedValue.load(s.loadClocks)
	if loadErr != nil {
		return clock, loadErr
	}

	if uint16(len(storedClocks.Sequences)) <= seqHash.collisionIndex {
		return clock, fmt.Errorf("Stored hash not found for sequence [%s], returning zero clock", sequence)
	}
	clock = base.NewSequenceClockImpl()
	clock.Init(storedClocks.Sequences[seqHash.collisionIndex], seqHash.String())
	return clock, nil

}
Example #13
func TestHashCalculation(t *testing.T) {
	// Create a hasher with a small range (0-256) for testing
	seqHasher, err := testSequenceHasher(8, 0)
	assertNoError(t, err, "Error creating new sequence hasher")
	defer seqHasher.bucket.Close()
	clock := base.NewSequenceClockImpl()
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)
	clock.SetSequence(150, 150)
	hashValue := seqHasher.calculateHash(clock)
	assert.Equals(t, hashValue, uint64(14)) // (100 + 20 + 150) mod 256

	clock.SetSequence(55, 300)
	clock.SetSequence(200, 513)
	hashValue = seqHasher.calculateHash(clock)
	assert.Equals(t, hashValue, uint64(59)) // (100 + 20 + 150 + (300 mod 256) + (513 mod 256)) mod 256

}
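The expected values imply calculateHash sums the per-vbucket sequences modulo the hash range (2^8 = 256 here): (100 + 20 + 150) mod 256 = 14, and adding 300 and 513 gives 315 mod 256 = 59. A standalone sketch of that arithmetic, assuming summing is all the hash does:

package main

import "fmt"

// calculateHashSketch sums each vbucket sequence modulo mod - a hypothetical
// reconstruction that matches the expected values in the test above.
func calculateHashSketch(seqs map[uint16]uint64, mod uint64) uint64 {
	var sum uint64
	for _, seq := range seqs {
		sum = (sum + seq%mod) % mod
	}
	return sum
}

func main() {
	seqs := map[uint16]uint64{50: 100, 80: 20, 150: 150}
	fmt.Println(calculateHashSketch(seqs, 256)) // 14

	seqs[55] = 300
	seqs[200] = 513
	fmt.Println(calculateHashSketch(seqs, 256)) // 59
}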
Example #14
func (k *kvChannelIndex) getChannelClock() (base.SequenceClock, error) {

	var channelClock base.SequenceClock
	var err error
	// If we're polling, return a copy
	k.lastPolledLock.RLock()
	defer k.lastPolledLock.RUnlock()
	if k.lastPolledChannelClock != nil {
		channelClock = base.NewSequenceClockImpl()
		channelClock.SetTo(k.lastPolledChannelClock)
	} else {
		channelClock, err = k.loadChannelClock()
		if err != nil {
			return nil, err
		}
	}
	return channelClock, nil

}
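The pattern above - take the read lock, copy the shared clock into a fresh instance, return the copy - keeps the caller isolated from subsequent polling updates. A generic sketch of the same idea:

package main

import (
	"fmt"
	"sync"
)

type clockHolder struct {
	mu   sync.RWMutex
	seqs map[uint16]uint64 // shared state mutated by a polling goroutine
}

// snapshot returns a private copy taken under the read lock, so the caller
// can use it without racing against later updates.
func (h *clockHolder) snapshot() map[uint16]uint64 {
	h.mu.RLock()
	defer h.mu.RUnlock()
	copied := make(map[uint16]uint64, len(h.seqs))
	for vb, seq := range h.seqs {
		copied[vb] = seq
	}
	return copied
}

func main() {
	h := &clockHolder{seqs: map[uint16]uint64{5: 100}}
	snap := h.snapshot()
	h.mu.Lock()
	h.seqs[5] = 200 // a later update does not affect the snapshot
	h.mu.Unlock()
	fmt.Println(snap[5]) // 100
}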
Example #15
// Returns the set of index entries for the channel more recent than the
// specified since SequenceClock.  Index entries with sequence values greater than
// the index stable sequence are not returned.
func (k *kvChannelIndex) getChanges(since base.SequenceClock) ([]*LogEntry, error) {

	var results []*LogEntry

	// Someone is still interested in this channel - reset poll counts
	atomic.StoreUint32(&k.pollCount, 0)
	atomic.StoreUint32(&k.unreadPollCount, 0)

	chanClock, err := k.getChannelClock()
	if err != nil {
		// Note: gocb returns "Key not found.", go-couchbase returns "MCResponse status=KEY_ENOENT, opcode=GET, opaque=0, msg: Not found"
		// Using string matching to identify key not found for now - really need a better API in go-couchbase/gocb for gets that allows us to distinguish
		// between errors and key not found with something more robust than string matching.
		if IsNotFoundError(err) {
			// initialize chanClock as empty clock
			chanClock = base.NewSequenceClockImpl()
		} else {
			return results, err
		}
	}

	// If requested clock is later than the channel clock, return empty
	if since.AllAfter(chanClock) {
		base.LogTo("DIndex+", "requested clock is later than channel clock - no new changes to report")
		return results, nil
	}

	// If the since value is more recent than the last polled clock, return the results from the
	// last polling.  Has the potential to return values earlier than since and later than
	// lastPolledClock, but these duplicates will be ignored by replication.  Could validate
	// greater than since inside this if clause, but leaving out as a performance optimization for
	// now
	if lastPolledResults := k.checkLastPolled(since); len(lastPolledResults) > 0 {
		indexExpvars.Add("getChanges_lastPolled_hit", 1)
		return lastPolledResults, nil
	}
	indexExpvars.Add("getChanges_lastPolled_miss", 1)

	return k.channelStorage.GetChanges(since, chanClock)
}
Example #16
// Creates a go-channel of ChangeEntry for each channel in channelsSince.  Each go-channel sends the ordered entries for that channel.
func (db *Database) initializeChannelFeeds(channelsSince channels.TimedSet, options ChangesOptions, addedChannels base.Set, userVbNo uint16) ([]<-chan *ChangeEntry, error) {
	// Populate the array of feed channels:
	feeds := make([]<-chan *ChangeEntry, 0, len(channelsSince))

	base.LogTo("Changes+", "GotChannelSince... %v", channelsSince)
	for name, vbSeqAddedAt := range channelsSince {
		seqAddedAt := vbSeqAddedAt.Sequence
		// If there's no vbNo on the channelsSince, it indicates a user doc channel grant - use the userVbNo.
		var vbAddedAt uint16
		if vbSeqAddedAt.VbNo == nil {
			vbAddedAt = userVbNo
		} else {
			vbAddedAt = *vbSeqAddedAt.VbNo
		}

		base.LogTo("Changes+", "Starting for channel... %s, %d", name, seqAddedAt)
		chanOpts := options

		// Check whether requires backfill based on addedChannels in this _changes feed
		isNewChannel := false
		if addedChannels != nil {
			_, isNewChannel = addedChannels[name]
		}

		// Three possible scenarios for backfill handling, based on whether the incoming since value indicates a backfill in progress
		// for this channel, and whether the channel requires a new backfill to be started
		//   Case 1. No backfill in progress, no backfill required - use the incoming since to get changes
		//   Case 2. No backfill in progress, backfill required for this channel.  Get changes since zero, backfilling to the incoming since
		//   Case 3. Backfill in progress.  Get changes since zero, backfilling to incoming triggered by, filtered to later than incoming since.
		backfillInProgress := false
		if options.Since.TriggeredByClock != nil {
			// There's a backfill in progress for SOME channel - check if it's this one
			if options.Since.TriggeredByClock.GetSequence(vbAddedAt) == seqAddedAt {
				backfillInProgress = true
			}
		}

		sinceSeq := getChangesClock(options.Since).GetSequence(vbAddedAt)
		backfillRequired := vbSeqAddedAt.Sequence > 0 && sinceSeq < seqAddedAt

		if isNewChannel || (backfillRequired && !backfillInProgress) {
			// Case 2.  No backfill in progress, backfill required
			base.LogTo("Changes+", "Starting backfill for channel... %s, %d", name, seqAddedAt)
			chanOpts.Since = SequenceID{
				Seq:              0,
				vbNo:             0,
				Clock:            base.NewSequenceClockImpl(),
				TriggeredBy:      seqAddedAt,
				TriggeredByVbNo:  vbAddedAt,
				TriggeredByClock: getChangesClock(options.Since).Copy(),
			}
		} else if backfillInProgress {
			// Case 3.  Backfill in progress.
			chanOpts.Since = SequenceID{
				Seq:              options.Since.Seq,
				vbNo:             options.Since.vbNo,
				Clock:            base.NewSequenceClockImpl(),
				TriggeredBy:      seqAddedAt,
				TriggeredByVbNo:  vbAddedAt,
				TriggeredByClock: options.Since.TriggeredByClock,
			}
		} else {
			// Case 1.  Leave chanOpts.Since set to options.Since.
		}
		feed, err := db.vectorChangesFeed(name, chanOpts)
		if err != nil {
			base.Warn("MultiChangesFeed got error reading changes feed %q: %v", name, err)
			return feeds, err
		}
		feeds = append(feeds, feed)
	}

	// If the user object has changed, create a special pseudo-feed for it:
	if db.user != nil {
		feeds, _ = db.appendVectorUserFeed(feeds, []string{}, options, userVbNo)
	}
	return feeds, nil
}
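As a concrete illustration of the three backfill cases above: suppose a channel grant occurred at vbucket 7, sequence 100. If the incoming since clock already has vbucket 7 at 100 or later, changes are fetched from since as-is (case 1). If the since clock has vbucket 7 at, say, 60, a new backfill starts from a zero clock, with the original since preserved as the TriggeredByClock (case 2). If the incoming since already carries a TriggeredByClock whose vbucket 7 sequence equals 100, a backfill for this channel is mid-flight and resumes with that same triggered-by clock (case 3).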
func (k *kvChangeIndexReader) pollReaders() bool {
	k.channelIndexReaderLock.Lock()
	defer k.channelIndexReaderLock.Unlock()

	if len(k.channelIndexReaders) == 0 {
		return true
	}

	// Build the set of clock keys to retrieve - one per channel reader
	keySet := make([]string, len(k.channelIndexReaders))
	index := 0
	for _, reader := range k.channelIndexReaders {
		keySet[index] = GetChannelClockKey(reader.channelName)
		index++
	}
	bulkGetResults, err := k.indexReadBucket.GetBulkRaw(keySet)

	if err != nil {
		base.Warn("Error retrieving channel clocks: %v", err)
	}
	IndexExpvars.Add("bulkGet_channelClocks", 1)
	IndexExpvars.Add("bulkGet_channelClocks_keyCount", int64(len(keySet)))
	changedChannels := make(chan string, len(k.channelIndexReaders))
	cancelledChannels := make(chan string, len(k.channelIndexReaders))

	var wg sync.WaitGroup
	for _, reader := range k.channelIndexReaders {
		// For each channel, unmarshal new channel clock, then check with reader whether this represents changes
		wg.Add(1)
		go func(reader *kvChannelIndex, wg *sync.WaitGroup) {
			defer wg.Done()
			// Unmarshal the channel clock.  If not present in the bulk get results, use an empty clock to support
			// channels that don't have any indexed data yet.  If the clock was previously found successfully (i.e. the
			// empty clock is due to a temporary server error), the empty clock is treated safely as a non-update by pollForChanges.
			clockKey := GetChannelClockKey(reader.channelName)
			var newChannelClock *base.SequenceClockImpl
			clockBytes, found := bulkGetResults[clockKey]
			if !found {
				newChannelClock = base.NewSequenceClockImpl()
			} else {
				var err error
				newChannelClock, err = base.NewSequenceClockForBytes(clockBytes)
				if err != nil {
					base.Warn("Error unmarshalling channel clock - skipping polling for channel %s: %v", reader.channelName, err)
					return
				}
			}

			// Poll for changes
			hasChanges, cancelPolling := reader.pollForChanges(k.readerStableSequence.AsClock(), newChannelClock)
			if hasChanges {
				changedChannels <- reader.channelName
			}
			if cancelPolling {
				cancelledChannels <- reader.channelName
			}

		}(reader, &wg)
	}

	wg.Wait()
	close(changedChannels)
	close(cancelledChannels)

	// Build channel set from the changed channels
	var channels []string
	for channelName := range changedChannels {
		channels = append(channels, channelName)
	}

	if len(channels) > 0 && k.onChange != nil {
		k.onChange(base.SetFromArray(channels))
	}

	// Remove cancelled channels from channel readers
	for channelName := range cancelledChannels {
		IndexExpvars.Add("pollingChannels_active", -1)
		delete(k.channelIndexReaders, channelName)
	}

	return true
}
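pollReaders fans out one goroutine per reader and collects results over channels buffered to the reader count, so every send succeeds without blocking and both channels can be closed safely once the WaitGroup drains. The same pattern in miniature:

package main

import (
	"fmt"
	"sync"
)

func main() {
	names := []string{"chan-a", "chan-b", "chan-c"}
	changed := make(chan string, len(names)) // buffer sized to worker count
	var wg sync.WaitGroup
	for _, name := range names {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			changed <- name // never blocks: the buffer has room for every send
		}(name)
	}
	wg.Wait()
	close(changed) // safe: all sends completed before Wait returned
	for name := range changed {
		fmt.Println(name, "changed")
	}
}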
// Index a group of entries.  Iterates over the entry set to build updates per channel, then
// updates using channel index.
func (k *kvChangeIndexWriter) indexEntries(entries []*LogEntry, indexPartitions base.IndexPartitionMap, channelStorage ChannelStorage) error {

	channelSets := make(map[string][]*LogEntry)
	updatedSequences := base.NewSequenceClockImpl()

	// Wait group tracks when the current buffer has been completely processed
	var entryWg sync.WaitGroup
	entryErrorCount := uint32(0)
	// Iterate over entries to write index entry docs, and group entries for subsequent channel index updates
	for _, logEntry := range entries {
		// If principal, update the stable sequence and continue
		if logEntry.IsPrincipal {
			updatedSequences.SetSequence(logEntry.VbNo, logEntry.Sequence)
			continue
		}

		// Capture the entry's channel map; entries are grouped by channel below
		ch := logEntry.Channels

		// Add index log entry if needed
		if channelStorage.StoresLogEntries() {
			entryWg.Add(1)
			go func(logEntry *LogEntry, errorCount *uint32) {
				defer entryWg.Done()
				err := channelStorage.WriteLogEntry(logEntry)
				if err != nil {
					atomic.AddUint32(errorCount, 1)
				}
			}(logEntry, &entryErrorCount)
		}
		// Collect entries by channel
		for channelName, removal := range ch {
			if removal == nil || removal.RevID == logEntry.RevID {
				// Store by channel and partition, to avoid having to iterate over results again in the channel index to group by partition
				_, found := channelSets[channelName]
				if !found {
					// TODO: maxCacheUpdate may be unnecessarily large memory allocation here
					channelSets[channelName] = make([]*LogEntry, 0, maxCacheUpdate)
				}
				if removal != nil {
					removalEntry := *logEntry
					removalEntry.Flags |= channels.Removed
					channelSets[channelName] = append(channelSets[channelName], &removalEntry)
				} else {
					channelSets[channelName] = append(channelSets[channelName], logEntry)
				}
			}
		}
		if EnableStarChannelLog {
			_, found := channelSets[channels.UserStarChannel]
			if !found {
				// TODO: maxCacheUpdate may be unnecessarily large memory allocation here
				channelSets[channels.UserStarChannel] = make([]*LogEntry, 0, maxCacheUpdate)
			}
			channelSets[channels.UserStarChannel] = append(channelSets[channels.UserStarChannel], logEntry)
		}

		// Track vbucket sequences for clock update
		updatedSequences.SetSequence(logEntry.VbNo, logEntry.Sequence)
	}

	// Wait group tracks when the channel index updates for the current buffer have completed
	var channelWg sync.WaitGroup

	channelErrorCount := uint32(0)
	// Iterate over channel sets to update channel index
	for channelName, entrySet := range channelSets {
		channelWg.Add(1)
		go func(channelName string, entrySet []*LogEntry, errorCount *uint32) {
			defer channelWg.Done()
			err := k.addSetToChannelIndex(channelName, entrySet)
			if err != nil {
				atomic.AddUint32(errorCount, 1)
			}
		}(channelName, entrySet, &channelErrorCount)
	}

	// Wait for entry and channel processing to complete
	entryWg.Wait()
	channelWg.Wait()
	if atomic.LoadUint32(&entryErrorCount) > 0 || atomic.LoadUint32(&channelErrorCount) > 0 {
		return errors.New("Unrecoverable error indexing entry or channel")
	}

	// Update stable sequence
	err := k.getWriterStableSequence().UpdateAndWrite(updatedSequences)
	return err
}
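The error accounting above works because each goroutine increments the shared counter through a pointer; atomically incrementing a by-value copy would be invisible to the final LoadUint32 check. The pattern in isolation:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var errorCount uint32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int, errorCount *uint32) {
			defer wg.Done()
			if i%3 == 0 { // simulate a failed write
				atomic.AddUint32(errorCount, 1)
			}
		}(i, &errorCount)
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint32(&errorCount)) // 4 (i = 0, 3, 6, 9)
}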
Example #19
func TestHashStorage(t *testing.T) {
	// Create a hasher with a small range (0-256) for testing
	seqHasher, err := testSequenceHasher(8, 0)
	assertNoError(t, err, "Error creating new sequence hasher")
	defer seqHasher.bucket.Close()

	// Add first hash entry
	clock := base.NewSequenceClockImpl()
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)
	clock.SetSequence(150, 150)
	hashValue, err := seqHasher.GetHash(clock)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue, "14-0")

	// Add different hash entry
	clock2 := base.NewSequenceClockImpl()
	clock2.SetSequence(50, 1)
	clock2.SetSequence(80, 2)
	clock2.SetSequence(150, 5)
	hashValue2, err := seqHasher.GetHash(clock2)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue2, "8-0")

	// Retrieve first hash entry
	clockBack, err := seqHasher.GetClock(hashValue)
	assertNoError(t, err, "Error getting clock")
	assert.Equals(t, clockBack.GetSequence(50), uint64(100))
	assert.Equals(t, clockBack.GetSequence(80), uint64(20))
	assert.Equals(t, clockBack.GetSequence(150), uint64(150))

	// Create hash for the first clock again - ensure retrieves existing, and doesn't create new
	hashValue, err = seqHasher.GetHash(clock)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue, "14-0")

	// Add a second clock that hashes to the same value
	secondClock := base.NewSequenceClockImpl()
	secondClock.SetSequence(50, 100)
	secondClock.SetSequence(80, 20)
	secondClock.SetSequence(150, 150)
	secondClock.SetSequence(300, 256)
	hashValue, err = seqHasher.GetHash(secondClock)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue, "14-1")

	// Simulate multiple processes requesting a hash for the same clock concurrently - ensures cas write checks
	// whether clock has already been added before writing
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			thirdClock := base.NewSequenceClockImpl()
			thirdClock.SetSequence(50, 100)
			thirdClock.SetSequence(80, 20)
			thirdClock.SetSequence(150, 150)
			thirdClock.SetSequence(300, 256)
			thirdClock.SetSequence(500, 256)
			value, err := seqHasher.GetHash(thirdClock)
			assertNoError(t, err, "Error getting hash")
			assert.Equals(t, value, "14-2")
		}()
	}
	wg.Wait()

	// Retrieve non-existent hash
	missingClock, err := seqHasher.GetClock("1234")
	assertTrue(t, err != nil, "Should return error for non-existent hash")
	assert.Equals(t, missingClock.GetSequence(50), uint64(0))
	assert.Equals(t, missingClock.GetSequence(80), uint64(0))
	assert.Equals(t, missingClock.GetSequence(150), uint64(0))
}