// writeBlockWithCas persists the block to the bucket using a CAS write.  On CAS
// contention the callback re-loads the stored block value, re-applies the entries,
// and retries; on success the block's cas is updated to the stored value.
func (b *BitFlagStorage) writeBlockWithCas(block IndexBlock, entries []*LogEntry) error {

	// use an empty initial value to force writeCasRaw() to call GET first
	value := []byte{}

	casOut, err := base.WriteCasRaw(b.bucket, block.Key(), value, block.Cas(), 0, func(value []byte) (updatedValue []byte, err error) {

		// Note: The following is invoked upon cas failure - may be called multiple times
		changeCacheExpvars.Add("writeSingleBlock-casRetryCount", 1)
		// Fix: the unmarshal error was previously assigned and ignored; adding
		// entries to a block that failed to unmarshal would corrupt it.
		if err := block.Unmarshal(value); err != nil {
			return nil, err
		}
		for _, entry := range entries {
			if err := block.AddEntry(entry); err != nil { // Wrong block for this entry
				return nil, err
			}
		}
		return block.Marshal()
	})

	if err != nil {
		return err
	}
	block.SetCas(casOut)
	return nil

}
Example n. 2
// writeClockCas merges updateClock into the channel clock and writes the result to the
// index bucket with CAS.  On contention the stored clock is re-read, the update is
// re-applied, and the write is retried; on success the local clock's cas is refreshed.
func (k *kvChannelIndex) writeClockCas(updateClock base.SequenceClock) error {
	// Apply the update locally ahead of the first cas attempt
	k.clock.UpdateWithClock(updateClock)
	marshalled, marshalErr := k.clock.Marshal()
	if marshalErr != nil {
		base.Warn("Error marshalling clock [%s] for update:%+v", base.PrintClock(k.clock), marshalErr)
		return marshalErr
	}

	clockKey := getChannelClockKey(k.channelName)
	casOut, writeErr := base.WriteCasRaw(k.indexBucket, clockKey, marshalled, k.clock.Cas(), 0, func(current []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		if unmarshalErr := k.clock.Unmarshal(current); unmarshalErr != nil {
			base.Warn("Error unmarshalling clock during update", unmarshalErr)
			return nil, unmarshalErr
		}
		// Re-apply the update on top of the freshly loaded clock
		k.clock.UpdateWithClock(updateClock)
		return k.clock.Marshal()
	})
	if writeErr != nil {
		return writeErr
	}

	k.clock.SetCas(casOut)
	return nil
}
Example n. 3
// Attempts to remove entries from the block
func (d *DenseBlock) RemoveEntrySet(entries []*LogEntry, bucket base.Bucket) (pendingRemoval []*LogEntry, err error) {

	pendingRemoval = d.removeEntries(entries)
	// If nothing was removed, don't update the block
	if len(pendingRemoval) == len(entries) {
		return entries, nil
	}

	casOut, writeErr := base.WriteCasRaw(bucket, d.Key, d.value, d.cas, 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		d.value = value
		d.clock = nil
		pendingRemoval = d.removeEntries(entries)

		// If nothing was removed, cancel the write
		if len(pendingRemoval) == len(entries) {
			return nil, nil
		}
		return d.value, nil
	})
	if writeErr != nil {
		base.LogTo("ChannelStorage+", "Error writing block to database. %v", err)
		return entries, writeErr
	}
	d.cas = casOut
	if len(pendingRemoval) != len(entries) {
		base.LogTo("ChannelStorage+", "Successfully removed set from block. key:[%s] #removed:[%d] #pending:[%d]",
			d.Key, len(entries)-len(pendingRemoval), len(pendingRemoval))
	}
	return pendingRemoval, nil
}
Example n. 4
// Adds entries to block and writes block to the bucket
func (d *DenseBlock) AddEntrySet(entries []*LogEntry, bucket base.Bucket) (overflow []*LogEntry, pendingRemoval []*LogEntry, updateClock PartitionClock, err error) {

	// Check if block is already full.  If so, return all entries as overflow.
	if len(d.value) > MaxBlockSize {
		base.LogTo("ChannelStorage+", "Block full - returning entries as overflow.  #entries:[%d]", len(entries))
		return entries, pendingRemoval, nil, nil
	}

	overflow, pendingRemoval, updateClock, addError := d.addEntries(entries)
	if addError != nil {
		// Error adding entries - reset the block and return error
		base.LogTo("ChannelStorage+", "Error adding entries to block. %v", err)
		d.loadBlock(bucket)
		return nil, nil, nil, addError
	}

	casOut, writeErr := base.WriteCasRaw(bucket, d.Key, d.value, d.cas, 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		d.value = value
		d.clock = nil
		// If block full, set overflow and cancel write
		if len(d.value) > MaxBlockSize {
			overflow = entries
			return nil, nil
		}
		overflow, pendingRemoval, updateClock, addError = d.addEntries(entries)
		if addError != nil {
			base.LogTo("ChannelStorage+", "Error adding entries to block: %v", addError)
			d.loadBlock(bucket)
			return nil, addError
		}
		return d.value, nil
	})
	if writeErr != nil {
		base.LogTo("ChannelStorage+", "Error writing block to database. %v", err)
		return entries, nil, nil, writeErr
	}
	d.cas = casOut
	base.LogTo("ChannelStorage+", "Successfully added set to block. key:[%s] #added:[%d] #overflow:[%d] #pendingRemoval:[%d]",
		d.Key, len(entries)-len(overflow), len(overflow), len(pendingRemoval))
	return overflow, pendingRemoval, updateClock, nil
}
Example n. 5
// GetHash returns the string hash ("hashValue-collisionIndex") for the given clock,
// adding the clock to the hash cache and the bucket's hash index if not already present.
// Concurrent writers on other nodes are handled via CAS; a benign "Already Exists"
// result from the CAS write is treated as success.
func (s *sequenceHasher) GetHash(clock base.SequenceClock) (string, error) {

	if clock == nil {
		return "", errors.New("Can't calculate hash for nil clock")
	}

	hashValue := s.calculateHash(clock)

	// Load stored clocks for this hash, to see if it's already been defined.
	// Note: getCacheValue and load are handled as separate operations to optimize locking.
	//   1. getCacheValue locks the cache, and retrieves the current cache entry (or creates a new empty entry if not found)
	//   2. cachedValue.load locks the entry, and loads from the DB if no previous entry is found
	cachedValue := s.getCacheValue(hashValue)
	cachedClocks, err := cachedValue.load(s.loadClocks)
	if err != nil {
		return "", err
	}

	// Check whether the cached clocks for the hash value match our clock
	exists, index := cachedClocks.Contains(clock.Value())
	if exists {
		seqHash := sequenceHash{
			hashValue:      hashValue,
			collisionIndex: uint16(index),
		}
		indexExpvars.Add("seqHash_getHash_hits", 1)
		return seqHash.String(), nil
	}

	// Didn't find a match in cache - update the index and the cache.  Get a write lock on the index value
	// first, to ensure only one goroutine on this SG attempts to write.  writeCas handling below handles
	// the case where other SGs are updating the value concurrently
	indexExpvars.Add("seqHash_getHash_misses", 1)

	// First copy the clock value, to ensure we store a non-mutable version in the cache
	clockValue := make([]uint64, len(clock.Value()))
	copy(clockValue, clock.Value())

	updateErr := func() error {
		cachedValue.lock.Lock()
		defer cachedValue.lock.Unlock()

		// If the number of cached clocks has changed, check whether someone else has added this clock
		// while we waited for the lock
		if len(cachedValue.clocks.Sequences) > len(cachedClocks.Sequences) {
			exists, index = cachedValue.clocks.Contains(clockValue)
			if exists {
				return nil
			}
		}

		// Add our clock to the cached clocks for this hash
		existingClocks := cachedValue.clocks
		existingClocks.Sequences = append(existingClocks.Sequences, clockValue)

		// Update the hash entry in the bucket
		key := kHashPrefix + strconv.FormatUint(hashValue, 10)
		initialValue, marshalErr := existingClocks.Marshal()
		index = len(existingClocks.Sequences) - 1
		if marshalErr != nil {
			return marshalErr
		}
		_, casErr := base.WriteCasRaw(s.bucket, key, initialValue, existingClocks.cas, int(s.hashExpiry), func(value []byte) (updatedValue []byte, err error) {
			// Note: The following is invoked upon cas failure - may be called multiple times
			base.LogTo("DIndex+", "CAS fail - reapplying changes for hash storage for key: %s", key)
			var sClocks storedClocks
			err = sClocks.Unmarshal(value)
			if err != nil {
				base.Warn("Error unmarshalling hash storage during update", err)
				return nil, err
			}
			exists, index = sClocks.Contains(clockValue)
			if exists {
				// return empty byte array to cancel the update
				return []byte{}, nil
			}
			// Not found - add
			sClocks.Sequences = append(sClocks.Sequences, clockValue)
			base.LogTo("DIndex+", "Reattempting stored hash write for key %s:", key)
			index = len(sClocks.Sequences) - 1
			return sClocks.Marshal()
		})
		// Fix: the CAS write error was previously dropped (the closure returned nil
		// unconditionally, making the later "Already Exists" check dead code).
		// "Already Exists" means another node stored the same entry concurrently,
		// so the clock is present - treat it as success.
		if casErr != nil && casErr.Error() != "Already Exists" {
			return casErr
		}
		return nil
	}()

	if updateErr != nil {
		return "", updateErr
	}

	indexExpvars.Add("writeCasRaw_hash", 1)

	seqHash := &sequenceHash{
		hashValue:      hashValue,
		collisionIndex: uint16(index),
	}
	return seqHash.String(), nil
}