Example #1
// Remove entry from request log and compact
func (this *RequestLog) ditch(id string) errors.Error {
	cacheNum := util.HashString(id, _CACHES)
	this.locks[cacheNum].Lock()
	defer this.locks[cacheNum].Unlock()
	logMap, ok := this.requestMaps[cacheNum][id]
	if ok {
		delete(this.requestMaps[cacheNum], id)
		l := len(this.requestMaps[cacheNum])

		// Nature abhors a vacuum!
		// We copy the last map entry onto the currently empty entry.
		// This is a quick way to keep the two caches in sync without
		// reallocating memory and/or copying huge quantities of data,
		// but it does mean that scans occurring during deletes may skip
		// later entries.
		// Since deletes are likely to purge huge numbers of entries,
		// copying subslices on a per-deleted-row basis could become a
		// significant bottleneck.
		// Given the large benefit in lock contention and memory usage, we
		// are willing to take that risk.
		if logMap.logIdx < l {
			this.requestCaches[cacheNum][logMap.logIdx] = this.requestCaches[cacheNum][l]
			// Re-point the moved entry's map record at its new slot index.
			newMap := this.requestMaps[cacheNum][this.requestCaches[cacheNum][l].RequestId]
			newMap.logIdx = logMap.logIdx
		}
		return nil
	} else {
		return errors.NewSystemStmtNotFoundError(nil, id)
	}
}
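
The swap-with-last compaction that ditch() performs, moving the final slice
element into the hole left by a delete and updating its recorded index in the
map, is easier to follow in isolation. The sketch below illustrates the same
idea on a simplified, hypothetical store type (store, entry, byID and items
are illustrative names, not part of the original code):

package main

import "fmt"

// entry is tracked both in a map (O(1) lookup by id) and in a slice
// (cheap ordered scans); idx records its current position in the slice.
type entry struct {
	id  string
	idx int
}

type store struct {
	byID  map[string]*entry
	items []*entry
}

func (s *store) add(id string) {
	e := &entry{id: id, idx: len(s.items)}
	s.items = append(s.items, e)
	s.byID[id] = e
}

// remove uses swap-with-last compaction: the final slice element is moved
// into the vacated slot and its recorded index is updated, so no subslice
// copying or reallocation is needed. A scan running concurrently with a
// remove may therefore see the moved element out of order or skip it.
func (s *store) remove(id string) bool {
	e, ok := s.byID[id]
	if !ok {
		return false
	}
	delete(s.byID, id)
	last := len(s.items) - 1
	if e.idx < last {
		moved := s.items[last]
		s.items[e.idx] = moved
		moved.idx = e.idx
	}
	s.items = s.items[:last]
	return true
}

func main() {
	s := &store{byID: map[string]*entry{}}
	for _, id := range []string{"a", "b", "c", "d"} {
		s.add(id)
	}
	s.remove("b")
	for _, e := range s.items {
		fmt.Printf("%s@%d ", e.id, e.idx) // prints: a@0 d@1 c@2
	}
	fmt.Println()
}
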
// Delete removes the named entries from the active requests keyspace,
// returning the names deleted so far when a request cannot be found.
func (b *activeRequestsKeyspace) Delete(deletes []string) ([]string, errors.Error) {
	for i, name := range deletes {
		done := server.ActiveRequestsDelete(name)

		// save memory allocations by making a new slice only on errors
		if !done {
			deleted := make([]string, i)
			if i > 0 {
				copy(deleted, deletes[0:i]) // deletes[0:i] were removed successfully
			}
			return deleted, errors.NewSystemStmtNotFoundError(nil, name)
		}
	}
	return deletes, nil
}
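
Delete follows a common bulk-operation pattern: the caller's slice is returned
untouched on complete success, and a fresh slice holding only the successfully
processed prefix is allocated when a failure cuts the loop short. A minimal,
self-contained sketch of the same pattern, assuming hypothetical process and
processAll helpers that are not part of the original API:

package main

import "fmt"

// process stands in for the per-name work; it fails on the name "missing".
func process(name string) bool {
	return name != "missing"
}

// processAll returns the input slice unchanged when every name succeeds, and
// allocates a new slice for the successful prefix only when a failure stops
// the loop part-way through.
func processAll(names []string) ([]string, error) {
	for i, name := range names {
		if !process(name) {
			done := make([]string, i)
			copy(done, names[:i]) // names[0:i] were handled before the failure
			return done, fmt.Errorf("%q not found", name)
		}
	}
	return names, nil // happy path: no allocation at all
}

func main() {
	done, err := processAll([]string{"a", "b", "missing", "c"})
	fmt.Println(done, err) // prints: [a b] "missing" not found
}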