Example No. 1
// Saves a new local revision to the external bucket.
func (s *Shadower) PushRevision(doc *document) {
	defer func() { atomic.AddUint64(&s.pushCount, 1) }()
	if !s.docIDMatches(doc.ID) {
		return
	} else if doc.newestRevID() == doc.UpstreamRev {
		return // This revision was pulled from the external bucket, so don't push it back!
	}

	var err error
	if doc.Flags&channels.Deleted != 0 {
		base.LogTo("Shadow", "Pushing %q, rev %q [deletion]", doc.ID, doc.CurrentRev)
		err = s.bucket.Delete(doc.ID)
	} else {
		base.LogTo("Shadow", "Pushing %q, rev %q", doc.ID, doc.CurrentRev)
		body := doc.getRevision(doc.CurrentRev)
		if body == nil {
			base.Warn("Can't get rev %q.%q to push to external bucket", doc.ID, doc.CurrentRev)
			return
		}
		err = s.bucket.Set(doc.ID, 0, body)
	}
	if err != nil {
		base.Warn("Error pushing rev of %q to external bucket: %v", doc.ID, err)
	}
}
Ejemplo n.º 2
0
// Adds a set of log entries to a block.  Returns:
//  overflow        Entries that didn't fit in the block
//  pendingRemoval  Entries with a parent that needs to be removed from the index,
//                  but the parent isn't in this block
func (d *DenseBlock) addEntries(entries []*LogEntry) (overflow []*LogEntry, pendingRemoval []*LogEntry, updateClock PartitionClock, err error) {

	blockFull := false
	partitionClock := make(PartitionClock)
	for i, entry := range entries {
		if !blockFull {
			removalRequired, err := d.addEntry(entry)
			base.LogTo("ChannelStorage+", "Adding entry to block.  key:[%s] block:[%s] vb.seq:[%d.%d]", entry.DocID, d.Key, entry.VbNo, entry.Sequence)
			if err != nil {
				base.LogTo("ChannelStorage+", "Error adding entry to block.  key:[%s] error:%v", entry.DocID, err)
				return nil, nil, nil, err
			}
			partitionClock.SetSequence(entry.VbNo, entry.Sequence)
			if removalRequired {
				if pendingRemoval == nil {
					pendingRemoval = make([]*LogEntry, 0)
				}
				pendingRemoval = append(pendingRemoval, entry)
			}
			if len(d.value) > MaxBlockSize {
				blockFull = true
			}
		} else {
			overflow = entries[i:]
			break
		}

	}
	return overflow, pendingRemoval, partitionClock, nil
}
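
The overflow return value makes block writes chainable: a caller can keep allocating blocks and re-feeding the leftover entries until none remain. Below is a minimal, self-contained sketch of that contract, using toy types rather than the real DenseBlock and LogEntry:

package main

import "fmt"

// Toy stand-ins for illustration; the real DenseBlock/LogEntry carry far more state.
type logEntry struct{ seq uint64 }

type block struct {
	capacity int
	entries  []logEntry
}

// addEntries mirrors the overflow contract above: fill until full, then hand
// back whatever didn't fit.
func (b *block) addEntries(entries []logEntry) (overflow []logEntry) {
	for i, e := range entries {
		if len(b.entries) >= b.capacity {
			return entries[i:]
		}
		b.entries = append(b.entries, e)
	}
	return nil
}

func main() {
	pending := []logEntry{{1}, {2}, {3}, {4}, {5}}
	blocks := 0
	// Chain blocks: feed each block's overflow into a freshly allocated one.
	for len(pending) > 0 {
		b := &block{capacity: 2}
		pending = b.addEntries(pending)
		blocks++
	}
	fmt.Println(blocks) // 3: five entries spread across blocks of capacity 2
}
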
Example No. 3
// Attempts to remove entries from the block
func (d *DenseBlock) RemoveEntrySet(entries []*LogEntry, bucket base.Bucket) (pendingRemoval []*LogEntry, err error) {

	pendingRemoval = d.removeEntries(entries)
	// If nothing was removed, don't update the block
	if len(pendingRemoval) == len(entries) {
		return entries, nil
	}

	casOut, writeErr := base.WriteCasRaw(bucket, d.Key, d.value, d.cas, 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		d.value = value
		d.clock = nil
		pendingRemoval = d.removeEntries(entries)

		// If nothing was removed, cancel the write
		if len(pendingRemoval) == len(entries) {
			return nil, nil
		}
		return d.value, nil
	})
	if writeErr != nil {
		base.LogTo("ChannelStorage+", "Error writing block to database. %v", err)
		return entries, writeErr
	}
	d.cas = casOut
	if len(pendingRemoval) != len(entries) {
		base.LogTo("ChannelStorage+", "Successfully removed set from block. key:[%s] #removed:[%d] #pending:[%d]",
			d.Key, len(entries)-len(pendingRemoval), len(pendingRemoval))
	}
	return pendingRemoval, nil
}
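
The callback passed to base.WriteCasRaw above is an optimistic-concurrency retry loop: on a CAS mismatch it is handed the latest value, recomputes the update, and the write is retried; returning nil cancels the write. A self-contained sketch of the same pattern against a toy in-memory bucket (writeCasRetry and casBucket are illustrative, not the real base API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// casBucket is a toy in-memory stand-in for a CAS-capable bucket.
type casBucket struct {
	mu    sync.Mutex
	value []byte
	cas   uint64
}

var errCasMismatch = errors.New("cas mismatch")

// writeCas succeeds only if the caller's cas matches the stored cas.
func (b *casBucket) writeCas(value []byte, cas uint64) (uint64, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if cas != b.cas {
		return 0, errCasMismatch
	}
	b.value = value
	b.cas++
	return b.cas, nil
}

func (b *casBucket) get() ([]byte, uint64) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.value, b.cas
}

// writeCasRetry has the same shape as the WriteCasRaw call above: try the
// write, and on a CAS mismatch reload the latest value, let the callback
// recompute, and retry. A nil result from the callback cancels the write.
func writeCasRetry(b *casBucket, value []byte, cas uint64,
	update func(current []byte) ([]byte, error)) (uint64, error) {
	for {
		casOut, err := b.writeCas(value, cas)
		if err == nil {
			return casOut, nil
		}
		value, cas = b.get()
		if value, err = update(value); err != nil || value == nil {
			return cas, err
		}
	}
}

func main() {
	b := &casBucket{}
	b.writeCas([]byte("v1"), 0) // seed; cas becomes 1
	// This write starts with a stale cas of 0, so the callback runs once.
	casOut, err := writeCasRetry(b, []byte("ignored"), 0, func(current []byte) ([]byte, error) {
		return append(current, '!'), nil
	})
	fmt.Println(casOut, err) // 2 <nil>; the stored value is now "v1!"
}
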
Example No. 4
func (dc *DatabaseContext) TakeDbOffline(reason string) error {
	base.LogTo("CRUD", "Taking Database : %v, offline", dc.Name)
	dbState := atomic.LoadUint32(&dc.State)
	//If the DB is already transitioning to offline, or is already offline, silently return
	if dbState == DBOffline || dbState == DBResyncing || dbState == DBStopping {
		return nil
	}

	if atomic.CompareAndSwapUint32(&dc.State, DBOnline, DBStopping) {

		//notify all active _changes feeds to close
		close(dc.ExitChanges)

		base.LogTo("CRUD", "Waiting for all active calls to complete on Database : %v", dc.Name)
		//Block until all current calls have returned, including _changes feeds
		dc.AccessLock.Lock()
		defer dc.AccessLock.Unlock()

		base.LogTo("CRUD", "Database : %v, is offline", dc.Name)
		//set DB state to Offline
		atomic.StoreUint32(&dc.State, DBOffline)

		if dc.EventMgr.HasHandlerForEvent(DBStateChange) {
			dc.EventMgr.RaiseDBStateChangeEvent(dc.Name, "offline", reason, *dc.Options.AdminInterface)
		}

		return nil
	} else {
		base.LogTo("CRUD", "Unable to take Database offline, database must be in Online state")
		return base.HTTPErrorf(http.StatusServiceUnavailable, "Unable to take Database offline, database must be in Online state")
	}
}
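
The state transition relies on CompareAndSwapUint32 so that exactly one caller can move the database from Online to Stopping, even when several requests race to stop it. A stripped-down sketch of the pattern, with hypothetical state constants in place of the real db package values:

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative states only; the real constants live in the db package.
const (
	stateOffline uint32 = iota
	stateOnline
	stateStopping
)

type database struct{ state uint32 }

// takeOffline transitions Online -> Stopping -> Offline. CompareAndSwap
// guarantees only one caller wins when several race to stop the database.
func (d *database) takeOffline() bool {
	if !atomic.CompareAndSwapUint32(&d.state, stateOnline, stateStopping) {
		return false // not online, or another caller is already stopping it
	}
	// ... drain in-flight work here ...
	atomic.StoreUint32(&d.state, stateOffline)
	return true
}

func main() {
	d := &database{state: stateOnline}
	fmt.Println(d.takeOffline()) // true: we won the transition
	fmt.Println(d.takeOffline()) // false: already offline
}
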
Example No. 5
func (k *kvChangeIndexReader) getOrCreateReader(channelName string, options ChangesOptions) (*KvChannelIndex, error) {

	// For continuous or longpoll processing, use the shared reader from the channelindexReaders map to coordinate
	// polling.
	if options.Wait {
		var err error
		index := k.getChannelReader(channelName)
		if index == nil {
			index, err = k.newChannelReader(channelName)
			IndexExpvars.Add("getOrCreateReader_create", 1)
			base.LogTo("DIndex+", "getOrCreateReader: Created new reader for channel %s", channelName)
		} else {
			IndexExpvars.Add("getOrCreateReader_get", 1)
			base.LogTo("DIndex+", "getOrCreateReader: Using existing reader for channel %s", channelName)
		}
		return index, err
	} else {
		// For non-continuous/non-longpoll, use a one-off reader, no onChange handling.
		indexPartitions, err := k.indexPartitionsCallback()
		if err != nil {
			return nil, err
		}
		return NewKvChannelIndex(channelName, k.indexReadBucket, indexPartitions, nil), nil

	}
}
Example No. 6
// Main loop that pulls changes from the external bucket. (Runs in its own goroutine.)
func (s *Shadower) readTapFeed() {
	vbucketsFilling := 0
	for event := range s.tapFeed.Events() {
		switch event.Opcode {
		case sgbucket.TapBeginBackfill:
			if vbucketsFilling == 0 {
				base.LogTo("Shadow", "Reading history of external bucket")
			}
			vbucketsFilling++
		case sgbucket.TapMutation, sgbucket.TapDeletion:
			key := string(event.Key)
			if !s.docIDMatches(key) {
				break
			}
			isDeletion := event.Opcode == sgbucket.TapDeletion
			if !isDeletion && event.Expiry > 0 {
				break // ignore ephemeral documents
			}
			err := s.pullDocument(key, event.Value, isDeletion, event.Sequence, event.Flags)
			if err != nil {
				base.Warn("Error applying change from external bucket: %v", err)
			}
			atomic.AddUint64(&s.pullCount, 1)
		case sgbucket.TapEndBackfill:
			if vbucketsFilling--; vbucketsFilling == 0 {
				base.LogTo("Shadow", "Caught up with history of external bucket")
			}
		}
	}
	base.LogTo("Shadow", "End of tap feed(?)")
}
Example No. 7
// Given a newly changed document (received from the tap feed), adds change entries to channels.
// The JSON must be the raw document from the bucket, with the metadata and all.
func (c *changeCache) DocChanged(docID string, docJSON []byte) {
	entryTime := time.Now()
	// ** This method does not directly access any state of c, so it doesn't lock.
	go func() {
		// Is this a user/role doc?
		if strings.HasPrefix(docID, auth.UserKeyPrefix) {
			c.processPrincipalDoc(docID, docJSON, true)
			return
		} else if strings.HasPrefix(docID, auth.RoleKeyPrefix) {
			c.processPrincipalDoc(docID, docJSON, false)
			return
		}

		// First unmarshal the doc (just its metadata, to save time/memory):
		doc, err := unmarshalDocumentSyncData(docJSON, false)
		if err != nil || !doc.hasValidSyncData() {
			base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
			return
		}

		if doc.Sequence <= c.initialSequence {
			return // Tap is sending us an old value from before I started up; ignore it
		}

		// Record a histogram of the Tap feed's lag:
		tapLag := time.Since(doc.TimeSaved) - time.Since(entryTime)
		lagMs := int(tapLag/(100*time.Millisecond)) * 100
		changeCacheExpvars.Add(fmt.Sprintf("lag-tap-%04dms", lagMs), 1)

		// If the doc update wasted any sequences due to conflicts, add empty entries for them:
		for _, seq := range doc.UnusedSequences {
			base.LogTo("Cache", "Received unused #%d for (%q / %q)", seq, docID, doc.CurrentRev)
			change := &LogEntry{
				Sequence:     seq,
				TimeReceived: time.Now(),
			}
			c.processEntry(change)
		}

		// Now add the entry for the new doc revision:
		change := &LogEntry{
			Sequence:     doc.Sequence,
			DocID:        docID,
			RevID:        doc.CurrentRev,
			Flags:        doc.Flags,
			TimeReceived: time.Now(),
			TimeSaved:    doc.TimeSaved,
			Channels:     doc.Channels,
		}
		base.LogTo("Cache", "Received #%d after %3dms (%q / %q)", change.Sequence, int(tapLag/time.Millisecond), change.DocID, change.RevID)

		changedChannels := c.processEntry(change)
		if c.onChange != nil && len(changedChannels) > 0 {
			c.onChange(changedChannels)
		}
	}()
}
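
The histogram bucketing above rounds each lag down to the nearest 100ms: integer division by 100*time.Millisecond truncates, and multiplying back by 100 yields the bucket's lower bound in milliseconds. A small self-contained sketch of the arithmetic:

package main

import (
	"fmt"
	"time"
)

// lagBucket reproduces the expvar bucketing: truncate to a 100ms boundary.
func lagBucket(lag time.Duration) int {
	return int(lag/(100*time.Millisecond)) * 100
}

func main() {
	for _, lag := range []time.Duration{
		37 * time.Millisecond,
		149 * time.Millisecond,
		1230 * time.Millisecond,
	} {
		// Prints lag-tap-0000ms, lag-tap-0100ms, lag-tap-1200ms
		fmt.Printf("lag-tap-%04dms\n", lagBucket(lag))
	}
}
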
Example No. 8
// Prepends an array of entries to this one, skipping ones that I already have.
// The new array needs to overlap with my current log, i.e. must contain the same sequence as
// c.logs[0], otherwise nothing will be added because the method can't confirm that there are no
// missing sequences in between.
// Returns the number of entries actually prepended.
func (c *channelCache) prependChanges(changes LogEntries, changesValidFrom uint64, openEnded bool) int {
	c.lock.Lock()
	defer c.lock.Unlock()

	log := c.logs
	if len(log) == 0 {
		// If my cache is empty, just copy the new changes:
		if len(changes) > 0 {
			if !openEnded && changes[len(changes)-1].Sequence < c.validFrom {
				return 0 // changes might not go all the way to the current time
			}
			if excess := len(changes) - c.options.channelCacheMaxLength; excess > 0 {
				changes = changes[excess:]
				changesValidFrom = changes[0].Sequence
			}
			c.logs = make(LogEntries, len(changes))
			copy(c.logs, changes)
			base.LogTo("Cache", "  Initialized cache of %q with %d entries from view (#%d--#%d)",
				c.channelName, len(changes), changes[0].Sequence, changes[len(changes)-1].Sequence)
		}
		c.validFrom = changesValidFrom
		c.addDocIDs(changes)
		return len(changes)

	} else if len(changes) == 0 {
		if openEnded && changesValidFrom < c.validFrom {
			c.validFrom = changesValidFrom
		}
		return 0

	} else {
		// Look for an overlap, and prepend everything up to that point:
		firstSequence := log[0].Sequence
		if changes[0].Sequence <= firstSequence {
			for i := len(changes) - 1; i >= 0; i-- {
				if changes[i].Sequence == firstSequence {
					if excess := i + len(log) - c.options.channelCacheMaxLength; excess > 0 {
						changes = changes[excess:]
						changesValidFrom = changes[0].Sequence
						i -= excess
					}
					if i > 0 {
						newLog := make(LogEntries, 0, i+len(log))
						newLog = append(newLog, changes[0:i]...)
						newLog = append(newLog, log...)
						c.logs = newLog
						base.LogTo("Cache", "  Added %d entries from view (#%d--#%d) to cache of %q",
							i, changes[0].Sequence, changes[i-1].Sequence, c.channelName)
					}
					c.validFrom = changesValidFrom
					return i
				}
			}
		}
		return 0
	}
}
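
The overlap scan is the safety check that makes prepending sound: older entries are added only if the new batch provably reaches the first cached sequence, since otherwise a gap could hide between the two ranges. A miniature version of that scan using bare sequence numbers instead of LogEntries, and ignoring the cache-size trimming:

package main

import "fmt"

// prependWithOverlap prepends older entries to log, keeping only the part of
// changes that precedes the first sequence already in log. It mirrors the
// overlap scan above.
func prependWithOverlap(log, changes []uint64) []uint64 {
	if len(log) == 0 || len(changes) == 0 || changes[0] > log[0] {
		return log // no provable overlap: prepend nothing
	}
	for i := len(changes) - 1; i >= 0; i-- {
		if changes[i] == log[0] {
			return append(append([]uint64{}, changes[:i]...), log...)
		}
	}
	return log
}

func main() {
	log := []uint64{40, 42, 45}
	changes := []uint64{31, 35, 38, 40}              // overlaps at 40
	fmt.Println(prependWithOverlap(log, changes))    // [31 35 38 40 42 45]

	gap := []uint64{31, 35}                          // can't rule out a gap before 40
	fmt.Println(prependWithOverlap(log, gap))        // [40 42 45] unchanged
}
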
Example No. 9
// Gets an external document and applies it as a new revision to the managed document.
func (s *Shadower) pullDocument(key string, value []byte, isDeletion bool, cas uint64, flags uint32) error {
	var body Body
	if isDeletion {
		body = Body{"_deleted": true}
	} else {
		if err := json.Unmarshal(value, &body); err != nil {
			base.LogTo("Shadow", "Doc %q is not JSON; skipping", key)
			return nil
		}
	}

	db, _ := CreateDatabase(s.context)
	expiry, err := body.getExpiry()
	if err != nil {
		return base.HTTPErrorf(http.StatusBadRequest, "Invalid expiry: %v", err)
	}
	_, err = db.updateDoc(key, false, expiry, func(doc *document) (Body, AttachmentData, error) {
		// (Be careful: this block can be invoked multiple times if there are races!)
		if doc.UpstreamCAS != nil && *doc.UpstreamCAS == cas {
			return nil, nil, couchbase.UpdateCancel // we already have this doc revision
		}
		base.LogTo("Shadow+", "Pulling %q, CAS=%x ... have UpstreamRev=%q, UpstreamCAS=%x", key, cas, doc.UpstreamRev, doc.UpstreamCAS)

		// Compare this body to the current revision body to see if it's an echo:
		parentRev := doc.UpstreamRev
		newRev := doc.CurrentRev
		if !reflect.DeepEqual(body, doc.getRevision(newRev)) {
			// Nope, it's not. Assign it a new rev ID
			generation, _ := parseRevID(parentRev)
			newRev = createRevID(generation+1, parentRev, body)
		}
		doc.UpstreamRev = newRev
		doc.UpstreamCAS = &cas
		body["_rev"] = newRev
		if doc.History[newRev] == nil {
			// It's a new rev, so add it to the history:
			if parentRev != "" && !doc.History.contains(parentRev) {
				// parent rev does not exist in the doc history;
				// set parentRev to "" to create a new conflicting
				// branch in the revtree
				base.Warn("Shadow: Adding revision as conflict branch, parent id %q is missing", parentRev)
				parentRev = ""
			}
			doc.History.addRevision(RevInfo{ID: newRev, Parent: parentRev, Deleted: isDeletion})
			base.LogTo("Shadow", "Pulling %q, CAS=%x --> rev %q", key, cas, newRev)
		} else {
			// We already have this rev; but don't cancel, because we do need to update the
			// doc's UpstreamRev/UpstreamCAS fields.
			base.LogTo("Shadow+", "Not pulling %q, CAS=%x (echo of rev %q)", key, cas, newRev)
		}
		return body, nil, nil
	})
	if err == couchbase.UpdateCancel {
		err = nil
	}
	return err
}
Example No. 10
// Looks up the raw JSON data of a revision that's been archived to a separate doc.
// If the revision isn't found (e.g. has been deleted by compaction) returns 404 error.
func (db *DatabaseContext) getOldRevisionJSON(docid string, revid string) ([]byte, error) {
	data, _, err := db.Bucket.GetRaw(oldRevisionKey(docid, revid))
	if base.IsDocNotFoundError(err) {
		base.LogTo("CRUD+", "No old revision %q / %q", docid, revid)
		err = base.HTTPErrorf(404, "missing")
	}
	if data != nil {
		base.LogTo("CRUD+", "Got old revision %q / %q --> %d bytes", docid, revid, len(data))
	}
	return data, err
}
Example No. 11
// Takes a DB online, first reloading the DB config
func (h *handler) handleDbOnline() error {
	h.assertAdminOnly()
	dbState := atomic.LoadUint32(&h.db.State)
	//If the DB is already transitioning to online, or is already online, silently return
	if dbState == db.DBOnline || dbState == db.DBStarting {
		return nil
	}

	//If the DB is currently re-syncing return an error asking the user to retry later
	if dbState == db.DBResyncing {
		return base.HTTPErrorf(http.StatusServiceUnavailable, "Database _resync is in progress, this may take some time, try again later")
	}

	body, err := h.readBody()
	if err != nil {
		return err
	}

	var input struct {
		Delay int `json:"delay"`
	}

	input.Delay = kDefaultDBOnlineDelay

	// Unmarshal errors are ignored; input.Delay keeps its default if the body is empty or invalid
	json.Unmarshal(body, &input)

	base.LogTo("CRUD", "Taking Database : %v, online in %v seconds", h.db.Name, input.Delay)

	timer := time.NewTimer(time.Duration(input.Delay) * time.Second)
	go func() {
		<-timer.C

		//Take a write lock on the Database context, so that we can cycle the underlying Database
		// without any other call running concurrently
		h.db.AccessLock.Lock()
		defer h.db.AccessLock.Unlock()

		//We can only transition to Online from Offline state
		if atomic.CompareAndSwapUint32(&h.db.State, db.DBOffline, db.DBStarting) {

			if _, err := h.server.ReloadDatabaseFromConfig(h.db.Name, true); err != nil {
				base.LogError(err)
				return
			}

			//Set DB state to DBOnline, this will cause new API requests to be accepted
			atomic.StoreUint32(&h.server.databases_[h.db.Name].State, db.DBOnline)
		} else {
			base.LogTo("CRUD", "Unable to take Database : %v, online after %v seconds, database must be in Offline state", h.db.Name, input.Delay)
		}
	}()

	return nil
}
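
The NewTimer-plus-goroutine construction above is the long-hand form of time.AfterFunc, which runs a callback on its own goroutine after the delay. A minimal sketch of the delayed-transition shape (the done channel only exists to keep the example alive until the callback fires):

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 2 // seconds, like the handler's "delay" body parameter
	done := make(chan struct{})
	// time.AfterFunc collapses the NewTimer + goroutine + channel-receive
	// pattern above into one call.
	time.AfterFunc(time.Duration(delay)*time.Second, func() {
		fmt.Println("bringing database online")
		close(done)
	})
	<-done
}
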
Example No. 12
// Handles a newly-arrived LogEntry.
func (c *changeCache) processEntry(change *LogEntry) base.Set {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.logsDisabled {
		return nil
	}

	sequence := change.Sequence
	nextSequence := c.nextSequence
	if _, found := c.receivedSeqs[sequence]; found {
		base.LogTo("Cache+", "  Ignoring duplicate of #%d", sequence)
		return nil
	}
	c.receivedSeqs[sequence] = struct{}{}
	// FIX: c.receivedSeqs grows monotonically. Need a way to remove old sequences.

	var changedChannels base.Set
	if sequence == nextSequence || nextSequence == 0 {
		// This is the expected next sequence so we can add it now:
		changedChannels = c._addToCache(change)
		// Also add any pending sequences that are now contiguous:
		changedChannels = changedChannels.Union(c._addPendingLogs())
	} else if sequence > nextSequence {
		// There's a missing sequence (or several), so put this one on ice until it arrives:
		heap.Push(&c.pendingLogs, change)
		numPending := len(c.pendingLogs)
		base.LogTo("Cache", "  Deferring #%d (%d now waiting for #%d...#%d)",
			sequence, numPending, nextSequence, c.pendingLogs[0].Sequence-1)
		changeCacheExpvars.Get("maxPending").(*base.IntMax).SetIfMax(int64(numPending))
		if numPending > c.options.CachePendingSeqMaxNum {
			// Too many pending; add the oldest one:
			changedChannels = c._addPendingLogs()
		}
	} else if sequence > c.initialSequence {
		// Out-of-order sequence received!
		// Remove from skipped sequence queue
		if !c.WasSkipped(sequence) {
			// Error removing from skipped sequences
			base.LogTo("Cache", "  Received unexpected out-of-order change - not in skippedSeqs (seq %d, expecting %d) doc %q / %q", sequence, nextSequence, change.DocID, change.RevID)
		} else {
			base.LogTo("Cache", "  Received previously skipped out-of-order change (seq %d, expecting %d) doc %q / %q ", sequence, nextSequence, change.DocID, change.RevID)
			change.Skipped = true
		}

		changedChannels = c._addToCache(change)
		// Add to cache before removing from skipped, to ensure lowSequence doesn't get incremented until results are available
		// in cache
		c.RemoveSkipped(sequence)
	}
	return changedChannels
}
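
pendingLogs is a min-heap that buffers out-of-order sequences until the expected one arrives, then releases everything contiguous in one go. A self-contained sketch of that buffering discipline using container/heap, with bare sequence numbers and none of the cache state:

package main

import (
	"container/heap"
	"fmt"
)

// seqHeap is a min-heap of sequence numbers, playing the role of pendingLogs.
type seqHeap []uint64

func (h seqHeap) Len() int            { return len(h) }
func (h seqHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h seqHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *seqHeap) Push(x interface{}) { *h = append(*h, x.(uint64)) }
func (h *seqHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// buffer holds out-of-order sequences until the expected one arrives.
type buffer struct {
	next    uint64
	pending seqHeap
}

func (b *buffer) receive(seq uint64) (released []uint64) {
	if seq != b.next {
		heap.Push(&b.pending, seq) // gap: park it until the gap fills
		return nil
	}
	released = append(released, seq)
	b.next++
	// Drain any parked sequences that are now contiguous.
	for b.pending.Len() > 0 && b.pending[0] == b.next {
		released = append(released, heap.Pop(&b.pending).(uint64))
		b.next++
	}
	return released
}

func main() {
	b := &buffer{next: 1}
	for _, seq := range []uint64{1, 3, 4, 2} {
		fmt.Println(seq, "->", b.receive(seq))
	}
	// 1 -> [1], 3 -> [], 4 -> [], 2 -> [2 3 4]
}
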
Example No. 13
/*
 * This is not part of the OAuth 2.0 spec; it handles the user credentials
 * entered in the login form. Authenticates the credentials POST'd from the
 * web form against the db users, and returns an OAuth 2.0 Authorization
 * Response.
 */
func (h *handler) handleOidcTestProviderAuthenticate() error {
	if !h.db.DatabaseContext.Options.UnsupportedOptions.OidcTestProvider.Enabled {
		return base.HTTPErrorf(http.StatusForbidden, "OIDC test provider is not enabled")
	}

	requestParams := h.rq.URL.Query()
	username := h.rq.FormValue("username")
	tokenttl, err := strconv.Atoi(h.rq.FormValue("tokenttl"))
	if err != nil {
		tokenttl = defaultIdTokenTTL
	}

	tokenDuration := time.Duration(tokenttl) * time.Second

	authenticated := h.rq.FormValue("authenticated")

	redirect_uri := requestParams.Get("redirect_uri")

	base.LogTo("OIDC+", "handleOidcTestProviderAuthenticate() called.  username: %s authenticated: %s", username, authenticated)

	if username == "" || authenticated == "" {
		base.LogTo("OIDC+", "user did not enter valid credentials -- username or authenticated is empty")
		errorParams := "?error=invalid_request&error_description=User failed authentication"
		h.setHeader("Location", requestParams.Get("redirect_uri")+errorParams)
		h.response.WriteHeader(http.StatusFound)
		return nil

	}

	scope := requestParams.Get("scope")
	scopeMap := scopeStringToMap(scope)

	//Generate the return code by base64 encoding the username
	code := base64.StdEncoding.EncodeToString([]byte(username))

	authCodeTokenMap[username] = AuthState{CallbackURL: redirect_uri, TokenTTL: tokenDuration, Scopes: scopeMap}

	location_url, err := url.Parse(redirect_uri)
	if err != nil {
		return err
	}
	query := location_url.Query()
	query.Set("code", code)
	query.Set("state", "af0ifjsldkj")
	location_url.RawQuery = query.Encode()
	h.setHeader("Location", location_url.String())
	h.response.WriteHeader(http.StatusFound)

	return nil

}
Example No. 14
// Creates a new block and adds it to the block list
func (l *DenseBlockList) AddBlock() (*DenseBlock, error) {

	// Mark previous block inactive
	if l.activeBlock != nil {
		l.activeBlock.MarkInactive()
	}

	nextIndex := l.generateNextBlockIndex()
	var nextStartClock PartitionClock
	if l.activeBlock == nil {
		// No previous active block - new block list
		nextStartClock = make(PartitionClock)
	} else {
		// Determine index and startclock from previous active block
		nextStartClock = l.activeBlock.getCumulativeClock()
	}

	base.LogTo("ChannelStorage+", "Adding block to list. channel:[%s] partition:[%d] index:[%d]", l.channelName, l.partition, nextIndex)

	nextBlockKey := l.generateBlockKey(nextIndex)
	block := NewDenseBlock(nextBlockKey, nextStartClock)

	// Add the new block to the list
	listEntry := DenseBlockListEntry{
		BlockIndex: nextIndex,
		StartClock: nextStartClock,
	}
	l.blocks = append(l.blocks, listEntry)
	// Do a CAS-safe write of the active list
	value, err := l.marshalActive()
	if err != nil {
		return nil, err
	}

	casOut, err := l.indexBucket.WriteCas(l.activeKey, 0, 0, l.activeCas, value, sgbucket.Raw)
	if err != nil {
		// CAS error.  If there's a concurrent writer for this partition, assume they have created the new block.
		//  Re-initialize the current block list, and get the active block key from there.
		l.initDenseBlockList()
		if len(l.blocks) == 0 {
			return nil, fmt.Errorf("Unable to determine active block after DenseBlockList cas write failure")
		}
		latestEntry := l.blocks[len(l.blocks)-1]
		return NewDenseBlock(l.generateBlockKey(latestEntry.BlockIndex), latestEntry.StartClock), nil
	}
	l.activeCas = casOut
	l.activeBlock = block
	base.LogTo("ChannelStorage+", "Successfully added block to list. channel:[%s] partition:[%d] index:[%d]", l.channelName, l.partition, nextIndex)

	return block, nil
}
Example No. 15
// Creates a Go-channel of all the changes made on a channel.
// Does NOT handle the Wait option. Does NOT check authorization.
func (db *Database) changesFeed(channel string, options ChangesOptions) (<-chan *ChangeEntry, error) {
	dbExpvars.Add("channelChangesFeeds", 1)
	log, err := db.changeCache.GetChanges(channel, options)
	base.LogTo("DIndex+", "[changesFeed] Found %d changes for channel %s", len(log), channel)
	if err != nil {
		return nil, err
	}

	if len(log) == 0 {
		// There are no entries newer than 'since'. Return an empty feed:
		feed := make(chan *ChangeEntry)
		close(feed)
		return feed, nil
	}

	feed := make(chan *ChangeEntry, 1)
	go func() {
		defer close(feed)
		// Now write each log entry to the 'feed' channel in turn:
		for _, logEntry := range log {
			if !options.Conflicts && (logEntry.Flags&channels.Hidden) != 0 {
				//continue  // FIX: had to comment this out.
				// This entry is shadowed by a conflicting one. We would like to skip it.
				// The problem is that if this is the newest revision of this doc, then the
				// doc will appear under this sequence # in the changes view, which means
				// we won't emit the doc at all because we already stopped emitting entries
				// from the view before this point.
			}
			if logEntry.Sequence >= options.Since.TriggeredBy {
				options.Since.TriggeredBy = 0
			}
			seqID := SequenceID{
				Seq:         logEntry.Sequence,
				TriggeredBy: options.Since.TriggeredBy,
			}

			change := makeChangeEntry(logEntry, seqID, channel)

			base.LogTo("Changes+", "Sending seq:%v from channel %s", seqID, channel)
			select {
			case <-options.Terminator:
				base.LogTo("Changes+", "Aborting changesFeed")
				return
			case feed <- &change:
			}
		}
	}()
	return feed, nil
}
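
The select on options.Terminator is what keeps an abandoned feed from leaking its producer goroutine: the send and the abort are raced in one select, and the deferred close lets consumers simply range over the channel. The same shape, reduced to ints:

package main

import "fmt"

// feedEntries streams entries to a channel, honoring a terminator so an
// abandoned consumer doesn't leak the producer goroutine.
func feedEntries(entries []int, terminator chan struct{}) <-chan int {
	feed := make(chan int, 1)
	go func() {
		defer close(feed)
		for _, e := range entries {
			select {
			case <-terminator: // consumer gave up: stop producing
				return
			case feed <- e:
			}
		}
	}()
	return feed
}

func main() {
	terminator := make(chan struct{})
	feed := feedEntries([]int{1, 2, 3, 4, 5}, terminator)
	for e := range feed {
		fmt.Println(e)
		if e == 3 {
			close(terminator) // abort: the producer exits and closes feed
			break
		}
	}
}
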
Example No. 16
func (k *kvChangeIndexReader) GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error) {

	var sinceClock base.SequenceClock
	if options.Since.Clock == nil {
		// If there's no since clock, we may be in backfill for another channel - revert to the triggered by clock.
		if options.Since.TriggeredByClock != nil {
			sinceClock = options.Since.TriggeredByClock
		} else {
			sinceClock = base.NewSequenceClockImpl()
		}
	} else {
		sinceClock = options.Since.Clock
	}

	reader, err := k.getOrCreateReader(channelName, options)
	if err != nil {
		base.Warn("Error obtaining channel reader (need partition index?) for channel %s", channelName)
		return nil, err
	}
	changes, err := reader.getChanges(sinceClock)
	if err != nil {
		base.LogTo("DIndex+", "No clock found for channel %s, assuming no entries in index", channelName)
		return nil, nil
	}

	// Limit handling
	if options.Limit > 0 && len(changes) > options.Limit {
		limitResult := make([]*LogEntry, options.Limit)
		copy(limitResult, changes)
		return limitResult, nil
	}

	return changes, nil
}
Example No. 17
func (c *changeCache) processPrincipalDoc(docID string, docJSON []byte, isUser bool) {
	// Currently the cache isn't really doing much with user docs; mostly it needs to know about
	// them because they have sequence numbers, so without them the sequence of sequences would
	// have gaps in it, causing later sequences to get stuck in the queue.
	princ, err := c.unmarshalPrincipal(docJSON, isUser)
	if princ == nil {
		base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
		return
	}
	sequence := princ.Sequence()
	c.lock.RLock()
	initialSequence := c.initialSequence
	c.lock.RUnlock()
	if sequence <= initialSequence {
		return // Tap is sending us an old value from before I started up; ignore it
	}

	// Now add the (somewhat fictitious) entry:
	change := &LogEntry{
		Sequence:     sequence,
		TimeReceived: time.Now(),
	}
	if isUser {
		change.DocID = "_user/" + princ.Name()
	} else {
		change.DocID = "_role/" + princ.Name()
	}

	base.LogTo("Cache", "Received #%d (%q)", change.Sequence, change.DocID)

	changedChannels := c.processEntry(change)
	if c.onChange != nil && len(changedChannels) > 0 {
		c.onChange(changedChannels)
	}
}
Example No. 18
func (db *Database) setOldRevisionJSON(docid string, revid string, body []byte) error {
	base.LogTo("CRUD+", "Saving old revision %q / %q (%d bytes)", docid, revid, len(body))

	// Set old revisions to expire after 5 minutes.  Future enhancement to make this a config
	// setting might be appropriate.
	return db.Bucket.SetRaw(oldRevisionKey(docid, revid), 300, body)
}
Example No. 19
// Moves a revision's ancestor's body out of the document object and into a separate db doc.
func (db *Database) backupAncestorRevs(doc *document, revid string) error {
	// Find an ancestor that still has JSON in the document:
	var json []byte
	for {
		if revid = doc.History.getParent(revid); revid == "" {
			return nil // No ancestors with JSON found
		} else if json = doc.getRevisionJSON(revid); json != nil {
			break
		}
	}

	// Store the JSON as a separate doc in the bucket:
	if err := db.setOldRevisionJSON(doc.ID, revid, json); err != nil {
		// This isn't fatal since we haven't lost any information; just warn about it.
		base.Warn("backupAncestorRevs failed: doc=%q rev=%q err=%v", doc.ID, revid, err)
		return err
	}

	// Nil out the rev's body in the document struct:
	if revid == doc.CurrentRev {
		doc.body = nil
	} else {
		doc.History.setRevisionBody(revid, nil)
	}
	base.LogTo("CRUD+", "Backed up obsolete rev %q/%q", doc.ID, revid)
	return nil
}
Example No. 20
// Updates a document's channel/role UserAccessMap with new access settings from an AccessMap.
// Returns an array of the user/role names whose access has changed as a result.
func (accessMap *UserAccessMap) updateAccess(doc *document, newAccess channels.AccessMap) (changedUsers []string) {
	// Update users already appearing in doc.Access:
	for name, access := range *accessMap {
		if access.UpdateAtSequence(newAccess[name], doc.Sequence) {
			if len(access) == 0 {
				delete(*accessMap, name)
			}
			changedUsers = append(changedUsers, name)
		}
	}
	// Add new users who are in newAccess but not accessMap:
	for name, access := range newAccess {
		if _, existed := (*accessMap)[name]; !existed {
			if *accessMap == nil {
				*accessMap = UserAccessMap{}
			}
			(*accessMap)[name] = channels.AtSequence(access, doc.Sequence)
			changedUsers = append(changedUsers, name)
		}
	}
	if changedUsers != nil {
		what := "channel"
		if accessMap == &doc.RoleAccess {
			what = "role"
		}
		base.LogTo("Access", "Doc %q grants %s access: %v", doc.ID, what, *accessMap)
	}
	return changedUsers
}
Example No. 21
// Starts the listener queue for the event manager
func (em *EventManager) Start(maxProcesses uint, waitTime int) {

	if maxProcesses == 0 {
		maxProcesses = kMaxActiveEvents
	}
	if waitTime < 0 {
		em.waitTime = kEventWaitTime
	} else {
		em.waitTime = waitTime
	}

	base.LogTo("Events", "Starting event manager with max processes:%d, wait time:%d ms", maxProcesses, em.waitTime)
	// activeCountChannel limits the number of concurrent events being processed
	em.activeCountChannel = make(chan bool, maxProcesses)

	// asyncEventChannel stores the incoming events.  It's set to 3x activeCountChannel, to
	// handle temporary spikes in event inflow
	em.asyncEventChannel = make(chan Event, 3*maxProcesses)

	// Start the event channel worker go routine, which will work the event queue and spawn goroutines to process the
	// event.  Blocks if the activeCountChannel is full, to prevent spawning more than cap(activeCountChannel)
	// goroutines.
	go func() {
		for event := range em.asyncEventChannel {
			em.activeCountChannel <- true
			go em.ProcessEvent(event)
		}
	}()

}
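
The buffered activeCountChannel works as a counting semaphore: each send claims a worker slot and blocks once maxProcesses slots are taken, and each worker releases its slot by receiving. A self-contained sketch of the pattern with plain ints standing in for Events:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxProcesses = 3
	// A buffered channel acts as a counting semaphore: a send claims a slot
	// and blocks once maxProcesses slots are taken, capping concurrency.
	active := make(chan bool, maxProcesses)
	events := make(chan int, 3*maxProcesses) // buffered queue absorbs bursts, as above

	const numEvents = 8 // fits in the buffer, so the sends below don't block
	var wg sync.WaitGroup
	wg.Add(numEvents)
	for i := 0; i < numEvents; i++ {
		events <- i
	}
	close(events)

	// Same shape as the worker goroutine in Start: claim a slot, then spawn.
	for event := range events {
		active <- true
		go func(e int) {
			defer wg.Done()
			defer func() { <-active }()       // release the slot when finished
			time.Sleep(10 * time.Millisecond) // simulated handler work
			fmt.Println("processed event", e)
		}(event)
	}
	wg.Wait()
}
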
Example No. 22
// HTTP handler for _dumpchannel
func (h *handler) handleDumpChannel() error {
	channelName := h.PathVar("channel")
	since := h.getIntQuery("since", 0)
	base.LogTo("HTTP", "Dump channel %q", channelName)

	chanLog := h.db.GetChangeLog(channelName, since)
	if len(chanLog) == 0 {
		return base.HTTPErrorf(http.StatusNotFound, "no such channel")
	}
	title := fmt.Sprintf("/%s: “%s” Channel", html.EscapeString(h.db.Name), html.EscapeString(channelName))
	h.setHeader("Content-Type", `text/html; charset="UTF-8"`)
	h.response.Write([]byte(fmt.Sprintf(
		`<!DOCTYPE html><html><head><title>%s</title></head><body>
		<h1>%s</h1><code>
		<p>Since = %d</p>
		<table border=1>
		`,
		title, title, chanLog[0].Sequence-1)))
	h.response.Write([]byte("\t<tr><th>Seq</th><th>Doc</th><th>Rev</th><th>Flags</th></tr>\n"))
	for _, entry := range chanLog {
		h.response.Write([]byte(fmt.Sprintf("\t<tr><td>%d</td><td>%s</td><td>%s</td><td>%08b</td>",
			entry.Sequence,
			html.EscapeString(entry.DocID), html.EscapeString(entry.RevID), entry.Flags)))
		h.response.Write([]byte("</tr>\n"))
	}
	h.response.Write([]byte("</table>\n</code></html></body>"))
	return nil
}
Example No. 23
// HTTP handler for _dump
func (h *handler) handleDump() error {
	viewName := h.PathVar("view")
	base.LogTo("HTTP", "Dump view %q", viewName)
	opts := db.Body{"stale": false, "reduce": false}
	result, err := h.db.Bucket.View(db.DesignDocSyncGateway, viewName, opts)
	if err != nil {
		return err
	}
	title := fmt.Sprintf("/%s: “%s” View", html.EscapeString(h.db.Name), html.EscapeString(viewName))
	h.setHeader("Content-Type", `text/html; charset="UTF-8"`)
	h.response.Write([]byte(fmt.Sprintf(
		`<!DOCTYPE html><html><head><title>%s</title></head><body>
		<h1>%s</h1><code>
		<table border=1>
		`,
		title, title)))
	h.response.Write([]byte("\t<tr><th>Key</th><th>Value</th><th>ID</th></tr>\n"))
	for _, row := range result.Rows {
		key, _ := json.Marshal(row.Key)
		value, _ := json.Marshal(row.Value)
		h.response.Write([]byte(fmt.Sprintf("\t<tr><td>%s</td><td>%s</td><td><em>%s</em></td>",
			html.EscapeString(string(key)), html.EscapeString(string(value)), html.EscapeString(row.ID))))
		h.response.Write([]byte("</tr>\n"))
	}
	h.response.Write([]byte("</table>\n</code></html></body>"))
	return nil
}
Example No. 24
// Saves the information for a user/role.
func (auth *Authenticator) Save(p Principal) error {
	if err := p.validate(); err != nil {
		return err
	}

	data, err := json.Marshal(p)
	if err != nil {
		return err
	}
	if err := auth.bucket.SetRaw(p.DocID(), 0, data); err != nil {
		return err
	}
	if user, ok := p.(User); ok {
		if user.Email() != "" {
			info := userByEmailInfo{user.Name()}
			if err := auth.bucket.Set(docIDForUserEmail(user.Email()), 0, info); err != nil {
				return err
			}
			//FIX: Fail if email address is already registered to another user
			//FIX: Unregister old email address if any
		}
	}
	base.LogTo("Auth", "Saved %s: %s", p.DocID(), data)
	return nil
}
Example No. 25
func (auth *Authenticator) rebuildChannels(princ Principal) error {
	channels := princ.ExplicitChannels().Copy()

	// Changes for vbucket sequence management.  We can't determine relative ordering of sequences
	// across vbuckets.  To avoid redundant channel backfills during changes processing, we maintain
	// the previous vb/seq for a channel in PreviousChannels.  If that channel is still present during
	// this rebuild, we reuse the vb/seq from PreviousChannels (using UpdateIfPresent).  If PreviousChannels
	// is nil, reverts to normal sequence handling.

	previousChannels := princ.PreviousChannels().Copy()
	if previousChannels != nil {
		channels.UpdateIfPresent(previousChannels)
	}

	if auth.channelComputer != nil {
		viewChannels, err := auth.channelComputer.ComputeChannelsForPrincipal(princ)
		if err != nil {
			base.Warn("channelComputer.ComputeChannelsForPrincipal failed on %v: %v", princ, err)
			return err
		}
		if previousChannels != nil {
			viewChannels.UpdateIfPresent(previousChannels)
		}
		channels.Add(viewChannels)
	}
	// always grant access to the public document channel
	channels.AddChannel(ch.DocumentStarChannel, 1)

	base.LogTo("Access", "Computed channels for %q: %s", princ.Name(), channels)
	princ.SetPreviousChannels(nil)
	princ.setChannels(channels)

	return nil

}
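
UpdateIfPresent is what prevents the redundant backfills described in the comment above: a channel that survives the rebuild keeps the sequence recorded in PreviousChannels rather than the new, higher one. A sketch of those semantics with a toy map type (the real TimedSet tracks vb/seq pairs, and updateIfPresent here is illustrative, not the real API):

package main

import "fmt"

// timedSet maps channel name -> the sequence at which access was granted.
type timedSet map[string]uint64

// updateIfPresent mirrors the described semantics: for every channel already
// in ts, reuse the sequence remembered in prev, so an unchanged grant keeps
// its original sequence and doesn't trigger a redundant backfill.
func (ts timedSet) updateIfPresent(prev timedSet) {
	for name := range ts {
		if seq, ok := prev[name]; ok {
			ts[name] = seq
		}
	}
}

func main() {
	channels := timedSet{"news": 42, "sports": 42} // rebuilt at seq 42
	previous := timedSet{"news": 7}                // "news" was first granted at seq 7
	channels.updateIfPresent(previous)
	fmt.Println(channels) // map[news:7 sports:42]
}
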
Example No. 26
func (db *Database) MultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error) {
	if len(chans) == 0 {
		return nil, nil
	}

	if (options.Continuous || options.Wait) && options.Terminator == nil {
		base.Warn("MultiChangesFeed: Terminator missing for Continuous/Wait mode")
	}
	if db.SequenceType == IntSequenceType {
		base.LogTo("Changes+", "Int sequence multi changes feed...")
		return db.SimpleMultiChangesFeed(chans, options)
	} else {
		base.LogTo("Changes+", "Vector multi changes feed...")
		return db.VectorMultiChangesFeed(chans, options)
	}
}
Example No. 27
func (h *handler) handleExpvar() error {
	base.LogTo("HTTP", "debuggin'")
	grTracker.recordSnapshot()
	h.rq.URL.Path = strings.Replace(h.rq.URL.Path, kDebugURLPathPrefix, "/debug/vars", 1)
	http.DefaultServeMux.ServeHTTP(h.response, h.rq)
	return nil
}
Example No. 28
// Updates the Channels property of a document object with current & past channels.
// Returns the set of channels that have changed (document joined or left in this revision)
func (doc *document) updateChannels(newChannels base.Set) (changedChannels base.Set) {
	var changed []string
	oldChannels := doc.Channels
	if oldChannels == nil {
		oldChannels = channels.ChannelMap{}
		doc.Channels = oldChannels
	} else {
		// Mark every no-longer-current channel as unsubscribed:
		curSequence := doc.Sequence
		for channel, removal := range oldChannels {
			if removal == nil && !newChannels.Contains(channel) {
				oldChannels[channel] = &channels.ChannelRemoval{
					Seq:     curSequence,
					RevID:   doc.CurrentRev,
					Deleted: doc.hasFlag(channels.Deleted)}
				changed = append(changed, channel)
			}
		}
	}

	// Mark every current channel as subscribed:
	for channel := range newChannels {
		if value, exists := oldChannels[channel]; value != nil || !exists {
			oldChannels[channel] = nil
			changed = append(changed, channel)
		}
	}
	if changed != nil {
		base.LogTo("CRUD", "\tDoc %q in channels %q", doc.ID, newChannels)
		changedChannels = channels.SetOf(changed...)
	}
	return
}
Example No. 29
// Adds a set of log entries to the channel index
func (b *BitFlagStorage) AddEntrySet(entries []*LogEntry) (clockUpdates base.SequenceClock, err error) {

	// Update the sequences in the appropriate cache block
	if len(entries) == 0 {
		return clockUpdates, nil
	}

	// The set of updates may be distributed over multiple partitions and blocks.
	// To support this, iterate over the set, and define groups of sequences by block
	// TODO: this approach feels like it's generating a lot of GC work.  Considered an iterative
	//       approach where a set update returned a list of entries that weren't targeted at the
	//       same block as the first entry in the list, but this would force sequential
	//       processing of the blocks.  Might be worth revisiting if we see high GC overhead.
	blockSets := make(BlockSet)
	clockUpdates = base.NewSequenceClockImpl()
	for _, entry := range entries {
		// Update the sequence in the appropriate cache block
		base.LogTo("DIndex+", "Add to channel index [%s], vbNo=%d, isRemoval:%v", b.channelName, entry.VbNo, entry.isRemoved())
		blockKey := GenerateBlockKey(b.channelName, entry.Sequence, b.partitions.VbMap[entry.VbNo])
		if _, ok := blockSets[blockKey]; !ok {
			blockSets[blockKey] = make([]*LogEntry, 0)
		}
		blockSets[blockKey] = append(blockSets[blockKey], entry)
		clockUpdates.SetMaxSequence(entry.VbNo, entry.Sequence)
	}

	err = b.writeBlockSetsWithCas(blockSets)
	if err != nil {
		base.Warn("Error writing blockSets with cas for block %s: %+v", blockSets, err)
	}

	return clockUpdates, err
}
Example No. 30
// Wait until the change-cache has caught up with the latest writes to the database.
func (context *DatabaseContext) WaitForSequenceWithMissing(sequence uint64) (err error) {
	base.LogTo("Debug", "Waiting for sequence: %d", sequence)
	context.changeCache.waitForSequenceWithMissing(sequence)
	return
}