Example #1
// Encodes a channel log as raw bytes for storage; returns nil for a nil log.
func encodeChannelLog(log *channels.ChangeLog) []byte {
	if log == nil {
		return nil
	}
	raw := bytes.NewBuffer(make([]byte, 0, 50000))
	log.Encode(raw)
	return raw.Bytes()
}
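A minimal usage sketch of the encoder above (not part of the original code; the entry values are illustrative, and it relies only on the channels.ChangeLog and channels.LogEntry APIs that appear in the later examples):
func exampleEncodedLog() []byte {
	log := &channels.ChangeLog{}
	log.Add(channels.LogEntry{Sequence: 1, DocID: "doc1", RevID: "1-abc"})
	return encodeChannelLog(log) // nil-safe: encodeChannelLog(nil) returns nil
}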
Example #2
// Saves a channel log, _if_ there isn't already one in the database.
func (c *channelLogWriter) addChangeLog_(log *channels.ChangeLog) (added bool, err error) {
	added, err = c.bucket.AddRaw(channelLogDocID(c.channelName), 0, encodeChannelLog(log))
	if added {
		base.LogTo("ChannelLog", "Added missing channel-log %q with %d entries",
			c.channelName, log.Len())
	} else {
		base.LogTo("ChannelLog", "Didn't add channel-log %q with %d entries (err=%v)",
			c.channelName, log.Len())
	}
	return
}
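A hedged sketch of how this writer could be driven from the rebuild path in Example #4 (the saveRebuiltLog helper is hypothetical; only addChangeLog_, base.Warn, and base.LogTo come from the examples):
func saveRebuiltLog(c *channelLogWriter, newLog *channels.ChangeLog) {
	if added, err := c.addChangeLog_(newLog); err != nil {
		base.Warn("Failed to save rebuilt channel-log %q: %v", c.channelName, err)
	} else if !added {
		base.LogTo("ChannelLog", "Channel-log %q already exists; keeping the stored copy", c.channelName)
	}
}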
Example #3
// Adds a new change to a channel log.
func (db *Database) AddToChangeLog(channelName string, entry channels.LogEntry, parentRevID string) error {
	if channelName == "*" && !EnableStarChannelLog {
		return nil
	}
	var fullUpdate bool
	var removedCount int
	fullUpdateAttempts := 0

	logDocID := channelLogDocID(channelName)
	err := db.Bucket.WriteUpdate(logDocID, 0, func(currentValue []byte) ([]byte, walrus.WriteOptions, error) {
		// (Be careful: this block can be invoked multiple times if there are races!)
		// Should I do a full update of the change log, removing older entries to limit its size?
		// This has to be done occasionally, but it's slower than simply appending to it. This
		// test is a heuristic that seems to strike a good balance in practice:
		fullUpdate = AlwaysCompactChangeLog ||
			(len(currentValue) > 20000 && (rand.Intn(100) < len(currentValue)/5000))
		removedCount = 0

		if len(currentValue) == 0 {
			channelLog := channels.ChangeLog{}
			channelLog.Add(entry)
			return encodeChannelLog(&channelLog), walrus.Raw, nil
		}

		if fullUpdate {
			fullUpdateAttempts++
			var newValue bytes.Buffer
			removedCount = channels.TruncateEncodedChangeLog(bytes.NewReader(currentValue),
				MaxChangeLogLength-1, &newValue)
			if removedCount > 0 {
				entry.Encode(&newValue, parentRevID)
				return newValue.Bytes(), walrus.Raw, nil
			}
		}

		w := bytes.NewBuffer(make([]byte, 0, 50000))
		entry.Encode(w, parentRevID)
		currentValue = append(currentValue, w.Bytes()...)
		return currentValue, walrus.Raw, nil
	})

	/*if fullUpdate {
		base.Log("Removed %d entries from %q", removedCount, channelName)
	} else if fullUpdateAttempts > 0 {
		base.Log("Attempted to remove entries %d times but failed", fullUpdateAttempts)
	}*/
	return err
}
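The compaction heuristic above can be read in isolation: a log is only a candidate once its encoded form exceeds 20 KB, and the probability then grows linearly with size, from roughly 1-in-25 just past 20 KB to a certainty at 500 KB. A standalone sketch of that test (illustrative, not part of the original code):
// Mirrors the fullUpdate test in AddToChangeLog above.
func shouldCompactChangeLog(encodedLen int) bool {
	return AlwaysCompactChangeLog ||
		(encodedLen > 20000 && rand.Intn(100) < encodedLen/5000)
}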
Example #4
// Returns a list of all the changes made on a channel.
// Does NOT handle the Wait option. Does NOT check authorization.
func (db *Database) changesFeed(channel string, options ChangesOptions) (<-chan *ChangeEntry, error) {
	since := options.Since[channel]
	channelLog, err := db.changesWriter.getChangeLog(channel, since)
	if err != nil {
		base.Warn("Error reading channel-log %q (using view instead): %v", channel, err)
		channelLog = nil
	}
	rebuildLog := channelLog == nil && err == nil && (EnableStarChannelLog || channel != "*")
	var log []*channels.LogEntry
	if channelLog != nil {
		log = channelLog.Entries
	}

	var viewFeed <-chan *ChangeEntry
	if channelLog == nil || channelLog.Since > since {
		var upToSeq uint64
		if channelLog != nil {
			upToSeq = channelLog.Since
		}
		// Channel log may not go back far enough, so also fetch view-based change feed:
		viewFeed, err = db.changesFeedFromView(channel, options, upToSeq)
		if err != nil {
			return nil, err
		}
	}

	feed := make(chan *ChangeEntry, 5)
	go func() {
		defer close(feed)

		// First, if we need to backfill from the view, write its early entries to the channel:
		if viewFeed != nil {
			newLog := channels.ChangeLog{Since: since}
			for change := range viewFeed {
				if channelLog != nil && change.seqNo > channelLog.Since {
					// TODO: Close the view-based feed somehow
					break
				}

				select {
				case <-options.Terminator:
					base.LogTo("Changes+", "Aborting changesFeed (reading from view)")
					return
				case feed <- change:
				}

				if rebuildLog {
					// If there wasn't any channel log, build up a new one from the view:
					entry := channels.LogEntry{
						Sequence: change.seqNo,
						DocID:    change.ID,
						RevID:    change.Changes[0]["rev"],
					}
					if change.Deleted {
						entry.Flags |= channels.Deleted
					}
					if change.Removed != nil {
						entry.Flags |= channels.Removed
					}
					newLog.Add(entry)
					newLog.TruncateTo(MaxChangeLogLength)
				}
			}

			if rebuildLog {
				// Save the missing channel log we just rebuilt:
				base.LogTo("Changes", "Saving rebuilt channel log %q with %d sequences",
					channel, len(newLog.Entries))
				db.changesWriter.addChangeLog(channel, &newLog)
			}
		}

		// Now write each log entry to the 'feed' channel in turn:
		for _, logEntry := range log {
			if !options.Conflicts && (logEntry.Flags&channels.Hidden) != 0 {
				//continue  // FIX: had to comment this out.
				// This entry is shadowed by a conflicting one. We would like to skip it.
				// The problem is that if this is the newest revision of this doc, then the
				// doc will appear under this sequence # in the changes view, which means
				// we won't emit the doc at all because we already stopped emitting entries
				// from the view before this point.
			}
			change := ChangeEntry{
				seqNo:   logEntry.Sequence,
				ID:      logEntry.DocID,
				Deleted: (logEntry.Flags & channels.Deleted) != 0,
				Changes: []ChangeRev{{"rev": logEntry.RevID}},
			}
			if logEntry.Flags&channels.Removed != 0 {
				change.Removed = channels.SetOf(channel)
			} else if options.IncludeDocs || options.Conflicts {
				doc, _ := db.GetDoc(logEntry.DocID)
				db.addDocToChangeEntry(doc, &change, options.IncludeDocs, false)
			}

			select {
			case <-options.Terminator:
				base.LogTo("Changes+", "Aborting changesFeed")
				return
			case feed <- &change:
			}

			if options.Limit > 0 {
				options.Limit--
				if options.Limit == 0 {
					break
				}
			}
		}
	}()
	return feed, nil
}
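A hedged sketch of how a caller might consume this feed and abort it early. Only changesFeed, ChangesOptions, and the Terminator channel appear in the examples; the consumer function itself and the chan bool element type are assumptions:
func consumeChanges(db *Database, channel string) error {
	terminator := make(chan bool) // element type assumed; the selects above only ever receive from it
	defer close(terminator)       // closing it unblocks the producer goroutine if we stop early
	feed, err := db.changesFeed(channel, ChangesOptions{Terminator: terminator})
	if err != nil {
		return err
	}
	for change := range feed {
		_ = change // process each *ChangeEntry here
	}
	return nil
}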
Example #5
// Writes new changes to my channel log document.
func (c *channelLogWriter) addToChangeLog_(entries []*changeEntry) {
	var err error
	dbExpvars.Add("channelLogAdds", 1)
	logDocID := channelLogDocID(c.channelName)

	// A fraction of the time we will do a full update and clean stuff out.
	fullUpdate := AlwaysCompactChangeLog || len(entries) > MaxChangeLogLength/2 || rand.Intn(MaxChangeLogLength/len(entries)) == 0
	if !fullUpdate {
		// Non-full update; just append the new entries:
		w := bytes.NewBuffer(make([]byte, 0, 100*len(entries)))
		for _, entry := range entries {
			entry.logEntry.Encode(w, entry.parentRevID)
		}
		data := w.Bytes()
		err = c.bucket.Append(logDocID, data)
		if err == nil {
			base.LogTo("ChannelLog", "Appended %d sequence(s) to %q", len(entries), c.channelName)
			dbExpvars.Add("channelLogAppends", 1)
		} else if base.IsDocNotFoundError(err) {
			// Append failed due to doc not existing, so fall back to full update
			err = nil
			fullUpdate = true
		} else {
			base.Warn("Error appending to %q -- %v", len(entries), c.channelName, err)
		}
	}

	if fullUpdate {
		// Full update: do a CAS-based read+write:
		fullUpdateAttempts := 0
		var oldChangeLogCount, newChangeLogCount int
		err = c.bucket.WriteUpdate(logDocID, 0, func(currentValue []byte) ([]byte, walrus.WriteOptions, error) {
			fullUpdateAttempts++
			numToKeep := MaxChangeLogLength - len(entries)
			if len(currentValue) == 0 || numToKeep <= 0 {
				// If log was missing or empty, or will be entirely overwritten, create a new one:
				entriesToWrite := entries
				if numToKeep < 0 {
					entriesToWrite = entries[-numToKeep:]
				}
				channelLog := channels.ChangeLog{}
				for _, entry := range entriesToWrite {
					channelLog.Add(*entry.logEntry)
				}
				newChangeLogCount = len(entriesToWrite)
				oldChangeLogCount = newChangeLogCount
				return encodeChannelLog(&channelLog), walrus.Raw, nil
			} else {
				// Append to an already existing change log:
				var newValue bytes.Buffer
				var nRemoved int
				nRemoved, newChangeLogCount = channels.TruncateEncodedChangeLog(
					bytes.NewReader(currentValue), numToKeep, numToKeep/2, &newValue)
				for _, entry := range entries {
					entry.logEntry.Encode(&newValue, entry.parentRevID)
				}
				oldChangeLogCount = nRemoved + newChangeLogCount
				newChangeLogCount += len(entries)
				return newValue.Bytes(), walrus.Raw, nil
			}
		})
		if err == nil {
			dbExpvars.Add("channelLogRewrites", 1)
			dbExpvars.Add("channelLogRewriteCollisions", int64(fullUpdateAttempts-1))
			base.LogTo("ChannelLog", "Wrote %d sequences (was %d now %d) to %q in %d attempts",
				len(entries), oldChangeLogCount, newChangeLogCount, c.channelName, fullUpdateAttempts)
		} else {
			base.Warn("Error writing %d sequence(s) to %q -- %v", len(entries), c.channelName, err)
		}
	}
}
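The fullUpdate decision at the top of this function amortizes compaction across appends: on average the log is rewritten about once per MaxChangeLogLength appended sequences, and oversized batches force a rewrite outright. A standalone sketch of that decision (illustrative; the batchSize guard is an addition, the rest mirrors the code above):
func shouldFullUpdate(batchSize int) bool {
	if batchSize <= 0 {
		return false // guard added in this sketch; the caller above always has entries
	}
	return AlwaysCompactChangeLog ||
		batchSize > MaxChangeLogLength/2 ||
		rand.Intn(MaxChangeLogLength/batchSize) == 0
}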
Example #6
// Returns a list of all the changes made on a channel.
// Does NOT handle the Wait option. Does NOT check authorization.
func (db *Database) changesFeed(channel string, options ChangesOptions) (<-chan *ChangeEntry, error) {
	since := options.Since[channel]
	channelLog, err := db.GetChangeLog(channel, since)
	if err != nil {
		base.Warn("Error reading channel-log %q (using view instead) %v", channel, err)
		channelLog = nil
	}
	rebuildLog := channelLog == nil && err == nil && (EnableStarChannelLog || channel != "*")
	var log []*channels.LogEntry
	if channelLog != nil {
		log = channelLog.Entries
	}

	var viewFeed <-chan *ChangeEntry
	if channelLog == nil || channelLog.Since > since {
		// Channel log may not go back far enough, so also fetch view-based change feed:
		viewFeed, err = db.changesFeedFromView(channel, options)
		if err != nil {
			return nil, err
		}
	}

	feed := make(chan *ChangeEntry, 5)
	go func() {
		defer close(feed)

		// First, if we need to backfill from the view, write its early entries to the channel:
		if viewFeed != nil {
			newLog := channels.ChangeLog{Since: since}
			for change := range viewFeed {
				if len(log) > 0 && change.seqNo >= log[0].Sequence {
					// TODO: Close the view-based feed somehow
					break
				}
				feed <- change
				if rebuildLog {
					// If there wasn't any channel log, build up a new one from the view:
					entry := channels.LogEntry{
						Sequence: change.seqNo,
						DocID:    change.ID,
						RevID:    change.Changes[0]["rev"],
					}
					if change.Deleted {
						entry.Flags |= channels.Deleted
					}
					if change.Removed != nil {
						entry.Flags |= channels.Removed
					}
					newLog.Add(entry)
					newLog.TruncateTo(MaxChangeLogLength)
				}
			}

			if rebuildLog {
				// Save the missing channel log we just rebuilt:
				base.LogTo("Changes", "Saving rebuilt channel log %q with %d sequences",
					channel, len(newLog.Entries))
				if _, err := db.AddChangeLog(channel, &newLog); err != nil {
					base.Warn("ChangesFeed: AddChangeLog failed, %v", err)
				}
			}
		}

		// Now write each log entry to the 'feed' channel in turn:
		for _, logEntry := range log {
			hidden := (logEntry.Flags & channels.Hidden) != 0
			if logEntry.RevID == "" || (hidden && !options.Conflicts) {
				continue
			}
			change := ChangeEntry{
				seqNo:   logEntry.Sequence,
				ID:      logEntry.DocID,
				Deleted: (logEntry.Flags & channels.Deleted) != 0,
				Changes: []ChangeRev{{"rev": logEntry.RevID}},
			}
			if logEntry.Flags&channels.Removed != 0 {
				change.Removed = channels.SetOf(channel)
			} else if options.IncludeDocs || options.Conflicts {
				doc, _ := db.getDoc(logEntry.DocID)
				db.addDocToChangeEntry(doc, &change, options.IncludeDocs, false)
			}
			feed <- &change

			if options.Limit > 0 {
				options.Limit--
				if options.Limit == 0 {
					break
				}
			}
		}
	}()
	return feed, nil
}
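In this variant the hidden-entry filter actually skips entries (compare the commented-out continue in Example #4, which couldn't skip them without losing the document from the feed entirely). The rule reduces to a small predicate; a sketch, not part of the original code:
// Entries with no revision ID, and conflict-hidden entries when conflicts
// weren't requested, are omitted from the feed.
func skipLogEntry(e *channels.LogEntry, includeConflicts bool) bool {
	hidden := (e.Flags & channels.Hidden) != 0
	return e.RevID == "" || (hidden && !includeConflicts)
}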