// Test backfill of late arriving sequences to the channel caches
func TestChannelCacheBackfill(t *testing.T) {

	base.LogKeys["Cache"] = true
	base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"ABC", "PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	// Test that retrieval isn't blocked by skipped sequences
	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 0}})
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	lastSeq := changes[len(changes)-1].Seq

	// Validate insert to various cache states
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
	WriteDirect(db, []string{"CBS"}, 7)
	db.changeCache.waitForSequence(7)
	// verify insert at start (PBS)
	pbsCache := db.changeCache.channelCaches["PBS"]
	assert.True(t, verifyCacheSequences(pbsCache, []uint64{3, 5, 6}))
	// verify insert at middle (ABC)
	abcCache := db.changeCache.channelCaches["ABC"]
	assert.True(t, verifyCacheSequences(abcCache, []uint64{1, 2, 3, 5, 6}))
	// verify insert at end (NBC)
	nbcCache := db.changeCache.channelCaches["NBC"]
	assert.True(t, verifyCacheSequences(nbcCache, []uint64{1, 3}))
	// verify insert to empty cache (TBS)
	tbsCache := db.changeCache.channelCaches["TBS"]
	assert.True(t, verifyCacheSequences(tbsCache, []uint64{3}))

	// verify changes has three entries (needs to resend everything since the previous LowSeq, which
	// will be the late-arriving 3 along with 5 and 6)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assert.Equals(t, len(changes), 3)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3, LowSeq: 3},
		ID:      "doc-3",
		Changes: []ChangeRev{{"rev": "1-a"}}})

}
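// The cache assertions above use a verifyCacheSequences helper that isn't shown in this
// listing.  A minimal sketch of what such a helper could look like is below, assuming the
// channel cache exposes its entries through a 'logs' slice whose elements carry a Sequence
// field (the len(abcCache.logs) check in TestChannelCacheSize suggests as much); the type
// and field names here are assumptions for illustration, not the package's actual code.
func verifyCacheSequencesSketch(cache *channelCache, expectedSequences []uint64) bool {
	// Length mismatch means the cache holds more or fewer entries than expected
	if len(cache.logs) != len(expectedSequences) {
		log.Printf("verifyCacheSequences: expected %d entries, cache has %d", len(expectedSequences), len(cache.logs))
		return false
	}
	// Compare entry sequences in order
	for i, entry := range cache.logs {
		if entry.Sequence != expectedSequences[i] {
			log.Printf("verifyCacheSequences: mismatch at index %d: got %d, want %d", i, entry.Sequence, expectedSequences[i])
			return false
		}
	}
	return true
}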
func TestIndexChangesAdminBackfill(t *testing.T) {
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	user.SetSequence(1)
	authenticator.Save(user)

	// Create docs on multiple channels:
	db.Put("both_1", Body{"channels": []string{"ABC", "PBS"}})
	db.Put("doc0000609", Body{"channels": []string{"PBS"}})
	db.Put("doc0000799", Body{"channels": []string{"ABC"}})
	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed:
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 3)

	// Modify user to have access to both channels:
	log.Println("Get Principal")
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")
	time.Sleep(100 * time.Millisecond)

	// Write a few more docs (that should be returned as non-backfill)
	db.Put("doc_nobackfill_1", Body{"channels": []string{"PBS"}})
	db.Put("doc_nobackfill_2", Body{"channels": []string{"PBS"}})
	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed:
	log.Println("Get User")
	db.user, _ = authenticator.GetUser("naomi")
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 5)
	verifyChange(t, changes, "both_1", true)
	verifyChange(t, changes, "doc0000609", true)
	verifyChange(t, changes, "doc_nobackfill_1", false)
	verifyChange(t, changes, "doc_nobackfill_2", false)

}
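// verifyChange, used above, distinguishes backfill entries (those triggered by the access
// grant) from regular ones.  A hedged sketch follows, assuming a backfilled ChangeEntry is
// identified by a non-zero TriggeredBy on its SequenceID (as the ChangeEntry assertions
// elsewhere in these tests suggest); this is an illustrative stand-in, not the real helper.
func verifyChangeSketch(t *testing.T, changes []*ChangeEntry, docID string, isBackfill bool) {
	for _, change := range changes {
		if change.ID == docID {
			// Treat a non-zero TriggeredBy as "this entry was backfilled by an access grant"
			gotBackfill := change.Seq.TriggeredBy > 0
			assertTrue(t, gotBackfill == isBackfill,
				fmt.Sprintf("Change for %q: backfill=%v, expected %v", docID, gotBackfill, isBackfill))
			return
		}
	}
	assertFailed(t, fmt.Sprintf("No change found for doc %q", docID))
}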
func TestChangeIndexChanges(t *testing.T) {
	base.EnableLogKey("DIndex+")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Write an entry to the bucket
	WriteDirectWithKey(db, "1c856b5724dcf4273c3993619900ce7f", []string{}, 1)

	time.Sleep(20 * time.Millisecond)
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 1)

	time.Sleep(20 * time.Millisecond)
	// Write a few more entries to the bucket
	WriteDirectWithKey(db, "12389b182ababd12fff662848edeb908", []string{}, 1)
	time.Sleep(20 * time.Millisecond)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 2)
}
// Test retrieval of skipped sequence using view.  Unit test catches panic, but we don't currently have a way
// to simulate an entry that makes it to the bucket (and so is accessible to the view), but doesn't show up on the TAP feed.
// Cache logging in this test validates that view retrieval is working because of TAP latency (item is in the bucket, but hasn't
// been seen on the TAP feed yet).  Longer term, we could consider enhancing the leaky bucket to 'miss' the entry on the tap feed.
func TestSkippedViewRetrieval(t *testing.T) {
	base.LogKeys["Cache"] = true
	base.LogKeys["Cache+"] = true

	// Use leaky bucket to have the tap feed 'lose' document 3
	leakyConfig := base.LeakyBucketConfig{
		TapFeedMissingDocs: []string{"doc-3"},
	}
	db := setupTestLeakyDBWithCacheOptions(t, shortWaitCache(), leakyConfig)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Allow db to initialize and run initial CleanSkippedSequenceQueue
	time.Sleep(10 * time.Millisecond)

	// Write sequences direct
	WriteDirect(db, []string{"ABC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"ABC"}, 3)

	// Artificially add 3 skipped, and back date skipped entry by 2 hours to trigger attempted view retrieval during Clean call
	db.changeCache.skippedSeqs.Push(&SkippedSequence{3, time.Now().Add(-2 * time.Hour)})
	db.changeCache.skippedSeqs.Push(&SkippedSequence{5, time.Now().Add(-2 * time.Hour)})
	db.changeCache.CleanSkippedSequenceQueue()

	// Validate that 3 is in the channel cache, 5 isn't
	entries, err := db.changeCache.GetChangesInChannel("ABC", ChangesOptions{Since: SequenceID{Seq: 2}})
	assertNoError(t, err, "Get Changes returned error")
	assertTrue(t, len(entries) == 1, "Incorrect number of entries returned")
	assert.Equals(t, entries[0].DocID, "doc-3")

}
// Unit test for bug #673
func TestUpdatePrincipal(t *testing.T) {
	base.LogKeys["Cache"] = true
	base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Validate that a call to UpdatePrincipal with no changes to the user doesn't allocate a sequence
	userInfo, err := db.GetPrincipal("naomi", true)
	userInfo.ExplicitChannels = base.SetOf("ABC")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "Unable to update principal")

	nextSeq, err := db.sequences.nextSequence()
	assert.Equals(t, nextSeq, uint64(1))

	// Validate that a call to UpdatePrincipal with changes to the user does allocate a sequence
	userInfo, err = db.GetPrincipal("naomi", true)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "Unable to update principal")

	nextSeq, err = db.sequences.nextSequence()
	assert.Equals(t, nextSeq, uint64(3))
}
// Test notification when buffered entries are processed after a user doc arrives.
func TestChannelCacheBufferingWithUserDoc(t *testing.T) {

	base.EnableLogKey("Cache")
	base.EnableLogKey("Cache+")
	base.EnableLogKey("Changes")
	base.EnableLogKey("Changes+")
	db := setupTestDBWithCacheOptions(t, CacheOptions{})
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Simulate seq 1 (user doc) being delayed - write 2 first
	WriteDirect(db, []string{"ABC"}, 2)

	// Start wait for doc in ABC
	waiter := db.tapListener.NewWaiterWithChannels(channels.SetOf("ABC"), nil)

	successChan := make(chan bool)
	go func() {
		waiter.Wait()
		close(successChan)
	}()

	// Simulate a user doc update
	WriteUserDirect(db, "bernard", 1)

	// Wait 3 seconds for notification, else fail the test.
	select {
	case <-successChan:
		log.Println("notification successful")
	case <-time.After(time.Second * 3):
		assertFailed(t, "No notification after 3 seconds")
	}

}
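// The select-with-timeout pattern above recurs in these notification tests.  Purely as an
// illustration (not an existing helper in the package), it could be factored out like this:
func waitForNotification(t *testing.T, successChan chan bool, timeout time.Duration) {
	select {
	case <-successChan:
		log.Println("notification successful")
	case <-time.After(timeout):
		// No notification within the allowed window - fail the test
		assertFailed(t, fmt.Sprintf("No notification after %v", timeout))
	}
}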
// Test low sequence handling of late arriving sequences to a continuous changes feed, when the
// user doesn't have visibility to some of the late arriving sequences
func TestLowSequenceHandlingAcrossChannels(t *testing.T) {

	//base.LogKeys["Cache"] = true
	//base.LogKeys["Changes"] = true
	//base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Array to read changes from feed to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 3)

	// Validate the initial sequences arrive as expected
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 3)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6"}))

	// Test backfill of sequence the user doesn't have visibility to
	WriteDirect(db, []string{"PBS"}, 3)
	WriteDirect(db, []string{"ABC"}, 9)

	db.changeCache.waitForSequenceWithMissing(9)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 1)
	assert.Equals(t, len(changes), 4)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6", "3::9"}))

	close(options.Terminator)
}
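// appendFromFeed, used above and in the other continuous-feed tests, drains entries from the
// feed channel into the changes slice.  A minimal sketch under two assumptions drawn from this
// file: the feed sends nil markers between continuous iterations (see the feed-draining
// goroutine in FailingTestChannelRace), and callers only want numEntries non-nil entries.
// The timeout value and the name are illustrative, not the package's actual implementation.
func appendFromFeedSketch(changes *[]*ChangeEntry, feed <-chan *ChangeEntry, numEntries int) error {
	count := 0
	timeout := time.After(5 * time.Second)
	for count < numEntries {
		select {
		case entry, ok := <-feed:
			if !ok {
				return fmt.Errorf("feed closed after %d of %d expected entries", count, numEntries)
			}
			// Skip the nil entries sent after each continuous iteration
			if entry != nil {
				*changes = append(*changes, entry)
				count++
			}
		case <-timeout:
			return fmt.Errorf("timed out waiting for %d entries, got %d", numEntries, count)
		}
	}
	return nil
}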
func TestInvalidChannel(t *testing.T) {
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)

	db.ChannelMapper = channels.NewDefaultChannelMapper()

	body := Body{"channels": []string{"bad name"}}
	_, err := db.Put("doc", body)
	assertHTTPError(t, err, 500)
}
// Unit test for bug #314
func TestChangesAfterChannelAdded(t *testing.T) {
	base.LogKeys["Cache"] = true
	base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	revid, _ := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 1}})
	assertNoError(t, err, "Couldn't GetChanges")

	assert.Equals(t, len(changes), 2)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 2},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[1], &ChangeEntry{
		Seq:     SequenceID{Seq: 2},
		ID:      "_user/naomi",
		Changes: []ChangeRev{}})

	// Add a new doc (sequence 3):
	revid, _ = db.Put("doc2", Body{"channels": []string{"PBS"}})

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 2}})
	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3},
		ID:      "doc2",
		Changes: []ChangeRev{{"rev": revid}}})
}
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator.  We can't ensure that the changes request has been
// started before all other test operations have been sent (so that we never break out of the changes loop)
func RaceTestPollResultLongRunningContinuous(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a continuous changes feed on channel ABC.  The waitgroup keeps the test open until the continuous feed is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		abcChanges, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		log.Printf("Got %d changes", len(abcChanges))
		log.Println("Continuous completed")

	}()

	for i := 0; i < 10000; i++ {
		WriteDirectWithKey(db, fmt.Sprintf("docABC_%d", i), []string{"ABC"}, 3)
		time.Sleep(1 * time.Millisecond)
	}

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	close(continuousTerminator)
	log.Println("closed terminator")

	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"ABC"}, 1)

	wg.Wait()

}
func TestPollResultReuseLongpoll(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a longpoll changes, use waitgroup to delay the test until it returns.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		assertTrue(t, err == nil, "Error parsing sequence ID")
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true})
		assertTrue(t, err == nil, "Error getting changes")
		// Expects two changes - the nil that's sent on initial wait, and then docABC_2
		assert.Equals(t, len(abcHboChanges), 2)
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel ABC to notify the waiting longpoll
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 2)

	wg.Wait()

	// Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
	// or reloaded from index).  Expect one poll hit (the longpoll request), and one miss (the basic changes request)
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "1")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")

}
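// getExpvarAsString reads a named counter out of an expvar map so it can be compared as a
// string.  A sketch using the standard library's expvar package, assuming indexExpvars and
// dbExpvars are *expvar.Map values (which would require importing "expvar"); the helper name
// and signature here are illustrative.
func getExpvarAsStringSketch(expvars *expvar.Map, name string) string {
	value := expvars.Get(name)
	if value == nil {
		// Value hasn't been published yet
		return ""
	}
	return value.String()
}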
// Test size config
func TestChannelCacheSize(t *testing.T) {

	base.EnableLogKey("Cache")
	channelOptions := ChannelCacheOptions{
		ChannelCacheMinLength: 600,
		ChannelCacheMaxLength: 600,
	}
	options := CacheOptions{
		ChannelCacheOptions: channelOptions,
	}

	log.Printf("Options in test:%+v", options)
	db := setupTestDBWithCacheOptions(t, options)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Write 750 docs to channel ABC
	for i := 1; i <= 750; i++ {
		WriteDirect(db, []string{"ABC"}, uint64(i))
	}

	// Validate that retrieval returns expected sequences
	db.changeCache.waitForSequence(750)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: SequenceID{Seq: 0}})
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 750)

	// Validate that cache stores the expected number of values
	changeCache, ok := db.changeCache.(*changeCache)
	assertTrue(t, ok, "Testing skipped sequences without a change cache")
	abcCache := changeCache.channelCaches["ABC"]
	assert.Equals(t, len(abcCache.logs), 600)
}
func TestConflicts(t *testing.T) {
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	/*
		var logKeys = map[string]bool {
			"Cache": true,
			"Changes": true,
			"Changes+": true,
		}

		base.UpdateLogKeys(logKeys, true)
	*/

	// Create rev 1 of "doc":
	body := Body{"n": 1, "channels": []string{"all", "1"}}
	assertNoError(t, db.PutExistingRev("doc", body, []string{"1-a"}), "add 1-a")

	time.Sleep(50 * time.Millisecond) // Wait for tap feed to catch up

	log := db.GetChangeLog("all", 0)
	assert.Equals(t, len(log), 1)

	// Create two conflicting changes:
	body["n"] = 2
	body["channels"] = []string{"all", "2b"}
	assertNoError(t, db.PutExistingRev("doc", body, []string{"2-b", "1-a"}), "add 2-b")
	body["n"] = 3
	body["channels"] = []string{"all", "2a"}
	assertNoError(t, db.PutExistingRev("doc", body, []string{"2-a", "1-a"}), "add 2-a")

	time.Sleep(50 * time.Millisecond) // Wait for tap feed to catch up

	// Verify the change with the higher revid won:
	gotBody, err := db.Get("doc")
	assert.DeepEquals(t, gotBody, Body{"_id": "doc", "_rev": "2-b", "n": int64(2),
		"channels": []interface{}{"all", "2b"}})

	// Verify we can still get the other two revisions:
	gotBody, err = db.GetRev("doc", "1-a", false, nil)
	assert.DeepEquals(t, gotBody, Body{"_id": "doc", "_rev": "1-a", "n": 1,
		"channels": []string{"all", "1"}})
	gotBody, err = db.GetRev("doc", "2-a", false, nil)
	assert.DeepEquals(t, gotBody, Body{"_id": "doc", "_rev": "2-a", "n": 3,
		"channels": []string{"all", "2a"}})

	// Verify the change-log of the "all" channel:
	db.changeCache.waitForSequence(3)
	log = db.GetChangeLog("all", 0)
	assert.Equals(t, len(log), 1)
	assert.Equals(t, log[0].Sequence, uint64(3))
	assert.Equals(t, log[0].DocID, "doc")
	assert.Equals(t, log[0].RevID, "2-b")
	assert.Equals(t, log[0].Flags, uint8(channels.Hidden|channels.Branched|channels.Conflict))

	// Verify the _changes feed:
	options := ChangesOptions{
		Conflicts: true,
	}
	changes, err := db.GetChanges(channels.SetOf("all"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:      SequenceID{Seq: 3},
		ID:       "doc",
		Changes:  []ChangeRev{{"rev": "2-b"}, {"rev": "2-a"}},
		branched: true})

	// Delete 2-b; verify this makes 2-a current:
	rev3, err := db.DeleteDoc("doc", "2-b")
	assertNoError(t, err, "delete 2-b")
	gotBody, err = db.Get("doc")
	assert.DeepEquals(t, gotBody, Body{"_id": "doc", "_rev": "2-a", "n": int64(3),
		"channels": []interface{}{"all", "2a"}})

	// Verify channel assignments are correct for channels defined by 2-a:
	doc, _ := db.GetDoc("doc")
	chan2a, found := doc.Channels["2a"]
	assert.True(t, found)
	assert.True(t, chan2a == nil)             // currently in 2a
	assert.True(t, doc.Channels["2b"] != nil) // has been removed from 2b

	// Verify the _changes feed:
	db.changeCache.waitForSequence(4)
	changes, err = db.GetChanges(channels.SetOf("all"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:      SequenceID{Seq: 4},
		ID:       "doc",
		Changes:  []ChangeRev{{"rev": "2-a"}, {"rev": rev3}},
		branched: true})
}
func TestAllDocs(t *testing.T) {
	// base.LogKeys["Cache"] = true
	// base.LogKeys["Changes"] = true
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)

	// Lower the log expiration time to zero so no more than 50 items will be kept.
	oldChannelCacheAge := DefaultChannelCacheAge
	DefaultChannelCacheAge = 0
	defer func() { DefaultChannelCacheAge = oldChannelCacheAge }()

	/*
		base.LogKeys["Changes"] = true
		base.LogKeys["Changes+"] = true
		defer func() {
			base.LogKeys["Changes"] = false
			base.LogKeys["Changes+"] = false
		}()
	*/

	db.ChannelMapper = channels.NewDefaultChannelMapper()

	ids := make([]AllDocsEntry, 100)
	for i := 0; i < 100; i++ {
		channels := []string{"all"}
		if i%10 == 0 {
			channels = append(channels, "KFJC")
		}
		body := Body{"serialnumber": i, "channels": channels}
		ids[i].DocID = fmt.Sprintf("alldoc-%02d", i)
		revid, err := db.Put(ids[i].DocID, body)
		ids[i].RevID = revid
		ids[i].Sequence = uint64(i + 1)
		ids[i].Channels = channels
		assertNoError(t, err, "Couldn't create document")
	}

	alldocs, err := allDocIDs(db)
	assertNoError(t, err, "AllDocIDs failed")
	assert.Equals(t, len(alldocs), 100)
	for i, entry := range alldocs {
		assert.True(t, entry.Equal(ids[i]))
	}

	// Now delete one document and try again:
	_, err = db.DeleteDoc(ids[23].DocID, ids[23].RevID)
	assertNoError(t, err, "Couldn't delete doc 23")

	alldocs, err = allDocIDs(db)
	assertNoError(t, err, "AllDocIDs failed")
	assert.Equals(t, len(alldocs), 99)
	for i, entry := range alldocs {
		j := i
		if i >= 23 {
			j++
		}
		assert.True(t, entry.Equal(ids[j]))
	}

	// Inspect the channel log to confirm that it's only got the last 50 sequences.
	// There are 101 sequences overall, so the 1st one it has should be #52.
	db.changeCache.waitForSequence(101)
	log := db.GetChangeLog("all", 0)
	assert.Equals(t, len(log), 50)
	assert.Equals(t, int(log[0].Sequence), 52)

	// Now check the changes feed:
	var options ChangesOptions
	options.Terminator = make(chan bool)
	defer close(options.Terminator)
	changes, err := db.GetChanges(channels.SetOf("all"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 100)
	for i, change := range changes {
		seq := i + 1
		if i >= 23 {
			seq++
		}
		assert.Equals(t, change.Seq, SequenceID{Seq: uint64(seq)})
		assert.Equals(t, change.Deleted, i == 99)
		var removed base.Set
		if i == 99 {
			removed = channels.SetOf("all")
		}
		assert.DeepEquals(t, change.Removed, removed)
	}

	options.IncludeDocs = true
	changes, err = db.GetChanges(channels.SetOf("KFJC"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 10)
	for i, change := range changes {
		assert.Equals(t, change.Seq, SequenceID{Seq: uint64(10*i + 1)})
		assert.Equals(t, change.ID, ids[10*i].DocID)
		assert.Equals(t, change.Deleted, false)
		assert.DeepEquals(t, change.Removed, base.Set(nil))
		assert.Equals(t, change.Doc["serialnumber"], int64(10*i))
	}
}
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator.  We can't ensure that the changes request has been
// started before all other test operations have been sent (so that we never break out of the changes loop)
func RaceTestPollResultReuseContinuous(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a continuous changes feed on channels ABC and HBO.  The waitgroup keeps the test open until the continuous feed is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		// Expect 2 entries + 3 nil entries (one per wait)
		assert.Equals(t, len(abcHboChanges), 5)
		for i := 0; i < len(abcHboChanges); i++ {
			log.Printf("Got change:%+v", abcHboChanges[i])
		}
		log.Println("Continuous completed")
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel HBO to shift the continuous since value ahead
	WriteDirectWithKey(db, "docHBO_1", []string{"HBO"}, 3)

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	// Write an entry to channel ABC - last polled should be used
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 4)

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	close(continuousTerminator)
	log.Println("closed terminator")

	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"HBO"}, 1)

	wg.Wait()

	// Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
	// or reloaded from index).  Expect two poll hits (docHBO_1, docABC_2), and one miss (the initial changes request)

	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "2")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")

	time.Sleep(100 * time.Millisecond)

	// Make a changes request prior to the last polled range, ensure it doesn't reuse polled results
	changes, err = db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})

	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "2")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "2")

}
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator.  We can't ensure that the changes request has been
// started before all other test operations have been sent (so that we never break out of the changes loop)
func RaceTestPollingChangesFeed(t *testing.T) {
	//base.LogKeys["DIndex+"] = true
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)

	dbExpvars.Init()
	db.ChannelMapper = channels.NewDefaultChannelMapper()
	// Start a longpoll changes
	go func() {
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
		assertTrue(t, err == nil, "Error getting changes")
		// Expects two changes - the nil that's sent on initial wait, and then docABC_1
		for _, change := range abcHboChanges {
			log.Printf("abcHboChange:%v", change)
		}
		assert.Equals(t, len(abcHboChanges), 2)
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel ABC to notify the waiting longpoll
	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)

	// Start a continuous changes feed on a different channel (CBS).  The waitgroup keeps the test open until the continuous feed is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		cbsChanges, err := db.GetChanges(base.SetOf("CBS"), ChangesOptions{Since: simpleClockSequence(0), Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		// Expect 15 entries + 16 nil entries (one per wait)
		assert.Equals(t, len(cbsChanges), 25)
		log.Println("Continuous completed")

	}()

	// Write another entry to channel ABC to start the clock for unread polls
	time.Sleep(1000 * time.Millisecond)
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 1)

	// Verify that the channel is initially in the polled set
	changeIndex, _ := db.changeCache.(*kvChangeIndex)
	assertTrue(t, changeIndex.reader.getChannelReader("ABC") != nil, "Channel reader should not be nil")
	log.Printf("changeIndex readers: %+v", changeIndex.reader.channelIndexReaders)

	// Send multiple docs to channels HBO, PBS, CBS.  Expected results:
	//   ABC - Longpoll has ended - should trigger "nobody listening" expiry of channel reader
	//   CBS - Active continuous feed - channel reader shouldn't expire
	//   PBS - No changes feeds have requested this channel - no channel reader should be created
	//   HBO - New longpoll started mid-way before poll limit reached, channel reader shouldn't expire
	time.Sleep(20 * time.Millisecond)
	for i := 0; i < 12; i++ {
		log.Printf("writing multiDoc_%d", i)
		WriteDirectWithKey(db, fmt.Sprintf("multiDoc_%d", i), []string{"PBS", "HBO", "CBS"}, 1)
		// Midway through, read from HBO
		if i == 9 {
			// wait for polling cycle
			time.Sleep(600 * time.Millisecond)
			hboChanges, err := db.GetChanges(base.SetOf("HBO"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
			assertTrue(t, err == nil, "Error getting changes")
			assert.Equals(t, len(hboChanges), 10)
		}
		time.Sleep(kPollFrequency * time.Millisecond)
	}

	// Verify that the changes feed has been started (avoids test race conditions where we close the terminator before
	// starting the changes feed)
	for i := 0; i <= 40; i++ {
		channelChangesCount := getExpvarAsString(dbExpvars, "channelChangesFeeds")
		log.Printf("channelChangesCount:%s", channelChangesCount)
		if channelChangesCount != "" {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}

	close(continuousTerminator)
	log.Println("closed terminator")

	// Send another entry to continuous CBS feed in order to trigger the terminator check
	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"CBS"}, 1)

	time.Sleep(100 * time.Millisecond)
	// Validate that the ABC reader was deleted due to inactivity
	log.Printf("channel reader ABC:%+v", changeIndex.reader.getChannelReader("ABC"))
	assertTrue(t, changeIndex.reader.getChannelReader("ABC") == nil, "Channel reader should be nil")

	// Validate that the HBO reader is still present
	assertTrue(t, changeIndex.reader.getChannelReader("HBO") != nil, "Channel reader should be exist")

	// Start another longpoll changes for ABC, ensure it's successful (will return the existing 2 records, no wait)
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 2)

	// Repeat, verify use of existing channel reader
	changes, err = db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 2)

	wg.Wait()

}
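// The expvar-polling loop above (waiting for channelChangesFeeds to be published) is the
// workaround for the start-up race described in the comments on these disabled tests.  A
// hypothetical helper wrapping the same idea with the standard expvar package directly might
// look like this (assuming dbExpvars is an *expvar.Map; names are illustrative):
func waitForChangesFeedStarted(expvars *expvar.Map, maxAttempts int) bool {
	for i := 0; i < maxAttempts; i++ {
		// channelChangesFeeds is published once at least one changes feed has started
		if value := expvars.Get("channelChangesFeeds"); value != nil {
			return true
		}
		time.Sleep(100 * time.Millisecond)
	}
	return false
}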
func TestIndexChangesAfterChannelAdded(t *testing.T) {
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	user.SetSequence(1)
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	_, err := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})
	assertNoError(t, err, "Put failed")
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)
	db.Bucket.Dump()
	if changeCache, ok := db.changeCache.(*kvChangeIndex); ok {
		changeCache.reader.indexReadBucket.Dump()
	}
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	time.Sleep(250 * time.Millisecond)
	assert.Equals(t, len(changes), 2)
	verifyChange(t, changes, "_user/naomi", false)
	verifyChange(t, changes, "doc1", false)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())

	// Add a new doc (sequence 3):
	_, err = db.Put("doc2", Body{"channels": []string{"PBS"}})
	assertNoError(t, err, "Put failed")

	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	// changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: db.ParseSequenceID(lastSeq)})
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})

	printChanges(changes)
	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)

	verifyChange(t, changes, "doc2", false)

	// validate from zero
	log.Println("From zero:")
	//changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 1, TriggeredBy: 2}})
	changes, err = db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
}
// Test currently fails intermittently on concurrent access to the changes var.  Disabled for now - should be refactored.
func FailingTestChannelRace(t *testing.T) {

	base.LogKeys["Sequences"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channels "Odd", "Even"
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("Even", "Odd"))
	authenticator.Save(user)

	// Write initial sequences
	WriteDirect(db, []string{"Odd"}, 1)
	WriteDirect(db, []string{"Even"}, 2)
	WriteDirect(db, []string{"Odd"}, 3)

	db.changeCache.waitForSequence(3)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("Even", "Odd"), options)
	assert.True(t, err == nil)
	feedClosed := false

	// Go-routine to work the feed channel and write to an array for use by assertions
	var changes = make([]*ChangeEntry, 0, 50)
	go func() {
		for feedClosed == false {
			select {
			case entry, ok := <-feed:
				if ok {
					// feed sends nil after each continuous iteration
					if entry != nil {
						log.Println("Changes entry:", entry.Seq)
						changes = append(changes, entry)
					}
				} else {
					log.Println("Closing feed")
					feedClosed = true
				}
			}
		}
	}()

	// Wait for processing of two channels (100 ms each)
	time.Sleep(250 * time.Millisecond)
	// Validate the initial sequences arrive as expected
	assert.Equals(t, len(changes), 3)

	// Send update to trigger the start of the next changes iteration
	WriteDirect(db, []string{"Even"}, 4)
	time.Sleep(150 * time.Millisecond)
	// After read of "Even" channel, but before read of "Odd" channel, send three new entries
	WriteDirect(db, []string{"Odd"}, 5)
	WriteDirect(db, []string{"Even"}, 6)
	WriteDirect(db, []string{"Odd"}, 7)

	time.Sleep(100 * time.Millisecond)

	// At this point we haven't sent sequence 6, but the continuous changes feed has since=7

	// Write a few more to validate that we're not catching up on the missing '6' later
	WriteDirect(db, []string{"Even"}, 8)
	WriteDirect(db, []string{"Odd"}, 9)
	time.Sleep(750 * time.Millisecond)
	assert.Equals(t, len(changes), 9)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9"}))
	changesString := ""
	for _, change := range changes {
		changesString = fmt.Sprintf("%s%d, ", changesString, change.Seq.Seq)
	}
	fmt.Println("changes: ", changesString)

	close(options.Terminator)
}
// Test low sequence handling of late arriving sequences to a continuous changes feed, when the
// user gets added to a new channel with existing entries (and existing backfill)
func TestLowSequenceHandlingWithAccessGrant(t *testing.T) {

	base.LogKeys["Sequence"] = true
	//base.LogKeys["Changes"] = true
	//base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Array to read changes from feed to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)

	// Validate the initial sequences arrive as expected
	err = appendFromFeed(&changes, feed, 3)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 3)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6"}))

	db.Bucket.Incr("_sync:seq", 7, 0, 0)
	// Modify user to have access to both channels (sequence 8):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	WriteDirect(db, []string{"PBS"}, 9)

	db.changeCache.waitForSequence(9)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 4)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 7)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6", "2:8:5", "2:8:6", "2::8", "2::9"}))
	// Notes:
	// 1. 2::8 is the user sequence
	// 2. The duplicate send of sequence '6' is the standard behaviour when a channel is added - we don't know
	// whether the user has already seen the documents on the channel previously, so it gets resent

	close(options.Terminator)
}
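// verifyChangesFullSequences compares the compound sequence strings ("2::6", "2:8:5", ...) of
// the received changes against the expected list.  A minimal sketch, assuming
// SequenceID.String() renders exactly those compound forms (the expectations above rely on
// that formatting); the name is illustrative, not the package's real helper.
func verifyChangesFullSequencesSketch(changes []*ChangeEntry, expectedSequences []string) bool {
	if len(changes) != len(expectedSequences) {
		log.Printf("verifyChangesFullSequences: expected %d changes, got %d", len(expectedSequences), len(changes))
		return false
	}
	for i, change := range changes {
		// Compare the full compound sequence, including any low-sequence/triggered-by prefix
		if change.Seq.String() != expectedSequences[i] {
			log.Printf("verifyChangesFullSequences: mismatch at index %d: got %q, want %q", i, change.Seq.String(), expectedSequences[i])
			return false
		}
	}
	return true
}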
func _testChangesAfterChannelAdded(t *testing.T, db *Database) {
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	revid, _ := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)
	db.Bucket.Dump()
	if changeCache, ok := db.changeCache.(*kvChangeIndex); ok {
		changeCache.reader.indexReadBucket.Dump()
	}
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	time.Sleep(1000 * time.Millisecond)
	assert.Equals(t, len(changes), 3)
	assert.DeepEquals(t, changes[0], &ChangeEntry{ // Seq 1, from ABC
		Seq:     SequenceID{Seq: 1},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[1], &ChangeEntry{ // Seq 1, from PBS backfill
		Seq:     SequenceID{Seq: 1, TriggeredBy: 2},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[2], &ChangeEntry{ // Seq 2, from ABC and PBS
		Seq:     SequenceID{Seq: 2},
		ID:      "_user/naomi",
		Changes: []ChangeRev{}})
	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())

	// Add a new doc (sequence 3):
	revid, _ = db.Put("doc2", Body{"channels": []string{"PBS"}})

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})

	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3},
		ID:      "doc2",
		Changes: []ChangeRev{{"rev": revid}}})

	// validate from zero
	changes, err = db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
}
// Test low sequence handling of late arriving sequences to a continuous changes feed
func TestLowSequenceHandling(t *testing.T) {

	//base.LogKeys["Cache"] = true
	//base.LogKeys["Changes"] = true
	//base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"ABC", "PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Array to read changes from feed to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 4)

	// Validate the initial sequences arrive as expected
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	// Test backfill clear - sequence numbers go back to standard handling
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
	WriteDirect(db, []string{"ABC", "PBS"}, 4)

	db.changeCache.waitForSequenceWithMissing(4)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 2)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 6)
	assert.True(t, verifyChangesSequences(changes, []uint64{1, 2, 5, 6, 3, 4}))

	WriteDirect(db, []string{"ABC"}, 7)
	WriteDirect(db, []string{"ABC", "NBC"}, 8)
	WriteDirect(db, []string{"ABC", "PBS"}, 9)
	db.changeCache.waitForSequence(9)
	err = appendFromFeed(&changes, feed, 3)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 9)
	assert.True(t, verifyChangesSequences(changes, []uint64{1, 2, 5, 6, 3, 4, 7, 8, 9}))

	close(options.Terminator)
}
// Test backfill of late arriving sequences to a continuous changes feed
func TestContinuousChangesBackfill(t *testing.T) {

	base.LogKeys["Sequences"] = true
	base.LogKeys["Cache"] = true
	//base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "CBS"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"PBS"}, 5)
	WriteDirect(db, []string{"CBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Array to read changes from feed to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)

	// Validate the initial sequences arrive as expected
	err = appendFromFeed(&changes, feed, 4)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	WriteDirect(db, []string{"CBS"}, 3)
	WriteDirect(db, []string{"PBS"}, 12)

	db.changeCache.waitForSequence(12)
	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 2)

	assert.Equals(t, len(changes), 6)
	assert.True(t, verifyChangesFullSequences(changes, []string{
		"1", "2", "2::5", "2::6", "3", "3::12"}))
	// Test multiple backfill in single changes loop iteration
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "CBS"}, 4)
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "CBS"}, 7)
	WriteDirect(db, []string{"ABC", "PBS"}, 8)
	WriteDirect(db, []string{"ABC", "PBS"}, 13)
	db.changeCache.waitForSequence(13)
	time.Sleep(50 * time.Millisecond)

	err = appendFromFeed(&changes, feed, 4)
	assert.Equals(t, len(changes), 10)
	// We can't guarantee how compound sequences will be generated in a multi-core test - will
	// depend on timing of arrival in late sequence logs.  e.g. could come through as any one of
	// the following (where all are valid), depending on timing:
	//  ..."4","7","8","8::13"
	//  ..."4", "6::7", "6::8", "6::13"
	//  ..."3::4", "3::7", "3::8", "3::13"
	// For this reason, we're just verifying that the expected sequences come through (ignoring
	// prefix)
	assert.True(t, verifyChangesSequences(changes, []uint64{
		1, 2, 5, 6, 3, 12, 4, 7, 8, 13}))

	close(options.Terminator)
}
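// As the comment above notes, compound prefixes can vary with timing, so verifyChangesSequences
// only checks the underlying sequence numbers.  A sketch along those lines, assuming the bare
// sequence is change.Seq.Seq (as the assertions earlier in this file use); illustrative only.
func verifyChangesSequencesSketch(changes []*ChangeEntry, expectedSequences []uint64) bool {
	if len(changes) != len(expectedSequences) {
		log.Printf("verifyChangesSequences: expected %d changes, got %d", len(expectedSequences), len(changes))
		return false
	}
	for i, change := range changes {
		// Ignore LowSeq/TriggeredBy prefixes - compare only the raw sequence number
		if change.Seq.Seq != expectedSequences[i] {
			log.Printf("verifyChangesSequences: mismatch at index %d: got %d, want %d", i, change.Seq.Seq, expectedSequences[i])
			return false
		}
	}
	return true
}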