Example #1
func TestIndexChangesAdminBackfill(t *testing.T) {
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	user.SetSequence(1)
	authenticator.Save(user)

	// Create docs on multiple channels:
	db.Put("both_1", Body{"channels": []string{"ABC", "PBS"}})
	db.Put("doc0000609", Body{"channels": []string{"PBS"}})
	db.Put("doc0000799", Body{"channels": []string{"ABC"}})
	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed:
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 3)

	// Modify user to have access to both channels:
	log.Println("Get Principal")
	userInfo, err := db.GetPrincipal("naomi", true)
	assertNoError(t, err, "GetPrincipal failed")
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")
	time.Sleep(100 * time.Millisecond)

	// Write a few more docs (that should be returned as non-backfill)
	db.Put("doc_nobackfill_1", Body{"channels": []string{"PBS"}})
	db.Put("doc_nobackfill_2", Body{"channels": []string{"PBS"}})
	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed:
	log.Println("Get User")
	db.user, _ = authenticator.GetUser("naomi")
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 5)
	verifyChange(t, changes, "both_1", true)
	verifyChange(t, changes, "doc0000609", true)
	verifyChange(t, changes, "doc_nobackfill_1", false)
	verifyChange(t, changes, "doc_nobackfill_2", false)

}
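// The helpers used above (verifyChange, printChanges, getZeroSequence, getLastSeq) are
// defined elsewhere in the test package. A minimal sketch of verifyChange, assuming a
// non-zero Seq.TriggeredBy marks an entry delivered as backfill after an access grant:
func verifyChange(t *testing.T, changes []*ChangeEntry, docID string, expectedBackfill bool) {
	for _, change := range changes {
		if change.ID == docID {
			isBackfill := change.Seq.TriggeredBy > 0
			if isBackfill != expectedBackfill {
				t.Errorf("Doc %q: expected backfill=%v, got %v", docID, expectedBackfill, isBackfill)
			}
			return
		}
	}
	t.Errorf("Doc %q not found in changes", docID)
}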
// Test notification when buffered entries are processed after a user doc arrives.
func TestChannelCacheBufferingWithUserDoc(t *testing.T) {

	base.EnableLogKey("Cache")
	base.EnableLogKey("Cache+")
	base.EnableLogKey("Changes")
	base.EnableLogKey("Changes+")
	db := setupTestDBWithCacheOptions(t, CacheOptions{})
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Simulate seq 1 (user doc) being delayed - write 2 first
	WriteDirect(db, []string{"ABC"}, 2)

	// Start wait for doc in ABC
	waiter := db.tapListener.NewWaiterWithChannels(channels.SetOf("ABC"), nil)

	successChan := make(chan bool)
	go func() {
		waiter.Wait()
		close(successChan)
	}()

	// Simulate a user doc update
	WriteUserDirect(db, "bernard", 1)

	// Wait 3 seconds for notification, else fail the test.
	select {
	case <-successChan:
		log.Println("notification successful")
	case <-time.After(time.Second * 3):
		assertFailed(t, "No notification after 3 seconds")
	}

}
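// WriteDirect, WriteDirectWithKey, and WriteUserDirect are test helpers defined elsewhere
// in this package. A rough sketch of the two used above, assuming the change cache picks up
// documents written straight to the bucket, and that user docs live under an "_sync:user:"
// key prefix (both assumptions):
func WriteDirect(db *Database, channelArray []string, sequence uint64) {
	// Derive a doc ID from the sequence and delegate to the keyed variant.
	WriteDirectWithKey(db, fmt.Sprintf("doc-%v", sequence), channelArray, sequence)
}

func WriteUserDirect(db *Database, username string, sequence uint64) {
	// Write a minimal user doc carrying the sequence, bypassing db.Put.
	docID := fmt.Sprintf("_sync:user:%s", username)
	db.Bucket.Add(docID, 0, Body{"sequence": sequence, "name": username})
}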
// Test backfill of late arriving sequences to the channel caches
func TestChannelCacheBackfill(t *testing.T) {

	base.EnableLogKey("Cache")
	base.EnableLogKey("Changes+")
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Simulate seqs 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"ABC", "PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	// Test that retrieval isn't blocked by skipped sequences
	db.changeCache.waitForSequenceID(SequenceID{Seq: 6})
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 0}})
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	lastSeq := changes[len(changes)-1].Seq

	// Validate insert to various cache states
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
	WriteDirect(db, []string{"CBS"}, 7)
	db.changeCache.waitForSequenceID(SequenceID{Seq: 7})
	// verify insert at start (PBS)
	pbsCache := db.changeCache.getChannelCache("PBS")
	assert.True(t, verifyCacheSequences(pbsCache, []uint64{3, 5, 6}))
	// verify insert at middle (ABC)
	abcCache := db.changeCache.getChannelCache("ABC")
	assert.True(t, verifyCacheSequences(abcCache, []uint64{1, 2, 3, 5, 6}))
	// verify insert at end (NBC)
	nbcCache := db.changeCache.getChannelCache("NBC")
	assert.True(t, verifyCacheSequences(nbcCache, []uint64{1, 3}))
	// verify insert to empty cache (TBS)
	tbsCache := db.changeCache.getChannelCache("TBS")
	assert.True(t, verifyCacheSequences(tbsCache, []uint64{3}))

	// verify changes has three entries (needs to resend everything since the previous
	// LowSeq, which will be the late arrival (3) along with 5 and 6)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assert.Equals(t, len(changes), 3)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3, LowSeq: 3},
		ID:      "doc-3",
		Changes: []ChangeRev{{"rev": "1-a"}}})

}
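// verifyCacheSequences is defined elsewhere in the test package; a minimal sketch,
// assuming channelCache keeps its in-memory entries in the `logs` slice of *LogEntry
// (as referenced by the cache-size test below), ordered by sequence:
func verifyCacheSequences(cache *channelCache, expectedSequences []uint64) bool {
	if len(cache.logs) != len(expectedSequences) {
		return false
	}
	for i, entry := range cache.logs {
		if entry.Sequence != expectedSequences[i] {
			return false
		}
	}
	return true
}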
func TestDenseBlockMultipleUpdates(t *testing.T) {
	base.EnableLogKey("ChannelStorage")
	base.EnableLogKey("ChannelStorage+")
	indexBucket := testIndexBucket()
	defer indexBucket.Close()

	block := NewDenseBlock("block1", nil)

	// Inserts
	entries := make([]*LogEntry, 10)
	for i := 0; i < 10; i++ {
		vbno := 10*i + 1
		sequence := i + 1
		entries[i] = makeBlockEntry(fmt.Sprintf("doc%d", i), "1-abc", vbno, sequence, IsNotRemoval, IsAdded)
	}
	overflow, pendingRemoval, updateClock, err := block.AddEntrySet(entries, indexBucket)
	assertNoError(t, err, "Error adding entry set")
	assert.Equals(t, len(overflow), 0)
	assert.Equals(t, len(pendingRemoval), 0)
	assert.Equals(t, block.getEntryCount(), uint16(10))

	foundEntries := block.GetAllEntries()
	assert.Equals(t, len(foundEntries), 10)
	for i := 0; i < 10; i++ {
		vbno := 10*i + 1
		sequence := i + 1
		assertLogEntry(t, foundEntries[i], fmt.Sprintf("doc%d", i), "1-abc", vbno, sequence)
		assert.Equals(t, updateClock.GetSequence(uint16(i*10+1)), uint64(i+1))

	}

	// Updates
	entries = make([]*LogEntry, 10)
	for i := 0; i < 10; i++ {
		vbno := 10*i + 1
		sequence := i + 21
		entries[i] = makeBlockEntry(fmt.Sprintf("doc%d", i), "2-abc", vbno, sequence, IsNotRemoval, IsNotAdded)
		entries[i].PrevSequence = uint64(i + 1)
	}
	overflow, pendingRemoval, updateClock, err = block.AddEntrySet(entries, indexBucket)
	assertNoError(t, err, "Error adding entry set")
	assert.Equals(t, len(overflow), 0)
	assert.Equals(t, len(pendingRemoval), 0)
	assert.Equals(t, int(block.getEntryCount()), 10)

	foundEntries = block.GetAllEntries()
	assert.Equals(t, len(foundEntries), 10)
	for i := 0; i < 10; i++ {
		assertLogEntry(t, foundEntries[i], fmt.Sprintf("doc%d", i), "2-abc", 10*i+1, 21+i)
		assert.Equals(t, updateClock.GetSequence(uint16(i*10+1)), uint64(i+21))
	}

}
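// makeBlockEntry and assertLogEntry are defined elsewhere in the test package. A sketch of
// makeBlockEntry under assumed LogEntry field names; IsNotRemoval/IsAdded are taken to be
// named boolean constants that drive removal/added flags (flag handling omitted here):
func makeBlockEntry(docID, revID string, vbno, sequence int, isRemoval, isAdded bool) *LogEntry {
	// Removal/added flag handling omitted in this sketch.
	return &LogEntry{
		DocID:    docID,
		RevID:    revID,
		VbNo:     uint16(vbno),
		Sequence: uint64(sequence),
	}
}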
func TestChangeIndexChanges(t *testing.T) {
	base.EnableLogKey("DIndex+")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Write an entry to the bucket
	WriteDirectWithKey(db, "1c856b5724dcf4273c3993619900ce7f", []string{}, 1)

	time.Sleep(20 * time.Millisecond)
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 1)

	time.Sleep(20 * time.Millisecond)
	// Write a few more entries to the bucket
	WriteDirectWithKey(db, "12389b182ababd12fff662848edeb908", []string{}, 1)
	time.Sleep(20 * time.Millisecond)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 2)
}
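// simpleClockSequence is defined elsewhere in the test package. A sketch, assuming it
// builds a clock-style SequenceID with every vbucket set to the given sequence (so
// simpleClockSequence(0) means "from the beginning"); the SequenceID field names here
// are assumptions:
func simpleClockSequence(seq uint64) SequenceID {
	clock := base.NewSequenceClockImpl()
	for vb := uint16(0); vb < 1024; vb++ {
		clock.SetSequence(vb, seq)
	}
	return SequenceID{SeqType: ClockSequenceType, Clock: clock}
}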
func TestChangeIndexAddSet(t *testing.T) {

	base.EnableLogKey("DIndex+")
	changeIndex, bucket := testKvChangeIndex("indexBucket")
	defer changeIndex.Stop()

	entries := make([]*LogEntry, 1000)
	for vb := 0; vb < 1000; vb++ {
		entries[vb] = channelEntry(uint16(vb), 1, fmt.Sprintf("foo%d", vb), "1-a", []string{"ABC"})
	}

	indexPartitions := testPartitionMap()
	channelStorage := NewChannelStorage(bucket, "", indexPartitions)
	changeIndex.writer.indexEntries(entries, indexPartitions.VbMap, channelStorage)

	// wait for add to complete
	time.Sleep(50 * time.Millisecond)

	// Verify channel clocks
	channelClock := base.SequenceClockImpl{}
	chanClockBytes, _, err := bucket.GetRaw(getChannelClockKey("ABC"))
	assertNoError(t, err, "Get ABC channel clock")
	err = channelClock.Unmarshal(chanClockBytes)
	assertNoError(t, err, "Unmarshal channel clock sequence")

	starChannelClock := base.SequenceClockImpl{}
	chanClockBytes, _, err = bucket.GetRaw(getChannelClockKey("*"))
	assertNoError(t, err, "Get star channel clock")
	err = starChannelClock.Unmarshal(chanClockBytes)
	assertNoError(t, err, "Unmarshal star channel clock sequence")

	for vb := uint16(0); vb < 1000; vb++ {
		assert.Equals(t, channelClock.GetSequence(vb), uint64(1))
		assert.Equals(t, starChannelClock.GetSequence(vb), uint64(1))
	}
}
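// channelEntry is defined elsewhere in the test package; a sketch, assuming LogEntry
// carries a channel map and a received timestamp (field names are assumptions):
func channelEntry(vbNo uint16, seq uint64, docID string, revID string, channelNames []string) *LogEntry {
	channelMap := make(channels.ChannelMap, len(channelNames))
	for _, name := range channelNames {
		channelMap[name] = nil // nil removal value == currently in channel
	}
	return &LogEntry{
		Sequence:     seq,
		DocID:        docID,
		RevID:        revID,
		VbNo:         vbNo,
		TimeReceived: time.Now(),
		Channels:     channelMap,
	}
}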
func TestLateSequenceAsFirst(t *testing.T) {

	base.EnableLogKey("Cache")
	context := testBucketContext()
	cache := newChannelCache(context, "Test1", 0)

	// Add some entries to cache
	cache.addToCache(e(5, "doc1", "1-a"), false)
	cache.addToCache(e(10, "doc2", "2-a"), false)
	cache.addToCache(e(15, "doc3", "3-a"), false)

	entries, err := cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 3)
	assert.True(t, verifyChannelSequences(entries, []uint64{5, 10, 15}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc2", "doc3"}))
	assert.True(t, err == nil)

	// Add a late-arriving sequence
	cache.AddLateSequence(e(3, "doc0", "0-a"))
	cache.addToCache(e(3, "doc0", "0-a"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	writeEntries(entries)
	assert.True(t, verifyChannelSequences(entries, []uint64{3, 5, 10, 15}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc0", "doc1", "doc2", "doc3"}))
	assert.True(t, err == nil)

}
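// Sketches of the cache-test helpers used above: e builds a bare *LogEntry, and the
// verify helpers compare returned entries against the expected sequences and doc IDs
// in order (field names assumed):
func e(seq uint64, docID string, revID string) *LogEntry {
	return &LogEntry{
		Sequence:     seq,
		DocID:        docID,
		RevID:        revID,
		TimeReceived: time.Now(),
	}
}

func verifyChannelSequences(entries []*LogEntry, sequences []uint64) bool {
	if len(entries) != len(sequences) {
		return false
	}
	for i, entry := range entries {
		if entry.Sequence != sequences[i] {
			return false
		}
	}
	return true
}

func verifyChannelDocIDs(entries []*LogEntry, docIDs []string) bool {
	if len(entries) != len(docIDs) {
		return false
	}
	for i, entry := range entries {
		if entry.DocID != docIDs[i] {
			return false
		}
	}
	return true
}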
func TestChangeIndexAddEntry(t *testing.T) {

	base.EnableLogKey("DIndex+")
	changeIndex, bucket := testKvChangeIndex("indexBucket")
	defer changeIndex.Stop()
	changeIndex.writer.addToCache(channelEntry(1, 1, "foo1", "1-a", []string{"ABC", "CBS"}))

	// wait for add
	time.Sleep(50 * time.Millisecond)

	// Verify entry
	var entry LogEntry
	entryBytes, _, err := bucket.GetRaw("_idx_entry:1:1")
	assert.True(t, err == nil)
	assertNoError(t, json.Unmarshal(entryBytes, &entry), "Unmarshal entry")
	assert.Equals(t, entry.DocID, "foo1")
	assert.Equals(t, entry.Sequence, uint64(1))
	assert.Equals(t, entry.RevID, "1-a")

	// Verify Channel Index Block
	partitions, err := changeIndex.getIndexPartitions()
	assertNoError(t, err, "Get index partitions")
	block := NewIndexBlock("ABC", 1, 1, partitions)
	blockBytes, _, err := bucket.GetRaw(getIndexBlockKey("ABC", 0, 0))
	bucket.Dump()
	err = block.Unmarshal(blockBytes)
	assertNoError(t, err, "Unmarshal block")
	allEntries := block.GetAllEntries()
	assert.Equals(t, len(allEntries), 1)

	// Verify stable sequence
	stableClock, err := changeIndex.GetStableClock(false)
	assertNoError(t, err, "Get stable clock")
	assert.Equals(t, stableClock.GetSequence(1), uint64(1))
	assert.Equals(t, stableClock.GetSequence(2), uint64(0))

	// Verify channel sequences
	channelClock := base.SequenceClockImpl{}
	chanClockBytes, _, err := bucket.GetRaw(getChannelClockKey("ABC"))
	assertNoError(t, err, "Get ABC channel clock")
	log.Println("key:", getChannelClockKey("ABC"))
	log.Println("bytes:", chanClockBytes)
	err = channelClock.Unmarshal(chanClockBytes)
	log.Println("chan ABC", channelClock.GetSequence(1))
	assertNoError(t, err, "Unmarshal channel clock sequence")
	assert.Equals(t, channelClock.GetSequence(1), uint64(1))
	assert.Equals(t, channelClock.GetSequence(2), uint64(0))

	channelClock = base.SequenceClockImpl{}
	chanClockBytes, _, err = bucket.GetRaw(getChannelClockKey("CBS"))
	assertNoError(t, err, "Get CBS channel clock")
	err = channelClock.Unmarshal(chanClockBytes)
	assertNoError(t, err, "Unmarshal channel clock sequence")
	assert.Equals(t, channelClock.GetSequence(1), uint64(1))
	assert.Equals(t, channelClock.GetSequence(2), uint64(0))
}
func BenchmarkChannelCacheRepeatedDocs95(b *testing.B) {

	base.EnableLogKey("CacheTest")
	//base.SetLogLevel(2) // disables logging
	context := testBucketContext()
	cache := newChannelCache(context, "Benchmark", 0)
	// generate doc IDs

	docIDs, revStrings := generateDocs(95.0, b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cache.addToCache(e(uint64(i), docIDs[i], revStrings[i]), false)
	}
}
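// generateDocs is defined elsewhere in the test package. A sketch, assuming the first
// argument is the percentage of entries that should reuse an earlier docID (so 95.0 yields
// ~95% repeated docs, stressing the cache's deduplication path), with rev generations
// bumped per reuse; uses math/rand with a fixed seed so benchmark runs stay comparable:
func generateDocs(repeatPercent float64, n int) ([]string, []string) {
	docIDs := make([]string, n)
	revStrings := make([]string, n)
	revCount := make(map[string]int, n)
	r := rand.New(rand.NewSource(42))
	for i := 0; i < n; i++ {
		if i > 0 && r.Float64()*100 < repeatPercent {
			docIDs[i] = docIDs[r.Intn(i)] // reuse an earlier doc ID
		} else {
			docIDs[i] = fmt.Sprintf("doc_%d", i)
		}
		revCount[docIDs[i]]++
		revStrings[i] = fmt.Sprintf("%d-abc", revCount[docIDs[i]])
	}
	return docIDs, revStrings
}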
// Currently disabled due to a race between the continuous changes start (in its own goroutine)
// and the send of the continuous terminator: we can't guarantee the changes request has started
// before the other test operations run (so the test might never break out of the changes loop).
func RaceTestPollResultLongRunningContinuous(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a continuous changes on channel (ABC).  Waitgroup keeps test open until continuous is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		assertTrue(t, err == nil, "Error parsing sequence ID")
		abcChanges, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		log.Printf("Got %d changes", len(abcChanges))
		log.Println("Continuous completed")

	}()

	for i := 0; i < 10000; i++ {
		WriteDirectWithKey(db, fmt.Sprintf("docABC_%d", i), []string{"ABC"}, 3)
		time.Sleep(1 * time.Millisecond)
	}

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	close(continuousTerminator)
	log.Println("closed terminator")

	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"ABC"}, 1)

	wg.Wait()

}
// --------------------
// DenseBlockList Tests
// --------------------
func TestDenseBlockList(t *testing.T) {

	base.EnableLogKey("ChannelStorage+")
	indexBucket := testIndexBucket()
	defer indexBucket.Close()

	// Initialize a new block list.  Will initialize with first block
	list := NewDenseBlockList("ABC", 1, indexBucket)

	// Simple insert
	partitionClock := makePartitionClock(
		[]uint16{1, 3, 6, 11},
		[]uint64{0, 0, 0, 0},
	)
	_, err := list.AddBlock()
	assertNoError(t, err, "Error adding block to blocklist")

	indexBucket.Dump()

	// Create a new instance of the same block list, validate contents
	newList := NewDenseBlockList("ABC", 1, indexBucket)
	assert.Equals(t, len(newList.blocks), 2)
	assert.Equals(t, newList.blocks[0].BlockIndex, 0)

	// Add a few more blocks to the new list

	partitionClock.incrementPartitionClock(1)
	_, err = newList.AddBlock()
	assertNoError(t, err, "Error adding block2 to blocklist")
	assert.Equals(t, len(newList.blocks), 3)
	assert.Equals(t, newList.blocks[0].BlockIndex, 0)
	assert.Equals(t, newList.blocks[1].BlockIndex, 1)

	// Attempt to add a block via the original list.  Should be cancelled due to a CAS
	// mismatch, and should reload the current state (i.e. match newList)
	partitionClock.incrementPartitionClock(1)
	list.AddBlock()
	assert.Equals(t, len(list.blocks), 3)
	assert.Equals(t, list.blocks[0].BlockIndex, 0)
	assert.Equals(t, list.blocks[1].BlockIndex, 1)

}
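// makePartitionClock and incrementPartitionClock are defined elsewhere in the test
// package. Sketches, assuming PartitionClock is a map from vbucket number to sequence:
func makePartitionClock(vbNos []uint16, seqs []uint64) PartitionClock {
	clock := make(PartitionClock, len(vbNos))
	for i, vb := range vbNos {
		clock[vb] = seqs[i]
	}
	return clock
}

func (clock PartitionClock) incrementPartitionClock(amount uint64) {
	for vb := range clock {
		clock[vb] += amount
	}
}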
func TestDuplicateDocID(t *testing.T) {

	base.EnableLogKey("Cache")
	context := testBucketContext()
	cache := newChannelCache(context, "Test1", 0)

	// Add some entries to cache
	cache.addToCache(e(1, "doc1", "1-a"), false)
	cache.addToCache(e(2, "doc3", "3-a"), false)
	cache.addToCache(e(3, "doc5", "5-a"), false)

	entries, err := cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 3)
	assert.True(t, verifyChannelSequences(entries, []uint64{1, 2, 3}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc3", "doc5"}))
	assert.True(t, err == nil)

	// Add a new revision matching mid-list
	cache.addToCache(e(4, "doc3", "3-b"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 3)
	assert.True(t, verifyChannelSequences(entries, []uint64{1, 3, 4}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc5", "doc3"}))
	assert.True(t, err == nil)

	// Add a new revision matching first
	cache.addToCache(e(5, "doc1", "1-b"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 3)
	assert.True(t, verifyChannelSequences(entries, []uint64{3, 4, 5}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc5", "doc3", "doc1"}))
	assert.True(t, err == nil)

	// Add a new revision matching last
	cache.addToCache(e(6, "doc1", "1-c"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 3)
	assert.True(t, verifyChannelSequences(entries, []uint64{3, 4, 6}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc5", "doc3", "doc1"}))
	assert.True(t, err == nil)

}
func TestPollResultReuseLongpoll(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a longpoll changes, use waitgroup to delay the test until it returns.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		assertTrue(t, err == nil, "Error parsing sequence ID")
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true})
		assertTrue(t, err == nil, "Error getting changes")
		// Expects two changes - the nil that's sent on initial wait, and then docABC_2
		assert.Equals(t, len(abcHboChanges), 2)
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel ABC to notify the waiting longpoll
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 2)

	wg.Wait()

	// Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
	// or reloaded from index).  Expect one poll hit (the longpoll request), and one miss (the basic changes request)
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "1")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")

}
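// getExpvarAsString is a small helper defined elsewhere; a sketch, assuming indexExpvars
// is an *expvar.Map whose values stringify to their counts:
func getExpvarAsString(expvarMap *expvar.Map, name string) string {
	value := expvarMap.Get(name)
	if value == nil {
		return ""
	}
	return value.String()
}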
func TestCalculateChangedPartitions(t *testing.T) {
	base.EnableLogKey("ChannelStorage+")
	indexBucket := testIndexBucket()
	defer indexBucket.Close()

	reader := NewDenseStorageReader(indexBucket, "ABC", testPartitionMap())

	startClock := getClockForMap(map[uint16]uint64{
		0:   0,
		100: 0,
		200: 0,
	})
	endClock := getClockForMap(map[uint16]uint64{
		0:   5,
		100: 10,
		200: 15,
	})

	changedVbs, changedPartitions := reader.calculateChanged(startClock, endClock)
	assert.Equals(t, len(changedVbs), 3)
	assert.Equals(t, changedVbs[0], uint16(0))   // Partition 0
	assert.Equals(t, changedVbs[1], uint16(100)) // Partition 6
	assert.Equals(t, changedVbs[2], uint16(200)) // Partition 12

	changedPartitionCount := 0
	for partition, partitionRange := range changedPartitions {
		if partitionRange != nil {
			changedPartitionCount++
			assertTrue(t, partition == 0 || partition == 6 || partition == 12, "Unexpected changed partition")
		}
	}
	assert.Equals(t, changedPartitions[0].Since.GetSequence(0), uint64(0))
	assert.Equals(t, changedPartitions[6].Since.GetSequence(100), uint64(0))
	assert.Equals(t, changedPartitions[12].Since.GetSequence(200), uint64(0))
	assert.Equals(t, changedPartitions[0].To.GetSequence(0), uint64(5))
	assert.Equals(t, changedPartitions[6].To.GetSequence(100), uint64(10))
	assert.Equals(t, changedPartitions[12].To.GetSequence(200), uint64(15))
	assert.Equals(t, changedPartitionCount, 3)

}
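// getClockForMap is defined elsewhere in the test package; a sketch, assuming
// base.SequenceClockImpl exposes SetSequence(vbNo, seq) and satisfies the
// base.SequenceClock interface the reader expects:
func getClockForMap(values map[uint16]uint64) base.SequenceClock {
	clock := base.NewSequenceClockImpl()
	for vb, seq := range values {
		clock.SetSequence(vb, seq)
	}
	return clock
}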
Example #15
// Test channel cache size configuration
func TestChannelCacheSize(t *testing.T) {

	base.EnableLogKey("Cache")
	channelOptions := ChannelCacheOptions{
		ChannelCacheMinLength: 600,
		ChannelCacheMaxLength: 600,
	}
	options := CacheOptions{
		ChannelCacheOptions: channelOptions,
	}

	log.Printf("Options in test:%+v", options)
	db := setupTestDBWithCacheOptions(t, options)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Write 750 docs to channel ABC
	for i := 1; i <= 750; i++ {
		WriteDirect(db, []string{"ABC"}, uint64(i))
	}

	// Validate that retrieval returns expected sequences
	db.changeCache.waitForSequence(750)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: SequenceID{Seq: 0}})
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 750)

	// Validate that cache stores the expected number of values
	changeCache, ok := db.changeCache.(*changeCache)
	assertTrue(t, ok, "Testing channel cache size without a change cache")
	abcCache := changeCache.channelCaches["ABC"]
	assert.Equals(t, len(abcCache.logs), 600)
}
Example #16
// Reads the command line flags and the optional config file.
func ParseCommandLine() {

	siteURL := flag.String("personaOrigin", "", "Base URL that clients use to connect to the server")
	addr := flag.String("interface", DefaultInterface, "Address to bind to")
	authAddr := flag.String("adminInterface", DefaultAdminInterface, "Address to bind admin interface to")
	profAddr := flag.String("profileInterface", "", "Address to bind profile interface to")
	configServer := flag.String("configServer", "", "URL of server that can return database configs")
	deploymentID := flag.String("deploymentID", "", "Customer/project identifier for stats reporting")
	couchbaseURL := flag.String("url", DefaultServer, "Address of Couchbase server")
	poolName := flag.String("pool", DefaultPool, "Name of pool")
	bucketName := flag.String("bucket", "sync_gateway", "Name of bucket")
	dbName := flag.String("dbname", "", "Name of Couchbase Server database (defaults to name of bucket)")
	pretty := flag.Bool("pretty", false, "Pretty-print JSON responses")
	verbose := flag.Bool("verbose", false, "Log more info about requests")
	logKeys := flag.String("log", "", "Log keywords, comma separated")
	logFilePath := flag.String("logFilePath", "", "Path to log file")
	skipRunModeValidation := flag.Bool("skipRunModeValidation", false, "Skip config validation for runmode (accel vs normal sg)")

	flag.Parse()

	if flag.NArg() > 0 {
		// Read the configuration file(s), if any:
		for i := 0; i < flag.NArg(); i++ {
			filename := flag.Arg(i)
			c, err := ReadServerConfig(filename)
			if err != nil {
				base.LogFatal("Error reading config file %s: %v", filename, err)
			}
			if config == nil {
				config = c
			} else {
				if err := config.MergeWith(c); err != nil {
					base.LogFatal("Error reading config file %s: %v", filename, err)
				}
			}
		}

		// Override the config file with global settings from command line flags:
		if *addr != DefaultInterface {
			config.Interface = addr
		}
		if *authAddr != DefaultAdminInterface {
			config.AdminInterface = authAddr
		}
		if *profAddr != "" {
			config.ProfileInterface = profAddr
		}
		if *configServer != "" {
			config.ConfigServer = configServer
		}
		if *deploymentID != "" {
			config.DeploymentID = deploymentID
		}
		if *pretty {
			config.Pretty = *pretty
		}
		if config.Log != nil {
			base.ParseLogFlags(config.Log)
		}

		// If the interfaces were not specified in either the config file or
		// on the command line, set them to the default values
		if config.Interface == nil {
			config.Interface = &DefaultInterface
		}
		if config.AdminInterface == nil {
			config.AdminInterface = &DefaultAdminInterface
		}

		if *logFilePath != "" {
			config.LogFilePath = logFilePath
		}

		if *skipRunModeValidation {
			config.SkipRunmodeValidation = *skipRunModeValidation
		}

	} else {
		// If no config file is given, create a default config, filled in from command line flags:
		if *dbName == "" {
			*dbName = *bucketName
		}

		// At this point the addr is either:
		//   - A value provided by the user, in which case we want to leave it as is
		//   - The default value (":4984"), which is actually _not_ the default value we
		//     want for this case, since we are enabling insecure mode.  We want "localhost:4984" instead.
		// See #708 for more details
		if *addr == DefaultInterface {
			*addr = "localhost:4984"
		}

		config = &ServerConfig{
			Interface:        addr,
			AdminInterface:   authAddr,
			ProfileInterface: profAddr,
			Pretty:           *pretty,
			Databases: map[string]*DbConfig{
				*dbName: {
					Name: *dbName,
					BucketConfig: BucketConfig{
						Server: couchbaseURL,
						Bucket: bucketName,
						Pool:   poolName,
					},
					Users: map[string]*db.PrincipalConfig{
						base.GuestUsername: {
							Disabled:         false,
							ExplicitChannels: base.SetFromArray([]string{"*"}),
						},
					},
				},
			},
		}
	}

	if *siteURL != "" {
		if config.Persona == nil {
			config.Persona = new(PersonaConfig)
		}
		config.Persona.Origin = *siteURL
	}

	base.EnableLogKey("HTTP")
	if *verbose {
		base.EnableLogKey("HTTP+")
	}
	base.ParseLogFlag(*logKeys)

	//return config
}
func TestDuplicateLateArrivingSequence(t *testing.T) {

	base.EnableLogKey("Cache")
	context := testBucketContext()
	cache := newChannelCache(context, "Test1", 0)

	// Add some entries to cache
	cache.addToCache(e(10, "doc1", "1-a"), false)
	cache.addToCache(e(20, "doc2", "2-a"), false)
	cache.addToCache(e(30, "doc3", "3-a"), false)
	cache.addToCache(e(40, "doc4", "4-a"), false)

	entries, err := cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	assert.True(t, verifyChannelSequences(entries, []uint64{10, 20, 30, 40}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc2", "doc3", "doc4"}))
	assert.True(t, err == nil)

	// Add a late-arriving sequence that should replace earlier sequence
	cache.AddLateSequence(e(25, "doc1", "1-c"))
	cache.addToCache(e(25, "doc1", "1-c"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	writeEntries(entries)
	assert.True(t, verifyChannelSequences(entries, []uint64{20, 25, 30, 40}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc2", "doc1", "doc3", "doc4"}))
	assert.True(t, err == nil)

	// Add a late-arriving sequence that should be ignored (later sequence exists for that docID)
	cache.AddLateSequence(e(15, "doc1", "1-b"))
	cache.addToCache(e(15, "doc1", "1-b"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	writeEntries(entries)
	assert.True(t, verifyChannelSequences(entries, []uint64{20, 25, 30, 40}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc2", "doc1", "doc3", "doc4"}))
	assert.True(t, err == nil)

	// Add a late-arriving sequence adjacent to same ID (cache inserts differently)
	cache.AddLateSequence(e(27, "doc1", "1-d"))
	cache.addToCache(e(27, "doc1", "1-d"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	writeEntries(entries)
	assert.True(t, verifyChannelSequences(entries, []uint64{20, 27, 30, 40}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc2", "doc1", "doc3", "doc4"}))
	assert.True(t, err == nil)

	// Add a late-arriving sequence adjacent to same ID (cache inserts differently)
	cache.AddLateSequence(e(41, "doc4", "4-b"))
	cache.addToCache(e(41, "doc4", "4-b"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	writeEntries(entries)
	assert.True(t, verifyChannelSequences(entries, []uint64{20, 27, 30, 41}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc2", "doc1", "doc3", "doc4"}))
	assert.True(t, err == nil)

	// Add late arriving that's duplicate of oldest in cache
	cache.AddLateSequence(e(45, "doc2", "2-b"))
	cache.addToCache(e(45, "doc2", "2-b"), false)
	entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
	assert.Equals(t, len(entries), 4)
	writeEntries(entries)
	assert.True(t, verifyChannelSequences(entries, []uint64{27, 30, 41, 45}))
	assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc3", "doc4", "doc2"}))
	assert.True(t, err == nil)

}
// Currently disabled due to a race between the continuous changes start (in its own goroutine)
// and the send of the continuous terminator: we can't guarantee the changes request has started
// before the other test operations run (so the test might never break out of the changes loop).
func RaceTestPollResultReuseContinuous(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a continuous changes on a different channel (CBS).  Waitgroup keeps test open until continuous is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		assertTrue(t, err == nil, "Error parsing sequence ID")
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		// Expect 2 entries + 3 nil entries (one per wait)
		assert.Equals(t, len(abcHboChanges), 5)
		for i := 0; i < len(abcHboChanges); i++ {
			log.Printf("Got change:%+v", abcHboChanges[i])
		}
		log.Println("Continuous completed")
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel HBO to shift the continuous since value ahead
	WriteDirectWithKey(db, "docHBO_1", []string{"HBO"}, 3)

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	// Write an entry to channel ABC - last polled should be used
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 4)

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	close(continuousTerminator)
	log.Println("closed terminator")

	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"HBO"}, 1)

	wg.Wait()

	// Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
	// or reloaded from index).  Expect two poll hits (docHBO_1, docABC_2), and one miss (the initial changes request)

	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "2")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")

	time.Sleep(100 * time.Millisecond)

	// Make a changes request prior to the last polled range, ensure it doesn't reuse polled results
	changes, err = db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")

	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "2")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "2")

}
Example #19
func CouchbaseTestIndexChangesAccessBackfill(t *testing.T) {

	// Not walrus compatible, until we add support for meta.vb and meta.vbseq to walrus views.
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
		if (doc.accessGrant) {
			console.log("access grant to " + doc.accessGrant);
			access(doc.accessGrant, "PBS");
		}
		channel(doc.channels);
	}`)

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create docs on multiple channels:
	_, err := db.Put("both_1", Body{"channels": []string{"ABC", "PBS"}})
	assertNoError(t, err, "Put failed")
	_, err = db.Put("doc0000609", Body{"channels": []string{"PBS"}})
	assertNoError(t, err, "Put failed")
	_, err = db.Put("doc0000799", Body{"channels": []string{"ABC"}})
	assertNoError(t, err, "Put failed")

	time.Sleep(2000 * time.Millisecond)

	// Check the _changes feed:
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 2)

	// Write a doc to grant user access to PBS:
	db.Put("doc_grant", Body{"accessGrant": "naomi"})
	time.Sleep(1000 * time.Millisecond)

	// Write a few more docs (that should be returned as non-backfill)
	db.Put("doc_nobackfill_1", Body{"channels": []string{"PBS"}})
	db.Put("doc_nobackfill_2", Body{"channels": []string{"PBS"}})
	time.Sleep(1000 * time.Millisecond)

	// Check the _changes feed:
	log.Println("Get User")
	db.user, _ = authenticator.GetUser("naomi")
	db.changeCache.waitForSequence(1)
	time.Sleep(1000 * time.Millisecond)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())
	log.Println("Get Changes")
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 5)
	verifyChange(t, changes, "both_1", true)
	verifyChange(t, changes, "doc0000609", true)
	verifyChange(t, changes, "doc_nobackfill_1", false)
	verifyChange(t, changes, "doc_nobackfill_2", false)

}
func TestChangeIndexGetChanges(t *testing.T) {

	base.EnableLogKey("DIndex+")
	changeIndex, bucket := testKvChangeIndex("indexBucket")
	defer changeIndex.Stop()
	// Add entries across multiple partitions
	changeIndex.writer.addToCache(channelEntry(100, 1, "foo1", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(300, 5, "foo3", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(500, 1, "foo5", "1-a", []string{"ABC", "CBS"}))

	// wait for add
	time.Sleep(100 * time.Millisecond)

	// Verify entries
	entries, err := changeIndex.GetChanges("ABC", ChangesOptions{Since: simpleClockSequence(0)})
	assert.Equals(t, len(entries), 3)
	assert.True(t, err == nil)

	// Add entries across multiple partitions in the same block
	changeIndex.writer.addToCache(channelEntry(101, 1, "foo101-1", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(100, 8, "foo100-8", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(498, 3, "foo498-3", "1-a", []string{"ABC", "CBS"}))

	// wait for add
	time.Sleep(100 * time.Millisecond)
	bucket.Dump()
	// Verify entries
	entries, err = changeIndex.GetChanges("ABC", ChangesOptions{Since: simpleClockSequence(0)})
	assert.Equals(t, len(entries), 6)
	assert.True(t, err == nil)

	// Add entries across multiple partitions, multiple blocks
	changeIndex.writer.addToCache(channelEntry(101, 10001, "foo101-10001", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(100, 10100, "foo100-10100", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(498, 20003, "foo498-20003", "1-a", []string{"ABC", "CBS"}))

	// wait for add
	time.Sleep(100 * time.Millisecond)
	// Verify entries
	entries, err = changeIndex.GetChanges("ABC", ChangesOptions{Since: simpleClockSequence(0)})
	assert.Equals(t, len(entries), 9)
	assert.True(t, err == nil)

	// Retrieval for a more restricted range
	entries, err = changeIndex.GetChanges("ABC", ChangesOptions{Since: simpleClockSequence(100)})
	assert.Equals(t, len(entries), 3)
	assert.True(t, err == nil)

	// Retrieval for a more restricted range where the since matches a valid sequence number (since border case)
	entries, err = changeIndex.GetChanges("ABC", ChangesOptions{Since: simpleClockSequence(10100)})
	assert.Equals(t, len(entries), 1)
	assert.True(t, err == nil)

	// Add entries that skip a block in a partition
	changeIndex.writer.addToCache(channelEntry(800, 100, "foo800-100", "1-a", []string{"ABC", "CBS"}))
	changeIndex.writer.addToCache(channelEntry(800, 20100, "foo800-20100", "1-a", []string{"ABC", "CBS"}))

	// wait for add
	time.Sleep(100 * time.Millisecond)
	// Verify entries
	entries, err = changeIndex.GetChanges("ABC", ChangesOptions{Since: simpleClockSequence(0)})
	assert.Equals(t, len(entries), 11)
	assert.True(t, err == nil)

	// Test deduplication by doc id, including across empty blocks
	changeIndex.writer.addToCache(channelEntry(700, 100, "foo700", "1-a", []string{"DUP"}))
	changeIndex.writer.addToCache(channelEntry(700, 200, "foo700", "1-b", []string{"DUP"}))
	changeIndex.writer.addToCache(channelEntry(700, 300, "foo700", "1-c", []string{"DUP"}))
	changeIndex.writer.addToCache(channelEntry(700, 10100, "foo700", "1-d", []string{"DUP"}))
	changeIndex.writer.addToCache(channelEntry(700, 30100, "foo700", "1-e", []string{"DUP"}))
	// wait for add
	time.Sleep(100 * time.Millisecond)
	// Verify entries
	entries, err = changeIndex.GetChanges("DUP", ChangesOptions{Since: simpleClockSequence(0)})
	assert.Equals(t, len(entries), 1)
	assert.True(t, err == nil)

	bucket.Dump()
}
func TestDenseBlockOverflow(t *testing.T) {
	base.EnableLogKey("ChannelStorage")
	indexBucket := testIndexBucket()
	defer indexBucket.Close()

	block := NewDenseBlock("block1", nil)

	// Insert 100 entries, no overflow
	entries := make([]*LogEntry, 100)
	for i := 0; i < 100; i++ {
		vbno := 100
		sequence := i + 1
		entries[i] = makeBlockEntry(fmt.Sprintf("longerDocumentID-%d", sequence), "1-abcdef01234567890", vbno, sequence, IsNotRemoval, IsAdded)
	}
	overflow, pendingRemoval, updateClock, err := block.AddEntrySet(entries, indexBucket)
	assertNoError(t, err, "Error adding entry set")
	assert.Equals(t, len(overflow), 0)
	assert.Equals(t, len(pendingRemoval), 0)
	assert.Equals(t, int(block.getEntryCount()), 100)
	assert.Equals(t, updateClock.GetSequence(100), uint64(100))

	foundEntries := block.GetAllEntries()
	assert.Equals(t, len(foundEntries), 100)
	for i := 0; i < 100; i++ {
		assertLogEntriesEqual(t, foundEntries[i], entries[i])
	}

	// Insert 100 more entries, expect overflow.  Based on this test's doc/rev ids, expect to fit 188 entries in
	// the default block size.
	entries = make([]*LogEntry, 100)
	for i := 0; i < 100; i++ {
		vbno := 100
		sequence := i + 101
		entries[i] = makeBlockEntry(fmt.Sprintf("longerDocumentID-%d", sequence), "1-abcdef01234567890", vbno, sequence, IsNotRemoval, IsAdded)
	}
	overflow, pendingRemoval, updateClock, err = block.AddEntrySet(entries, indexBucket)
	assertNoError(t, err, "Error adding entry set")
	assert.Equals(t, len(overflow), 12)
	assert.Equals(t, len(pendingRemoval), 0)
	assert.Equals(t, int(block.getEntryCount()), 188)
	assert.Equals(t, len(block.value), 10046)
	assert.Equals(t, updateClock.GetSequence(100), uint64(188))

	// Validate overflow contents (last 12 entries)
	for i := 0; i < 12; i++ {
		assertLogEntriesEqual(t, overflow[i], entries[i+88])
	}

	foundEntries = block.GetAllEntries()
	assert.Equals(t, len(foundEntries), 188)
	for i := 0; i < 188; i++ {
		vbno := 100
		sequence := i + 1
		assertLogEntry(t, foundEntries[i], fmt.Sprintf("longerDocumentID-%d", sequence), "1-abcdef01234567890", vbno, sequence)
	}

	// Retry the 12 entries, all should overflow
	var newOverflow []*LogEntry
	newOverflow, pendingRemoval, updateClock, err = block.AddEntrySet(overflow, indexBucket)
	assertNoError(t, err, "Error adding entry set")
	assert.Equals(t, len(newOverflow), 12)
	assert.Equals(t, len(pendingRemoval), 0)
	assert.Equals(t, int(block.getEntryCount()), 188)
	assert.Equals(t, len(block.value), 10046)
	assert.Equals(t, len(updateClock), 0)

}
Example #22
func TestIndexChangesAfterChannelAdded(t *testing.T) {
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	user.SetSequence(1)
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	_, err := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})
	assertNoError(t, err, "Put failed")
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assertNoError(t, err, "GetPrincipal failed")
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)
	db.Bucket.Dump()
	if changeCache, ok := db.changeCache.(*kvChangeIndex); ok {
		changeCache.reader.indexReadBucket.Dump()
	}
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	time.Sleep(250 * time.Millisecond)
	assert.Equals(t, len(changes), 2)
	verifyChange(t, changes, "_user/naomi", false)
	verifyChange(t, changes, "doc1", false)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())

	// Add a new doc (sequence 3):
	_, err = db.Put("doc2", Body{"channels": []string{"PBS"}})
	assertNoError(t, err, "Put failed")

	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	// changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: db.ParseSequenceID(lastSeq)})
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})

	printChanges(changes)
	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)

	verifyChange(t, changes, "doc2", false)

	// validate from zero
	log.Println("From zero:")
	//changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 1, TriggeredBy: 2}})
	changes, err = db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
}
Example #23
func _testChangesAfterChannelAdded(t *testing.T, db *Database) {
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	revid, _ := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assertNoError(t, err, "GetPrincipal failed")
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)
	db.Bucket.Dump()
	if changeCache, ok := db.changeCache.(*kvChangeIndex); ok {
		changeCache.reader.indexReadBucket.Dump()
	}
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	time.Sleep(1000 * time.Millisecond)
	assert.Equals(t, len(changes), 3)
	assert.DeepEquals(t, changes[0], &ChangeEntry{ // Seq 1, from ABC
		Seq:     SequenceID{Seq: 1},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[1], &ChangeEntry{ // Seq 1, from PBS backfill
		Seq:     SequenceID{Seq: 1, TriggeredBy: 2},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[2], &ChangeEntry{ // Seq 2, from ABC and PBS
		Seq:     SequenceID{Seq: 2},
		ID:      "_user/naomi",
		Changes: []ChangeRev{}})
	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())

	// Add a new doc (sequence 3):
	revid, _ = db.Put("doc2", Body{"channels": []string{"PBS"}})

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})

	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3},
		ID:      "doc2",
		Changes: []ChangeRev{{"rev": revid}}})

	// validate from zero
	changes, err = db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
}