Example No. 1
// Unit test for bug #673
func TestUpdatePrincipal(t *testing.T) {
	base.LogKeys["Cache"] = true
	base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Validate that a call to UpdatePrincipals with no changes to the user doesn't allocate a sequence
	userInfo, err := db.GetPrincipal("naomi", true)
	userInfo.ExplicitChannels = base.SetOf("ABC")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "Unable to update principal")

	nextSeq, err := db.sequences.nextSequence()
	assert.Equals(t, nextSeq, uint64(1))

	// Validate that a call to UpdatePrincipals with changes to the user does allocate a sequence
	userInfo, err = db.GetPrincipal("naomi", true)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "Unable to update principal")

	nextSeq, err = db.sequences.nextSequence()
	assert.Equals(t, nextSeq, uint64(3))
}
Example No. 2
func TestIndexChangesAdminBackfill(t *testing.T) {
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	user.SetSequence(1)
	authenticator.Save(user)

	// Create docs on multiple channels:
	db.Put("both_1", Body{"channels": []string{"ABC", "PBS"}})
	db.Put("doc0000609", Body{"channels": []string{"PBS"}})
	db.Put("doc0000799", Body{"channels": []string{"ABC"}})
	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed:
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 3)

	// Modify user to have access to both channels:
	log.Println("Get Principal")
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")
	time.Sleep(100 * time.Millisecond)

	// Write a few more docs (that should be returned as non-backfill)
	db.Put("doc_nobackfill_1", Body{"channels": []string{"PBS"}})
	db.Put("doc_nobackfill_2", Body{"channels": []string{"PBS"}})
	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed:
	log.Println("Get User")
	db.user, _ = authenticator.GetUser("naomi")
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 5)
	verifyChange(t, changes, "both_1", true)
	verifyChange(t, changes, "doc0000609", true)
	verifyChange(t, changes, "doc_nobackfill_1", false)
	verifyChange(t, changes, "doc_nobackfill_2", false)

}
Example No. 3
func TestChangeIndexChanges(t *testing.T) {
	base.EnableLogKey("DIndex+")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Write an entry to the bucket
	WriteDirectWithKey(db, "1c856b5724dcf4273c3993619900ce7f", []string{}, 1)

	time.Sleep(20 * time.Millisecond)
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 1)

	time.Sleep(20 * time.Millisecond)
	// Write a few more entries to the bucket
	WriteDirectWithKey(db, "12389b182ababd12fff662848edeb908", []string{}, 1)
	time.Sleep(20 * time.Millisecond)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 2)
}
Example No. 4
// Test backfill of late arriving sequences to the channel caches
func TestChannelCacheBackfill(t *testing.T) {

	base.LogKeys["Cache"] = true
	base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Simulate seqs 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"ABC", "PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	// Test that retrieval isn't blocked by skipped sequences
	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 0}})
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	lastSeq := changes[len(changes)-1].Seq

	// Validate insert to various cache states
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
	WriteDirect(db, []string{"CBS"}, 7)
	db.changeCache.waitForSequence(7)
	// verify insert at start (PBS)
	pbsCache := db.changeCache.channelCaches["PBS"]
	assert.True(t, verifyCacheSequences(pbsCache, []uint64{3, 5, 6}))
	// verify insert at middle (ABC)
	abcCache := db.changeCache.channelCaches["ABC"]
	assert.True(t, verifyCacheSequences(abcCache, []uint64{1, 2, 3, 5, 6}))
	// verify insert at end (NBC)
	nbcCache := db.changeCache.channelCaches["NBC"]
	assert.True(t, verifyCacheSequences(nbcCache, []uint64{1, 3}))
	// verify insert to empty cache (TBS)
	tbsCache := db.changeCache.channelCaches["TBS"]
	assert.True(t, verifyCacheSequences(tbsCache, []uint64{3}))

	// verify changes has three entries (needs to resend all since previous LowSeq, which
	// will be the late arriver (3) along with 5, 6)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assert.Equals(t, len(changes), 3)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3, LowSeq: 3},
		ID:      "doc-3",
		Changes: []ChangeRev{{"rev": "1-a"}}})

}
Example No. 5
// Unit test for bug #314
func TestChangesAfterChannelAdded(t *testing.T) {
	base.LogKeys["Cache"] = true
	base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	revid, _ := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 1}})
	assertNoError(t, err, "Couldn't GetChanges")

	assert.Equals(t, len(changes), 2)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 2},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[1], &ChangeEntry{
		Seq:     SequenceID{Seq: 2},
		ID:      "_user/naomi",
		Changes: []ChangeRev{}})

	// Add a new doc (sequence 3):
	revid, _ = db.Put("doc2", Body{"channels": []string{"PBS"}})

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 2}})
	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3},
		ID:      "doc2",
		Changes: []ChangeRev{{"rev": revid}}})
}
Example No. 6
func TestRebuildUserRoles(t *testing.T) {
	computer := mockComputer{roles: ch.AtSequence(base.SetOf("role1", "role2"), 3)}
	auth := NewAuthenticator(gTestBucket, &computer)
	user, _ := auth.NewUser("testUser", "letmein", nil)
	user.SetExplicitRoles(ch.TimedSet{"role3": ch.NewVbSimpleSequence(1), "role1": ch.NewVbSimpleSequence(1)})
	err := auth.InvalidateRoles(user)
	assert.Equals(t, err, nil)

	user2, err := auth.GetUser("testUser")
	assert.Equals(t, err, nil)
	expected := ch.AtSequence(base.SetOf("role1", "role3"), 1)
	expected.AddChannel("role2", 3)
	assert.DeepEquals(t, user2.RoleNames(), expected)
}
Example No. 7
func TestSyncFnOnPush(t *testing.T) {
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)

	db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
		log("doc _id = "+doc._id+", _rev = "+doc._rev);
		if (oldDoc)
			log("oldDoc _id = "+oldDoc._id+", _rev = "+oldDoc._rev);
		channel(doc.channels);
	}`)

	// Create first revision:
	body := Body{"key1": "value1", "key2": 1234, "channels": []string{"public"}}
	rev1id, err := db.Put("doc1", body)
	assertNoError(t, err, "Couldn't create document")

	// Add several revisions at once to a doc, as on a push:
	log.Printf("Check PutExistingRev...")
	body["_rev"] = "4-four"
	body["key1"] = "fourth value"
	body["key2"] = int64(4444)
	body["channels"] = "clibup"
	history := []string{"4-four", "3-three", "2-488724414d0ed6b398d6d2aeb228d797",
		rev1id}
	err = db.PutExistingRev("doc1", body, history)
	assertNoError(t, err, "PutExistingRev failed")

	// Check that the doc has the correct channel (test for issue #300)
	doc, err := db.GetDoc("doc1")
	assert.DeepEquals(t, doc.Channels, channels.ChannelMap{
		"clibup": nil, // i.e. it is currently in this channel (no removal)
		"public": &channels.ChannelRemoval{Seq: 2, RevID: "4-four"},
	})
	assert.DeepEquals(t, doc.History["4-four"].Channels, base.SetOf("clibup"))
}
Example No. 8
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator.  We can't ensure that the changes request has been
// started before all other test operations have been sent (in which case we'd never break out of the changes loop)
func RaceTestPollResultLongRunningContinuous(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a continuous changes on channel (ABC).  Waitgroup keeps test open until continuous is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		abcChanges, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		log.Printf("Got %d changes", len(abcChanges))
		log.Println("Continuous completed")

	}()

	for i := 0; i < 10000; i++ {
		WriteDirectWithKey(db, fmt.Sprintf("docABC_%d", i), []string{"ABC"}, 3)
		time.Sleep(1 * time.Millisecond)
	}

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	close(continuousTerminator)
	log.Println("closed terminator")

	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"ABC"}, 1)

	wg.Wait()

}
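The comment above describes the race this disabled test trips over: the continuous changes request runs in its own goroutine, and nothing guarantees it has started before the test closes the terminator. Below is a minimal, self-contained sketch of one way to close that gap (the names startContinuous, started, etc. are hypothetical, not Sync Gateway APIs): the goroutine signals readiness before it blocks, and the caller only sends work or closes the terminator after that signal.

package main

import (
	"fmt"
	"sync"
)

// startContinuous is a stand-in for a continuous changes loop: it signals
// readiness, then consumes work until the terminator is closed.
func startContinuous(started chan<- struct{}, terminator <-chan bool, work <-chan int, wg *sync.WaitGroup) {
	defer wg.Done()
	close(started) // signal that the loop is running before blocking
	for {
		select {
		case <-terminator:
			fmt.Println("continuous loop terminated")
			return
		case n := <-work:
			fmt.Println("processed item", n)
		}
	}
}

func main() {
	var wg sync.WaitGroup
	started := make(chan struct{})
	terminator := make(chan bool)
	work := make(chan int)

	wg.Add(1)
	go startContinuous(started, terminator, work, &wg)

	<-started // don't send work (or close the terminator) until the loop is live
	for i := 0; i < 3; i++ {
		work <- i
	}
	close(terminator)
	wg.Wait()
}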
Example No. 9
// Test low sequence handling of late arriving sequences to a continuous changes feed, when the
// user doesn't have visibility to some of the late arriving sequences
func TestLowSequenceHandlingAcrossChannels(t *testing.T) {

	//base.LogKeys["Cache"] = true
	//base.LogKeys["Changes"] = true
	//base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Read changes from the feed into an array to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 3)

	// Validate the initial sequences arrive as expected
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 3)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6"}))

	// Test backfill of sequence the user doesn't have visibility to
	WriteDirect(db, []string{"PBS"}, 3)
	WriteDirect(db, []string{"ABC"}, 9)

	db.changeCache.waitForSequenceWithMissing(9)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 1)
	assert.Equals(t, len(changes), 4)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6", "3::9"}))

	close(options.Terminator)
}
Example No. 10
func TestPollResultReuseLongpoll(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a longpoll changes, use waitgroup to delay the test until it returns.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		assertTrue(t, err == nil, "Error parsing sequence ID")
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true})
		assertTrue(t, err == nil, "Error getting changes")
		// Expects two changes - the nil that's sent on initial wait, and then docABC_2
		assert.Equals(t, len(abcHboChanges), 2)
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel ABC to notify the waiting longpoll
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 2)

	wg.Wait()

	// Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
	// or reloaded from index).  Expect one poll hit (the longpoll request), and one miss (the basic changes request)
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "1")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")

}
Example No. 11
// Starts a changeListener on a given Bucket.
func (listener *changeListener) Start(bucket base.Bucket, trackDocs bool, notify sgbucket.BucketNotifyFn) error {
	listener.bucket = bucket
	listener.TapArgs = sgbucket.TapArguments{
		Backfill: sgbucket.TapNoBackfill,
		Notify:   notify,
	}
	tapFeed, err := bucket.StartTapFeed(listener.TapArgs)
	if err != nil {
		return err
	}

	listener.tapFeed = tapFeed
	listener.counter = 1
	listener.terminateCheckCounter = 0
	listener.keyCounts = map[string]uint64{}
	listener.tapNotifier = sync.NewCond(&sync.Mutex{})
	if trackDocs {
		listener.DocChannel = make(chan sgbucket.TapEvent, 100)
	}

	// Start a goroutine to broadcast to the tapNotifier whenever a channel or user/role changes:
	go func() {
		defer func() {
			listener.notifyStopping()
			if listener.DocChannel != nil {
				close(listener.DocChannel)
			}
		}()
		for event := range tapFeed.Events() {
			if event.Opcode == sgbucket.TapMutation || event.Opcode == sgbucket.TapDeletion {
				key := string(event.Key)
				if strings.HasPrefix(key, auth.UserKeyPrefix) ||
					strings.HasPrefix(key, auth.RoleKeyPrefix) {
					if listener.OnDocChanged != nil {
						listener.OnDocChanged(key, event.Value, event.Sequence, event.VbNo)
					}
					listener.Notify(base.SetOf(key))
				} else if trackDocs && !strings.HasPrefix(key, KSyncKeyPrefix) && !strings.HasPrefix(key, kIndexPrefix) {
					if listener.OnDocChanged != nil {
						listener.OnDocChanged(key, event.Value, event.Sequence, event.VbNo)
					}
					listener.DocChannel <- event
				}
			}
		}
	}()

	return nil
}
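The goroutine in Start routes tap events by key prefix: user and role keys trigger a notify, internal sync/index keys are skipped, and other doc keys are forwarded when trackDocs is set. A stripped-down sketch of that routing decision follows, with hypothetical prefix constants standing in for auth.UserKeyPrefix, auth.RoleKeyPrefix, KSyncKeyPrefix, and kIndexPrefix; it only mirrors the branch structure, not the listener itself.

package main

import (
	"fmt"
	"strings"
)

// Hypothetical prefixes standing in for the auth/user, auth/role, sync, and
// index key prefixes checked in the tap-feed loop above.
const (
	userPrefix  = "_sync:user:"
	rolePrefix  = "_sync:role:"
	syncPrefix  = "_sync:"
	indexPrefix = "_idx:"
)

// routeKey mirrors the branch structure of the tap-feed loop: principals are
// notified, internal bookkeeping keys are dropped, and remaining doc keys are
// forwarded only when doc tracking is enabled.
func routeKey(key string, trackDocs bool) string {
	switch {
	case strings.HasPrefix(key, userPrefix) || strings.HasPrefix(key, rolePrefix):
		return "notify"
	case trackDocs && !strings.HasPrefix(key, syncPrefix) && !strings.HasPrefix(key, indexPrefix):
		return "docChannel"
	default:
		return "ignore"
	}
}

func main() {
	for _, key := range []string{"_sync:user:naomi", "doc1", "_sync:seq", "_idx:channel:ABC"} {
		fmt.Printf("%-20s -> %s\n", key, routeKey(key, true))
	}
}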
Example No. 12
// Creates a new Set from an array of strings. Returns an error if any names are invalid.
func SetFromArray(names []string, mode StarMode) (base.Set, error) {
	for _, name := range names {
		if !IsValidChannel(name) {
			return nil, illegalChannelError(name)
		}
	}
	result := base.SetFromArray(names)
	switch mode {
	case RemoveStar:
		result = result.Removing(UserStarChannel)
	case ExpandStar:
		if result.Contains(UserStarChannel) {
			result = base.SetOf(UserStarChannel)
		}
	}
	return result, nil
}
Example No. 13
func TestLoaderFunction(t *testing.T) {
	var callsToLoader = 0
	loader := func(id IDAndRev) (body Body, history Body, channels base.Set, err error) {
		callsToLoader++
		if id.DocID[0] != 'J' {
			err = base.HTTPErrorf(404, "missing")
		} else {
			body = Body{
				"_id":  id.DocID,
				"_rev": id.RevID,
			}
			history = Body{"start": 1}
			channels = base.SetOf("*")
		}
		return
	}
	cache := NewRevisionCache(10, loader)

	body, history, channels, err := cache.Get("Jens", "1")
	assert.Equals(t, body["_id"], "Jens")
	assert.True(t, history != nil)
	assert.True(t, channels != nil)
	assert.Equals(t, err, error(nil))
	assert.Equals(t, callsToLoader, 1)

	body, history, channels, err = cache.Get("Peter", "1")
	assert.DeepEquals(t, body, Body(nil))
	assert.DeepEquals(t, err, base.HTTPErrorf(404, "missing"))
	assert.Equals(t, callsToLoader, 2)

	body, history, channels, err = cache.Get("Jens", "1")
	assert.Equals(t, body["_id"], "Jens")
	assert.True(t, history != nil)
	assert.True(t, channels != nil)
	assert.Equals(t, err, error(nil))
	assert.Equals(t, callsToLoader, 2)

	body, history, channels, err = cache.Get("Peter", "1")
	assert.DeepEquals(t, body, Body(nil))
	assert.DeepEquals(t, err, base.HTTPErrorf(404, "missing"))
	assert.Equals(t, callsToLoader, 3)
}
Example No. 14
// Test size config
func TestChannelCacheSize(t *testing.T) {

	base.EnableLogKey("Cache")
	channelOptions := ChannelCacheOptions{
		ChannelCacheMinLength: 600,
		ChannelCacheMaxLength: 600,
	}
	options := CacheOptions{
		ChannelCacheOptions: channelOptions,
	}

	log.Printf("Options in test:%+v", options)
	db := setupTestDBWithCacheOptions(t, options)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Write 750 docs to channel ABC
	for i := 1; i <= 750; i++ {
		WriteDirect(db, []string{"ABC"}, uint64(i))
	}

	// Validate that retrieval returns expected sequences
	db.changeCache.waitForSequence(750)
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: SequenceID{Seq: 0}})
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 750)

	// Validate that cache stores the expected number of values
	changeCache, ok := db.changeCache.(*changeCache)
	assertTrue(t, ok, "Testing skipped sequences without a change cache")
	abcCache := changeCache.channelCaches["ABC"]
	assert.Equals(t, len(abcCache.logs), 600)
}
Example No. 15
func CouchbaseTestIndexChangesAccessBackfill(t *testing.T) {

	// Not walrus compatible, until we add support for meta.vb and meta.vbseq to walrus views.
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewChannelMapper(`function(doc, oldDoc) {
		if (doc.accessGrant) {
			console.log("access grant to " + doc.accessGrant);
			access(doc.accessGrant, "PBS");
		}
		channel(doc.channels);
	}`)

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create docs on multiple channels:
	_, err := db.Put("both_1", Body{"channels": []string{"ABC", "PBS"}})
	assertNoError(t, err, "Put failed")
	_, err = db.Put("doc0000609", Body{"channels": []string{"PBS"}})
	assertNoError(t, err, "Put failed")
	_, err = db.Put("doc0000799", Body{"channels": []string{"ABC"}})
	assertNoError(t, err, "Put failed")

	time.Sleep(2000 * time.Millisecond)

	// Check the _changes feed:
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 2)

	// Write a doc to grant user access to PBS:
	db.Put("doc_grant", Body{"accessGrant": "naomi"})
	time.Sleep(1000 * time.Millisecond)

	// Write a few more docs (that should be returned as non-backfill)
	db.Put("doc_nobackfill_1", Body{"channels": []string{"PBS"}})
	db.Put("doc_nobackfill_2", Body{"channels": []string{"PBS"}})
	time.Sleep(1000 * time.Millisecond)

	// Check the _changes feed:
	log.Println("Get User")
	db.user, _ = authenticator.GetUser("naomi")
	db.changeCache.waitForSequence(1)
	time.Sleep(1000 * time.Millisecond)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())
	log.Println("Get Changes")
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	assert.Equals(t, len(changes), 5)
	verifyChange(t, changes, "both_1", true)
	verifyChange(t, changes, "doc0000609", true)
	verifyChange(t, changes, "doc_nobackfill_1", false)
	verifyChange(t, changes, "doc_nobackfill_2", false)

}
Example No. 16
// If the set contains "*", returns a set of only "*". Else returns the original set.
func ExpandingStar(set base.Set) base.Set {
	if _, exists := set[UserStarChannel]; exists {
		return base.SetOf(UserStarChannel)
	}
	return set
}
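ExpandingStar collapses any channel set containing the wildcard to a single-element set. Here is a self-contained analogue over a plain map-based set, assuming base.Set behaves like a string set and UserStarChannel is the "*" channel (the real types live in the channels/base packages).

package main

import "fmt"

const starChannel = "*" // stand-in for UserStarChannel

// expandStar returns a set containing only "*" if the input contains "*",
// otherwise it returns the input unchanged.
func expandStar(set map[string]struct{}) map[string]struct{} {
	if _, exists := set[starChannel]; exists {
		return map[string]struct{}{starChannel: {}}
	}
	return set
}

func main() {
	withStar := map[string]struct{}{"ABC": {}, "*": {}}
	withoutStar := map[string]struct{}{"ABC": {}, "PBS": {}}
	fmt.Println(len(expandStar(withStar)))    // 1
	fmt.Println(len(expandStar(withoutStar))) // 2
}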
Example No. 17
func TestIndexChangesAfterChannelAdded(t *testing.T) {
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	base.EnableLogKey("Changes+")
	base.EnableLogKey("Backfill")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	user.SetSequence(1)
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	_, err := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})
	assertNoError(t, err, "Put failed")
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)
	db.Bucket.Dump()
	if changeCache, ok := db.changeCache.(*kvChangeIndex); ok {
		changeCache.reader.indexReadBucket.Dump()
	}
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	time.Sleep(250 * time.Millisecond)
	assert.Equals(t, len(changes), 2)
	verifyChange(t, changes, "_user/naomi", false)
	verifyChange(t, changes, "doc1", false)

	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())

	// Add a new doc (sequence 3):
	_, err = db.Put("doc2", Body{"channels": []string{"PBS"}})
	assertNoError(t, err, "Put failed")

	time.Sleep(100 * time.Millisecond)

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	// changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: db.ParseSequenceID(lastSeq)})
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})

	printChanges(changes)
	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)

	verifyChange(t, changes, "doc2", false)

	// validate from zero
	log.Println("From zero:")
	//changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 1, TriggeredBy: 2}})
	changes, err = db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
}
Example No. 18
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator.  We can't ensure that the changes request has been
// started before all other test operations have been sent (in which case we'd never break out of the changes loop)
func RaceTestPollResultReuseContinuous(t *testing.T) {
	// Reset the index expvars
	indexExpvars.Init()
	base.EnableLogKey("IndexPoll")
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
	time.Sleep(100 * time.Millisecond)
	// Do a basic changes to trigger start of polling for channel
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 1)
	log.Printf("Changes:%+v", changes[0])

	// Start a continuous changes on channels ABC and HBO.  Waitgroup keeps test open until continuous is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		since, err := db.ParseSequenceID("2-0")
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		// Expect 2 entries + 3 nil entries (one per wait)
		assert.Equals(t, len(abcHboChanges), 5)
		for i := 0; i < len(abcHboChanges); i++ {
			log.Printf("Got change:%+v", abcHboChanges[i])
		}
		log.Println("Continuous completed")
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel HBO to shift the continuous since value ahead
	WriteDirectWithKey(db, "docHBO_1", []string{"HBO"}, 3)

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	// Write an entry to channel ABC - last polled should be used
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 4)

	time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
	close(continuousTerminator)
	log.Println("closed terminator")

	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"HBO"}, 1)

	wg.Wait()

	// Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
	// or reloaded from index).  Expect two poll hits (docHBO_1, docABC_2), and one miss (the initial changes request)

	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "2")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")

	time.Sleep(100 * time.Millisecond)

	// Make a changes request prior to the last polled range, ensure it doesn't reuse polled results
	changes, err = db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})

	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "2")
	assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "2")

}
Example No. 19
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator.  We can't ensure that the changes request has been
// started before all other test operations have been sent (in which case we'd never break out of the changes loop)
func RaceTestPollingChangesFeed(t *testing.T) {
	//base.LogKeys["DIndex+"] = true
	db := setupTestDBForChangeIndex(t)
	defer tearDownTestDB(t, db)

	dbExpvars.Init()
	db.ChannelMapper = channels.NewDefaultChannelMapper()
	// Start a longpoll changes
	go func() {
		abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
		assertTrue(t, err == nil, "Error getting changes")
		// Expects two changes - the nil that's sent on initial wait, and then docABC_1
		for _, change := range abcHboChanges {
			log.Printf("abcHboChange:%v", change)
		}
		assert.Equals(t, len(abcHboChanges), 2)
	}()

	time.Sleep(100 * time.Millisecond)
	// Write an entry to channel ABC to notify the waiting longpoll
	WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)

	// Start a continuous changes on a different channel (CBS).  Waitgroup keeps test open until continuous is terminated
	var wg sync.WaitGroup
	continuousTerminator := make(chan bool)
	wg.Add(1)
	go func() {
		defer wg.Done()
		cbsChanges, err := db.GetChanges(base.SetOf("CBS"), ChangesOptions{Since: simpleClockSequence(0), Wait: true, Continuous: true, Terminator: continuousTerminator})
		assertTrue(t, err == nil, "Error getting changes")
		// Expect 15 entries + 16 nil entries (one per wait)
		assert.Equals(t, len(cbsChanges), 25)
		log.Println("Continuous completed")

	}()

	// Write another entry to channel ABC to start the clock for unread polls
	time.Sleep(1000 * time.Millisecond)
	WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 1)

	// Verify that the channel is initially in the polled set
	changeIndex, _ := db.changeCache.(*kvChangeIndex)
	assertTrue(t, changeIndex.reader.getChannelReader("ABC") != nil, "Channel reader should not be nil")
	log.Printf("changeIndex readers: %+v", changeIndex.reader.channelIndexReaders)

	// Send multiple docs to channels HBO, PBS, CBS.  Expected results:
	//   ABC - Longpoll has ended - should trigger "nobody listening" expiry of channel reader
	//   CBS - Active continuous feed - channel reader shouldn't expire
	//   PBS - No changes feeds have requested this channel - no channel reader should be created
	//   HBO - New longpoll started mid-way before poll limit reached, channel reader shouldn't expire
	time.Sleep(20 * time.Millisecond)
	for i := 0; i < 12; i++ {
		log.Printf("writing multiDoc_%d", i)
		WriteDirectWithKey(db, fmt.Sprintf("multiDoc_%d", i), []string{"PBS", "HBO", "CBS"}, 1)
		// Midway through, read from HBO
		if i == 9 {
			// wait for polling cycle
			time.Sleep(600 * time.Millisecond)
			hboChanges, err := db.GetChanges(base.SetOf("HBO"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
			assertTrue(t, err == nil, "Error getting changes")
			assert.Equals(t, len(hboChanges), 10)
		}
		time.Sleep(kPollFrequency * time.Millisecond)
	}

	// Verify that the changes feed has been started (avoids test race conditions where we close the terminator before
	// starting the changes feed)
	for i := 0; i <= 40; i++ {
		channelChangesCount := getExpvarAsString(dbExpvars, "channelChangesFeeds")
		log.Printf("channelChangesCount:%s", channelChangesCount)
		if channelChangesCount != "" {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}

	close(continuousTerminator)
	log.Println("closed terminator")

	// Send another entry to continuous CBS feed in order to trigger the terminator check
	time.Sleep(100 * time.Millisecond)
	WriteDirectWithKey(db, "terminatorCheck", []string{"CBS"}, 1)

	time.Sleep(100 * time.Millisecond)
	// Validate that the ABC reader was deleted due to inactivity
	log.Printf("channel reader ABC:%+v", changeIndex.reader.getChannelReader("ABC"))
	assertTrue(t, changeIndex.reader.getChannelReader("ABC") == nil, "Channel reader should be nil")

	// Validate that the HBO reader is still present
	assertTrue(t, changeIndex.reader.getChannelReader("HBO") != nil, "Channel reader should be exist")

	// Start another longpoll changes for ABC, ensure it's successful (will return the existing 2 records, no wait)
	changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 2)

	// Repeat, verify use of existing channel reader
	changes, err = db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0), Wait: true})
	assertTrue(t, err == nil, "Error getting changes")
	assert.Equals(t, len(changes), 2)

	wg.Wait()

}
Example No. 20
func _testChangesAfterChannelAdded(t *testing.T, db *Database) {
	base.EnableLogKey("IndexChanges")
	base.EnableLogKey("Hash+")
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Create a doc on two channels (sequence 1):
	revid, _ := db.Put("doc1", Body{"channels": []string{"ABC", "PBS"}})
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)

	// Modify user to have access to both channels (sequence 2):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	// Check the _changes feed:
	db.changeCache.waitForSequence(1)
	time.Sleep(100 * time.Millisecond)
	db.Bucket.Dump()
	if changeCache, ok := db.changeCache.(*kvChangeIndex); ok {
		changeCache.reader.indexReadBucket.Dump()
	}
	db.user, _ = authenticator.GetUser("naomi")
	changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
	time.Sleep(1000 * time.Millisecond)
	assert.Equals(t, len(changes), 3)
	assert.DeepEquals(t, changes[0], &ChangeEntry{ // Seq 1, from ABC
		Seq:     SequenceID{Seq: 1},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[1], &ChangeEntry{ // Seq 1, from PBS backfill
		Seq:     SequenceID{Seq: 1, TriggeredBy: 2},
		ID:      "doc1",
		Changes: []ChangeRev{{"rev": revid}}})
	assert.DeepEquals(t, changes[2], &ChangeEntry{ // Seq 2, from ABC and PBS
		Seq:     SequenceID{Seq: 2},
		ID:      "_user/naomi",
		Changes: []ChangeRev{}})
	lastSeq := getLastSeq(changes)
	lastSeq, _ = db.ParseSequenceID(lastSeq.String())

	// Add a new doc (sequence 3):
	revid, _ = db.Put("doc2", Body{"channels": []string{"PBS"}})

	// Check the _changes feed -- this is to make sure the changeCache properly received
	// sequence 2 (the user doc) and isn't stuck waiting for it.
	db.changeCache.waitForSequence(3)
	changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})

	assertNoError(t, err, "Couldn't GetChanges (2nd)")

	assert.Equals(t, len(changes), 1)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 3},
		ID:      "doc2",
		Changes: []ChangeRev{{"rev": revid}}})

	// validate from zero
	changes, err = db.GetChanges(base.SetOf("*"), getZeroSequence(db))
	assertNoError(t, err, "Couldn't GetChanges")
	printChanges(changes)
}
Example No. 21
// Test currently fails intermittently on concurrent access to var changes.  Disabling for now - should be refactored.
func FailingTestChannelRace(t *testing.T) {

	base.LogKeys["Sequences"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channels "Odd", "Even"
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("Even", "Odd"))
	authenticator.Save(user)

	// Write initial sequences
	WriteDirect(db, []string{"Odd"}, 1)
	WriteDirect(db, []string{"Even"}, 2)
	WriteDirect(db, []string{"Odd"}, 3)

	db.changeCache.waitForSequence(3)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("Even", "Odd"), options)
	assert.True(t, err == nil)
	feedClosed := false

	// Go-routine to work the feed channel and write to an array for use by assertions
	var changes = make([]*ChangeEntry, 0, 50)
	go func() {
		for feedClosed == false {
			select {
			case entry, ok := <-feed:
				if ok {
					// feed sends nil after each continuous iteration
					if entry != nil {
						log.Println("Changes entry:", entry.Seq)
						changes = append(changes, entry)
					}
				} else {
					log.Println("Closing feed")
					feedClosed = true
				}
			}
		}
	}()

	// Wait for processing of two channels (100 ms each)
	time.Sleep(250 * time.Millisecond)
	// Validate the initial sequences arrive as expected
	assert.Equals(t, len(changes), 3)

	// Send update to trigger the start of the next changes iteration
	WriteDirect(db, []string{"Even"}, 4)
	time.Sleep(150 * time.Millisecond)
	// After read of "Even" channel, but before read of "Odd" channel, send three new entries
	WriteDirect(db, []string{"Odd"}, 5)
	WriteDirect(db, []string{"Even"}, 6)
	WriteDirect(db, []string{"Odd"}, 7)

	time.Sleep(100 * time.Millisecond)

	// At this point we haven't sent sequence 6, but the continuous changes feed has since=7

	// Write a few more to validate that we're not catching up on the missing '6' later
	WriteDirect(db, []string{"Even"}, 8)
	WriteDirect(db, []string{"Odd"}, 9)
	time.Sleep(750 * time.Millisecond)
	assert.Equals(t, len(changes), 9)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9"}))
	changesString := ""
	for _, change := range changes {
		changesString = fmt.Sprintf("%s%d, ", changesString, change.Seq.Seq)
	}
	fmt.Println("changes: ", changesString)

	close(options.Terminator)
}
Example No. 22
func (context *DatabaseContext) NotifyUser(username string) {
	context.tapListener.NotifyCheckForTermination(base.SetOf(auth.UserKeyPrefix + username))
}
Example No. 23
func TestDocDeletionFromChannel(t *testing.T) {
	// See https://github.com/couchbase/couchbase-lite-ios/issues/59
	// base.LogKeys["Changes"] = true
	// base.LogKeys["Cache"] = true

	rt := restTester{syncFn: `function(doc) {channel(doc.channel)}`}
	a := rt.ServerContext().Database("db").Authenticator()

	// Create user:
	alice, _ := a.NewUser("alice", "letmein", channels.SetOf("zero"))
	a.Save(alice)

	// Create a doc Alice can see:
	response := rt.send(request("PUT", "/db/alpha", `{"channel":"zero"}`))

	// Check the _changes feed:
	rt.ServerContext().Database("db").WaitForPendingChanges()
	var changes struct {
		Results []db.ChangeEntry
	}
	response = rt.send(requestByUser("GET", "/db/_changes", "", "alice"))
	log.Printf("_changes looks like: %s", response.Body.Bytes())
	json.Unmarshal(response.Body.Bytes(), &changes)
	assert.Equals(t, len(changes.Results), 1)
	since := changes.Results[0].Seq
	assert.Equals(t, since.Seq, uint64(1))

	assert.Equals(t, changes.Results[0].ID, "alpha")
	rev1 := changes.Results[0].Changes[0]["rev"]

	// Delete the document:
	assertStatus(t, rt.send(request("DELETE", "/db/alpha?rev="+rev1, "")), 200)

	// Get the updates from the _changes feed:
	time.Sleep(100 * time.Millisecond)
	response = rt.send(requestByUser("GET", fmt.Sprintf("/db/_changes?since=%s", since),
		"", "alice"))
	log.Printf("_changes looks like: %s", response.Body.Bytes())
	changes.Results = nil
	json.Unmarshal(response.Body.Bytes(), &changes)
	assert.Equals(t, len(changes.Results), 1)

	assert.Equals(t, changes.Results[0].ID, "alpha")
	assert.Equals(t, changes.Results[0].Deleted, true)
	assert.DeepEquals(t, changes.Results[0].Removed, base.SetOf("zero"))
	rev2 := changes.Results[0].Changes[0]["rev"]

	// Now get the deleted revision:
	response = rt.send(requestByUser("GET", "/db/alpha?rev="+rev2, "", "alice"))
	assert.Equals(t, response.Code, 200)
	log.Printf("Deletion looks like: %s", response.Body.Bytes())
	var docBody db.Body
	json.Unmarshal(response.Body.Bytes(), &docBody)
	assert.DeepEquals(t, docBody, db.Body{"_id": "alpha", "_rev": rev2, "_deleted": true})

	// Access without deletion revID shouldn't be allowed (since doc is not in Alice's channels):
	response = rt.send(requestByUser("GET", "/db/alpha", "", "alice"))
	assert.Equals(t, response.Code, 403)

	// A bogus rev ID should return a 404:
	response = rt.send(requestByUser("GET", "/db/alpha?rev=bogus", "", "alice"))
	assert.Equals(t, response.Code, 404)

	// Get the old revision, which should still be accessible:
	response = rt.send(requestByUser("GET", "/db/alpha?rev="+rev1, "", "alice"))
	assert.Equals(t, response.Code, 200)
}
Example No. 24
// Test low sequence handling of late arriving sequences to a continuous changes feed
func TestLowSequenceHandling(t *testing.T) {

	//base.LogKeys["Cache"] = true
	//base.LogKeys["Changes"] = true
	//base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"ABC", "PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Array to read changes from feed to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 4)

	// Validate the initial sequences arrive as expected
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	// Test backfill clear - sequence numbers go back to standard handling
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
	WriteDirect(db, []string{"ABC", "PBS"}, 4)

	db.changeCache.waitForSequenceWithMissing(4)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 2)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 6)
	assert.True(t, verifyChangesSequences(changes, []uint64{1, 2, 5, 6, 3, 4}))

	WriteDirect(db, []string{"ABC"}, 7)
	WriteDirect(db, []string{"ABC", "NBC"}, 8)
	WriteDirect(db, []string{"ABC", "PBS"}, 9)
	db.changeCache.waitForSequence(9)
	err = appendFromFeed(&changes, feed, 3)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 9)
	assert.True(t, verifyChangesSequences(changes, []uint64{1, 2, 5, 6, 3, 4, 7, 8, 9}))

	close(options.Terminator)
}
Example No. 25
// Test backfill of late arriving sequences to a continuous changes feed
func TestContinuousChangesBackfill(t *testing.T) {

	base.LogKeys["Sequences"] = true
	base.LogKeys["Cache"] = true
	//base.LogKeys["Changes"] = true
	base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "CBS"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC", "NBC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"PBS"}, 5)
	WriteDirect(db, []string{"CBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Array to read changes from feed to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)

	// Validate the initial sequences arrive as expected
	err = appendFromFeed(&changes, feed, 4)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 4)
	assert.DeepEquals(t, changes[0], &ChangeEntry{
		Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
		ID:      "doc-1",
		Changes: []ChangeRev{{"rev": "1-a"}}})

	WriteDirect(db, []string{"CBS"}, 3)
	WriteDirect(db, []string{"PBS"}, 12)

	db.changeCache.waitForSequence(12)
	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 2)

	assert.Equals(t, len(changes), 6)
	assert.True(t, verifyChangesFullSequences(changes, []string{
		"1", "2", "2::5", "2::6", "3", "3::12"}))
	// Test multiple backfill in single changes loop iteration
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "CBS"}, 4)
	WriteDirect(db, []string{"ABC", "NBC", "PBS", "CBS"}, 7)
	WriteDirect(db, []string{"ABC", "PBS"}, 8)
	WriteDirect(db, []string{"ABC", "PBS"}, 13)
	db.changeCache.waitForSequence(13)
	time.Sleep(50 * time.Millisecond)

	err = appendFromFeed(&changes, feed, 4)
	assert.Equals(t, len(changes), 10)
	// We can't guarantee how compound sequences will be generated in a multi-core test - will
	// depend on timing of arrival in late sequence logs.  e.g. could come through as any one of
	// the following (where all are valid), depending on timing:
	//  ..."4","7","8","8::13"
	//  ..."4", "6::7", "6::8", "6::13"
	//  ..."3::4", "3::7", "3::8", "3::13"
	// For this reason, we're just verifying that the expected sequences come through (ignoring
	// prefix)
	assert.True(t, verifyChangesSequences(changes, []uint64{
		1, 2, 5, 6, 3, 12, 4, 7, 8, 13}))

	close(options.Terminator)
}
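The comment above lists compound sequence strings such as "4", "6::7", and "3::13", and earlier assertions also use the three-part form "2:8:5" (low:triggeredBy:seq). The following is an illustrative parser for those three observed shapes only; it is not the real db.ParseSequenceID.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCompoundSeq handles the sequence string shapes seen in these tests:
// "seq", "low::seq", and "low:triggeredBy:seq".  An empty component is
// treated as zero, matching forms like "2::6".
func parseCompoundSeq(s string) (low, triggeredBy, seq uint64, err error) {
	parse := func(part string) (uint64, error) {
		if part == "" {
			return 0, nil
		}
		return strconv.ParseUint(part, 10, 64)
	}
	parts := strings.Split(s, ":")
	switch len(parts) {
	case 1:
		seq, err = parse(parts[0])
	case 3:
		if low, err = parse(parts[0]); err != nil {
			return
		}
		if triggeredBy, err = parse(parts[1]); err != nil {
			return
		}
		seq, err = parse(parts[2])
	default:
		err = fmt.Errorf("unexpected sequence format: %q", s)
	}
	return
}

func main() {
	for _, s := range []string{"6", "2::6", "2:8:5"} {
		low, trig, seq, _ := parseCompoundSeq(s)
		fmt.Printf("%-6s -> low=%d triggeredBy=%d seq=%d\n", s, low, trig, seq)
	}
}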
Example No. 26
// Test low sequence handling of late arriving sequences to a continuous changes feed, when the
// user gets added to a new channel with existing entries (and existing backfill)
func TestLowSequenceHandlingWithAccessGrant(t *testing.T) {

	base.LogKeys["Sequence"] = true
	//base.LogKeys["Changes"] = true
	//base.LogKeys["Changes+"] = true
	db := setupTestDBWithCacheOptions(t, shortWaitCache())
	defer tearDownTestDB(t, db)
	db.ChannelMapper = channels.NewDefaultChannelMapper()

	// Create a user with access to channel ABC
	authenticator := db.Authenticator()
	user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
	authenticator.Save(user)

	// Simulate seq 3 and 4 being delayed - write 1,2,5,6
	WriteDirect(db, []string{"ABC"}, 1)
	WriteDirect(db, []string{"ABC"}, 2)
	WriteDirect(db, []string{"PBS"}, 5)
	WriteDirect(db, []string{"ABC", "PBS"}, 6)

	db.changeCache.waitForSequence(6)
	db.user, _ = authenticator.GetUser("naomi")

	// Start changes feed

	var options ChangesOptions
	options.Since = SequenceID{Seq: 0}
	options.Terminator = make(chan bool)
	options.Continuous = true
	options.Wait = true
	feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
	assert.True(t, err == nil)

	// Read changes from the feed into an array to support assertions
	var changes = make([]*ChangeEntry, 0, 50)

	time.Sleep(50 * time.Millisecond)

	// Validate the initial sequences arrive as expected
	err = appendFromFeed(&changes, feed, 3)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 3)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6"}))

	db.Bucket.Incr("_sync:seq", 7, 0, 0)
	// Modify user to have access to both channels (sequence 8):
	userInfo, err := db.GetPrincipal("naomi", true)
	assert.True(t, userInfo != nil)
	userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
	_, err = db.UpdatePrincipal(*userInfo, true, true)
	assertNoError(t, err, "UpdatePrincipal failed")

	WriteDirect(db, []string{"PBS"}, 9)

	db.changeCache.waitForSequence(9)

	time.Sleep(50 * time.Millisecond)
	err = appendFromFeed(&changes, feed, 4)
	assert.True(t, err == nil)
	assert.Equals(t, len(changes), 7)
	assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6", "2:8:5", "2:8:6", "2::8", "2::9"}))
	// Notes:
	// 1. 2::8 is the user sequence
	// 2. The duplicate send of sequence '6' is the standard behaviour when a channel is added - we don't know
	// whether the user has already seen the documents on the channel previously, so it gets resent

	close(options.Terminator)
}
Example No. 27
package db

import (
	"encoding/json"
	"fmt"
	"runtime"
	"sort"
	"strings"
	"testing"

	"github.com/couchbase/sync_gateway/base"
	"github.com/couchbaselabs/go.assert"
)

var testmap = RevTree{"3-three": {ID: "3-three", Parent: "2-two", Body: []byte("{}")},
	"2-two": {ID: "2-two", Parent: "1-one", Channels: base.SetOf("ABC", "CBS")},
	"1-one": {ID: "1-one", Channels: base.SetOf("ABC")}}
var branchymap = RevTree{"3-three": {ID: "3-three", Parent: "2-two"},
	"2-two":  {ID: "2-two", Parent: "1-one"},
	"1-one":  {ID: "1-one"},
	"3-drei": {ID: "3-drei", Parent: "2-two"}}

const testJSON = `{"revs": ["3-three", "2-two", "1-one"], "parents": [1, 2, -1], "bodies": ["{}", "", ""], "channels": [null, ["ABC", "CBS"], ["ABC"]]}`

func testUnmarshal(t *testing.T, jsonString string) RevTree {
	gotmap := RevTree{}
	assertNoError(t, json.Unmarshal([]byte(jsonString), &gotmap), "Couldn't parse RevTree from JSON")
	assert.DeepEquals(t, gotmap, testmap)
	return gotmap
}
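In testJSON above, the parents array holds the index into revs of each revision's parent, with -1 marking a root (so "3-three" points at "2-two", which points at "1-one"). Below is a small, hypothetical decoder that reconstructs those parent links from the flattened form; the real RevTree unmarshaller in the db package does more (bodies, channels), this only illustrates the index encoding.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// revTreeJSON mirrors the flattened shape used by testJSON: parents[i] is the
// index into revs of revs[i]'s parent, or -1 when the revision has no parent.
type revTreeJSON struct {
	Revs    []string `json:"revs"`
	Parents []int    `json:"parents"`
}

func main() {
	const flattened = `{"revs": ["3-three", "2-two", "1-one"], "parents": [1, 2, -1]}`
	var tree revTreeJSON
	if err := json.Unmarshal([]byte(flattened), &tree); err != nil {
		log.Fatal(err)
	}
	for i, rev := range tree.Revs {
		parent := "(none)"
		if p := tree.Parents[i]; p >= 0 {
			parent = tree.Revs[p]
		}
		fmt.Printf("%s -> parent %s\n", rev, parent)
	}
}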