Example #1
func TestRevisionCache(t *testing.T) {
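	// Build 20 distinct document IDs to use as cache keys.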
	ids := make([]string, 20)
	for i := 0; i < 20; i++ {
		ids[i] = fmt.Sprintf("%d", i)
	}

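	// revForTest fabricates a minimal revision (body, history, channels) for index i.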
	revForTest := func(i int) (Body, Body, base.Set) {
		body := Body{
			"_id":  ids[i],
			"_rev": "x",
		}
		history := Body{"start": i}
		return body, history, nil
	}
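	// verify checks that a cached revision matches what revForTest produced for index i.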
	verify := func(body Body, history Body, channels base.Set, i int) {
		if body == nil {
			t.Fatalf("nil body at #%d", i)
		}
		assert.True(t, body != nil)
		assert.Equals(t, body["_id"], ids[i])
		assert.True(t, history != nil)
		assert.Equals(t, history["start"], i)
		assert.DeepEquals(t, channels, base.Set(nil))
	}

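	// Create a revision cache with a capacity of 10 entries and fill it with revs 0..9.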
	cache := NewRevisionCache(10, nil)
	for i := 0; i < 10; i++ {
		body, history, channels := revForTest(i)
		cache.Put(body, history, channels)
	}

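	// All 10 cached revisions should be retrievable.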
	for i := 0; i < 10; i++ {
		body, history, channels, _ := cache.Get(ids[i], "x")
		verify(body, history, channels, i)
	}

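	// Add three more revisions, overflowing the 10-entry cache.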
	for i := 10; i < 13; i++ {
		body, history, channels := revForTest(i)
		cache.Put(body, history, channels)
	}

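	// The oldest (least recently used) entries, 0..2, should have been evicted...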
	for i := 0; i < 3; i++ {
		body, _, _, _ := cache.Get(ids[i], "x")
		assert.True(t, body == nil)
	}
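	// ...while revisions 3..12 remain cached.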
	for i := 3; i < 13; i++ {
		body, history, channels, _ := cache.Get(ids[i], "x")
		verify(body, history, channels, i)
	}
}
Example #2
func TestAllDocs(t *testing.T) {
	AlwaysCompactChangeLog = true // Makes examining the change log deterministic
	defer func() { AlwaysCompactChangeLog = false }()

	db := setupTestDB(t)
	defer tearDownTestDB(t, db)

	// Lower the log capacity to 50 so this test overflows it; the truncated log
	// forces the changes feed to backfill from its view.
	oldMaxLogLength := MaxChangeLogLength
	MaxChangeLogLength = 50
	defer func() { MaxChangeLogLength = oldMaxLogLength }()

	base.LogKeys["Changes"] = true
	defer func() { base.LogKeys["Changes"] = false }()

	db.ChannelMapper = channels.NewDefaultChannelMapper()

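	// Create 100 documents in channel "all"; every 10th one is also in channel "KFJC".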
	ids := make([]IDAndRev, 100)
	for i := 0; i < 100; i++ {
		channels := []string{"all"}
		if i%10 == 0 {
			channels = append(channels, "KFJC")
		}
		body := Body{"serialnumber": i, "channels": channels}
		ids[i].DocID = fmt.Sprintf("alldoc-%02d", i)
		revid, err := db.Put(ids[i].DocID, body)
		ids[i].RevID = revid
		assertNoError(t, err, "Couldn't create document")
	}

	alldocs, err := db.AllDocIDs()
	assertNoError(t, err, "AllDocIDs failed")
	assert.Equals(t, len(alldocs), 100)
	for i, entry := range alldocs {
		assert.DeepEquals(t, entry, ids[i])
	}

	// Now delete one document and try again:
	_, err = db.DeleteDoc(ids[23].DocID, ids[23].RevID)
	assertNoError(t, err, "Couldn't delete doc 23")

	alldocs, err = db.AllDocIDs()
	assertNoError(t, err, "AllDocIDs failed")
	assert.Equals(t, len(alldocs), 99)
	for i, entry := range alldocs {
		j := i
		if i >= 23 {
			j++
		}
		assert.DeepEquals(t, entry, ids[j])
	}

	// Inspect the channel log to confirm that it's only got the last 50 sequences.
	// There are 101 sequences overall, so the 1st one it has should be #52.
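	// (100 document creates plus 1 delete produce sequences 1..101; keeping the last 50 leaves 52..101, so Since is 51.)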
	log, err := db.GetChangeLog("all", 0)
	assertNoError(t, err, "GetChangeLog")
	assert.Equals(t, log.Since, uint64(51))
	assert.Equals(t, len(log.Entries), 50)
	assert.Equals(t, int(log.Entries[0].Sequence), 52)

	// Now check the changes feed:
	var options ChangesOptions
	changes, err := db.GetChanges(channels.SetOf("all"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 100)
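	// Doc 23's original change (sequence 24) was superseded by its deletion (sequence 101),
	// so sequence 24 is missing and the final entry is the tombstone.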
	for i, change := range changes {
		seq := i + 1
		if i >= 23 {
			seq++
		}
		assert.Equals(t, change.Seq, fmt.Sprintf("all:%d", seq))
		assert.Equals(t, change.Deleted, i == 99)
		var removed base.Set
		if i == 99 {
			removed = channels.SetOf("all")
		}
		assert.DeepEquals(t, change.Removed, removed)
	}

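	// With IncludeDocs set, the KFJC feed should return only the 10 docs published to that channel.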
	options.IncludeDocs = true
	changes, err = db.GetChanges(channels.SetOf("KFJC"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 10)
	for i, change := range changes {
		assert.Equals(t, change.Seq, fmt.Sprintf("KFJC:%d", 10*i+1))
		assert.Equals(t, change.ID, ids[10*i].DocID)
		assert.Equals(t, change.Deleted, false)
		assert.DeepEquals(t, change.Removed, base.Set(nil))
		assert.Equals(t, change.Doc["serialnumber"], int64(10*i))
	}

	// Re-adding the existing log should be a no-op: it isn't added again, but no error is returned either.
	added, err := db.AddChangeLog("all", log)
	assertNoError(t, err, "add channel log")
	assert.False(t, added)

	// Delete the channel log to test if it can be rebuilt:
	assertNoError(t, db.Bucket.Delete(channelLogDocID("all")), "delete channel log")

	// Get the changes feed; result should still be correct:
	changes, err = db.GetChanges(channels.SetOf("all"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 100)

	// Verify it was rebuilt
	log, err = db.GetChangeLog("all", 0)
	assertNoError(t, err, "GetChangeLog")
	assert.Equals(t, len(log.Entries), 50)
	assert.Equals(t, int(log.Entries[0].Sequence), 52)
}
Example #3
func TestAllDocs(t *testing.T) {
	// base.LogKeys["Cache"] = true
	// base.LogKeys["Changes"] = true
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)

	// Lower the log expiration time to zero so no more than 50 items will be kept.
	oldChannelCacheAge := ChannelCacheAge
	ChannelCacheAge = 0
	defer func() { ChannelCacheAge = oldChannelCacheAge }()

	/*
		base.LogKeys["Changes"] = true
		base.LogKeys["Changes+"] = true
		defer func() {
			base.LogKeys["Changes"] = false
			base.LogKeys["Changes+"] = false
		}()
	*/

	db.ChannelMapper = channels.NewDefaultChannelMapper()

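	// Create 100 documents in channel "all"; every 10th one is also in channel "KFJC".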
	ids := make([]AllDocsEntry, 100)
	for i := 0; i < 100; i++ {
		channels := []string{"all"}
		if i%10 == 0 {
			channels = append(channels, "KFJC")
		}
		body := Body{"serialnumber": i, "channels": channels}
		ids[i].DocID = fmt.Sprintf("alldoc-%02d", i)
		revid, err := db.Put(ids[i].DocID, body)
		ids[i].RevID = revid
		ids[i].Sequence = uint64(i + 1)
		ids[i].Channels = channels
		assertNoError(t, err, "Couldn't create document")
	}

	alldocs, err := allDocIDs(db)
	assertNoError(t, err, "AllDocIDs failed")
	assert.Equals(t, len(alldocs), 100)
	for i, entry := range alldocs {
		assert.True(t, entry.Equal(ids[i]))
	}

	// Now delete one document and try again:
	_, err = db.DeleteDoc(ids[23].DocID, ids[23].RevID)
	assertNoError(t, err, "Couldn't delete doc 23")

	alldocs, err = allDocIDs(db)
	assertNoError(t, err, "AllDocIDs failed")
	assert.Equals(t, len(alldocs), 99)
	for i, entry := range alldocs {
		j := i
		if i >= 23 {
			j++
		}
		assert.True(t, entry.Equal(ids[j]))
	}

	// Inspect the channel log to confirm that it's only got the last 50 sequences.
	// There are 101 sequences overall, so the 1st one it has should be #52.
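	// (100 document creates plus 1 delete produce sequences 1..101; the cache keeps only the last 50, i.e. 52..101.)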
	db.changeCache.waitForSequence(101)
	log := db.GetChangeLog("all", 0)
	assert.Equals(t, len(log), 50)
	assert.Equals(t, int(log[0].Sequence), 52)

	// Now check the changes feed:
	var options ChangesOptions
	options.Terminator = make(chan bool)
	defer close(options.Terminator)
	changes, err := db.GetChanges(channels.SetOf("all"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 100)
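	// Doc 23's original change (sequence 24) was superseded by its deletion (sequence 101),
	// so sequence 24 is missing and the final entry is the tombstone.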
	for i, change := range changes {
		seq := i + 1
		if i >= 23 {
			seq++
		}
		assert.Equals(t, change.Seq, SequenceID{Seq: uint64(seq)})
		assert.Equals(t, change.Deleted, i == 99)
		var removed base.Set
		if i == 99 {
			removed = channels.SetOf("all")
		}
		assert.DeepEquals(t, change.Removed, removed)
	}

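	// With IncludeDocs set, the KFJC feed should return only the 10 docs published to that channel.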
	options.IncludeDocs = true
	changes, err = db.GetChanges(channels.SetOf("KFJC"), options)
	assertNoError(t, err, "Couldn't GetChanges")
	assert.Equals(t, len(changes), 10)
	for i, change := range changes {
		assert.Equals(t, change.Seq, SequenceID{Seq: uint64(10*i + 1)})
		assert.Equals(t, change.ID, ids[10*i].DocID)
		assert.Equals(t, change.Deleted, false)
		assert.DeepEquals(t, change.Removed, base.Set(nil))
		assert.Equals(t, change.Doc["serialnumber"], int64(10*i))
	}
}