func TestUnavailableWebhook(t *testing.T) {

	// Build a pool of document IDs for the event generator below.
	docIDs := make([]string, 20)
	for n := range docIDs {
		docIDs[n] = fmt.Sprintf("%d", n)
	}

	// makeEvent builds a document body for index n, routed to the
	// "Even" or "Odd" channel based on parity.
	makeEvent := func(n int) (Body, base.Set) {
		doc := Body{
			"_id":   docIDs[n],
			"value": n,
		}
		channelName := "Odd"
		if n%2 == 0 {
			channelName = "Even"
		}
		return doc, base.SetFromArray([]string{channelName})
	}

	// Raise events against a webhook pointing at an unreachable host;
	// the test passes as long as event processing doesn't block or panic.
	em := NewEventManager()
	em.Start(0, -1)
	webhookHandler, _ := NewWebhook("http://badhost:1000/echo", "", nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for n := 0; n < 10; n++ {
		body, channels := makeEvent(n)
		em.RaiseDocumentChangeEvent(body, channels)
	}

	// Give the event manager's worker a moment to attempt delivery.
	time.Sleep(50 * time.Millisecond)
}
// Example #2 (0)
func TestEqualsWithUnequalSet(t *testing.T) {
	// A TimedSet is not Equal to a plain Set that is missing a member...
	timedSet := TimedSet{"ABC": 17, "CBS": 23, "BBC": 1}
	subset := base.SetFromArray([]string{"ABC", "BBC"})
	assert.True(t, !timedSet.Equals(subset))
	// ...nor to one that has an extra member.
	superset := base.SetFromArray([]string{"ABC", "BBC", "CBS", "FOO"})
	assert.True(t, !timedSet.Equals(superset))

}
// Example #3 (0)
func TestEqualsWithUnequalSet(t *testing.T) {
	// A TimedSet is not Equal to a plain Set that is missing a member...
	timedSet := TimedSet{"ABC": NewVbSimpleSequence(17), "CBS": NewVbSimpleSequence(23), "BBC": NewVbSimpleSequence(1)}
	subset := base.SetFromArray([]string{"ABC", "BBC"})
	assert.True(t, !timedSet.Equals(subset))
	// ...nor to one that has an extra member.
	superset := base.SetFromArray([]string{"ABC", "BBC", "CBS", "FOO"})
	assert.True(t, !timedSet.Equals(superset))

}
// TestDocumentChangeEvent raises 10 DocumentChange events and verifies that a
// registered TestingHandler receives all of them.
func TestDocumentChangeEvent(t *testing.T) {

	em := NewEventManager()
	em.Start(0, -1)

	// Setup test data
	ids := make([]string, 20)
	for i := 0; i < 20; i++ {
		ids[i] = fmt.Sprintf("%d", i)
	}
	// eventForTest builds a doc body for index i, routed to the "Even" or
	// "Odd" channel based on parity.
	eventForTest := func(i int) (Body, base.Set) {
		testBody := Body{
			"_id":   ids[i],
			"value": i,
		}
		var channelSet base.Set
		if i%2 == 0 {
			channelSet = base.SetFromArray([]string{"Even"})
		} else {
			channelSet = base.SetFromArray([]string{"Odd"})
		}
		return testBody, channelSet
	}
	resultChannel := make(chan Body, 10)
	//Setup test handler
	testHandler := &TestingHandler{HandledEvent: DocumentChange}
	testHandler.SetChannel(resultChannel)
	em.RegisterEventHandler(testHandler, DocumentChange)
	//Raise events
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, "", channels)
	}
	// wait for Event Manager queue worker to process
	time.Sleep(100 * time.Millisecond)

	// Diagnostics for failures
	channelSize := len(resultChannel)
	if channelSize != 10 {
		log.Printf("Expected 10 change events, got %v", channelSize)
		// Drain and log whatever events did arrive.  Note that a bare
		// "break" in the select's default case would only exit the select
		// (not the enclosing for), spinning forever; the labeled break
		// exits the drain loop properly.
	drain:
		for {
			select {
			case result := <-resultChannel:
				log.Printf("Change event: %v", result)
			default:
				break drain
			}
		}
	}

	assert.True(t, channelSize == 10)

}
// As long as there is one user in the db, there should be no warnings
func TestCollectAccessWarningsUsersInDb(t *testing.T) {

	sc := NewServerContext(&ServerConfig{})

	server := "walrus:"
	dbConfig := &DbConfig{
		BucketConfig: BucketConfig{Server: &server},
		Name:         "db",
	}
	if _, err := sc.AddDatabaseFromConfig(dbConfig); err != nil {
		panic(fmt.Sprintf("Error from AddDatabaseFromConfig: %v", err))
	}
	dbContext := sc.Database("db")

	// Define a single user with access to the "*" channel.
	principals := map[string]*db.PrincipalConfig{
		"foo": {
			Disabled:         false,
			ExplicitChannels: base.SetFromArray([]string{"*"}),
		},
	}

	// Install that user into the db.
	sc.installPrincipals(dbContext, principals, "user")

	// With a user present, no access-related warnings are expected.
	warnings := collectAccessRelatedWarnings(dbConfig, dbContext)
	assert.Equals(t, len(warnings), 0)

}
// Test sending many events with slow-running execution to validate they get dropped after hitting
// the max concurrent goroutines
func TestSlowExecutionProcessing(t *testing.T) {

	em := NewEventManager()
	em.Start(0, -1)

	// Enable event logging for diagnostics.
	base.UpdateLogKeys(map[string]bool{"Events": true}, true)

	docIDs := make([]string, 20)
	for n := range docIDs {
		docIDs[n] = fmt.Sprintf("%d", n)
	}

	// makeEvent builds a document body for index n, routed to the
	// "Even" or "Odd" channel based on parity.
	makeEvent := func(n int) (Body, base.Set) {
		doc := Body{
			"_id":   docIDs[n],
			"value": n,
		}
		channelName := "Odd"
		if n%2 == 0 {
			channelName = "Even"
		}
		return doc, base.SetFromArray([]string{channelName})
	}

	resultChannel := make(chan Body, 100)
	// Handler delays on each event to simulate slow execution.
	testHandler := &TestingHandler{HandledEvent: DocumentChange, handleDelay: 500}
	testHandler.SetChannel(resultChannel)
	em.RegisterEventHandler(testHandler, DocumentChange)

	for n := 0; n < 20; n++ {
		body, channels := makeEvent(n % 10)
		em.RaiseDocumentChangeEvent(body, "", channels)
	}
	// wait for Event Manager queue worker to process
	time.Sleep(2 * time.Second)
	fmt.Println("resultChannel:", len(resultChannel))

	assert.True(t, len(resultChannel) == 20)

}
// Example #7 (0)
// _allChannels returns the names of all currently cached channels as a Set.
func (c *changeCache) _allChannels() base.Set {
	names := make([]string, 0, len(c.channelCaches))
	for name := range c.channelCaches {
		names = append(names, name)
	}
	return base.SetFromArray(names)
}
func TestUnhandledEvent(t *testing.T) {

	em := NewEventManager()
	em.Start(0, -1)

	docIDs := make([]string, 20)
	for n := range docIDs {
		docIDs[n] = fmt.Sprintf("%d", n)
	}

	// makeEvent builds a document body for index n, routed to the
	// "Even" or "Odd" channel based on parity.
	makeEvent := func(n int) (Body, base.Set) {
		doc := Body{
			"_id":   docIDs[n],
			"value": n,
		}
		channelName := "Odd"
		if n%2 == 0 {
			channelName = "Even"
		}
		return doc, base.SetFromArray([]string{channelName})
	}

	resultChannel := make(chan Body, 10)

	// Register a handler that only listens for UserAdd events.
	testHandler := &TestingHandler{HandledEvent: UserAdd}
	testHandler.SetChannel(resultChannel)
	em.RegisterEventHandler(testHandler, UserAdd)

	// Raise DocumentChange events, which no registered handler accepts.
	for n := 0; n < 10; n++ {
		body, channels := makeEvent(n)
		em.RaiseDocumentChangeEvent(body, channels)
	}
	// Wait for Event Manager queue worker to process
	time.Sleep(50 * time.Millisecond)

	// Validate that no events were delivered to the handler.
	assert.True(t, len(resultChannel) == 0)

}
// Example #9 (0)
// AsSet converts a TimedSet to a base.Set containing the same channel names;
// the per-channel sequence values are discarded.  A nil TimedSet converts to
// a nil Set.
func (set TimedSet) AsSet() base.Set {
	if set == nil {
		return nil
	}
	result := make([]string, 0, len(set))
	// Idiomatic keys-only range (no blank value identifier).
	for ch := range set {
		result = append(result, ch)
	}
	return base.SetFromArray(result)
}
// Example #10 (0)
// Adds an entry to the appropriate channels' caches, returning the affected channels.  lateSequence
// flag indicates whether it was a change arriving out of sequence
// NOTE(review): underscore prefix suggests the caller is expected to hold the
// cache's main lock — confirm against the callers before relying on this.
func (c *changeCache) _addToCache(change *LogEntry) base.Set {
	// Advance the next-expected sequence past this entry.
	if change.Sequence >= c.nextSequence {
		c.nextSequence = change.Sequence + 1
	}
	if change.DocID == "" {
		return nil // this was a placeholder for an unused sequence
	}
	addedTo := make([]string, 0, 4)
	ch := change.Channels
	change.Channels = nil // not needed anymore, so free some memory

	// If it's a late sequence, we want to add to all channel late queues within a single write lock,
	// to avoid a changes feed seeing the same late sequence in different iteration loops (and sending
	// twice)
	func() {
		if change.Skipped {
			c.lateSeqLock.Lock()
			base.LogTo("Sequences", "Acquired late sequence lock for %d", change.Sequence)
			defer c.lateSeqLock.Unlock()
		}

		// Add the entry to each channel it belongs to: removal == nil means the doc
		// is currently in the channel; a removal at this exact sequence is cached too
		// (flagged via the second argument to addToCache).
		for channelName, removal := range ch {
			if removal == nil || removal.Seq == change.Sequence {
				channelCache := c._getChannelCache(channelName)
				channelCache.addToCache(change, removal != nil)
				addedTo = append(addedTo, channelName)
				if change.Skipped {
					channelCache.AddLateSequence(change)
				}
			}
		}

		// Optionally mirror every entry into the star ("*") channel cache.
		if EnableStarChannelLog {
			channelCache := c._getChannelCache(channels.UserStarChannel)
			channelCache.addToCache(change, false)
			addedTo = append(addedTo, channels.UserStarChannel)
			if change.Skipped {
				channelCache.AddLateSequence(change)
			}
		}
	}()

	// Record a histogram of the overall lag from the time the doc was saved:
	lag := time.Since(change.TimeSaved)
	lagMs := int(lag/(100*time.Millisecond)) * 100 // bucketed to 100ms granularity
	changeCacheExpvars.Add(fmt.Sprintf("lag-total-%04dms", lagMs), 1)
	// ...and from the time the doc was received from Tap:
	lag = time.Since(change.TimeReceived)
	lagMs = int(lag/(100*time.Millisecond)) * 100
	changeCacheExpvars.Add(fmt.Sprintf("lag-queue-%04dms", lagMs), 1)

	return base.SetFromArray(addedTo)
}
func TestCustomHandler(t *testing.T) {

	em := NewEventManager()
	em.Start(0, -1)

	docIDs := make([]string, 20)
	for n := range docIDs {
		docIDs[n] = fmt.Sprintf("%d", n)
	}

	// makeEvent builds a document body for index n, routed to the
	// "Even" or "Odd" channel based on parity.
	makeEvent := func(n int) (Body, base.Set) {
		doc := Body{
			"_id":   docIDs[n],
			"value": n,
		}
		channelName := "Odd"
		if n%2 == 0 {
			channelName = "Even"
		}
		return doc, base.SetFromArray([]string{channelName})
	}

	resultChannel := make(chan Body, 20)

	// Register a custom handler for DocumentChange events.
	testHandler := &TestingHandler{HandledEvent: DocumentChange}
	testHandler.SetChannel(resultChannel)
	em.RegisterEventHandler(testHandler, DocumentChange)

	for n := 0; n < 10; n++ {
		body, channels := makeEvent(n)
		em.RaiseDocumentChangeEvent(body, "", channels)
	}
	// wait for Event Manager queue worker to process
	time.Sleep(50 * time.Millisecond)

	// All ten events should have been delivered to the handler.
	assert.True(t, len(resultChannel) == 10)

}
// Example #12 (0)
// UnmarshalJSON populates both embedded structs of a userImpl from the same
// JSON document, then migrates the legacy "admin_roles" field if present.
func (user *userImpl) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &user.userImplBody); err != nil {
		return err
	}
	if err := json.Unmarshal(data, &user.roleImpl); err != nil {
		return err
	}

	// Migrate "admin_roles" field:
	if user.OldExplicitRoles_ != nil {
		user.ExplicitRoles_ = ch.AtSequence(base.SetFromArray(user.OldExplicitRoles_), 1)
		user.OldExplicitRoles_ = nil
	}

	return nil
}
// Example #13 (0)
// Creates a new Set from an array of strings. Returns an error if any names are invalid.
func SetFromArray(names []string, mode StarMode) (base.Set, error) {
	// Validate every channel name up front.
	for _, name := range names {
		if !IsValidChannel(name) {
			return nil, illegalChannelError(name)
		}
	}

	set := base.SetFromArray(names)

	// Apply the star-channel policy.
	if mode == RemoveStar {
		set = set.Removing(UserStarChannel)
	} else if mode == ExpandStar && set.Contains(UserStarChannel) {
		// "*" subsumes everything: collapse the set to just the star channel.
		set = base.SetOf(UserStarChannel)
	}
	return set, nil
}
// Example #14 (0)
// Given a document ID and a set of revision IDs, looks up which ones are not known. Returns an
// array of the unknown revisions, and an array of known revisions that might be recent ancestors.
func (db *Database) RevDiff(docid string, revids []string) (missing, possible []string) {
	if strings.HasPrefix(docid, "_design/") && db.user != nil {
		return // Users can't upload design docs, so ignore them
	}
	doc, err := db.GetDoc(docid)
	if err != nil {
		if !base.IsDocNotFoundError(err) {
			base.Warn("RevDiff(%q) --> %T %v", docid, err, err)
			// If something goes wrong getting the doc, treat it as though it's nonexistent.
		}
		// Unknown doc: every requested revision is missing.
		missing = revids
		return
	}
	// Check each revid to see if it's in the doc's rev tree:
	revtree := doc.History
	revidsSet := base.SetFromArray(revids)
	possibleSet := make(map[string]bool)
	for _, revid := range revids {
		if !revtree.contains(revid) {
			missing = append(missing, revid)
			// Look at the doc's leaves for a known possible ancestor:
			if gen, _ := ParseRevID(revid); gen > 1 {
				revtree.forEachLeaf(func(possible *RevInfo) {
					if !revidsSet.Contains(possible.ID) {
						possibleGen, _ := ParseRevID(possible.ID)
						// Only leaves within 100 generations are considered recent enough:
						if possibleGen < gen && possibleGen >= gen-100 {
							possibleSet[possible.ID] = true
						} else if possibleGen == gen && possible.Parent != "" {
							possibleSet[possible.Parent] = true // since parent is < gen
						}
					}
				})
			}
		}
	}

	// Convert possibleSet to an array (possible)
	if len(possibleSet) > 0 {
		possible = make([]string, 0, len(possibleSet))
		// Idiomatic keys-only range (no blank value identifier).
		for revid := range possibleSet {
			possible = append(possible, revid)
		}
	}
	return
}
// PollPrincipals checks the principal counts, stored in the index, to determine whether there's been
// a change to a user or role that should trigger a notification for that principal.
func (k *kvChangeIndexReader) pollPrincipals() {

	// Principal polling is strictly for notification handling, so skip if no notify function is defined
	if k.onChange == nil {
		return
	}

	// Serialize access to the principal count bookkeeping below.
	k.activePrincipalCountsLock.Lock()
	defer k.activePrincipalCountsLock.Unlock()

	// Check whether ANY principals have been updated since last poll, before doing the work of retrieving individual keys
	// (Incr with a zero delta is presumably an atomic read of the counter — confirm against the bucket API.)
	overallCount, err := k.indexReadBucket.Incr(base.KTotalPrincipalCountKey, 0, 0, 0)
	if err != nil {
		base.Warn("Principal polling encountered error getting overall count:%v", err)
		return
	}
	if overallCount == k.overallPrincipalCount {
		// No principal has changed since the previous poll; nothing to do.
		return
	}
	k.overallPrincipalCount = overallCount

	// There's been a change - check whether any of our active principals have changed
	var changedWaitKeys []string
	for principalID, currentCount := range k.activePrincipalCounts {
		key := fmt.Sprintf(base.KPrincipalCountKeyFormat, principalID)
		newCount, err := k.indexReadBucket.Incr(key, 0, 0, 0)
		if err != nil {
			// Best-effort: skip this principal and keep polling the rest.
			base.Warn("Principal polling encountered error getting overall count for key %s:%v", key, err)
			continue
		}
		if newCount != currentCount {
			// Record the new count and queue a notification wait key for this principal.
			k.activePrincipalCounts[principalID] = newCount
			waitKey := strings.TrimPrefix(key, base.KPrincipalCountKeyPrefix)
			changedWaitKeys = append(changedWaitKeys, waitKey)
		}
	}
	if len(changedWaitKeys) > 0 {
		// Fire a single notification covering all changed principals.
		k.onChange(base.SetFromArray(changedWaitKeys))
	}
}
// WriteDirectWithChannelGrant writes a document directly to the bucket (bypassing
// the sync function) with the given channels, and grants the named user access to
// channelGrantArray as of the given sequence.
func WriteDirectWithChannelGrant(db *Database, channelArray []string, sequence uint64, username string, channelGrantArray []string) {

	docId := fmt.Sprintf("doc-%v", sequence)
	rev := "1-a"

	// Mark the doc as currently present in each channel (nil = no removal).
	chanMap := make(map[string]*channels.ChannelRemoval, 10)
	for _, name := range channelArray {
		chanMap[name] = nil
	}

	// Grant the user the requested channels as of this sequence.
	accessMap := map[string]channels.TimedSet{
		username: channels.AtSequence(base.SetFromArray(channelGrantArray), sequence),
	}

	syncData := &syncData{
		CurrentRev: rev,
		Sequence:   sequence,
		Channels:   chanMap,
		Access:     accessMap,
	}
	db.Bucket.Add(docId, 0, Body{"_sync": syncData, "key": docId})
}
// If a guest user and has channels, expect no warnings
func TestCollectAccessWarningsGuestWithChans(t *testing.T) {

	sc := NewServerContext(&ServerConfig{})

	// Configure a walrus-backed db whose guest user can see all channels.
	server := "walrus:"
	dbConfig := &DbConfig{
		BucketConfig: BucketConfig{Server: &server},
		Name:         "db",
		Users: map[string]*db.PrincipalConfig{
			base.GuestUsername: {
				Disabled:         false,
				ExplicitChannels: base.SetFromArray([]string{"*"}),
			},
		},
	}
	if _, err := sc.AddDatabaseFromConfig(dbConfig); err != nil {
		panic(fmt.Sprintf("Error from AddDatabaseFromConfig: %v", err))
	}
	dbContext := sc.Database("db")

	// A guest user with channel access should produce no warnings.
	warnings := collectAccessRelatedWarnings(dbConfig, dbContext)
	assert.Equals(t, len(warnings), 0)

}
/*
 * Test Webhook where there is an old doc revision and where the filter function
 * is expecting an old doc revision
 */
func TestWebhookOldDoc(t *testing.T) {

	// Requires the live HTTP echo/slow server started by InitWebhookTest.
	if !testLiveHTTP {
		return
	}
	// count/sum are pointers to counters updated by the webhook target server.
	count, sum, _ := InitWebhookTest()
	ids := make([]string, 200)
	for i := 0; i < 200; i++ {
		ids[i] = fmt.Sprintf("%d", i)
	}

	time.Sleep(1 * time.Second)
	// eventForTest builds a doc body for index i, routed to the "Even" or
	// "Odd" channel based on parity.
	eventForTest := func(i int) (Body, base.Set) {
		testBody := Body{
			"_id":   ids[i],
			"value": i,
		}
		var channelSet base.Set
		if i%2 == 0 {
			channelSet = base.SetFromArray([]string{"Even"})
		} else {
			channelSet = base.SetFromArray([]string{"Odd"})
		}
		return testBody, channelSet
	}

	// Test basic webhook where an old doc is passed but not filtered
	em := NewEventManager()
	em.Start(0, -1)
	webhookHandler, _ := NewWebhook("http://localhost:8081/echo", "", nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		// Old revision uses the negated index so its value is -i.
		oldBody, _ := eventForTest(-i)
		oldBodyBytes, _ := json.Marshal(oldBody)
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, string(oldBodyBytes), channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 10)

	// Test webhook where an old doc is passed and is not used by the filter
	log.Println("Test filter function with old doc which is not referenced")
	*count, *sum = 0, 0.0
	em = NewEventManager()
	em.Start(0, -1)
	filterFunction := `function(doc) {
							if (doc.value < 6) {
								return false;
							} else {
								return true;
							}
							}`
	webhookHandler, _ = NewWebhook("http://localhost:8081/echo", filterFunction, nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		oldBody, _ := eventForTest(-i)
		oldBodyBytes, _ := json.Marshal(oldBody)
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, string(oldBodyBytes), channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 4)

	// Test webhook where an old doc is passed and is validated by the filter
	log.Println("Test filter function with old doc")
	*count, *sum = 0, 0.0
	em = NewEventManager()
	em.Start(0, -1)
	// NOTE(review): the JS condition below uses "=" (assignment) rather than
	// "==" — presumably an equality comparison was intended.  Confirm against
	// the expected count asserted below before changing the string.
	filterFunction = `function(doc, oldDoc) {
							if (doc.value > 6 && doc.value = -oldDoc.value) {
								return false;
							} else {
								return true;
							}
							}`
	webhookHandler, _ = NewWebhook("http://localhost:8081/echo", filterFunction, nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		oldBody, _ := eventForTest(-i)
		oldBodyBytes, _ := json.Marshal(oldBody)
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, string(oldBodyBytes), channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 4)

	// Test webhook where an old doc is not passed but is referenced in the filter function args
	log.Println("Test filter function with old doc")
	*count, *sum = 0, 0.0
	em = NewEventManager()
	em.Start(0, -1)
	filterFunction = `function(doc, oldDoc) {
							if (oldDoc) {
								return true;
							} else {
								return false;
							}
							}`
	webhookHandler, _ = NewWebhook("http://localhost:8081/echo", filterFunction, nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	// First 10 events have no old doc (filter should reject them)...
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, "", channels)
	}
	// ...the next 10 include an old doc (filter should accept them).
	for i := 10; i < 20; i++ {
		body, channels := eventForTest(i)
		oldBody, _ := eventForTest(-i)
		oldBodyBytes, _ := json.Marshal(oldBody)
		em.RaiseDocumentChangeEvent(body, string(oldBodyBytes), channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 10)

}
// Example #19 (0)
// Reads the command line flags and the optional config file.
// Populates the package-level `config` variable: when positional config-file
// arguments are given, they are read and merged, then overridden by flags;
// otherwise a default config is built entirely from the flags.
func ParseCommandLine() {

	siteURL := flag.String("personaOrigin", "", "Base URL that clients use to connect to the server")
	addr := flag.String("interface", DefaultInterface, "Address to bind to")
	authAddr := flag.String("adminInterface", DefaultAdminInterface, "Address to bind admin interface to")
	profAddr := flag.String("profileInterface", "", "Address to bind profile interface to")
	configServer := flag.String("configServer", "", "URL of server that can return database configs")
	deploymentID := flag.String("deploymentID", "", "Customer/project identifier for stats reporting")
	couchbaseURL := flag.String("url", DefaultServer, "Address of Couchbase server")
	poolName := flag.String("pool", DefaultPool, "Name of pool")
	bucketName := flag.String("bucket", "sync_gateway", "Name of bucket")
	dbName := flag.String("dbname", "", "Name of Couchbase Server database (defaults to name of bucket)")
	pretty := flag.Bool("pretty", false, "Pretty-print JSON responses")
	verbose := flag.Bool("verbose", false, "Log more info about requests")
	logKeys := flag.String("log", "", "Log keywords, comma separated")
	logFilePath := flag.String("logFilePath", "", "Path to log file")
	skipRunModeValidation := flag.Bool("skipRunModeValidation", false, "Skip config validation for runmode (accel vs normal sg)")

	flag.Parse()

	if flag.NArg() > 0 {
		// Read the configuration file(s), if any:
		for i := 0; i < flag.NArg(); i++ {
			filename := flag.Arg(i)
			c, err := ReadServerConfig(filename)
			if err != nil {
				base.LogFatal("Error reading config file %s: %v", filename, err)
			}
			if config == nil {
				config = c
			} else {
				if err := config.MergeWith(c); err != nil {
					base.LogFatal("Error reading config file %s: %v", filename, err)
				}
			}
		}

		// Override the config file with global settings from command line flags:
		if *addr != DefaultInterface {
			config.Interface = addr
		}
		if *authAddr != DefaultAdminInterface {
			config.AdminInterface = authAddr
		}
		if *profAddr != "" {
			config.ProfileInterface = profAddr
		}
		if *configServer != "" {
			config.ConfigServer = configServer
		}
		if *deploymentID != "" {
			config.DeploymentID = deploymentID
		}
		if *pretty {
			config.Pretty = *pretty
		}
		if config.Log != nil {
			base.ParseLogFlags(config.Log)
		}

		// If the interfaces were not specified in either the config file or
		// on the command line, set them to the default values
		if config.Interface == nil {
			config.Interface = &DefaultInterface
		}
		if config.AdminInterface == nil {
			config.AdminInterface = &DefaultAdminInterface
		}

		if *logFilePath != "" {
			config.LogFilePath = logFilePath
		}

		// Idiomatic boolean test (was `== true`, flagged by staticcheck S1002).
		if *skipRunModeValidation {
			config.SkipRunmodeValidation = *skipRunModeValidation
		}

	} else {
		// If no config file is given, create a default config, filled in from command line flags:
		if *dbName == "" {
			*dbName = *bucketName
		}

		// At this point the addr is either:
		//   - A value provided by the user, in which case we want to leave it as is
		//   - The default value (":4984"), which is actually _not_ the default value we
		//     want for this case, since we are enabling insecure mode.  We want "localhost:4984" instead.
		// See #708 for more details
		if *addr == DefaultInterface {
			*addr = "localhost:4984"
		}

		config = &ServerConfig{
			Interface:        addr,
			AdminInterface:   authAddr,
			ProfileInterface: profAddr,
			Pretty:           *pretty,
			Databases: map[string]*DbConfig{
				*dbName: {
					Name: *dbName,
					BucketConfig: BucketConfig{
						Server: couchbaseURL,
						Bucket: bucketName,
						Pool:   poolName,
					},
					Users: map[string]*db.PrincipalConfig{
						base.GuestUsername: &db.PrincipalConfig{
							Disabled:         false,
							ExplicitChannels: base.SetFromArray([]string{"*"}),
						},
					},
				},
			},
		}
	}

	// Persona origin applies in both config-file and flags-only modes.
	if *siteURL != "" {
		if config.Persona == nil {
			config.Persona = new(PersonaConfig)
		}
		config.Persona.Origin = *siteURL
	}

	base.EnableLogKey("HTTP")
	if *verbose {
		base.EnableLogKey("HTTP+")
	}
	base.ParseLogFlag(*logKeys)

	//return config
}
// Example #20 (0)
// Equal reports whether two AllDocsEntry values describe the same document ID,
// revision, sequence, and channel membership.
func (e AllDocsEntry) Equal(e2 AllDocsEntry) bool {
	if e.DocID != e2.DocID || e.RevID != e2.RevID || e.Sequence != e2.Sequence {
		return false
	}
	// Compare channels as sets, ignoring order and duplicates.
	return base.SetFromArray(e.Channels).Equals(base.SetFromArray(e2.Channels))
}
// Example #21 (0)
// Updates or creates a principal from a PrincipalConfig structure.
// isUser selects user vs. role handling; allowReplace controls whether an
// existing principal may be updated (false => conflict error if it exists).
// Returns replaced=true when an existing principal was found.
func (dbc *DatabaseContext) UpdatePrincipal(newInfo PrincipalConfig, isUser bool, allowReplace bool) (replaced bool, err error) {
	// Get the existing principal, or if this is a POST make sure there isn't one:
	var princ auth.Principal
	var user auth.User
	authenticator := dbc.Authenticator()
	if isUser {
		// Validate the password before touching anything else.
		isValid, reason := newInfo.IsPasswordValid(dbc.AllowEmptyPassword)
		if !isValid {
			err = base.HTTPErrorf(http.StatusBadRequest, reason)
			return
		}
		user, err = authenticator.GetUser(*newInfo.Name)
		princ = user
	} else {
		princ, err = authenticator.GetRole(*newInfo.Name)
	}
	if err != nil {
		return
	}

	// changed tracks whether anything differs from the stored principal, so a
	// no-op update avoids allocating a sequence and re-saving.
	changed := false
	replaced = (princ != nil)
	if !replaced {
		// If user/role didn't exist already, instantiate a new one:
		if isUser {
			user, err = authenticator.NewUser(*newInfo.Name, "", nil)
			princ = user
		} else {
			princ, err = authenticator.NewRole(*newInfo.Name, nil)
		}
		if err != nil {
			return
		}
		changed = true
	} else if !allowReplace {
		err = base.HTTPErrorf(http.StatusConflict, "Already exists")
		return
	}

	// Compare explicit channels; the actual update happens later, only if saving.
	updatedChannels := princ.ExplicitChannels()
	if updatedChannels == nil {
		updatedChannels = ch.TimedSet{}
	}
	if !updatedChannels.Equals(newInfo.ExplicitChannels) {
		changed = true
	}

	var updatedRoles ch.TimedSet

	// Then the user-specific fields like roles:
	if isUser {
		if newInfo.Email != user.Email() {
			user.SetEmail(newInfo.Email)
			changed = true
		}
		if newInfo.Password != nil {
			user.SetPassword(*newInfo.Password)
			changed = true
		}
		if newInfo.Disabled != user.Disabled() {
			user.SetDisabled(newInfo.Disabled)
			changed = true
		}

		updatedRoles = user.ExplicitRoles()
		if updatedRoles == nil {
			updatedRoles = ch.TimedSet{}
		}
		if !updatedRoles.Equals(base.SetFromArray(newInfo.ExplicitRoleNames)) {
			changed = true
		}

	}

	// And finally save the Principal:
	if changed {
		// Update the persistent sequence number of this principal (only allocate a sequence when needed - issue #673):
		nextSeq := uint64(0)
		if dbc.writeSequences() {
			var err error
			nextSeq, err = dbc.sequences.nextSequence()
			if err != nil {
				return replaced, err
			}
			princ.SetSequence(nextSeq)
		}

		// Now update the Principal object from the properties in the request, first the channels:
		if updatedChannels.UpdateAtSequence(newInfo.ExplicitChannels, nextSeq) {
			princ.SetExplicitChannels(updatedChannels)
		}

		if isUser {
			if updatedRoles.UpdateAtSequence(base.SetFromArray(newInfo.ExplicitRoleNames), nextSeq) {
				user.SetExplicitRoles(updatedRoles)
			}
		}
		err = authenticator.Save(princ)
	}
	return
}
// TestWebhookTimeout exercises webhook delivery under combinations of webhook
// response speed, handler timeout, and event-manager queue wait time.
// Requires the live HTTP test server started by InitWebhookTest.
func TestWebhookTimeout(t *testing.T) {

	if !testLiveHTTP {
		return
	}
	base.LogKeys["Events+"] = true
	// count/sum are pointers to counters updated by the webhook target server.
	count, sum, _ := InitWebhookTest()
	ids := make([]string, 200)
	for i := 0; i < 200; i++ {
		ids[i] = fmt.Sprintf("%d", i)
	}

	time.Sleep(1 * time.Second)
	// eventForTest builds a doc body for index i, routed to the "Even" or
	// "Odd" channel based on parity.
	eventForTest := func(i int) (Body, base.Set) {
		testBody := Body{
			"_id":   ids[i],
			"value": i,
		}
		var channelSet base.Set
		if i%2 == 0 {
			channelSet = base.SetFromArray([]string{"Even"})
		} else {
			channelSet = base.SetFromArray([]string{"Odd"})
		}
		return testBody, channelSet
	}

	// Test fast execution, short timeout.  All events processed
	log.Println("Test fast webhook, short timeout")
	em := NewEventManager()
	em.Start(0, -1)
	timeout := uint64(2)
	webhookHandler, _ := NewWebhook("http://localhost:8081/echo", "", &timeout)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 10)

	// Test slow webhook, short timeout, numProcess=1, waitForProcess > timeout.  All events should get processed.
	log.Println("Test slow webhook, short timeout")
	*count, *sum = 0, 0.0
	// NOTE(review): errCount is accumulated in the loops below but never
	// asserted against — confirm whether an assertion was intended.
	errCount := 0
	em = NewEventManager()
	em.Start(1, 1100)
	timeout = uint64(1)
	webhookHandler, _ = NewWebhook("http://localhost:8081/slow_2s", "", &timeout)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		err := em.RaiseDocumentChangeEvent(body, channels)
		time.Sleep(2 * time.Millisecond)
		if err != nil {
			errCount++
		}
	}
	time.Sleep(15 * time.Second)
	// Even though we timed out waiting for response on the SG side, POST still completed on target side.
	assert.Equals(t, *count, 10)

	// Test slow webhook, short timeout, numProcess=1, waitForProcess << timeout.  Events that don't fit in queues
	// should get dropped (1 immediately processed, 1 in normal queue, 3 in overflow queue, 5 dropped)
	log.Println("Test very slow webhook, short timeout")
	*count, *sum = 0, 0.0
	errCount = 0
	em = NewEventManager()
	em.Start(1, 100)
	timeout = uint64(9)
	webhookHandler, _ = NewWebhook("http://localhost:8081/slow_5s", "", &timeout)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		err := em.RaiseDocumentChangeEvent(body, channels)
		time.Sleep(2 * time.Millisecond)
		if err != nil {
			errCount++
		}
	}
	// wait for slow webhook to finish processing
	time.Sleep(25 * time.Second)
	assert.Equals(t, *count, 5)

	// Test slow webhook, no timeout, numProcess=1, waitForProcess=1s.  All events should complete.
	log.Println("Test slow webhook, no timeout, wait for process ")
	*count, *sum = 0, 0.0
	errCount = 0
	em = NewEventManager()
	em.Start(1, 1100)
	// timeout of 0 means no timeout is applied to the webhook request.
	timeout = uint64(0)
	webhookHandler, _ = NewWebhook("http://localhost:8081/slow", "", &timeout)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		err := em.RaiseDocumentChangeEvent(body, channels)
		time.Sleep(2 * time.Millisecond)
		if err != nil {
			errCount++
		}
	}
	// wait for slow webhook to finish processing
	time.Sleep(15 * time.Second)
	assert.Equals(t, *count, 10)

}
// TestWebhookBasic exercises basic webhook delivery: plain delivery, filter
// functions, payload contents, and queue-full behavior under fast and slow
// webhook targets.  Requires the live HTTP test server from InitWebhookTest.
func TestWebhookBasic(t *testing.T) {

	if !testLiveHTTP {
		return
	}
	// count/sum/payloads are pointers to state updated by the webhook target server.
	count, sum, payloads := InitWebhookTest()
	ids := make([]string, 200)
	for i := 0; i < 200; i++ {
		ids[i] = fmt.Sprintf("%d", i)
	}

	time.Sleep(1 * time.Second)
	// eventForTest builds a doc body for index i, routed to the "Even" or
	// "Odd" channel based on parity.
	eventForTest := func(i int) (Body, base.Set) {
		testBody := Body{
			"_id":   ids[i],
			"value": i,
		}
		var channelSet base.Set
		if i%2 == 0 {
			channelSet = base.SetFromArray([]string{"Even"})
		} else {
			channelSet = base.SetFromArray([]string{"Odd"})
		}
		return testBody, channelSet
	}

	// Test basic webhook
	em := NewEventManager()
	em.Start(0, -1)
	webhookHandler, _ := NewWebhook("http://localhost:8081/echo", "", nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 10)

	// Test webhook filter function
	log.Println("Test filter function")
	*count, *sum = 0, 0.0
	em = NewEventManager()
	em.Start(0, -1)
	// Filter rejects docs with value < 6, so only values 6..9 (4 docs) pass.
	filterFunction := `function(doc) {
							if (doc.value < 6) {
								return false;
							} else {
								return true;
							}
							}`
	webhookHandler, _ = NewWebhook("http://localhost:8081/echo", filterFunction, nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 10; i++ {
		body, channels := eventForTest(i)
		em.RaiseDocumentChangeEvent(body, channels)
	}
	time.Sleep(50 * time.Millisecond)
	assert.Equals(t, *count, 4)

	// Validate payload
	*count, *sum, *payloads = 0, 0.0, nil
	em = NewEventManager()
	em.Start(0, -1)
	webhookHandler, _ = NewWebhook("http://localhost:8081/echo", "", nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	body, channels := eventForTest(0)
	em.RaiseDocumentChangeEvent(body, channels)
	time.Sleep(50 * time.Millisecond)
	// The webhook body should be the JSON-marshalled document.
	receivedPayload := string((*payloads)[0])
	fmt.Println("payload:", receivedPayload)
	assert.Equals(t, string((*payloads)[0]), `{"_id":"0","value":0}`)
	assert.Equals(t, *count, 1)

	// Test fast fill, fast webhook
	*count, *sum = 0, 0.0
	em = NewEventManager()
	em.Start(5, -1)
	timeout := uint64(60)
	webhookHandler, _ = NewWebhook("http://localhost:8081/echo", "", &timeout)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 100; i++ {
		body, channels := eventForTest(i % 10)
		em.RaiseDocumentChangeEvent(body, channels)
	}
	time.Sleep(500 * time.Millisecond)
	assert.Equals(t, *count, 100)

	// Test queue full, slow webhook.  Drops events
	log.Println("Test queue full, slow webhook")
	*count, *sum = 0, 0.0
	errCount := 0
	em = NewEventManager()
	em.Start(5, -1)
	webhookHandler, _ = NewWebhook("http://localhost:8081/slow", "", nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 100; i++ {
		body, channels := eventForTest(i)
		err := em.RaiseDocumentChangeEvent(body, channels)
		time.Sleep(2 * time.Millisecond)
		if err != nil {
			// Raise returns an error when the event is dropped (queue full).
			errCount++
		}
	}
	time.Sleep(5 * time.Second)
	// Expect 21 to complete.  5 get goroutines immediately, 15 get queued, and one is blocked waiting
	// for a goroutine.  The rest get discarded because the queue is full.
	assert.Equals(t, *count, 21)
	assert.Equals(t, errCount, 79)

	// Test queue full, slow webhook, long wait time.  Throttles events
	log.Println("Test queue full, slow webhook, long wait")
	*count, *sum = 0, 0.0
	em = NewEventManager()
	em.Start(5, 1100)
	webhookHandler, _ = NewWebhook("http://localhost:8081/slow", "", nil)
	em.RegisterEventHandler(webhookHandler, DocumentChange)
	for i := 0; i < 100; i++ {
		body, channels := eventForTest(i % 10)
		em.RaiseDocumentChangeEvent(body, channels)
	}
	time.Sleep(5 * time.Second)
	assert.Equals(t, *count, 100)

}
// pollReaders fetches the current channel clock for every registered channel reader in a
// single bulk get, then asks each reader (in parallel) whether the new clock represents
// changes.  Changed channels are reported via k.onChange; readers that request cancellation
// are removed from the reader map.  Always returns true.
func (k *kvChangeIndexReader) pollReaders() bool {
	k.channelIndexReaderLock.Lock()
	defer k.channelIndexReaderLock.Unlock()

	// No registered readers - nothing to poll.
	if len(k.channelIndexReaders) == 0 {
		return true
	}

	// Build the set of channel clock keys to retrieve - one per channel reader
	keySet := make([]string, len(k.channelIndexReaders))
	index := 0
	for _, reader := range k.channelIndexReaders {
		keySet[index] = GetChannelClockKey(reader.channelName)
		index++
	}
	bulkGetResults, err := k.indexReadBucket.GetBulkRaw(keySet)

	// On bulk get error, continue with whatever results were returned - missing entries
	// are handled per-channel below via an empty clock.
	if err != nil {
		base.Warn("Error retrieving channel clocks: %v", err)
	}
	IndexExpvars.Add("bulkGet_channelClocks", 1)
	IndexExpvars.Add("bulkGet_channelClocks_keyCount", int64(len(keySet)))
	// Buffered to the reader count so worker goroutines never block on send
	// (each goroutine sends at most one value per channel).
	changedChannels := make(chan string, len(k.channelIndexReaders))
	cancelledChannels := make(chan string, len(k.channelIndexReaders))

	var wg sync.WaitGroup
	for _, reader := range k.channelIndexReaders {
		// For each channel, unmarshal new channel clock, then check with reader whether this represents changes
		wg.Add(1)
		go func(reader *KvChannelIndex, wg *sync.WaitGroup) {
			defer func() {
				wg.Done()
			}()
			// Unmarshal channel clock.  If not present in the bulk get results, use empty clock to support
			// channels that don't have any indexed data yet.  If clock was previously found successfully (i.e. empty clock is
			// due to temporary error from server), empty clock treated safely as a non-update by pollForChanges.
			clockKey := GetChannelClockKey(reader.channelName)
			var newChannelClock *base.SequenceClockImpl
			clockBytes, found := bulkGetResults[clockKey]
			if !found {
				newChannelClock = base.NewSequenceClockImpl()
			} else {
				var err error
				newChannelClock, err = base.NewSequenceClockForBytes(clockBytes)
				if err != nil {
					base.Warn("Error unmarshalling channel clock - skipping polling for channel %s: %v", reader.channelName, err)
					return
				}
			}

			// Poll for changes
			hasChanges, cancelPolling := reader.pollForChanges(k.readerStableSequence.AsClock(), newChannelClock)
			if hasChanges {
				changedChannels <- reader.channelName
			}
			if cancelPolling {
				cancelledChannels <- reader.channelName
			}

		}(reader, &wg)
	}

	// Wait for all workers, then close the channels so the range loops below terminate.
	wg.Wait()
	close(changedChannels)
	close(cancelledChannels)

	// Build channel set from the changed channels
	var channels []string
	for channelName := range changedChannels {
		channels = append(channels, channelName)
	}

	// Notify listener (if any) of the changed channel set.
	if len(channels) > 0 && k.onChange != nil {
		k.onChange(base.SetFromArray(channels))
	}

	// Remove cancelled channels from channel readers
	for channelName := range cancelledChannels {
		IndexExpvars.Add("pollingChannels_active", -1)
		delete(k.channelIndexReaders, channelName)
	}

	return true
}
// Example #25
/*
 * Generate the changes for a specific list of doc IDs; only documents accessible to the
 * user will generate results.  Rows are written to the response body sorted by sequence,
 * followed by a last_seq entry.
 */
func (h *handler) sendChangesForDocIds(userChannels base.Set, explicitDocIds []string, options db.ChangesOptions) (error, bool) {

	first := true
	var lastSeq uint64 = 0
	rowMap := make(map[uint64]*db.ChangeEntry)

	// createRow builds the change entry for a single document.  Returns nil when the doc
	// should be omitted: fetch error, sequence at/before the since value, nil body,
	// missing/invalid _rev, or no channel visible to the current user.
	createRow := func(doc db.IDAndRev) *db.ChangeEntry {
		row := &db.ChangeEntry{ID: doc.DocID}

		// Fetch the document body and other metadata that lives with it:
		populatedDoc, body, err := h.db.GetDocAndActiveRev(doc.DocID)
		if err != nil {
			base.LogTo("Changes", "Unable to get changes for docID %v, caused by %v", doc.DocID, err)
			return nil
		}

		// Skip docs at or before the requested since sequence.
		if populatedDoc.Sequence <= options.Since.Seq {
			return nil
		}

		if body == nil {
			return nil
		}

		// Use the comma-ok form so a missing or non-string _rev skips the doc instead of
		// panicking the handler on a malformed document.
		revID, ok := body["_rev"].(string)
		if !ok {
			base.LogTo("Changes", "Unable to get valid _rev for docID %v - skipping", doc.DocID)
			return nil
		}

		changes := make([]db.ChangeRev, 1)
		changes[0] = db.ChangeRev{"rev": revID}
		row.Changes = changes
		row.Seq = db.SequenceID{Seq: populatedDoc.Sequence}
		row.SetBranched((populatedDoc.Flags & ch.Branched) != 0)

		var removedChannels []string

		if deleted, _ := body["_deleted"].(bool); deleted {
			row.Deleted = true
		}

		userCanSeeDocChannel := false

		// Users with no auth context or with access to the star channel see every doc.
		if h.user == nil || h.user.Channels().Contains(ch.UserStarChannel) {
			userCanSeeDocChannel = true
		} else if len(populatedDoc.Channels) > 0 {
			// Do special _removed/_deleted processing
			for channel, removal := range populatedDoc.Channels {
				// Doc is tagged with channel or was removed at a sequence later than the since sequence
				if removal == nil || removal.Seq > options.Since.Seq {
					// ...and the current user has access to this channel
					if h.user.CanSeeChannel(channel) {
						userCanSeeDocChannel = true
						// If the doc has been removed from the channel, record it as a removal
						if removal != nil {
							removedChannels = append(removedChannels, channel)
							if removal.Deleted {
								row.Deleted = true
							}
						}
					}
				}
			}
		}

		if !userCanSeeDocChannel {
			return nil
		}

		row.Removed = base.SetFromArray(removedChannels)
		if options.IncludeDocs || options.Conflicts {
			h.db.AddDocInstanceToChangeEntry(row, populatedDoc, options)
		}

		return row
	}

	h.setHeader("Content-Type", "application/json")
	h.setHeader("Cache-Control", "private, max-age=0, no-cache, no-store")
	h.response.Write([]byte("{\"results\":[\r\n"))

	// Build a row per requested doc, keyed by sequence for sorted output.
	var keys base.Uint64Slice
	for _, docID := range explicitDocIds {
		row := createRow(db.IDAndRev{DocID: docID, RevID: "", Sequence: 0})
		if row != nil {
			rowMap[row.Seq.Seq] = row
			keys = append(keys, row.Seq.Seq)
		}
	}

	// Write out rows sorted by sequence, honoring options.Limit when set (> 0).
	keys.Sort()
	for _, k := range keys {
		if first {
			first = false
		} else {
			h.response.Write([]byte(","))
		}
		h.addJSON(rowMap[k])

		lastSeq = k

		if options.Limit > 0 {
			options.Limit--
			if options.Limit == 0 {
				break
			}
		}
	}

	s := fmt.Sprintf("],\n\"last_seq\":%d}\n", lastSeq)
	h.response.Write([]byte(s))
	h.logStatus(http.StatusOK, "OK")
	return nil, false
}