Example #1
// POST /_persona creates a browserID-based login session and sets its cookie.
// It's API-compatible with the CouchDB plugin: <https://github.com/iriscouch/browserid_couchdb/>
func (h *handler) handlePersonaPOST() error {
	if len(h.rq.Header["Origin"]) > 0 {
		// CORS not allowed for login #115
		return base.HTTPErrorf(http.StatusBadRequest, "No CORS")
	}
	var params struct {
		Assertion string `json:"assertion"`
	}
	err := h.readJSONInto(&params)
	if err != nil {
		return err
	}

	origin := h.server.config.Persona.Origin
	if origin == "" {
		base.Warn("Can't accept Persona logins: Server URL not configured")
		return base.HTTPErrorf(http.StatusInternalServerError, "Server URL not configured")
	}

	// OK, now verify it:
	base.Logf("Persona: Verifying assertion %q for %q", params.Assertion, origin)
	verifiedInfo, err := VerifyPersona(params.Assertion, origin)
	if err != nil {
		base.Logf("Persona: Failed verify: %v", err)
		return err
	}
	base.Logf("Persona: Logged in %q!", verifiedInfo.Email)

	createUserIfNeeded := h.server.config.Persona.Register
	return h.makeSessionFromEmail(verifiedInfo.Email, createUserIfNeeded)

}
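A minimal client-side sketch of driving this endpoint. The host, port, and database name are placeholder assumptions, as is the assertion value:

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"net/http/cookiejar"
)

func main() {
	// The cookie jar retains the session cookie that handlePersonaPOST sets.
	jar, _ := cookiejar.New(nil)
	client := &http.Client{Jar: jar}

	body := bytes.NewBufferString(`{"assertion": "<browserid-assertion>"}`)
	resp, err := client.Post("http://localhost:4984/db/_persona", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("login status:", resp.Status)
	// Subsequent requests made through `client` carry the session cookie.
}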
Example #2
// Starts and runs the server given its configuration. (This function never returns.)
func RunServer(config *ServerConfig) {
	PrettyPrint = config.Pretty

	base.Logf("==== %s ====", LongVersionString)

	if os.Getenv("GOMAXPROCS") == "" && runtime.GOMAXPROCS(0) == 1 {
		cpus := runtime.NumCPU()
		if cpus > 1 {
			runtime.GOMAXPROCS(cpus)
			base.Logf("Configured Go to use all %d CPUs; setenv GOMAXPROCS to override this", cpus)
		}
	}

	SetMaxFileDescriptors(config.MaxFileDescriptors)

	sc := NewServerContext(config)
	for _, dbConfig := range config.Databases {
		if _, err := sc.AddDatabaseFromConfig(dbConfig); err != nil {
			base.LogFatal("Error opening database: %v", err)
		}
	}

	if config.ProfileInterface != nil {
		//runtime.MemProfileRate = 10 * 1024
		base.Logf("Starting profile server on %s", *config.ProfileInterface)
		go func() {
			if err := http.ListenAndServe(*config.ProfileInterface, nil); err != nil {
				base.Warn("Profile server stopped: %v", err)
			}
		}()
	}

	base.Logf("Starting admin server on %s", *config.AdminInterface)
	go config.Serve(*config.AdminInterface, CreateAdminHandler(sc))
	base.Logf("Starting server on %s ...", *config.Interface)
	config.Serve(*config.Interface, CreatePublicHandler(sc))
}
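A minimal launcher sketch for the function above. It assumes nothing beyond the ServerConfig fields RunServer itself touches (Interface and AdminInterface as *string, Pretty as bool); everything else is left at its zero value:

func exampleMain() {
	publicInterface := ":4984"
	adminInterface := "127.0.0.1:4985"
	config := &ServerConfig{
		Interface:      &publicInterface,
		AdminInterface: &adminInterface,
		Pretty:         true,
	}
	RunServer(config) // blocks for the life of the process
}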
Example #3
func (sc *ServerContext) InitCBGT() error {

	if !sc.HasIndexWriters() {
		base.Logf("Skipping CBGT initialization since no databases are configured as index writers")
		return nil
	}

	cbgtContext, err := sc.InitCBGTManager()
	if err != nil {
		return err
	}
	sc.CbgtContext = cbgtContext

	// loop through all databases and init CBGT index
	for _, dbContext := range sc.databases_ {
		dbContext.BucketSpec.CbgtContext = sc.CbgtContext
		if err := dbContext.CreateCBGTIndex(); err != nil {
			return err
		}
	}

	// Since we've created new indexes via the dbContext.CreateCBGTIndex() calls above,
	// we need to kick the manager to recognize them.
	cbgtContext.Manager.Kick("NewIndexesCreated")

	// Make sure that we don't have multiple partitions subscribed to the same DCP shard
	// for a given bucket.
	if err := sc.validateCBGTPartitionMap(); err != nil {
		return err
	}

	return nil

}
Example #4
func TestImport(t *testing.T) {
	db := setupTestDB(t)
	defer tearDownTestDB(t, db)

	// Add docs to the underlying bucket:
	for i := 1; i <= 20; i++ {
		db.Bucket.Add(fmt.Sprintf("alreadyHere%d", i), 0, Body{"key1": i, "key2": "hi"})
	}

	// Make sure they aren't visible through the gateway:
	doc, err := db.GetDoc("alreadyHere1")
	assert.Equals(t, doc, (*document)(nil))
	assert.Equals(t, err.(*base.HTTPError).Status, 404)

	// Import them:
	count, err := db.UpdateAllDocChannels(false, true)
	assertNoError(t, err, "ApplySyncFun")
	assert.Equals(t, count, 20)

	// Now they're visible:
	doc, err = db.GetDoc("alreadyHere1")
	base.Logf("doc = %+v", doc)
	assert.True(t, doc != nil)
	assertNoError(t, err, "can't get doc")
}
Example #5
// FOR TESTS ONLY: Blocks until the given sequence has been received and is no longer in the skipped-sequence queue.
func (c *changeCache) waitForSequenceWithMissing(sequence uint64) {
	var i int
	for i = 0; i < 20; i++ {
		c.lock.RLock()
		nextSequence := c.nextSequence
		c.lock.RUnlock()
		if nextSequence >= sequence+1 {
			foundInMissing := false
			c.skippedSeqLock.RLock()
			for _, skippedSeq := range c.skippedSeqs {
				if skippedSeq.seq == sequence {
					foundInMissing = true
					break
				}
			}
			c.skippedSeqLock.RUnlock()
			if !foundInMissing {
				base.Logf("waitForSequence(%d) took %d ms", sequence, i*100)
				return
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	panic(fmt.Sprintf("changeCache: Sequence %d never showed up!", sequence))
}
Example #6
// HTTP handler for a POST to _bulk_docs
func (h *handler) handleBulkDocs() error {
	body, err := h.readJSON()
	if err != nil {
		return err
	}
	newEdits, ok := body["new_edits"].(bool)
	if !ok {
		newEdits = true
	}

	docs, ok := body["docs"].([]interface{})
	if !ok {
		err = base.HTTPErrorf(http.StatusBadRequest, "missing 'docs' property")
		return err
	}
	h.db.ReserveSequences(uint64(len(docs)))

	result := make([]db.Body, 0, len(docs))
	for _, item := range docs {
		doc, ok := item.(map[string]interface{})
		if !ok {
			return base.HTTPErrorf(http.StatusBadRequest, "Elements of 'docs' array must be objects")
		}
		docid, _ := doc["_id"].(string)
		var err error
		var revid string
		if newEdits {
			if docid != "" {
				revid, err = h.db.Put(docid, doc)
			} else {
				docid, revid, err = h.db.Post(doc)
			}
		} else {
			revisions := db.ParseRevisions(doc)
			if revisions == nil {
				err = base.HTTPErrorf(http.StatusBadRequest, "Bad _revisions")
			} else {
				revid = revisions[0]
				err = h.db.PutExistingRev(docid, doc, revisions)
			}
		}

		status := db.Body{}
		if docid != "" {
			status["id"] = docid
		}
		if err != nil {
			code, msg := base.ErrorAsHTTPStatus(err)
			status["status"] = code
			status["error"] = base.CouchHTTPErrorName(code)
			status["reason"] = msg
			base.Logf("\tBulkDocs: Doc %q --> %d %s (%v)", docid, code, msg, err)
			err = nil // wrote it to output already; not going to return it
		} else {
			status["rev"] = revid
		}
		result = append(result, status)
	}

	h.writeJSONStatus(http.StatusCreated, result)
	return nil
}
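For reference, a request/response pair in the shape this handler accepts and produces (ids and revs are illustrative):

	POST /db/_bulk_docs
	{"new_edits": true, "docs": [{"_id": "doc1", "type": "task"}, {"type": "no _id, so one is generated"}]}

	201 Created
	[{"id": "doc1", "rev": "1-..."}, {"id": "f2c9...", "rev": "1-..."}]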
Example #7
func (sc *ServerContext) applySyncFunction(dbcontext *db.DatabaseContext, syncFn string) error {
	changed, err := dbcontext.UpdateSyncFun(syncFn)
	if err != nil || !changed {
		return err
	}
	// Sync function has changed:
	base.Logf("**NOTE:** %q's sync function has changed. The new function may assign different channels to documents, or permissions to users. You may want to re-sync the database to update these.", dbcontext.Name)
	return nil
}
Example #8
func (k *kvChangeIndexWriter) Init(context *DatabaseContext, options *CacheOptions, indexOptions *ChangeIndexOptions, indexPartitionsCallback IndexPartitionsFunc) (err error) {

	k.context = context
	k.pending = make(chan *LogEntry, maxCacheUpdate)

	k.indexPartitionsCallback = indexPartitionsCallback

	// start process to work pending sequences
	go func() {
		err := k.indexPending()
		if err != nil {
			base.LogFatal("Indexer failed with unrecoverable error:%v", err)
		}

	}()

	k.channelIndexWriters = make(map[string]*kvChannelIndex)
	k.indexWriteBucket, err = base.GetBucket(indexOptions.Spec, nil)
	if err != nil {
		base.Logf("Error opening index bucket %q, pool %q, server <%s>",
			indexOptions.Spec.BucketName, indexOptions.Spec.PoolName, indexOptions.Spec.Server)
		// TODO: revert to local index?
		return err
	}
	cbBucket, ok := k.indexWriteBucket.(base.CouchbaseBucket)
	var maxVbNo uint16
	if ok {
		maxVbNo, _ = cbBucket.GetMaxVbno()
	} else {
		// walrus, for unit testing
		maxVbNo = 1024
	}

	// Set of worker goroutines used to process incoming entries
	k.unmarshalWorkQueue = make(chan *unmarshalEntry, 500)

	// Start fixed set of goroutines to work the unmarshal work queue
	for i := 0; i < maxUnmarshalProcesses; i++ {
		go func() {
			for {
				select {
				case unmarshalEntry := <-k.unmarshalWorkQueue:
					unmarshalEntry.process()
				case <-k.terminator:
					return
				}
			}
		}()
	}

	// Initialize unmarshalWorkers
	k.unmarshalWorkers = make([]*unmarshalWorker, maxVbNo)

	return nil
}
Example #9
func (k *kvChangeIndexReader) Init(options *CacheOptions, indexOptions *ChangeIndexOptions, onChange func(base.Set), indexPartitionsCallback IndexPartitionsFunc) (err error) {

	k.channelIndexReaders = make(map[string]*kvChannelIndex)
	k.indexPartitionsCallback = indexPartitionsCallback

	// Initialize notification Callback
	k.onChange = onChange

	k.indexReadBucket, err = base.GetBucket(indexOptions.Spec, nil)
	if err != nil {
		base.Logf("Error opening index bucket %q, pool %q, server <%s>",
			indexOptions.Spec.BucketName, indexOptions.Spec.PoolName, indexOptions.Spec.Server)
		// TODO: revert to local index?
		return err
	}

	cbBucket, ok := k.indexReadBucket.(base.CouchbaseBucket)
	if ok {
		k.maxVbNo, _ = cbBucket.GetMaxVbno()
	} else {
		// walrus, for unit testing
		k.maxVbNo = 1024
	}

	// Start background task to poll for changes
	k.terminator = make(chan struct{})
	k.pollingActive = make(chan struct{})
	go func(k *kvChangeIndexReader) {
		defer close(k.pollingActive)
		pollStart := time.Now()
		for {
			timeSinceLastPoll := time.Since(pollStart)
			waitTime := (kPollFrequency * time.Millisecond) - timeSinceLastPoll
			if waitTime < 0 {
				waitTime = 0 * time.Millisecond
			}
			select {
			case <-k.terminator:
				return
			case <-time.After(waitTime):
				// TODO: Doesn't trigger the reader removal processing (in pollReaders) during long
				//       periods without changes to stableSequence.  In that scenario we'll continue
				//       stable sequence polling each poll interval, even if we *actually* don't have any
				//       active readers.
				pollStart = time.Now()
				if k.hasActiveReaders() && k.stableSequenceChanged() {
					k.pollReaders()
				}
			}

		}
	}(k)

	return nil
}
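The unmarshal workers in Example #8 follow a common fixed-pool-with-terminator pattern; a generic, self-contained sketch of it (the names here are illustrative, not part of kvChangeIndexWriter):

func startWorkers(n int, work <-chan func(), terminator <-chan struct{}) {
	for i := 0; i < n; i++ {
		go func() {
			for {
				select {
				case job := <-work:
					job() // process one queued item
				case <-terminator:
					return // closing the terminator channel stops every worker
				}
			}
		}()
	}
}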
Example #10
// ADMIN API to turn Go CPU profiling on/off
func (h *handler) handleProfiling() error {
	profileName := h.PathVar("name")
	var params struct {
		File string `json:"file"`
	}
	body, err := h.readBody()
	if err != nil {
		return err
	}
	if len(body) > 0 {
		if err = json.Unmarshal(body, &params); err != nil {
			return err
		}
	}

	if params.File != "" {
		f, err := os.Create(params.File)
		if err != nil {
			return err
		}
		if profileName != "" {
			defer f.Close()
			if profile := pprof.Lookup(profileName); profile != nil {
				if err := profile.WriteTo(f, 0); err != nil {
					return err
				}
				base.Logf("Wrote %s profile to %s", profileName, params.File)
			} else {
				return base.HTTPErrorf(http.StatusNotFound, "No such profile %q", profileName)
			}
		} else {
			base.Logf("Starting CPU profile to %s ...", params.File)
			if err := pprof.StartCPUProfile(f); err != nil {
				f.Close()
				return err
			}
		}
	} else {
		if profileName != "" {
			return base.HTTPErrorf(http.StatusBadRequest, "Missing JSON 'file' parameter")
		} else {
			base.Log("...ending CPU profile.")
			pprof.StopCPUProfile()
		}
	}
	return nil
}
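Assuming this handler is routed at /_profile and /_profile/{name} on the admin interface (the route and port here are assumptions, not shown in the snippet), usage would look like:

	POST http://localhost:4985/_profile            {"file": "/tmp/cpu.pprof"}        -- start CPU profiling
	POST http://localhost:4985/_profile            {}                                -- stop CPU profiling
	POST http://localhost:4985/_profile/goroutine  {"file": "/tmp/goroutine.pprof"}  -- write the named runtime profile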
Example #11
// Queries the 'channels' view to get a range of sequences of a single channel as LogEntries.
func (dbc *DatabaseContext) getChangesInChannelFromView(
	channelName string, endSeq uint64, options ChangesOptions) (LogEntries, error) {
	if dbc.Bucket == nil {
		return nil, errors.New("No bucket available for channel view query")
	}
	start := time.Now()
	// Query the view:
	optMap := changesViewOptions(channelName, endSeq, options)
	base.LogTo("Cache", "  Querying 'channels' view for %q (start=#%d, end=#%d, limit=%d)", channelName, options.Since.SafeSequence()+1, endSeq, options.Limit)
	vres := channelsViewResult{}
	err := dbc.Bucket.ViewCustom(DesignDocSyncGateway, ViewChannels, optMap, &vres)
	if err != nil {
		base.Logf("Error from 'channels' view: %v", err)
		return nil, err
	} else if len(vres.Rows) == 0 {
		base.LogTo("Cache", "    Got no rows from view for %q", channelName)
		return nil, nil
	}

	// Convert the output to LogEntries:
	entries := make(LogEntries, 0, len(vres.Rows))
	for _, row := range vres.Rows {
		entry := &LogEntry{
			Sequence:     uint64(row.Key[1].(float64)),
			DocID:        row.ID,
			RevID:        row.Value.Rev,
			Flags:        row.Value.Flags,
			TimeReceived: time.Now(),
		}
		// base.LogTo("Cache", "  Got view sequence #%d (%q / %q)", entry.Sequence, entry.DocID, entry.RevID)
		entries = append(entries, entry)
	}

	base.LogTo("Cache", "    Got %d rows from view for %q: #%d ... #%d",
		len(entries), channelName, entries[0].Sequence, entries[len(entries)-1].Sequence)
	if elapsed := time.Since(start); elapsed > 200*time.Millisecond {
		base.Logf("changes_view: Query took %v to return %d rows, options = %#v",
			elapsed, len(entries), optMap)
	}
	changeCacheExpvars.Add("view_queries", 1)
	return entries, nil
}
Example #12
func (sc *ServerContext) _removeDatabase(dbName string) bool {

	context := sc.databases_[dbName]
	if context == nil {
		return false
	}
	base.Logf("Closing db /%s (bucket %q)", context.Name, context.Bucket.GetName())
	context.Close()
	delete(sc.databases_, dbName)
	return true
}
Example #13
// Simple Sync Gateway launcher tool.
func main() {

	signalchannel := make(chan os.Signal, 1)
	signal.Notify(signalchannel, syscall.SIGHUP)

	go func() {
		for range signalchannel {
			base.Logf("SIGHUP: Reloading config...")
			rest.ReloadConf()
		}
	}()

	rest.ServerMain()
}
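From a shell, the reload path can be exercised with kill -HUP $(pgrep sync_gateway), assuming the launcher binary is named sync_gateway.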
Example #14
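// FOR TESTS ONLY: Blocks until the given sequence has been received (mirrors waitForSequenceWithMissing above, minus the skipped-sequence check).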
func (c *changeCache) waitForSequence(sequence uint64) {
	var i int
	for i = 0; i < 20; i++ {
		c.lock.RLock()
		nextSequence := c.nextSequence
		c.lock.RUnlock()
		if nextSequence >= sequence+1 {
			base.Logf("waitForSequence(%d) took %d ms", sequence, i*100)
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	panic(fmt.Sprintf("changeCache: Sequence %d never showed up!", sequence))
}
Example #15
// Calls the JS sync function to assign the doc to channels, grant users
// access to channels, and reject invalid documents.
func (db *Database) getChannelsAndAccess(doc *document, body Body, revID string) (result base.Set, access channels.AccessMap, roles channels.AccessMap, err error) {
	base.LogTo("CRUD+", "Invoking sync on doc %q rev %s", doc.ID, body["_rev"])

	// Get the parent revision, to pass to the sync function:
	var oldJsonBytes []byte
	if oldJsonBytes, err = db.getAncestorJSON(doc, revID); err != nil {
		return
	}
	oldJson := string(oldJsonBytes)

	if db.ChannelMapper != nil {
		// Call the ChannelMapper:
		var output *channels.ChannelMapperOutput
		output, err = db.ChannelMapper.MapToChannelsAndAccess(body, oldJson,
			makeUserCtx(db.user))
		if err == nil {
			result = output.Channels
			if !doc.hasFlag(channels.Deleted) { // deleted docs can't grant access
				access = output.Access
				roles = output.Roles
			}
			err = output.Rejection
			if err != nil {
				base.Logf("Sync fn rejected: new=%+v  old=%s --> %s", body, oldJson, err)
			} else if !validateAccessMap(access) || !validateRoleAccessMap(roles) {
				err = base.HTTPErrorf(500, "Error in JS sync function")
			}

		} else {
			base.Warn("Sync fn exception: %+v; doc = %s", err, body)
			err = base.HTTPErrorf(500, "Exception in JS sync function")
		}

	} else {
		// No ChannelMapper so by default use the "channels" property:
		value, _ := body["channels"].([]interface{})
		if value != nil {
			array := make([]string, 0, len(value))
			for _, channel := range value {
				channelStr, ok := channel.(string)
				if ok && len(channelStr) > 0 {
					array = append(array, channelStr)
				}
			}
			result, err = channels.SetFromArray(array, channels.KeepStar)
		}
	}
	return
}
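For context, a sketch of the kind of JS sync function the ChannelMapper executes, held here as a Go constant; channel(doc.channels) is the documented default mapping, while the access() grant is illustrative:

const exampleSyncFn = `function (doc, oldDoc) {
	channel(doc.channels);               // assign the doc to its channels
	if (doc.owner) {
		access(doc.owner, doc.channels); // grant the owner access to those channels
	}
}`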
Example #16
func (sc *ServerContext) startShadowing(dbcontext *db.DatabaseContext, shadow *ShadowConfig) error {

	base.Warn("Bucket Shadowing feature comes with a number of limitations and caveats. See https://github.com/couchbase/sync_gateway/issues/1363 for more details.")

	var pattern *regexp.Regexp
	if shadow.Doc_id_regex != nil {
		var err error
		pattern, err = regexp.Compile(*shadow.Doc_id_regex)
		if err != nil {
			base.Warn("Invalid shadow doc_id_regex: %s", *shadow.Doc_id_regex)
			return err
		}
	}

	spec := base.BucketSpec{
		Server:     *shadow.Server,
		PoolName:   "default",
		BucketName: *shadow.Bucket,
		FeedType:   shadow.FeedType,
	}
	if shadow.Pool != nil {
		spec.PoolName = *shadow.Pool
	}
	if shadow.Username != "" {
		spec.Auth = shadow
	}

	bucket, err := base.GetBucket(spec, nil)

	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway,
			"Unable to connect to shadow bucket: %s", err)
		return err
	}
	shadower, err := db.NewShadower(dbcontext, bucket, pattern)
	if err != nil {
		bucket.Close()
		return err
	}
	dbcontext.Shadower = shadower

	//Remove credentials from server URL before logging
	url, err := couchbase.ParseURL(spec.Server)
	if err == nil {
		base.Logf("Database %q shadowing remote bucket %q, pool %q, server <%s:%s/%s>", dbcontext.Name, spec.BucketName, spec.PoolName, url.Scheme, url.Host, url.Path)
	}
	return nil
}
Example #17
// POST a report of database statistics
func (sc *ServerContext) reportStats() {
	if sc.config.DeploymentID == nil {
		panic("Can't reportStats without DeploymentID")
	}
	stats := sc.Stats()
	if stats == nil {
		return // No activity
	}
	base.Logf("Reporting server stats to %s ...", kStatsReportURL)
	body, _ := json.Marshal(stats)
	bodyReader := bytes.NewReader(body)
	resp, err := sc.HTTPClient.Post(kStatsReportURL, "application/json", bodyReader)
	if err != nil {
		base.Warn("Error posting stats: %v", err)
		return
	}
	// Close the response body so the underlying connection isn't leaked:
	resp.Body.Close()
}
Example #18
func (sc *ServerContext) startStatsReporter() {
	interval := kStatsReportInterval
	if sc.config.StatsReportInterval != nil {
		if *sc.config.StatsReportInterval <= 0 {
			return
		}
		interval = time.Duration(*sc.config.StatsReportInterval) * time.Second
	}
	sc.statsTicker = time.NewTicker(interval)
	go func() {
		for range sc.statsTicker.C {
			sc.reportStats()
		}
	}()
	base.Logf("Will report server stats for %q every %v",
		*sc.config.DeploymentID, interval)
}
Example #19
func (sc *ServerContext) startShadowing(dbcontext *db.DatabaseContext, shadow *ShadowConfig) error {
	var pattern *regexp.Regexp
	if shadow.Doc_id_regex != nil {
		var err error
		pattern, err = regexp.Compile(*shadow.Doc_id_regex)
		if err != nil {
			base.Warn("Invalid shadow doc_id_regex: %s", *shadow.Doc_id_regex)
			return err
		}
	}

	spec := base.BucketSpec{
		Server:     *shadow.Server,
		PoolName:   "default",
		BucketName: shadow.Bucket,
		FeedType:   shadow.FeedType,
	}
	if shadow.Pool != nil {
		spec.PoolName = *shadow.Pool
	}
	if shadow.Username != "" {
		spec.Auth = shadow
	}

	bucket, err := db.ConnectToBucket(spec)
	if err != nil {
		return err
	}
	shadower, err := db.NewShadower(dbcontext, bucket, pattern)
	if err != nil {
		bucket.Close()
		return err
	}
	dbcontext.Shadower = shadower

	//Remove credentials from server URL before logging
	url, err := couchbase.ParseURL(spec.Server)
	if err == nil {
		base.Logf("Database %q shadowing remote bucket %q, pool %q, server <%s:%s/%s>", dbcontext.Name, spec.BucketName, spec.PoolName, url.Scheme, url.Host, url.Path)
	}
	return nil
}
Example #20
// Calls the JS sync function to assign the doc to channels, grant users
// access to channels, and reject invalid documents.
func (db *Database) getChannelsAndAccess(doc *document, body Body, revID string) (result base.Set, access channels.AccessMap, roles channels.AccessMap, oldJson string, err error) {
	base.LogTo("CRUD+", "Invoking sync on doc %q rev %s", doc.ID, body["_rev"])

	// Get the parent revision, to pass to the sync function:
	var oldJsonBytes []byte
	if oldJsonBytes, err = db.getAncestorJSON(doc, revID); err != nil {
		return
	}
	oldJson = string(oldJsonBytes)

	if db.ChannelMapper != nil {
		// Call the ChannelMapper:
		var output *channels.ChannelMapperOutput
		output, err = db.ChannelMapper.MapToChannelsAndAccess(body, oldJson,
			makeUserCtx(db.user))
		if err == nil {
			result = output.Channels
			access = output.Access
			roles = output.Roles
			err = output.Rejection
			if err != nil {
				base.Logf("Sync fn rejected: new=%+v  old=%s --> %s", body, oldJson, err)
			} else if !validateAccessMap(access) || !validateRoleAccessMap(roles) {
				err = base.HTTPErrorf(500, "Error in JS sync function")
			}

		} else {
			base.Warn("Sync fn exception: %+v; doc = %s", err, body)
			err = base.HTTPErrorf(500, "Exception in JS sync function")
		}

	} else {
		// No ChannelMapper so by default use the "channels" property:
		value := body["channels"]
		if value != nil {
			array := base.ValueToStringArray(value)
			result, err = channels.SetFromArray(array, channels.KeepStar)
		}
	}
	return
}
Example #21
// Deletes old revisions that have been moved to individual docs
func (db *Database) Compact() (int, error) {
	opts := Body{"stale": false, "reduce": false}
	vres, err := db.Bucket.View(DesignDocSyncHousekeeping, ViewOldRevs, opts)
	if err != nil {
		base.Warn("old_revs view returned %v", err)
		return 0, err
	}

	//FIX: Is there a way to do this in one operation?
	base.Logf("Compacting away %d old revs of %q ...", len(vres.Rows), db.Name)
	count := 0
	for _, row := range vres.Rows {
		base.LogTo("CRUD", "\tDeleting %q", row.ID)
		if err := db.Bucket.Delete(row.ID); err != nil {
			base.Warn("Error deleting %q: %v", row.ID, err)
		} else {
			count++
		}
	}
	return count, nil
}
Example #22
// ADMIN API to dump Go heap profile
func (h *handler) handleHeapProfiling() error {
	var params struct {
		File string `json:"file"`
	}
	body, err := h.readBody()
	if err != nil {
		return err
	}
	if err = json.Unmarshal(body, &params); err != nil {
		return err
	}

	base.Logf("Dumping heap profile to %s ...", params.File)
	f, err := os.Create(params.File)
	if err != nil {
		return err
	}
	defer f.Close()
	return pprof.WriteHeapProfile(f)
}
Example #23
func ValidateConfigOrPanic(runMode SyncGatewayRunMode) {

	// if the user passes -skipRunModeValidation on the command line, then skip validation
	if config.SkipRunmodeValidation {
		base.Logf("Skipping runmode (accel vs normal) config validation")
		return
	}

	switch runMode {
	case SyncGatewayRunModeNormal:
		// if any databases are configured as index writers, panic
		if config.HasAnyIndexWriterConfiguredDatabases() {
			base.LogPanic("SG is running in normal mode but there are databases configured as index writers")
		}
	case SyncGatewayRunModeAccel:
		// if any databases are configured as index readers, panic
		if config.HasAnyIndexReaderConfiguredDatabases() {
			base.LogPanic("SG is running in sg-accelerator mode but there are databases configured as index readers")
		}
	}

}
Example #24
// Deletes all documents in the database. If docType is non-empty, only the "_sync:<docType>:" documents are deleted.
func (db *Database) DeleteAllDocs(docType string) error {
	opts := Body{"stale": false}
	if docType != "" {
		opts["startkey"] = "_sync:" + docType + ":"
		opts["endkey"] = "_sync:" + docType + "~"
		opts["inclusive_end"] = false
	}
	vres, err := db.Bucket.View(DesignDocSyncHousekeeping, ViewAllBits, opts)
	if err != nil {
		base.Warn("all_bits view returned %v", err)
		return err
	}

	//FIX: Is there a way to do this in one operation?
	base.Logf("Deleting %d %q documents of %q ...", len(vres.Rows), docType, db.Name)
	for _, row := range vres.Rows {
		base.LogTo("CRUD", "\tDeleting %q", row.ID)
		if err := db.Bucket.Delete(row.ID); err != nil {
			base.Warn("Error deleting %q: %v", row.ID, err)
		}
	}
	return nil
}
Example #25
func (sc *ServerContext) installPrincipals(context *db.DatabaseContext, spec map[string]*db.PrincipalConfig, what string) error {
	for name, princ := range spec {
		name := name // copy the range variable so &name below doesn't alias the loop iterator
		isGuest := name == base.GuestUsername
		if isGuest {
			internalName := ""
			princ.Name = &internalName
		} else {
			princ.Name = &name
		}
		_, err := context.UpdatePrincipal(*princ, (what == "user"), isGuest)
		if err != nil {
			// A conflict error just means updatePrincipal didn't overwrite an existing user.
			if status, _ := base.ErrorAsHTTPStatus(err); status != http.StatusConflict {
				return fmt.Errorf("Couldn't create %s %q: %v", what, name, err)
			}
		} else if isGuest {
			base.Log("    Reset guest user to config")
		} else {
			base.Logf("    Created %s %q", what, name)
		}
	}
	return nil
}
Example #26
func (h *handler) checkAuth(context *db.DatabaseContext) error {
	h.user = nil
	if context == nil {
		return nil
	}

	// Check basic auth first
	if userName, password := h.getBasicAuth(); userName != "" {
		h.user = context.Authenticator().AuthenticateUser(userName, password)
		if h.user == nil {
			base.Logf("HTTP auth failed for username=%q", userName)
			h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`)
			return base.HTTPErrorf(http.StatusUnauthorized, "Invalid login")
		}
		return nil
	}

	// Check cookie
	var err error
	h.user, err = context.Authenticator().AuthenticateCookie(h.rq, h.response)
	if err != nil {
		return err
	} else if h.user != nil {
		return nil
	}

	// No auth given -- check guest access
	if h.user, err = context.Authenticator().GetUser(""); err != nil {
		return err
	}
	if h.privs == regularPrivs && h.user.Disabled() {
		h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`)
		return base.HTTPErrorf(http.StatusUnauthorized, "Login required")
	}

	return nil
}
Example #27
// Returns the DatabaseContext with the given name
func (sc *ServerContext) GetDatabase(name string) (*db.DatabaseContext, error) {
	sc.lock.RLock()
	dbc := sc.databases_[name]
	sc.lock.RUnlock()
	if dbc != nil {
		return dbc, nil
	} else if db.ValidateDatabaseName(name) != nil {
		return nil, base.HTTPErrorf(http.StatusBadRequest, "invalid database name %q", name)
	} else if sc.config.ConfigServer == nil {
		return nil, base.HTTPErrorf(http.StatusNotFound, "no such database %q", name)
	} else {
		// Let's ask the config server if it knows this database:
		base.Logf("Asking config server %q about db %q...", *sc.config.ConfigServer, name)
		config, err := sc.getDbConfigFromServer(name)
		if err != nil {
			return nil, err
		}
		if dbc, err = sc.getOrAddDatabaseFromConfig(config, true); err != nil {
			return nil, err
		}
	}
	return dbc, nil

}
Example #28
// Adds a database to the ServerContext.  Attempts a read after it gets the write
// lock to see if it's already been added by another process. If so, returns either the
// existing DatabaseContext or an error based on the useExisting flag.
func (sc *ServerContext) _getOrAddDatabaseFromConfig(config *DbConfig, useExisting bool) (*db.DatabaseContext, error) {

	server := "http://localhost:8091"
	pool := "default"
	bucketName := config.Name

	if config.Server != nil {
		server = *config.Server
	}
	if config.Pool != nil {
		pool = *config.Pool
	}
	if config.Bucket != nil {
		bucketName = *config.Bucket
	}
	dbName := config.Name
	if dbName == "" {
		dbName = bucketName
	}

	if sc.databases_[dbName] != nil {
		if useExisting {
			return sc.databases_[dbName], nil
		} else {
			return nil, base.HTTPErrorf(http.StatusPreconditionFailed, // what CouchDB returns
				"Duplicate database name %q", dbName)
		}
	}

	base.Logf("Opening db /%s as bucket %q, pool %q, server <%s>",
		dbName, bucketName, pool, server)

	if err := db.ValidateDatabaseName(dbName); err != nil {
		return nil, err
	}

	var importDocs, autoImport bool
	switch config.ImportDocs {
	case nil, false:
	case true:
		importDocs = true
	case "continuous":
		importDocs = true
		autoImport = true
	default:
		return nil, fmt.Errorf("Unrecognized value for ImportDocs: %#v", config.ImportDocs)
	}

	feedType := strings.ToLower(config.FeedType)

	// Connect to the bucket and add the database:
	spec := base.BucketSpec{
		Server:     server,
		PoolName:   pool,
		BucketName: bucketName,
		FeedType:   feedType,
	}

	// If we are using DCPSHARD feed type, set CbgtContext on bucket spec
	if feedType == strings.ToLower(base.DcpShardFeedType) {
		spec.CbgtContext = sc.CbgtContext
	}

	if config.Username != "" {
		spec.Auth = config
	}

	// Set cache properties, if present
	cacheOptions := db.CacheOptions{}
	if config.CacheConfig != nil {
		if config.CacheConfig.CachePendingSeqMaxNum != nil && *config.CacheConfig.CachePendingSeqMaxNum > 0 {
			cacheOptions.CachePendingSeqMaxNum = *config.CacheConfig.CachePendingSeqMaxNum
		}
		if config.CacheConfig.CachePendingSeqMaxWait != nil && *config.CacheConfig.CachePendingSeqMaxWait > 0 {
			cacheOptions.CachePendingSeqMaxWait = time.Duration(*config.CacheConfig.CachePendingSeqMaxWait) * time.Millisecond
		}
		if config.CacheConfig.CacheSkippedSeqMaxWait != nil && *config.CacheConfig.CacheSkippedSeqMaxWait > 0 {
			cacheOptions.CacheSkippedSeqMaxWait = time.Duration(*config.CacheConfig.CacheSkippedSeqMaxWait) * time.Millisecond
		}
		// set EnableStarChannelLog directly here (instead of via NewDatabaseContext), so that it's set when we create the channels view in ConnectToBucket
		if config.CacheConfig.EnableStarChannel != nil {
			db.EnableStarChannelLog = *config.CacheConfig.EnableStarChannel
		}

		if config.CacheConfig.ChannelCacheMaxLength != nil && *config.CacheConfig.ChannelCacheMaxLength > 0 {
			cacheOptions.ChannelCacheMaxLength = *config.CacheConfig.ChannelCacheMaxLength
		}
		if config.CacheConfig.ChannelCacheMinLength != nil && *config.CacheConfig.ChannelCacheMinLength > 0 {
			cacheOptions.ChannelCacheMinLength = *config.CacheConfig.ChannelCacheMinLength
		}
		if config.CacheConfig.ChannelCacheAge != nil && *config.CacheConfig.ChannelCacheAge > 0 {
			cacheOptions.ChannelCacheAge = time.Duration(*config.CacheConfig.ChannelCacheAge) * time.Second
		}

	}

	bucket, err := db.ConnectToBucket(spec, func(bucket string, err error) {
		base.Warn("Lost TAP feed for bucket %s, with error: %v", bucket, err)

		if dc := sc.databases_[dbName]; dc != nil {
			dc.TakeDbOffline("Lost TAP feed")
		}
	})

	if err != nil {
		return nil, err
	}

	// Channel index definition, if present
	channelIndexOptions := &db.ChangeIndexOptions{} // TODO: this is confusing!  why is it called both a "change index" and a "channel index"?
	sequenceHashOptions := &db.SequenceHashOptions{}
	if config.ChannelIndex != nil {
		indexServer := "http://localhost:8091"
		indexPool := "default"
		indexBucketName := ""

		if config.ChannelIndex.Server != nil {
			indexServer = *config.ChannelIndex.Server
		}
		if config.ChannelIndex.Pool != nil {
			indexPool = *config.ChannelIndex.Pool
		}
		if config.ChannelIndex.Bucket != nil {
			indexBucketName = *config.ChannelIndex.Bucket
		}
		indexSpec := base.BucketSpec{
			Server:          indexServer,
			PoolName:        indexPool,
			BucketName:      indexBucketName,
			CouchbaseDriver: base.GoCB,
		}
		if config.ChannelIndex.Username != "" {
			indexSpec.Auth = config.ChannelIndex
		}

		if config.ChannelIndex.NumShards != 0 {
			channelIndexOptions.NumShards = config.ChannelIndex.NumShards
		} else {
			channelIndexOptions.NumShards = 64
		}

		channelIndexOptions.ValidateOrPanic()

		channelIndexOptions.Spec = indexSpec
		channelIndexOptions.Writer = config.ChannelIndex.IndexWriter

		// TODO: separate config of hash bucket
		sequenceHashOptions.Bucket, err = base.GetBucket(indexSpec, nil)
		if err != nil {
			base.Logf("Error opening sequence hash bucket %q, pool %q, server <%s>",
				indexBucketName, indexPool, indexServer)
			// TODO: revert to local index?
			return nil, err
		}
		sequenceHashOptions.Size = 32
	} else {
		channelIndexOptions = nil
	}

	var revCacheSize uint32
	if config.RevCacheSize != nil && *config.RevCacheSize > 0 {
		revCacheSize = *config.RevCacheSize
	} else {
		revCacheSize = db.KDefaultRevisionCacheCapacity
	}

	contextOptions := db.DatabaseContextOptions{
		CacheOptions:          &cacheOptions,
		IndexOptions:          channelIndexOptions,
		SequenceHashOptions:   sequenceHashOptions,
		RevisionCacheCapacity: revCacheSize,
		AdminInterface:        sc.config.AdminInterface,
	}

	dbcontext, err := db.NewDatabaseContext(dbName, bucket, autoImport, contextOptions)
	if err != nil {
		return nil, err
	}
	dbcontext.BucketSpec = spec

	syncFn := ""
	if config.Sync != nil {
		syncFn = *config.Sync
	}
	if err := sc.applySyncFunction(dbcontext, syncFn); err != nil {
		return nil, err
	}

	if importDocs {
		database, err := db.GetDatabase(dbcontext, nil)
		if err != nil {
			return nil, err
		}
		if _, err := database.UpdateAllDocChannels(false, true); err != nil {
			return nil, err
		}
	}

	if config.RevsLimit != nil && *config.RevsLimit > 0 {
		dbcontext.RevsLimit = *config.RevsLimit
	}

	dbcontext.AllowEmptyPassword = config.AllowEmptyPassword

	if dbcontext.ChannelMapper == nil {
		base.Logf("Using default sync function 'channel(doc.channels)' for database %q", dbName)
	}

	// Create default users & roles:
	if err := sc.installPrincipals(dbcontext, config.Roles, "role"); err != nil {
		return nil, err
	} else if err := sc.installPrincipals(dbcontext, config.Users, "user"); err != nil {
		return nil, err
	}

	emitAccessRelatedWarnings(config, dbcontext)

	// Install bucket-shadower if any:
	if shadow := config.Shadow; shadow != nil {
		if err := sc.startShadowing(dbcontext, shadow); err != nil {
			base.Warn("Database %q: unable to connect to external bucket for shadowing: %v",
				dbName, err)
		}
	}

	// Initialize event handlers
	if err := sc.initEventHandlers(dbcontext, config); err != nil {
		return nil, err
	}

	dbcontext.ExitChanges = make(chan struct{})

	// Register it so HTTP handlers can find it:
	sc.databases_[dbcontext.Name] = dbcontext

	// Save the config
	sc.config.Databases[config.Name] = config

	if config.StartOffline {
		atomic.StoreUint32(&dbcontext.State, db.DBOffline)
		if dbcontext.EventMgr.HasHandlerForEvent(db.DBStateChange) {
			dbcontext.EventMgr.RaiseDBStateChangeEvent(dbName, "offline", "DB loaded from config", *sc.config.AdminInterface)
		}
	} else {
		atomic.StoreUint32(&dbcontext.State, db.DBOnline)
		if dbcontext.EventMgr.HasHandlerForEvent(db.DBStateChange) {
			dbcontext.EventMgr.RaiseDBStateChangeEvent(dbName, "online", "DB loaded from config", *sc.config.AdminInterface)
		}
	}

	return dbcontext, nil
}
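A sketch of a database config that exercises the options this function reads; the JSON keys are assumptions inferred from the Go field names, and every value is illustrative:

{
  "databases": {
    "mydb": {
      "server": "http://localhost:8091",
      "pool": "default",
      "bucket": "mybucket",
      "sync": "function (doc) { channel(doc.channels); }",
      "import_docs": "continuous",
      "rev_cache_size": 5000,
      "revs_limit": 100,
      "offline": false,
      "users": { "GUEST": { "disabled": true } }
    }
  }
}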
Example #29
func (h *handler) checkAuth(context *db.DatabaseContext) error {
	h.user = nil
	if context == nil {
		return nil
	}

	var err error
	// If oidc enabled, check for bearer ID token
	if context.Options.OIDCOptions != nil {
		if token := h.getBearerToken(); token != "" {
			h.user, _, err = context.Authenticator().AuthenticateUntrustedJWT(token, context.OIDCProviders, h.getOIDCCallbackURL)
			if h.user == nil || err != nil {
				return base.HTTPErrorf(http.StatusUnauthorized, "Invalid login")
			}
			return nil
		}

		/*
		* If unsupported/oidc testing is enabled
		* and this is a call on the token endpoint
		* and the username and password match those in the oidc default provider config
		* then authorize this request
		 */
		if unsupportedOptions := context.Options.UnsupportedOptions; unsupportedOptions != nil {
			if unsupportedOptions.OidcTestProvider.Enabled && strings.HasSuffix(h.rq.URL.Path, "/_oidc_testing/token") {
				if username, password := h.getBasicAuth(); username != "" && password != "" {
					provider := context.Options.OIDCOptions.Providers.GetProviderForIssuer(issuerUrlForDB(h, context.Name), testProviderAudiences)
					if provider != nil && provider.ClientID != nil && provider.ValidationKey != nil {
						if *provider.ClientID == username && *provider.ValidationKey == password {
							return nil
						}
					}
				}
			}
		}
	}

	// Check basic auth first
	if userName, password := h.getBasicAuth(); userName != "" {
		h.user = context.Authenticator().AuthenticateUser(userName, password)
		if h.user == nil {
			base.Logf("HTTP auth failed for username=%q", userName)
			h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`)
			return base.HTTPErrorf(http.StatusUnauthorized, "Invalid login")
		}
		return nil
	}

	// Check cookie
	h.user, err = context.Authenticator().AuthenticateCookie(h.rq, h.response)
	if err != nil {
		return err
	} else if h.user != nil {
		return nil
	}

	// No auth given -- check guest access
	if h.user, err = context.Authenticator().GetUser(""); err != nil {
		return err
	}
	if h.privs == regularPrivs && h.user.Disabled() {
		h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`)
		return base.HTTPErrorf(http.StatusUnauthorized, "Login required")
	}

	return nil
}
Example #30
// HTTP handler for a POST to _bulk_docs
func (h *handler) handleBulkDocs() error {
	body, err := h.readJSON()
	if err != nil {
		return err
	}
	newEdits, ok := body["new_edits"].(bool)
	if !ok {
		newEdits = true
	}

	userDocs, ok := body["docs"].([]interface{})
	if !ok {
		err = base.HTTPErrorf(http.StatusBadRequest, "missing 'docs' property")
		return err
	}
	lenDocs := len(userDocs)
	// split out local docs, save them on their own
	localDocs := make([]interface{}, 0, lenDocs)
	docs := make([]interface{}, 0, lenDocs)
	for _, item := range userDocs {
		doc, ok := item.(map[string]interface{})
		if !ok {
			return base.HTTPErrorf(http.StatusBadRequest, "Elements of 'docs' array must be objects")
		}
		docid, _ := doc["_id"].(string)
		if strings.HasPrefix(docid, "_local/") {
			localDocs = append(localDocs, doc)
		} else {
			docs = append(docs, doc)
		}
	}

	h.db.ReserveSequences(uint64(len(docs)))

	result := make([]db.Body, 0, len(docs))
	for _, item := range docs {
		doc := item.(map[string]interface{})
		docid, _ := doc["_id"].(string)
		var err error
		var revid string
		if newEdits {
			if docid != "" {
				revid, err = h.db.Put(docid, doc)
			} else {
				docid, revid, err = h.db.Post(doc)
			}
		} else {
			revisions := db.ParseRevisions(doc)
			if revisions == nil {
				err = base.HTTPErrorf(http.StatusBadRequest, "Bad _revisions")
			} else {
				revid = revisions[0]
				err = h.db.PutExistingRev(docid, doc, revisions)
			}
		}

		status := db.Body{}
		if docid != "" {
			status["id"] = docid
		}
		if err != nil {
			code, msg := base.ErrorAsHTTPStatus(err)
			status["status"] = code
			status["error"] = base.CouchHTTPErrorName(code)
			status["reason"] = msg
			base.Logf("\tBulkDocs: Doc %q --> %d %s (%v)", docid, code, msg, err)
			err = nil // wrote it to output already; not going to return it
		} else {
			status["rev"] = revid
		}
		result = append(result, status)
	}

	for _, item := range localDocs {
		doc := item.(map[string]interface{})
		for k, v := range doc {
			doc[k] = base.FixJSONNumbers(v)
		}
		var err error
		var revid string
		offset := len("_local/")
		docid, _ := doc["_id"].(string)
		idslug := docid[offset:]
		revid, err = h.db.PutSpecial("local", idslug, doc)
		status := db.Body{}
		status["id"] = docid
		if err != nil {
			code, msg := base.ErrorAsHTTPStatus(err)
			status["status"] = code
			status["error"] = base.CouchHTTPErrorName(code)
			status["reason"] = msg
			base.Logf("\tBulkDocs: Local Doc %q --> %d %s (%v)", docid, code, msg, err)
			err = nil
		} else {
			status["rev"] = revid
		}
		result = append(result, status)
	}

	h.writeJSONStatus(http.StatusCreated, result)
	return nil
}
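An illustrative request/response pair showing the _local/ split above (ids and revs are made up):

	POST /db/_bulk_docs
	{"docs": [{"_id": "doc1"}, {"_id": "_local/checkpoint-1", "lastSeq": "42"}]}

	201 Created
	[{"id": "doc1", "rev": "1-..."}, {"id": "_local/checkpoint-1", "rev": "0-1"}]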