// Top-level handler call. It's passed a pointer to the specific method to run.
func (h *handler) invoke(method handlerMethod) error {
	restExpvars.Add("requests_total", 1)
	restExpvars.Add("requests_active", 1)
	defer restExpvars.Add("requests_active", -1)

	var err error
	if h.server.config.CompressResponses == nil || *h.server.config.CompressResponses {
		if encoded := NewEncodedResponseWriter(h.response, h.rq); encoded != nil {
			h.response = encoded
			defer encoded.Close()
		}
	}

	switch h.rq.Header.Get("Content-Encoding") {
	case "":
		h.requestBody = h.rq.Body
	case "gzip":
		if h.requestBody, err = gzip.NewReader(h.rq.Body); err != nil {
			return err
		}
		h.rq.Header.Del("Content-Encoding") // to prevent double decoding later on
	default:
		return base.HTTPErrorf(http.StatusUnsupportedMediaType, "Unsupported Content-Encoding; use gzip")
	}

	h.setHeader("Server", VersionString)

	// If there is a "db" path variable, look up the database context:
	var dbContext *db.DatabaseContext
	if dbname := h.PathVar("db"); dbname != "" {
		if dbContext, err = h.server.GetDatabase(dbname); err != nil {
			h.logRequestLine()
			return err
		}
	}

	// Authenticate, if not on admin port:
	if h.privs != adminPrivs {
		if err = h.checkAuth(dbContext); err != nil {
			h.logRequestLine()
			return err
		}
	}

	h.logRequestLine()

	// Now set the request's Database (i.e. context + user)
	if dbContext != nil {
		h.db, err = db.GetDatabase(dbContext, h.user)
		if err != nil {
			return err
		}
	}

	return method(h) // Call the actual handler code
}
func collectAccessRelatedWarnings(config *DbConfig, context *db.DatabaseContext) []string {

	currentDb, err := db.GetDatabase(context, nil)
	if err != nil {
		base.Warn("Could not get database, skipping access related warnings")
	}

	numUsersInDb := 0

	// If no users defined in config, and no users were returned from the view, add warning.
	// NOTE: currently ignoring the fact that the config could contain only disabled=true users.
	if len(config.Users) == 0 {

		// There are no users in the config, but there might be users in the db. Find out
		// by querying the "view principals" view which will return users and roles. We only want to
		// find out if there is at least one user (or role) defined, so set limit == 1 to minimize
		// performance hit of query.
		viewOptions := db.Body{
			"stale": false,
			"limit": 1,
		}
		vres, err := currentDb.Bucket.View(db.DesignDocSyncGateway, db.ViewPrincipals, viewOptions)
		if err != nil {
			base.Warn("Error trying to query ViewPrincipals: %v", err)
			return []string{}
		}

		numUsersInDb = len(vres.Rows)

		if len(vres.Rows) == 0 {
			noUsersWarning := fmt.Sprintf("No users have been defined in the '%v' database, which means that you will not be able to get useful data out of the sync gateway over the standard port. FIX: define users in the configuration json or via the REST API on the admin port, and grant users to channels via the admin_channels parameter.", currentDb.Name)
			return []string{noUsersWarning}
		}
	}

	// If the GUEST user is the *only* user defined, but it is disabled or has no access to channels, add warning
	guestUser, ok := config.Users[base.GuestUsername]
	if ok == true {
		// Do we have any other users? If so, we're done.
		if len(config.Users) > 1 || numUsersInDb > 1 {
			return []string{}
		}
		if guestUser.Disabled == true || len(guestUser.ExplicitChannels) == 0 {
			noGuestChannelsWarning := fmt.Sprintf("The GUEST user is the only user defined in the '%v' database, but is either disabled or has no access to any channels. This means that you will not be able to get useful data out of the sync gateway over the standard port. FIX: enable and/or grant access to the GUEST user to channels via the admin_channels parameter.", currentDb.Name)
			return []string{noGuestChannelsWarning}
		}
	}

	return []string{}
}
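// collectAccessRelatedWarnings only returns the warning strings; something else has to
// surface them. Below is a minimal sketch of an emitAccessRelatedWarnings helper that logs
// each collected warning. The name is taken from the call site in _getOrAddDatabaseFromConfig
// further down; this body is an assumption for illustration, not the excerpted source.
func emitAccessRelatedWarnings(config *DbConfig, context *db.DatabaseContext) {
	for _, warning := range collectAccessRelatedWarnings(config, context) {
		base.Warn("%v", warning)
	}
}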
// Top-level handler call. It's passed a pointer to the specific method to run.
func (h *handler) invoke(method handlerMethod) error {
	base.StatsExpvars.Add("requests_total", 1)
	base.StatsExpvars.Add("requests_active", 1)
	defer base.StatsExpvars.Add("requests_active", -1)

	var err error
	if h.server.config.CompressResponses == nil || *h.server.config.CompressResponses {
		if encoded := NewEncodedResponseWriter(h.response, h.rq); encoded != nil {
			h.response = encoded
			defer encoded.Close()
		}
	}

	switch h.rq.Header.Get("Content-Encoding") {
	case "":
		h.requestBody = h.rq.Body
	case "gzip":
		if h.requestBody, err = gzip.NewReader(h.rq.Body); err != nil {
			return err
		}
		h.rq.Header.Del("Content-Encoding") // to prevent double decoding later on
	default:
		return base.HTTPErrorf(http.StatusUnsupportedMediaType, "Unsupported Content-Encoding; use gzip")
	}

	h.setHeader("Server", VersionString)

	// If there is a "db" path variable, look up the database context:
	var dbContext *db.DatabaseContext
	if dbname := h.PathVar("db"); dbname != "" {
		if dbContext, err = h.server.GetDatabase(dbname); err != nil {
			h.logRequestLine()
			return err
		}
	}

	// If this call is in the context of a DB, make sure the DB is in a valid state
	if dbContext != nil {
		if !h.runOffline {

			// Get a read lock on the dbContext. While the lock is held we know that the
			// db state will not be changed by any other call.
			dbContext.AccessLock.RLock()

			// Defer releasing the dbContext lock until after the handler method returns.
			defer dbContext.AccessLock.RUnlock()

			dbState := atomic.LoadUint32(&dbContext.State)

			// If dbState == db.DBOnline, continue flow and invoke the handler method.
			if dbState == db.DBOffline {
				// DB is offline; only handlers with runOffline true can run in this state.
				return base.HTTPErrorf(http.StatusServiceUnavailable, "DB is currently under maintenance")
			} else if dbState != db.DBOnline {
				// DB is in a transition state; no calls will be accepted until it is in the Online or Offline state.
				return base.HTTPErrorf(http.StatusServiceUnavailable, fmt.Sprintf("DB is %v - try again later", db.RunStateString[dbState]))
			}
		}
	}

	// Authenticate, if not on admin port:
	if h.privs != adminPrivs {
		if err = h.checkAuth(dbContext); err != nil {
			h.logRequestLine()
			return err
		}
	}

	h.logRequestLine()

	// Now set the request's Database (i.e. context + user)
	if dbContext != nil {
		h.db, err = db.GetDatabase(dbContext, h.user)
		if err != nil {
			return err
		}
	}

	return method(h) // Call the actual handler code
}
// Adds a database to the ServerContext. Attempts a read after it gets the write
// lock to see if it's already been added by another process. If so, returns either the
// existing DatabaseContext or an error based on the useExisting flag.
func (sc *ServerContext) _getOrAddDatabaseFromConfig(config *DbConfig, useExisting bool) (*db.DatabaseContext, error) {

	server := "http://localhost:8091"
	pool := "default"
	bucketName := config.Name

	if config.Server != nil {
		server = *config.Server
	}
	if config.Pool != nil {
		pool = *config.Pool
	}
	if config.Bucket != nil {
		bucketName = *config.Bucket
	}
	dbName := config.Name
	if dbName == "" {
		dbName = bucketName
	}

	if sc.databases_[dbName] != nil {
		if useExisting {
			return sc.databases_[dbName], nil
		} else {
			return nil, base.HTTPErrorf(http.StatusPreconditionFailed, // what CouchDB returns
				"Duplicate database name %q", dbName)
		}
	}

	base.Logf("Opening db /%s as bucket %q, pool %q, server <%s>",
		dbName, bucketName, pool, server)

	if err := db.ValidateDatabaseName(dbName); err != nil {
		return nil, err
	}

	var importDocs, autoImport bool
	switch config.ImportDocs {
	case nil, false:
	case true:
		importDocs = true
	case "continuous":
		importDocs = true
		autoImport = true
	default:
		return nil, fmt.Errorf("Unrecognized value for ImportDocs: %#v", config.ImportDocs)
	}

	feedType := strings.ToLower(config.FeedType)

	// Connect to the bucket and add the database:
	spec := base.BucketSpec{
		Server:     server,
		PoolName:   pool,
		BucketName: bucketName,
		FeedType:   feedType,
	}

	// If we are using DCPSHARD feed type, set CbgtContext on bucket spec
	if feedType == strings.ToLower(base.DcpShardFeedType) {
		spec.CbgtContext = sc.CbgtContext
	}

	if config.Username != "" {
		spec.Auth = config
	}

	// Set cache properties, if present
	cacheOptions := db.CacheOptions{}
	if config.CacheConfig != nil {
		if config.CacheConfig.CachePendingSeqMaxNum != nil && *config.CacheConfig.CachePendingSeqMaxNum > 0 {
			cacheOptions.CachePendingSeqMaxNum = *config.CacheConfig.CachePendingSeqMaxNum
		}
		if config.CacheConfig.CachePendingSeqMaxWait != nil && *config.CacheConfig.CachePendingSeqMaxWait > 0 {
			cacheOptions.CachePendingSeqMaxWait = time.Duration(*config.CacheConfig.CachePendingSeqMaxWait) * time.Millisecond
		}
		if config.CacheConfig.CacheSkippedSeqMaxWait != nil && *config.CacheConfig.CacheSkippedSeqMaxWait > 0 {
			cacheOptions.CacheSkippedSeqMaxWait = time.Duration(*config.CacheConfig.CacheSkippedSeqMaxWait) * time.Millisecond
		}
		// set EnableStarChannelLog directly here (instead of via NewDatabaseContext), so that
		// it's set when we create the channels view in ConnectToBucket
		if config.CacheConfig.EnableStarChannel != nil {
			db.EnableStarChannelLog = *config.CacheConfig.EnableStarChannel
		}

		if config.CacheConfig.ChannelCacheMaxLength != nil && *config.CacheConfig.ChannelCacheMaxLength > 0 {
			cacheOptions.ChannelCacheMaxLength = *config.CacheConfig.ChannelCacheMaxLength
		}
		if config.CacheConfig.ChannelCacheMinLength != nil && *config.CacheConfig.ChannelCacheMinLength > 0 {
			cacheOptions.ChannelCacheMinLength = *config.CacheConfig.ChannelCacheMinLength
		}
		if config.CacheConfig.ChannelCacheAge != nil && *config.CacheConfig.ChannelCacheAge > 0 {
			cacheOptions.ChannelCacheAge = time.Duration(*config.CacheConfig.ChannelCacheAge) * time.Second
		}
	}

	bucket, err := db.ConnectToBucket(spec, func(bucket string, err error) {
		base.Warn("Lost TAP feed for bucket %s, with error: %v", bucket, err)
		if dc := sc.databases_[dbName]; dc != nil {
			dc.TakeDbOffline("Lost TAP feed")
		}
	})
	if err != nil {
		return nil, err
	}

	// Channel index definition, if present
	channelIndexOptions := &db.ChangeIndexOptions{} // TODO: this is confusing! why is it called both a "change index" and a "channel index"?
	sequenceHashOptions := &db.SequenceHashOptions{}
	if config.ChannelIndex != nil {
		indexServer := "http://localhost:8091"
		indexPool := "default"
		indexBucketName := ""

		if config.ChannelIndex.Server != nil {
			indexServer = *config.ChannelIndex.Server
		}
		if config.ChannelIndex.Pool != nil {
			indexPool = *config.ChannelIndex.Pool
		}
		if config.ChannelIndex.Bucket != nil {
			indexBucketName = *config.ChannelIndex.Bucket
		}

		indexSpec := base.BucketSpec{
			Server:          indexServer,
			PoolName:        indexPool,
			BucketName:      indexBucketName,
			CouchbaseDriver: base.GoCB,
		}
		if config.ChannelIndex.Username != "" {
			indexSpec.Auth = config.ChannelIndex
		}

		if config.ChannelIndex.NumShards != 0 {
			channelIndexOptions.NumShards = config.ChannelIndex.NumShards
		} else {
			channelIndexOptions.NumShards = 64
		}

		channelIndexOptions.ValidateOrPanic()

		channelIndexOptions.Spec = indexSpec
		channelIndexOptions.Writer = config.ChannelIndex.IndexWriter

		// TODO: separate config of hash bucket
		sequenceHashOptions.Bucket, err = base.GetBucket(indexSpec, nil)
		if err != nil {
			base.Logf("Error opening sequence hash bucket %q, pool %q, server <%s>",
				indexBucketName, indexPool, indexServer)
			// TODO: revert to local index?
			return nil, err
		}
		sequenceHashOptions.Size = 32
	} else {
		channelIndexOptions = nil
	}

	var revCacheSize uint32
	if config.RevCacheSize != nil && *config.RevCacheSize > 0 {
		revCacheSize = *config.RevCacheSize
	} else {
		revCacheSize = db.KDefaultRevisionCacheCapacity
	}

	contextOptions := db.DatabaseContextOptions{
		CacheOptions:          &cacheOptions,
		IndexOptions:          channelIndexOptions,
		SequenceHashOptions:   sequenceHashOptions,
		RevisionCacheCapacity: revCacheSize,
		AdminInterface:        sc.config.AdminInterface,
	}

	dbcontext, err := db.NewDatabaseContext(dbName, bucket, autoImport, contextOptions)
	if err != nil {
		return nil, err
	}
	dbcontext.BucketSpec = spec

	syncFn := ""
	if config.Sync != nil {
		syncFn = *config.Sync
	}
	if err := sc.applySyncFunction(dbcontext, syncFn); err != nil {
		return nil, err
	}

	if importDocs {
		db, _ := db.GetDatabase(dbcontext, nil)
		if _, err := db.UpdateAllDocChannels(false, true); err != nil {
			return nil, err
		}
	}

	if config.RevsLimit != nil && *config.RevsLimit > 0 {
		dbcontext.RevsLimit = *config.RevsLimit
	}

	dbcontext.AllowEmptyPassword = config.AllowEmptyPassword

	if dbcontext.ChannelMapper == nil {
		base.Logf("Using default sync function 'channel(doc.channels)' for database %q", dbName)
	}

	// Create default users & roles:
	if err := sc.installPrincipals(dbcontext, config.Roles, "role"); err != nil {
		return nil, err
	} else if err := sc.installPrincipals(dbcontext, config.Users, "user"); err != nil {
		return nil, err
	}

	emitAccessRelatedWarnings(config, dbcontext)

	// Install bucket-shadower if any:
	if shadow := config.Shadow; shadow != nil {
		if err := sc.startShadowing(dbcontext, shadow); err != nil {
			base.Warn("Database %q: unable to connect to external bucket for shadowing: %v",
				dbName, err)
		}
	}

	// Initialize event handlers
	if err := sc.initEventHandlers(dbcontext, config); err != nil {
		return nil, err
	}

	dbcontext.ExitChanges = make(chan struct{})

	// Register it so HTTP handlers can find it:
	sc.databases_[dbcontext.Name] = dbcontext

	// Save the config
	sc.config.Databases[config.Name] = config

	if config.StartOffline {
		atomic.StoreUint32(&dbcontext.State, db.DBOffline)
		if dbcontext.EventMgr.HasHandlerForEvent(db.DBStateChange) {
			dbcontext.EventMgr.RaiseDBStateChangeEvent(dbName, "offline", "DB loaded from config", *sc.config.AdminInterface)
		}
	} else {
		atomic.StoreUint32(&dbcontext.State, db.DBOnline)
		if dbcontext.EventMgr.HasHandlerForEvent(db.DBStateChange) {
			dbcontext.EventMgr.RaiseDBStateChangeEvent(dbName, "online", "DB loaded from config", *sc.config.AdminInterface)
		}
	}

	return dbcontext, nil
}
// Adds a database to the ServerContext. Attempts a read after it gets the write
// lock to see if it's already been added by another process. If so, returns either the
// existing DatabaseContext or an error based on the useExisting flag.
func (sc *ServerContext) getOrAddDatabaseFromConfig(config *DbConfig, useExisting bool) (*db.DatabaseContext, error) {
	// Obtain write lock during add database, to avoid race condition when creating based on ConfigServer
	sc.lock.Lock()
	defer sc.lock.Unlock()

	server := "http://localhost:8091"
	pool := "default"
	bucketName := config.Name

	if config.Server != nil {
		server = *config.Server
	}
	if config.Pool != nil {
		pool = *config.Pool
	}
	if config.Bucket != nil {
		bucketName = *config.Bucket
	}
	dbName := config.Name
	if dbName == "" {
		dbName = bucketName
	}

	if sc.databases_[dbName] != nil {
		if useExisting {
			return sc.databases_[dbName], nil
		} else {
			return nil, base.HTTPErrorf(http.StatusPreconditionFailed, // what CouchDB returns
				"Duplicate database name %q", dbName)
		}
	}

	base.Logf("Opening db /%s as bucket %q, pool %q, server <%s>",
		dbName, bucketName, pool, server)

	if err := db.ValidateDatabaseName(dbName); err != nil {
		return nil, err
	}

	var importDocs, autoImport bool
	switch config.ImportDocs {
	case nil, false:
	case true:
		importDocs = true
	case "continuous":
		importDocs = true
		autoImport = true
	default:
		return nil, fmt.Errorf("Unrecognized value for ImportDocs: %#v", config.ImportDocs)
	}

	feedType := strings.ToLower(config.FeedType)

	// Connect to the bucket and add the database:
	spec := base.BucketSpec{
		Server:     server,
		PoolName:   pool,
		BucketName: bucketName,
		FeedType:   feedType,
	}
	if config.Username != "" {
		spec.Auth = config
	}

	// Set cache properties, if present
	cacheOptions := db.CacheOptions{}
	if config.CacheConfig != nil {
		if config.CacheConfig.CachePendingSeqMaxNum != nil && *config.CacheConfig.CachePendingSeqMaxNum > 0 {
			cacheOptions.CachePendingSeqMaxNum = *config.CacheConfig.CachePendingSeqMaxNum
		}
		if config.CacheConfig.CachePendingSeqMaxWait != nil && *config.CacheConfig.CachePendingSeqMaxWait > 0 {
			cacheOptions.CachePendingSeqMaxWait = time.Duration(*config.CacheConfig.CachePendingSeqMaxWait) * time.Millisecond
		}
		if config.CacheConfig.CacheSkippedSeqMaxWait != nil && *config.CacheConfig.CacheSkippedSeqMaxWait > 0 {
			cacheOptions.CacheSkippedSeqMaxWait = time.Duration(*config.CacheConfig.CacheSkippedSeqMaxWait) * time.Millisecond
		}
		// set EnableStarChannelLog directly here (instead of via NewDatabaseContext), so that
		// it's set when we create the channels view in ConnectToBucket
		if config.CacheConfig.EnableStarChannel != nil {
			db.EnableStarChannelLog = *config.CacheConfig.EnableStarChannel
		}
	}

	bucket, err := db.ConnectToBucket(spec)
	if err != nil {
		return nil, err
	}

	dbcontext, err := db.NewDatabaseContext(dbName, bucket, autoImport, cacheOptions)
	if err != nil {
		return nil, err
	}

	syncFn := ""
	if config.Sync != nil {
		syncFn = *config.Sync
	}
	if err := sc.applySyncFunction(dbcontext, syncFn); err != nil {
		return nil, err
	}

	if importDocs {
		db, _ := db.GetDatabase(dbcontext, nil)
		if _, err := db.UpdateAllDocChannels(false, true); err != nil {
			return nil, err
		}
	}

	if config.RevsLimit != nil && *config.RevsLimit > 0 {
		dbcontext.RevsLimit = *config.RevsLimit
	}

	dbcontext.AllowEmptyPassword = config.AllowEmptyPassword

	if dbcontext.ChannelMapper == nil {
		base.Logf("Using default sync function 'channel(doc.channels)' for database %q", dbName)
	}

	// Create default users & roles:
	if err := sc.installPrincipals(dbcontext, config.Roles, "role"); err != nil {
		return nil, err
	} else if err := sc.installPrincipals(dbcontext, config.Users, "user"); err != nil {
		return nil, err
	}

	// Install bucket-shadower if any:
	if shadow := config.Shadow; shadow != nil {
		if err := sc.startShadowing(dbcontext, shadow); err != nil {
			base.Warn("Database %q: unable to connect to external bucket for shadowing: %v",
				dbName, err)
		}
	}

	// Initialize event handlers, if any:
	if config.EventHandlers != nil {

		// Process document commit event handlers
		if err = sc.processEventHandlersForEvent(config.EventHandlers.DocumentChanged, db.DocumentChange, dbcontext); err != nil {
			return nil, err
		}

		// WaitForProcess uses string, to support both omitempty and zero values
		customWaitTime := int64(-1)
		if config.EventHandlers.WaitForProcess != "" {
			customWaitTime, err = strconv.ParseInt(config.EventHandlers.WaitForProcess, 10, 0)
			if err != nil {
				customWaitTime = -1
				base.Warn("Error parsing wait_for_process from config, using default %s", err)
			}
		}
		dbcontext.EventMgr.Start(config.EventHandlers.MaxEventProc, int(customWaitTime))
	}

	// Register it so HTTP handlers can find it:
	sc.databases_[dbcontext.Name] = dbcontext

	// Save the config
	sc.config.Databases[config.Name] = config

	return dbcontext, nil
}
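// For illustration only: a hedged sketch of registering a database from a DbConfig built in
// code. The field names (Name, Server, Bucket, Sync) mirror the accesses made by
// getOrAddDatabaseFromConfig above; this helper and the exact struct layout are assumptions,
// not part of the excerpted source.
func addExampleDatabase(sc *ServerContext) (*db.DatabaseContext, error) {
	server := "http://localhost:8091" // default used when config.Server is nil
	bucket := "example-bucket"        // defaults to the database name when nil
	syncFn := `function(doc) { channel(doc.channels); }` // nil falls back to the default sync function
	config := &DbConfig{
		Name:   "example-db",
		Server: &server,
		Bucket: &bucket,
		Sync:   &syncFn,
	}
	// useExisting=true returns the already-registered DatabaseContext instead of a
	// 412 Precondition Failed "Duplicate database name" error.
	return sc.getOrAddDatabaseFromConfig(config, true)
}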