// Helper function to open a Couchbase connection and return a specific bucket.
func ConnectToBucket(spec base.BucketSpec) (bucket base.Bucket, err error) {
	bucket, err = base.GetBucket(spec)
	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway,
			"Unable to connect to server: %s", err)
	} else {
		err = installViews(bucket)
	}
	return
}
// Helper function to open a Couchbase connection and return a specific bucket.
func ConnectToBucket(spec base.BucketSpec, callback func(bucket string, err error)) (bucket base.Bucket, err error) {
	bucket, err = base.GetBucket(spec, callback)
	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway,
			"Unable to connect to Couchbase Server (connection refused). Please ensure it is running and reachable at the configured host and port. Detailed error: %s", err)
	} else {
		err = installViews(bucket)
	}
	return
}
func (k *kvChangeIndexWriter) Init(context *DatabaseContext, options *CacheOptions, indexOptions *ChangeIndexOptions, indexPartitionsCallback IndexPartitionsFunc) (err error) {

	k.context = context
	k.pending = make(chan *LogEntry, maxCacheUpdate)

	k.indexPartitionsCallback = indexPartitionsCallback

	// start process to work pending sequences
	go func() {
		err := k.indexPending()
		if err != nil {
			base.LogFatal("Indexer failed with unrecoverable error:%v", err)
		}
	}()

	k.channelIndexWriters = make(map[string]*kvChannelIndex)
	k.indexWriteBucket, err = base.GetBucket(indexOptions.Spec, nil)
	if err != nil {
		base.Logf("Error opening index bucket %q, pool %q, server <%s>",
			indexOptions.Spec.BucketName, indexOptions.Spec.PoolName, indexOptions.Spec.Server)
		// TODO: revert to local index?
		return err
	}

	cbBucket, ok := k.indexWriteBucket.(base.CouchbaseBucket)
	var maxVbNo uint16
	if ok {
		maxVbNo, _ = cbBucket.GetMaxVbno()
	} else {
		// walrus, for unit testing
		maxVbNo = 1024
	}

	// Set of worker goroutines used to process incoming entries
	k.unmarshalWorkQueue = make(chan *unmarshalEntry, 500)

	// Start fixed set of goroutines to work the unmarshal work queue
	for i := 0; i < maxUnmarshalProcesses; i++ {
		go func() {
			for {
				select {
				case unmarshalEntry := <-k.unmarshalWorkQueue:
					unmarshalEntry.process()
				case <-k.terminator:
					return
				}
			}
		}()
	}

	// Initialize unmarshalWorkers
	k.unmarshalWorkers = make([]*unmarshalWorker, maxVbNo)

	return nil
}
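The unmarshal workers above follow a standard Go worker-pool shape: a fixed number of goroutines drain a shared work channel until a terminator channel is closed. Below is a stripped-down, self-contained sketch of that same pattern; the names, the pool size of 4, and the fmt.Println stand-in for unmarshalEntry.process() are illustrative, not sync_gateway identifiers.

package main

import (
	"fmt"
	"sync"
)

func main() {
	work := make(chan int, 500)       // shared work queue, analogous to unmarshalWorkQueue
	terminator := make(chan struct{}) // closed to shut all workers down
	var wg sync.WaitGroup

	// Fixed pool of workers, analogous to the maxUnmarshalProcesses loop above.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case item := <-work:
					fmt.Println("processed", item) // stand-in for unmarshalEntry.process()
				case <-terminator:
					return
				}
			}
		}()
	}

	for i := 0; i < 10; i++ {
		work <- i
	}
	close(terminator) // signal shutdown; items still queued may go unprocessed, as in the original
	wg.Wait()
}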
func (k *kvChangeIndexReader) Init(options *CacheOptions, indexOptions *ChangeIndexOptions, onChange func(base.Set), indexPartitionsCallback IndexPartitionsFunc) (err error) {

	k.channelIndexReaders = make(map[string]*kvChannelIndex)
	k.indexPartitionsCallback = indexPartitionsCallback

	// Initialize notification Callback
	k.onChange = onChange

	k.indexReadBucket, err = base.GetBucket(indexOptions.Spec, nil)
	if err != nil {
		base.Logf("Error opening index bucket %q, pool %q, server <%s>",
			indexOptions.Spec.BucketName, indexOptions.Spec.PoolName, indexOptions.Spec.Server)
		// TODO: revert to local index?
		return err
	}

	cbBucket, ok := k.indexReadBucket.(base.CouchbaseBucket)
	if ok {
		k.maxVbNo, _ = cbBucket.GetMaxVbno()
	} else {
		// walrus, for unit testing
		k.maxVbNo = 1024
	}

	// Start background task to poll for changes
	k.terminator = make(chan struct{})
	k.pollingActive = make(chan struct{})
	go func(k *kvChangeIndexReader) {
		defer close(k.pollingActive)
		pollStart := time.Now()
		for {
			timeSinceLastPoll := time.Since(pollStart)
			waitTime := (kPollFrequency * time.Millisecond) - timeSinceLastPoll
			if waitTime < 0 {
				waitTime = 0 * time.Millisecond
			}
			select {
			case <-k.terminator:
				return
			case <-time.After(waitTime):
				// TODO: Doesn't trigger the reader removal processing (in pollReaders) during long
				//       periods without changes to stableSequence. In that scenario we'll continue
				//       stable sequence polling each poll interval, even if we *actually* don't have
				//       any active readers.
				pollStart = time.Now()
				if k.hasActiveReaders() && k.stableSequenceChanged() {
					k.pollReaders()
				}
			}
		}
	}(k)

	return nil
}
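The wait calculation in the polling goroutine keeps the loop on a roughly fixed cadence: each pass sleeps only for whatever remains of the poll interval after the work done since pollStart. A minimal standalone sketch of just that interval math, using a made-up 500 ms value in place of kPollFrequency:

package main

import (
	"fmt"
	"time"
)

func main() {
	const pollFrequency = 500 // milliseconds; illustrative stand-in for kPollFrequency

	pollStart := time.Now()
	time.Sleep(120 * time.Millisecond) // pretend the previous poll body took 120 ms of work

	timeSinceLastPoll := time.Since(pollStart)
	waitTime := (pollFrequency * time.Millisecond) - timeSinceLastPoll
	if waitTime < 0 {
		waitTime = 0
	}
	// Sleeps ~380 ms here, so successive polls start ~500 ms apart regardless of work time.
	fmt.Printf("sleeping %v to stay on a ~%dms cadence\n", waitTime.Round(time.Millisecond), pollFrequency)
}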
func init() {
	var err error
	gTestBucket, err = base.GetBucket(base.BucketSpec{
		Server:     kTestURL,
		BucketName: "sync_gateway_tests"}, nil)
	if err != nil {
		log.Fatalf("Couldn't connect to bucket: %v", err)
	}
	if err != nil {
		log.Fatalf("Couldn't install design doc: %v", err)
	}
}
func (sc *ServerContext) startShadowing(dbcontext *db.DatabaseContext, shadow *ShadowConfig) error {

	base.Warn("Bucket Shadowing feature comes with a number of limitations and caveats. See https://github.com/couchbase/sync_gateway/issues/1363 for more details.")

	var pattern *regexp.Regexp
	if shadow.Doc_id_regex != nil {
		var err error
		pattern, err = regexp.Compile(*shadow.Doc_id_regex)
		if err != nil {
			base.Warn("Invalid shadow doc_id_regex: %s", *shadow.Doc_id_regex)
			return err
		}
	}

	spec := base.BucketSpec{
		Server:     *shadow.Server,
		PoolName:   "default",
		BucketName: *shadow.Bucket,
		FeedType:   shadow.FeedType,
	}
	if shadow.Pool != nil {
		spec.PoolName = *shadow.Pool
	}
	if shadow.Username != "" {
		spec.Auth = shadow
	}

	bucket, err := base.GetBucket(spec, nil)
	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway, "Unable to connect to shadow bucket: %s", err)
		return err
	}
	shadower, err := db.NewShadower(dbcontext, bucket, pattern)
	if err != nil {
		bucket.Close()
		return err
	}
	dbcontext.Shadower = shadower

	// Remove credentials from server URL before logging
	url, err := couchbase.ParseURL(spec.Server)
	if err == nil {
		base.Logf("Database %q shadowing remote bucket %q, pool %q, server <%s:%s/%s>",
			dbcontext.Name, spec.BucketName, spec.PoolName, url.Scheme, url.Host, url.Path)
	}
	return nil
}
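The doc_id_regex is an ordinary Go regular expression, compiled here and handed to the shadower, which uses it to restrict shadowing to matching document IDs. A small illustrative check of how such a pattern behaves; the pattern and document IDs below are invented for the example:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical filter: only shadow docs whose IDs start with "user::".
	pattern, err := regexp.Compile(`^user::`)
	if err != nil {
		panic(err)
	}
	for _, docID := range []string{"user::alice", "session::123"} {
		fmt.Printf("%s shadowed: %v\n", docID, pattern.MatchString(docID))
	}
}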
// Helper function to open a Couchbase connection and return a specific bucket.
func ConnectToBucket(spec base.BucketSpec, callback func(bucket string, err error)) (bucket base.Bucket, err error) {

	// start a retry loop to connect to the bucket, backing off double the delay each time
	worker := func() (shouldRetry bool, err error, value interface{}) {
		bucket, err = base.GetBucket(spec, callback)
		return err != nil, err, bucket
	}

	sleeper := base.CreateDoublingSleeperFunc(
		13, // MaxNumRetries, approx 40 seconds total retry duration
		5,  // InitialRetrySleepTimeMS
	)

	description := fmt.Sprintf("Attempt to connect to bucket : %v", spec.BucketName)
	err, ibucket := base.RetryLoop(description, worker, sleeper)

	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway,
			"Unable to connect to Couchbase Server (connection refused). Please ensure it is running and reachable at the configured host and port. Detailed error: %s", err)
	} else {
		bucket, _ := ibucket.(base.Bucket)
		err = installViews(bucket)
	}
	return
}
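The "approx 40 seconds" figure in the sleeper comment follows from the doubling backoff: assuming the sleeper sleeps before each of the 13 retries and doubles the delay each time, the worst-case total sleep is 5 * (2^13 - 1) = 40,955 ms. A self-contained sketch of that arithmetic, independent of the sync_gateway helpers:

package main

import "fmt"

func main() {
	// Doubling backoff: 13 retries starting at 5 ms, doubling each time.
	total, delay := 0, 5
	for i := 0; i < 13; i++ {
		total += delay
		delay *= 2
	}
	fmt.Printf("worst-case total sleep: %d ms\n", total) // 40955 ms, roughly 41 seconds
}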
// Adds a database to the ServerContext. Attempts a read after it gets the write
// lock to see if it's already been added by another process. If so, returns either the
// existing DatabaseContext or an error based on the useExisting flag.
func (sc *ServerContext) _getOrAddDatabaseFromConfig(config *DbConfig, useExisting bool) (*db.DatabaseContext, error) {

	server := "http://localhost:8091"
	pool := "default"
	bucketName := config.Name

	if config.Server != nil {
		server = *config.Server
	}
	if config.Pool != nil {
		pool = *config.Pool
	}
	if config.Bucket != nil {
		bucketName = *config.Bucket
	}
	dbName := config.Name
	if dbName == "" {
		dbName = bucketName
	}

	if sc.databases_[dbName] != nil {
		if useExisting {
			return sc.databases_[dbName], nil
		} else {
			return nil, base.HTTPErrorf(http.StatusPreconditionFailed, // what CouchDB returns
				"Duplicate database name %q", dbName)
		}
	}

	base.Logf("Opening db /%s as bucket %q, pool %q, server <%s>",
		dbName, bucketName, pool, server)

	if err := db.ValidateDatabaseName(dbName); err != nil {
		return nil, err
	}

	var importDocs, autoImport bool
	switch config.ImportDocs {
	case nil, false:
	case true:
		importDocs = true
	case "continuous":
		importDocs = true
		autoImport = true
	default:
		return nil, fmt.Errorf("Unrecognized value for ImportDocs: %#v", config.ImportDocs)
	}

	feedType := strings.ToLower(config.FeedType)

	// Connect to the bucket and add the database:
	spec := base.BucketSpec{
		Server:     server,
		PoolName:   pool,
		BucketName: bucketName,
		FeedType:   feedType,
	}

	// If we are using DCPSHARD feed type, set CbgtContext on bucket spec
	if feedType == strings.ToLower(base.DcpShardFeedType) {
		spec.CbgtContext = sc.CbgtContext
	}

	if config.Username != "" {
		spec.Auth = config
	}

	// Set cache properties, if present
	cacheOptions := db.CacheOptions{}
	if config.CacheConfig != nil {
		if config.CacheConfig.CachePendingSeqMaxNum != nil && *config.CacheConfig.CachePendingSeqMaxNum > 0 {
			cacheOptions.CachePendingSeqMaxNum = *config.CacheConfig.CachePendingSeqMaxNum
		}
		if config.CacheConfig.CachePendingSeqMaxWait != nil && *config.CacheConfig.CachePendingSeqMaxWait > 0 {
			cacheOptions.CachePendingSeqMaxWait = time.Duration(*config.CacheConfig.CachePendingSeqMaxWait) * time.Millisecond
		}
		if config.CacheConfig.CacheSkippedSeqMaxWait != nil && *config.CacheConfig.CacheSkippedSeqMaxWait > 0 {
			cacheOptions.CacheSkippedSeqMaxWait = time.Duration(*config.CacheConfig.CacheSkippedSeqMaxWait) * time.Millisecond
		}
		// set EnableStarChannelLog directly here (instead of via NewDatabaseContext), so that it's set
		// when we create the channels view in ConnectToBucket
		if config.CacheConfig.EnableStarChannel != nil {
			db.EnableStarChannelLog = *config.CacheConfig.EnableStarChannel
		}
		if config.CacheConfig.ChannelCacheMaxLength != nil && *config.CacheConfig.ChannelCacheMaxLength > 0 {
			cacheOptions.ChannelCacheMaxLength = *config.CacheConfig.ChannelCacheMaxLength
		}
		if config.CacheConfig.ChannelCacheMinLength != nil && *config.CacheConfig.ChannelCacheMinLength > 0 {
			cacheOptions.ChannelCacheMinLength = *config.CacheConfig.ChannelCacheMinLength
		}
		if config.CacheConfig.ChannelCacheAge != nil && *config.CacheConfig.ChannelCacheAge > 0 {
			cacheOptions.ChannelCacheAge = time.Duration(*config.CacheConfig.ChannelCacheAge) * time.Second
		}
	}

	bucket, err := db.ConnectToBucket(spec, func(bucket string, err error) {
		base.Warn("Lost TAP feed for bucket %s, with error: %v", bucket, err)
		if dc := sc.databases_[dbName]; dc != nil {
			dc.TakeDbOffline("Lost TAP feed")
		}
	})
	if err != nil {
		return nil, err
	}

	// Channel index definition, if present
	channelIndexOptions := &db.ChangeIndexOptions{} // TODO: this is confusing! why is it called both a "change index" and a "channel index"?
	sequenceHashOptions := &db.SequenceHashOptions{}
	if config.ChannelIndex != nil {
		indexServer := "http://localhost:8091"
		indexPool := "default"
		indexBucketName := ""

		if config.ChannelIndex.Server != nil {
			indexServer = *config.ChannelIndex.Server
		}
		if config.ChannelIndex.Pool != nil {
			indexPool = *config.ChannelIndex.Pool
		}
		if config.ChannelIndex.Bucket != nil {
			indexBucketName = *config.ChannelIndex.Bucket
		}
		indexSpec := base.BucketSpec{
			Server:          indexServer,
			PoolName:        indexPool,
			BucketName:      indexBucketName,
			CouchbaseDriver: base.GoCB,
		}
		if config.ChannelIndex.Username != "" {
			indexSpec.Auth = config.ChannelIndex
		}

		if config.ChannelIndex.NumShards != 0 {
			channelIndexOptions.NumShards = config.ChannelIndex.NumShards
		} else {
			channelIndexOptions.NumShards = 64
		}

		channelIndexOptions.ValidateOrPanic()

		channelIndexOptions.Spec = indexSpec
		channelIndexOptions.Writer = config.ChannelIndex.IndexWriter

		// TODO: separate config of hash bucket
		sequenceHashOptions.Bucket, err = base.GetBucket(indexSpec, nil)
		if err != nil {
			base.Logf("Error opening sequence hash bucket %q, pool %q, server <%s>",
				indexBucketName, indexPool, indexServer)
			// TODO: revert to local index?
			return nil, err
		}
		sequenceHashOptions.Size = 32
	} else {
		channelIndexOptions = nil
	}

	var revCacheSize uint32
	if config.RevCacheSize != nil && *config.RevCacheSize > 0 {
		revCacheSize = *config.RevCacheSize
	} else {
		revCacheSize = db.KDefaultRevisionCacheCapacity
	}

	contextOptions := db.DatabaseContextOptions{
		CacheOptions:          &cacheOptions,
		IndexOptions:          channelIndexOptions,
		SequenceHashOptions:   sequenceHashOptions,
		RevisionCacheCapacity: revCacheSize,
		AdminInterface:        sc.config.AdminInterface,
	}

	dbcontext, err := db.NewDatabaseContext(dbName, bucket, autoImport, contextOptions)
	if err != nil {
		return nil, err
	}
	dbcontext.BucketSpec = spec

	syncFn := ""
	if config.Sync != nil {
		syncFn = *config.Sync
	}
	if err := sc.applySyncFunction(dbcontext, syncFn); err != nil {
		return nil, err
	}

	if importDocs {
		db, _ := db.GetDatabase(dbcontext, nil)
		if _, err := db.UpdateAllDocChannels(false, true); err != nil {
			return nil, err
		}
	}

	if config.RevsLimit != nil && *config.RevsLimit > 0 {
		dbcontext.RevsLimit = *config.RevsLimit
	}

	dbcontext.AllowEmptyPassword = config.AllowEmptyPassword

	if dbcontext.ChannelMapper == nil {
		base.Logf("Using default sync function 'channel(doc.channels)' for database %q", dbName)
	}

	// Create default users & roles:
	if err := sc.installPrincipals(dbcontext, config.Roles, "role"); err != nil {
		return nil, err
	} else if err := sc.installPrincipals(dbcontext, config.Users, "user"); err != nil {
		return nil, err
	}

	emitAccessRelatedWarnings(config, dbcontext)

	// Install bucket-shadower if any:
	if shadow := config.Shadow; shadow != nil {
		if err := sc.startShadowing(dbcontext, shadow); err != nil {
			base.Warn("Database %q: unable to connect to external bucket for shadowing: %v", dbName, err)
		}
	}

	// Initialize event handlers
	if err := sc.initEventHandlers(dbcontext, config); err != nil {
		return nil, err
	}

	dbcontext.ExitChanges = make(chan struct{})

	// Register it so HTTP handlers can find it:
	sc.databases_[dbcontext.Name] = dbcontext

	// Save the config
	sc.config.Databases[config.Name] = config

	if config.StartOffline {
		atomic.StoreUint32(&dbcontext.State, db.DBOffline)
		if dbcontext.EventMgr.HasHandlerForEvent(db.DBStateChange) {
			dbcontext.EventMgr.RaiseDBStateChangeEvent(dbName, "offline", "DB loaded from config", *sc.config.AdminInterface)
		}
	} else {
		atomic.StoreUint32(&dbcontext.State, db.DBOnline)
		if dbcontext.EventMgr.HasHandlerForEvent(db.DBStateChange) {
			dbcontext.EventMgr.RaiseDBStateChangeEvent(dbName, "online", "DB loaded from config", *sc.config.AdminInterface)
		}
	}

	return dbcontext, nil
}