func (k *kvChangeIndexWriter) indexPending() error {

	// Read entries from the pending list into array
	entries := k.readFromPending()

	// Initialize partition map (lazy init)
	indexPartitions, err := k.indexPartitionsCallback()
	if err != nil {
		base.LogFatal("Unable to load index partition map - cannot write incoming entry to index")
	}

	// Generic channelStorage for log entry storage (if needed)
	channelStorage := NewChannelStorage(k.indexWriteBucket, "", indexPartitions)

	indexRetryCount := 0
	maxRetries := 15

	// Continual processing of arriving entries from the feed.
	var sleeper base.RetrySleeper
	for {
		latestWriteBatch.Set(int64(len(entries)))
		err := k.indexEntries(entries, indexPartitions.VbMap, channelStorage)
		if err != nil {
			if indexRetryCount == 0 {
				sleeper = base.CreateDoublingSleeperFunc(maxRetries, 5)
			}
			indexRetryCount++
			shouldContinue, sleepMs := sleeper(indexRetryCount)
			if !shouldContinue {
				return fmt.Errorf("Unable to successfully write to index after %d attempts", maxRetries)
			}
			<-time.After(time.Millisecond * time.Duration(sleepMs))
		} else {
			// Successful indexing, read next entries
			indexRetryCount = 0
			entries = k.readFromPending()
		}
	}
}
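// The retry logic above leans on the base.RetrySleeper contract visible in
// indexPending: the sleeper takes the current attempt number and reports
// whether to keep retrying plus how many milliseconds to sleep. Below is a
// minimal sketch of a doubling sleeper with that shape, assuming only what the
// call site shows; newDoublingSleeper is a hypothetical stand-in, not the real
// base.CreateDoublingSleeperFunc implementation:
func newDoublingSleeper(maxRetries, initialSleepMs int) func(attempt int) (shouldContinue bool, sleepMs int) {
	return func(attempt int) (bool, int) {
		if attempt > maxRetries {
			return false, 0 // give up after maxRetries attempts
		}
		// initialSleepMs on attempt 1, then 2x, 4x, ... on each later attempt
		return true, initialSleepMs << uint(attempt-1)
	}
}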
// Helper function to open a Couchbase connection and return a specific bucket.
func ConnectToBucket(spec base.BucketSpec, callback func(bucket string, err error)) (bucket base.Bucket, err error) {

	// Start a retry loop to connect to the bucket, backing off double the delay each time
	worker := func() (shouldRetry bool, err error, value interface{}) {
		bucket, err = base.GetBucket(spec, callback)
		return err != nil, err, bucket
	}

	sleeper := base.CreateDoublingSleeperFunc(
		13, // MaxNumRetries, approx 40 seconds total retry duration
		5,  // InitialRetrySleepTimeMS
	)

	description := fmt.Sprintf("Attempt to connect to bucket: %v", spec.BucketName)
	err, ibucket := base.RetryLoop(description, worker, sleeper)

	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway,
			"Unable to connect to Couchbase Server (connection refused). Please ensure it is running and reachable at the configured host and port. Detailed error: %s", err)
	} else {
		// Assign (not redeclare) the named return value, so the connected
		// bucket is actually returned to the caller.
		bucket, _ = ibucket.(base.Bucket)
		err = installViews(bucket)
	}
	return
}
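// A usage sketch for ConnectToBucket, showing the shape of a call site. The
// BucketSpec field names (Server, BucketName) and the purpose of the callback
// (invoked if an established connection is later lost) are assumptions for
// illustration, not verified against the base package:
func exampleConnect() (base.Bucket, error) {
	spec := base.BucketSpec{
		Server:     "http://localhost:8091", // assumed field name
		BucketName: "sync_gateway",          // assumed field name
	}
	onDisconnect := func(bucket string, err error) {
		base.Warn("Connection to bucket %s lost: %v", bucket, err)
	}
	return ConnectToBucket(spec, onDisconnect)
}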
func installViews(bucket base.Bucket) error {
	// View for finding every Couchbase doc (used when deleting a database)
	// Key is docid; value is null
	allbits_map := `function (doc, meta) {
                      emit(meta.id, null); }`

	// View for _all_docs
	// Key is docid; value is [revid, sequence]
	alldocs_map := `function (doc, meta) {
                     var sync = doc._sync;
                     if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                       return;
                     if ((sync.flags & 1) || sync.deleted)
                       return;
                     var channels = sync.channels;
                     var channelNames = [];
                     for (ch in channels) {
                       if (channels[ch] == null)
                         channelNames.push(ch);
                     }
                     emit(meta.id, {r:sync.rev, s:sync.sequence, c:channelNames}); }`

	// View for importing unknown docs
	// Key is [existing?, docid] where 'existing?' is false for unknown docs
	import_map := `function (doc, meta) {
                     if(meta.id.substring(0,6) != "_sync:") {
                       var exists = (doc["_sync"] !== undefined);
                       emit([exists, meta.id], null);
                     }
                   }`

	// View for compaction -- finds all revision docs
	// Key and value are ignored.
	oldrevs_map := `function (doc, meta) {
                     var sync = doc._sync;
                     if (meta.id.substring(0,10) == "_sync:rev:")
                       emit("", null); }`

	// Sessions view - used for session delete
	// Key is username; value is docid
	sessions_map := `function (doc, meta) {
                      var prefix = meta.id.substring(0,%d);
                      if (prefix == %q)
                        emit(doc.username, meta.id); }`
	sessions_map = fmt.Sprintf(sessions_map, len(auth.SessionKeyPrefix), auth.SessionKeyPrefix)

	// All-principals view
	// Key is name; value is true for user, false for role
	principals_map := `function (doc, meta) {
                        var prefix = meta.id.substring(0,11);
                        var isUser = (prefix == %q);
                        if (isUser || prefix == %q)
                          emit(meta.id.substring(%d), isUser); }`
	principals_map = fmt.Sprintf(principals_map, auth.UserKeyPrefix, auth.RoleKeyPrefix,
		len(auth.UserKeyPrefix))

	// By-channels view.
	// Key is [channelname, sequence]; value is [docid, revid, flag?]
	// where flag is true for doc deletion, false for removed from channel, missing otherwise
	channels_map := `function (doc, meta) {
                      var sync = doc._sync;
                      if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                        return;
                      var sequence = sync.sequence;
                      if (sequence === undefined)
                        return;
                      var value = {rev:sync.rev};
                      if (sync.flags) {
                        value.flags = sync.flags;
                      } else if (sync.deleted) {
                        value.flags = %d; // channels.Deleted
                      }
                      if (%v) // EnableStarChannelLog
                        emit(["*", sequence], value);
                      var channels = sync.channels;
                      if (channels) {
                        for (var name in channels) {
                          removed = channels[name];
                          if (!removed)
                            emit([name, sequence], value);
                          else {
                            var flags = removed.del ? %d : %d; // channels.Removed/Deleted
                            emit([name, removed.seq], {rev:removed.rev, flags: flags});
                          }
                        }
                      }
                    }`
	channels_map = fmt.Sprintf(channels_map, channels.Deleted, EnableStarChannelLog,
		channels.Removed|channels.Deleted, channels.Removed)

	// Channel access view, used by ComputeChannelsForPrincipal()
	// Key is username; value is dictionary channelName->firstSequence (compatible with TimedSet)
	access_map := `function (doc, meta) {
                    var sync = doc._sync;
                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                      return;
                    var access = sync.access;
                    if (access) {
                      for (var name in access) {
                        emit(name, access[name]);
                      }
                    }
                  }`

	// Vbucket sequence version of channel access view, used by ComputeChannelsForPrincipal()
	// Key is username; value is dictionary channelName->firstSequence (compatible with TimedSet)
	access_vbSeq_map := `function (doc, meta) {
                          var sync = doc._sync;
                          if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                            return;
                          var access = sync.access;
                          if (access) {
                            for (var name in access) {
                              // Build a timed set based on vb and vbseq of this revision
                              var value = {};
                              for (var channel in access[name]) {
                                var timedSetWithVbucket = {};
                                timedSetWithVbucket["vb"] = parseInt(meta.vb, 10);
                                timedSetWithVbucket["seq"] = parseInt(meta.seq, 10);
                                value[channel] = timedSetWithVbucket;
                              }
                              emit(name, value)
                            }
                          }
                        }`

	// Role access view, used by ComputeRolesForUser()
	// Key is username; value is array of role names
	roleAccess_map := `function (doc, meta) {
                        var sync = doc._sync;
                        if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                          return;
                        var access = sync.role_access;
                        if (access) {
                          for (var name in access) {
                            emit(name, access[name]);
                          }
                        }
                      }`

	designDocMap := map[string]sgbucket.DesignDoc{}

	designDocMap[DesignDocSyncGateway] = sgbucket.DesignDoc{
		Views: sgbucket.ViewMap{
			ViewPrincipals:  sgbucket.ViewDef{Map: principals_map},
			ViewChannels:    sgbucket.ViewDef{Map: channels_map},
			ViewAccess:      sgbucket.ViewDef{Map: access_map},
			ViewAccessVbSeq: sgbucket.ViewDef{Map: access_vbSeq_map},
			ViewRoleAccess:  sgbucket.ViewDef{Map: roleAccess_map},
		},
	}

	designDocMap[DesignDocSyncHousekeeping] = sgbucket.DesignDoc{
		Views: sgbucket.ViewMap{
			ViewAllBits:  sgbucket.ViewDef{Map: allbits_map},
			ViewAllDocs:  sgbucket.ViewDef{Map: alldocs_map, Reduce: "_count"},
			ViewImport:   sgbucket.ViewDef{Map: import_map, Reduce: "_count"},
			ViewOldRevs:  sgbucket.ViewDef{Map: oldrevs_map, Reduce: "_count"},
			ViewSessions: sgbucket.ViewDef{Map: sessions_map},
		},
	}

	sleeper := base.CreateDoublingSleeperFunc(
		11, // MaxNumRetries, approx 10 seconds total retry duration
		5,  // InitialRetrySleepTimeMS
	)

	// Add all design docs from the map into the bucket
	for designDocName, designDoc := range designDocMap {

		// Start a retry loop to put the design document, backing off double the delay each time
		worker := func() (shouldRetry bool, err error, value interface{}) {
			err = bucket.PutDDoc(designDocName, designDoc)
			if err != nil {
				base.Warn("Error installing Couchbase design doc: %v", err)
			}
			return err != nil, err, nil
		}

		description := fmt.Sprintf("Attempt to install Couchbase design doc: %v", designDocName)
		err, _ := base.RetryLoop(description, worker, sleeper)

		if err != nil {
			return err
		}
	}

	return nil
}
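// A hedged sketch of querying the channels view installed above. Its keys are
// [channelName, sequence], so a startkey/endkey pair walks one channel's
// entries in sequence order. The bucket.View signature and the ViewResult row
// fields follow the common sgbucket convention, but treat them as assumptions
// here; exampleChannelsQuery is a hypothetical helper, not part of the codebase:
func exampleChannelsQuery(bucket base.Bucket, channelName string) error {
	params := map[string]interface{}{
		"startkey": []interface{}{channelName, 0},
		"endkey":   []interface{}{channelName, map[string]interface{}{}}, // {} collates after all numbers
		"stale":    false,
	}
	result, err := bucket.View(DesignDocSyncGateway, ViewChannels, params)
	if err != nil {
		return err
	}
	for _, row := range result.Rows {
		fmt.Printf("doc %s is in channel %s\n", row.ID, channelName)
	}
	return nil
}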