Code example #1
// Index partitions for testing
func SeedPartitionMap(bucket base.Bucket, numPartitions uint16) error {
	maxVbNo := uint16(1024)
	//maxVbNo := uint16(64)
	partitionDefs := make(base.PartitionStorageSet, numPartitions)
	vbPerPartition := maxVbNo / numPartitions
	for partition := uint16(0); partition < numPartitions; partition++ {
		storage := base.PartitionStorage{
			Index: partition,
			VbNos: make([]uint16, vbPerPartition),
		}
		for index := uint16(0); index < vbPerPartition; index++ {
			vb := partition*vbPerPartition + index
			storage.VbNos[index] = vb
		}
		partitionDefs[partition] = storage
	}

	// Persist to bucket
	value, err := json.Marshal(partitionDefs)
	if err != nil {
		return err
	}
	return bucket.SetRaw(base.KIndexPartitionKey, 0, value)
}
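
A minimal usage sketch for the helper above. getTestBucket is a hypothetical stand-in for however a test obtains a base.Bucket; only SeedPartitionMap itself comes from the code shown:

func seedExample() {
	bucket := getTestBucket() // hypothetical helper returning any test base.Bucket implementation

	// 1024 vbuckets split across 64 partitions (16 vbuckets each).
	if err := SeedPartitionMap(bucket, 64); err != nil {
		log.Fatalf("failed to seed partition map: %v", err)
	}
}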
Code example #2
File: shadower.go  Project: arjunblue/sync_gateway
// Creates a new Shadower.
func NewShadower(context *DatabaseContext, bucket base.Bucket, docIDPattern *regexp.Regexp) (*Shadower, error) {
	tapFeed, err := bucket.StartTapFeed(sgbucket.TapArguments{Backfill: 0})
	if err != nil {
		return nil, err
	}
	s := &Shadower{context: context, bucket: bucket, tapFeed: tapFeed, docIDPattern: docIDPattern}
	go s.readTapFeed()
	return s, nil
}
Code example #3
File: shadower.go  Project: paulharter/sync_gateway
// Creates a new Shadower.
func NewShadower(context *DatabaseContext, bucket base.Bucket, docIDPattern *regexp.Regexp) (*Shadower, error) {
	tapFeed, err := bucket.StartTapFeed(sgbucket.TapArguments{Backfill: 0, Notify: func(bucket string, err error) {
		context.TakeDbOffline("Lost shadower TAP Feed")
	}})
	if err != nil {
		return nil, err
	}
	s := &Shadower{context: context, bucket: bucket, tapFeed: tapFeed, docIDPattern: docIDPattern}
	go s.readTapFeed()
	return s, nil
}
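
Example #3 differs from #2 only in its TapArguments: a Notify callback takes the database offline if the shadower's TAP feed is lost. A usage sketch follows; the pattern, context, and bucket values are assumptions for illustration, and Stop is assumed to be the shadower's cleanup method:

func startShadowerExample(dbContext *DatabaseContext, shadowBucket base.Bucket) error {
	// Only shadow documents whose IDs match this (hypothetical) pattern.
	docIDPattern := regexp.MustCompile(`^shadow::`)

	shadower, err := NewShadower(dbContext, shadowBucket, docIDPattern)
	if err != nil {
		return err
	}
	defer shadower.Stop() // assumed cleanup method, shown for illustration only

	// ... use the database while shadowing runs in the background ...
	return nil
}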
Code example #4
func (context *DatabaseContext) GetCBGTIndexNameForBucket(bucket base.Bucket) (indexName string) {
	// Real Couchbase buckets use an index name that includes UUID.
	cbBucket, ok := bucket.(base.CouchbaseBucket)
	if ok {
		indexName = cbBucket.GetCBGTIndexName()
	} else {
		indexName = bucket.GetName()
	}
	return indexName
}
Code example #5
func appendWrite(bucket base.Bucket, key string, vb int, sequence uint64) error {
	sequenceBytes := getSequenceAsBytes(sequence)
	err := bucket.Append(key, sequenceBytes)
	// TODO: assumes any Append error means the key hasn't been created yet
	if err != nil {
		added, addErr := bucket.AddRaw(key, 0, sequenceBytes)
		if addErr != nil || !added {
			log.Printf("AddRaw also failed?! %s:%v", key, addErr)
			return addErr
		}
	}
	return nil
}
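
The Append-then-AddRaw fallback above is a common pattern for append-only keys that may not exist yet: the first writer creates the key, later writers append to it. A hypothetical call (the key layout and values are made up for illustration):

func appendWriteExample(bucket base.Bucket) {
	key := fmt.Sprintf("_idx:writes:vb%d", 512) // hypothetical key layout
	if err := appendWrite(bucket, key, 512, 42); err != nil {
		log.Printf("appendWrite failed for %s: %v", key, err)
	}
}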
Code example #6
// Starts a changeListener on a given Bucket.
func (listener *changeListener) Start(bucket base.Bucket, trackDocs bool, notify sgbucket.BucketNotifyFn) error {
	listener.bucket = bucket
	listener.TapArgs = sgbucket.TapArguments{
		Backfill: sgbucket.TapNoBackfill,
		Notify:   notify,
	}
	tapFeed, err := bucket.StartTapFeed(listener.TapArgs)
	if err != nil {
		return err
	}

	listener.tapFeed = tapFeed
	listener.counter = 1
	listener.terminateCheckCounter = 0
	listener.keyCounts = map[string]uint64{}
	listener.tapNotifier = sync.NewCond(&sync.Mutex{})
	if trackDocs {
		listener.DocChannel = make(chan sgbucket.TapEvent, 100)
	}

	// Start a goroutine to broadcast to the tapNotifier whenever a channel or user/role changes:
	go func() {
		defer func() {
			listener.notifyStopping()
			if listener.DocChannel != nil {
				close(listener.DocChannel)
			}
		}()
		for event := range tapFeed.Events() {
			if event.Opcode == sgbucket.TapMutation || event.Opcode == sgbucket.TapDeletion {
				key := string(event.Key)
				if strings.HasPrefix(key, auth.UserKeyPrefix) ||
					strings.HasPrefix(key, auth.RoleKeyPrefix) {
					if listener.OnDocChanged != nil {
						listener.OnDocChanged(key, event.Value, event.Sequence, event.VbNo)
					}
					listener.Notify(base.SetOf(key))
				} else if trackDocs && !strings.HasPrefix(key, KSyncKeyPrefix) && !strings.HasPrefix(key, kIndexPrefix) {
					if listener.OnDocChanged != nil {
						listener.OnDocChanged(key, event.Value, event.Sequence, event.VbNo)
					}
					listener.DocChannel <- event
				}
			}
		}
	}()

	return nil
}
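
A sketch of starting the listener and draining its DocChannel. The wiring is assumed; only fields and types visible in the example above are used:

func startListenerExample(bucket base.Bucket) error {
	var listener changeListener
	notify := func(bucketName string, err error) {
		log.Printf("tap feed notification for bucket %s: %v", bucketName, err)
	}
	if err := listener.Start(bucket, true, notify); err != nil {
		return err
	}
	// With trackDocs=true, non-internal mutations are delivered on DocChannel.
	go func() {
		for event := range listener.DocChannel {
			log.Printf("doc changed: %s", string(event.Key))
		}
	}()
	return nil
}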
Code example #7
func verifyVBMapping(bucket base.Bucket, channelName string) error {

	channelVbNo := uint32(0)

	for i := 0; i < 1024; i++ {
		docId := fmt.Sprintf("_index::%s::%d::%s", channelName, 1, vbSuffixMap[i])
		vbNo := bucket.VBHash(docId)
		if channelVbNo == 0 {
			channelVbNo = vbNo
		}
		if vbNo != channelVbNo {
			return errors.New("vb numbers don't match")
		}
	}
	return nil
}
Code example #8
File: database.go  Project: dbergan/sync_gateway
func installViews(bucket base.Bucket) error {
	// View for finding every Couchbase doc (used when deleting a database)
	// Key is docid; value is null
	allbits_map := `function (doc, meta) {
                      emit(meta.id, null); }`
	// View for _all_docs
	// Key is docid; value is [revid, sequence]
	alldocs_map := `function (doc, meta) {
                     var sync = doc._sync;
                     if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                       return;
                     if ((sync.flags & 1) || sync.deleted)
                       return;
                     var channels = sync.channels;
                     var channelNames = [];
                     for (ch in channels) {
                     	if (channels[ch] == null)
                     		channelNames.push(ch);
                     }
                     emit(meta.id, {r:sync.rev, s:sync.sequence, c:channelNames}); }`
	// View for importing unknown docs
	// Key is [existing?, docid] where 'existing?' is false for unknown docs
	import_map := `function (doc, meta) {
                     if(meta.id.substring(0,6) != "_sync:") {
                       var exists = (doc["_sync"] !== undefined);
                       emit([exists, meta.id], null); } }`
	// View for compaction -- finds all revision docs
	// Key and value are ignored.
	oldrevs_map := `function (doc, meta) {
                     var sync = doc._sync;
                     if (meta.id.substring(0,10) == "_sync:rev:")
	                     emit("",null); }`

	// Sessions view - used for session delete
	// Key is username; value is docid
	sessions_map := `function (doc, meta) {
                     	var prefix = meta.id.substring(0,%d);
                     	if (prefix == %q)
                     		emit(doc.username, meta.id);}`
	sessions_map = fmt.Sprintf(sessions_map, len(auth.SessionKeyPrefix), auth.SessionKeyPrefix)

	// All-principals view
	// Key is name; value is true for user, false for role
	principals_map := `function (doc, meta) {
							 var prefix = meta.id.substring(0,11);
							 var isUser = (prefix == %q);
							 if (isUser || prefix == %q)
			                     emit(meta.id.substring(%d), isUser); }`
	principals_map = fmt.Sprintf(principals_map, auth.UserKeyPrefix, auth.RoleKeyPrefix,
		len(auth.UserKeyPrefix))
	// By-channels view.
	// Key is [channelname, sequence]; value is [docid, revid, flag?]
	// where flag is true for doc deletion, false for removed from channel, missing otherwise
	channels_map := `function (doc, meta) {
	                    var sync = doc._sync;
	                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
	                        return;
						var sequence = sync.sequence;
	                    if (sequence === undefined)
	                        return;
	                    var value = {rev:sync.rev};
	                    if (sync.flags) {
	                    	value.flags = sync.flags
	                    } else if (sync.deleted) {
	                    	value.flags = %d // channels.Deleted
	                    }
	                    if (%v) // EnableStarChannelLog
							emit(["*", sequence], value);
						var channels = sync.channels;
						if (channels) {
							for (var name in channels) {
								removed = channels[name];
								if (!removed)
									emit([name, sequence], value);
								else {
									var flags = removed.del ? %d : %d; // channels.Removed/Deleted
									emit([name, removed.seq], {rev:removed.rev, flags: flags});
								}
							}
						}
					}`
	channels_map = fmt.Sprintf(channels_map, channels.Deleted, EnableStarChannelLog,
		channels.Removed|channels.Deleted, channels.Removed)
	// Channel access view, used by ComputeChannelsForPrincipal()
	// Key is username; value is dictionary channelName->firstSequence (compatible with TimedSet)
	access_map := `function (doc, meta) {
	                    var sync = doc._sync;
	                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
	                        return;
	                    var access = sync.access;
	                    if (access) {
	                        for (var name in access) {
	                            emit(name, access[name]);
	                        }
	                    }
	               }`
	// Role access view, used by ComputeRolesForUser()
	// Key is username; value is array of role names
	roleAccess_map := `function (doc, meta) {
	                    var sync = doc._sync;
	                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
	                        return;
	                    var access = sync.role_access;
	                    if (access) {
	                        for (var name in access) {
	                            emit(name, access[name]);
	                        }
	                    }
	               }`

	designDocMap := map[string]walrus.DesignDoc{}

	designDocMap[DesignDocSyncGateway] = walrus.DesignDoc{
		Views: walrus.ViewMap{
			ViewPrincipals: walrus.ViewDef{Map: principals_map},
			ViewChannels:   walrus.ViewDef{Map: channels_map},
			ViewAccess:     walrus.ViewDef{Map: access_map},
			ViewRoleAccess: walrus.ViewDef{Map: roleAccess_map},
		},
	}

	designDocMap[DesignDocSyncHousekeeping] = walrus.DesignDoc{
		Views: walrus.ViewMap{
			ViewAllBits:  walrus.ViewDef{Map: allbits_map},
			ViewAllDocs:  walrus.ViewDef{Map: alldocs_map, Reduce: "_count"},
			ViewImport:   walrus.ViewDef{Map: import_map, Reduce: "_count"},
			ViewOldRevs:  walrus.ViewDef{Map: oldrevs_map, Reduce: "_count"},
			ViewSessions: walrus.ViewDef{Map: sessions_map},
		},
	}

	// add all design docs from map into bucket
	for designDocName, designDoc := range designDocMap {
		if err := bucket.PutDDoc(designDocName, designDoc); err != nil {
			base.Warn("Error installing Couchbase design doc: %v", err)
			return err
		}
	}

	return nil
}
Code example #9
File: database.go  Project: paulharter/sync_gateway
func installViews(bucket base.Bucket) error {
	// View for finding every Couchbase doc (used when deleting a database)
	// Key is docid; value is null
	allbits_map := `function (doc, meta) {
                      emit(meta.id, null); }`
	// View for _all_docs
	// Key is docid; value is [revid, sequence]
	alldocs_map := `function (doc, meta) {
                     var sync = doc._sync;
                     if (sync === undefined || meta.id.substring(0,6) == "_sync:")
                       return;
                     if ((sync.flags & 1) || sync.deleted)
                       return;
                     var channels = sync.channels;
                     var channelNames = [];
                     for (ch in channels) {
                     	if (channels[ch] == null)
                     		channelNames.push(ch);
                     }
                     emit(meta.id, {r:sync.rev, s:sync.sequence, c:channelNames}); }`
	// View for importing unknown docs
	// Key is [existing?, docid] where 'existing?' is false for unknown docs
	import_map := `function (doc, meta) {
                     if(meta.id.substring(0,6) != "_sync:") {
                       var exists = (doc["_sync"] !== undefined);
                       emit([exists, meta.id], null); } }`
	// View for compaction -- finds all revision docs
	// Key and value are ignored.
	oldrevs_map := `function (doc, meta) {
                     var sync = doc._sync;
                     if (meta.id.substring(0,10) == "_sync:rev:")
	                     emit("",null); }`

	// Sessions view - used for session delete
	// Key is username; value is docid
	sessions_map := `function (doc, meta) {
                     	var prefix = meta.id.substring(0,%d);
                     	if (prefix == %q)
                     		emit(doc.username, meta.id);}`
	sessions_map = fmt.Sprintf(sessions_map, len(auth.SessionKeyPrefix), auth.SessionKeyPrefix)

	// All-principals view
	// Key is name; value is true for user, false for role
	principals_map := `function (doc, meta) {
							 var prefix = meta.id.substring(0,11);
							 var isUser = (prefix == %q);
							 if (isUser || prefix == %q)
			                     emit(meta.id.substring(%d), isUser); }`
	principals_map = fmt.Sprintf(principals_map, auth.UserKeyPrefix, auth.RoleKeyPrefix,
		len(auth.UserKeyPrefix))
	// By-channels view.
	// Key is [channelname, sequence]; value is [docid, revid, flag?]
	// where flag is true for doc deletion, false for removed from channel, missing otherwise
	channels_map := `function (doc, meta) {
	                    var sync = doc._sync;
	                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
	                        return;
						var sequence = sync.sequence;
	                    if (sequence === undefined)
	                        return;
	                    var value = {rev:sync.rev};
	                    if (sync.flags) {
	                    	value.flags = sync.flags
	                    } else if (sync.deleted) {
	                    	value.flags = %d // channels.Deleted
	                    }
	                    if (%v) // EnableStarChannelLog
							emit(["*", sequence], value);
						var channels = sync.channels;
						if (channels) {
							for (var name in channels) {
								removed = channels[name];
								if (!removed)
									emit([name, sequence], value);
								else {
									var flags = removed.del ? %d : %d; // channels.Removed/Deleted
									emit([name, removed.seq], {rev:removed.rev, flags: flags});
								}
							}
						}
					}`
	channels_map = fmt.Sprintf(channels_map, channels.Deleted, EnableStarChannelLog,
		channels.Removed|channels.Deleted, channels.Removed)
	// Channel access view, used by ComputeChannelsForPrincipal()
	// Key is username; value is dictionary channelName->firstSequence (compatible with TimedSet)
	access_map := `function (doc, meta) {
	                    var sync = doc._sync;
	                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
	                        return;
	                    var access = sync.access;
	                    if (access) {
	                        for (var name in access) {
	                            emit(name, access[name]);
	                        }
	                    }
	               }`

	// Vbucket sequence version of channel access view, used by ComputeChannelsForPrincipal()
	// Key is username; value is dictionary channelName->firstSequence (compatible with TimedSet)

	access_vbSeq_map := `function (doc, meta) {
		                    var sync = doc._sync;
		                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
		                        return;
		                    var access = sync.access;
		                    if (access) {
		                        for (var name in access) {
		                        	// Build a timed set based on vb and vbseq of this revision
		                        	var value = {};
		                        	for (var channel in access[name]) {
		                        		var timedSetWithVbucket = {};
				                        timedSetWithVbucket["vb"] = parseInt(meta.vb, 10);
				                        timedSetWithVbucket["seq"] = parseInt(meta.seq, 10);
				                        value[channel] = timedSetWithVbucket;
			                        }
		                            emit(name, value)
		                        }

		                    }
		               }`

	// Role access view, used by ComputeRolesForUser()
	// Key is username; value is array of role names
	roleAccess_map := `function (doc, meta) {
	                    var sync = doc._sync;
	                    if (sync === undefined || meta.id.substring(0,6) == "_sync:")
	                        return;
	                    var access = sync.role_access;
	                    if (access) {
	                        for (var name in access) {
	                            emit(name, access[name]);
	                        }
	                    }
	               }`

	designDocMap := map[string]sgbucket.DesignDoc{}

	designDocMap[DesignDocSyncGateway] = sgbucket.DesignDoc{
		Views: sgbucket.ViewMap{
			ViewPrincipals:  sgbucket.ViewDef{Map: principals_map},
			ViewChannels:    sgbucket.ViewDef{Map: channels_map},
			ViewAccess:      sgbucket.ViewDef{Map: access_map},
			ViewAccessVbSeq: sgbucket.ViewDef{Map: access_vbSeq_map},
			ViewRoleAccess:  sgbucket.ViewDef{Map: roleAccess_map},
		},
	}

	designDocMap[DesignDocSyncHousekeeping] = sgbucket.DesignDoc{
		Views: sgbucket.ViewMap{
			ViewAllBits:  sgbucket.ViewDef{Map: allbits_map},
			ViewAllDocs:  sgbucket.ViewDef{Map: alldocs_map, Reduce: "_count"},
			ViewImport:   sgbucket.ViewDef{Map: import_map, Reduce: "_count"},
			ViewOldRevs:  sgbucket.ViewDef{Map: oldrevs_map, Reduce: "_count"},
			ViewSessions: sgbucket.ViewDef{Map: sessions_map},
		},
	}

	sleeper := base.CreateDoublingSleeperFunc(
		11, //MaxNumRetries approx 10 seconds total retry duration
		5,  //InitialRetrySleepTimeMS
	)

	// add all design docs from map into bucket
	for designDocName, designDoc := range designDocMap {

		//start a retry loop to put design document backing off double the delay each time
		worker := func() (shouldRetry bool, err error, value interface{}) {
			err = bucket.PutDDoc(designDocName, designDoc)
			if err != nil {
				base.Warn("Error installing Couchbase design doc: %v", err)
			}
			return err != nil, err, nil
		}

		description := fmt.Sprintf("Attempt to install Couchbase design doc bucket : %v", designDocName)
		err, _ := base.RetryLoop(description, worker, sleeper)

		if err != nil {
			return err
		}
	}

	return nil
}
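
Compared with example #8, this version adds the vbucket/sequence channel-access view (ViewAccessVbSeq) and wraps PutDDoc in a retry loop with exponential backoff. The same worker/sleeper pattern can wrap any transient bucket operation; a generic sketch using only the signatures visible above, with an arbitrary SetRaw call standing in for the retried operation:

func setWithRetryExample(bucket base.Bucket) error {
	sleeper := base.CreateDoublingSleeperFunc(11, 5) // up to ~11 retries, starting at 5ms and doubling

	worker := func() (shouldRetry bool, err error, value interface{}) {
		err = bucket.SetRaw("someKey", 0, []byte("someValue")) // any operation that may fail transiently
		return err != nil, err, nil
	}

	err, _ := base.RetryLoop("set someKey with retry", worker, sleeper)
	return err
}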
Code example #10
File: database.go  Project: couchbase/sync_gateway
// Creates a new DatabaseContext on a bucket. The bucket will be closed when this context closes.
func NewDatabaseContext(dbName string, bucket base.Bucket, autoImport bool, options DatabaseContextOptions) (*DatabaseContext, error) {
	if err := ValidateDatabaseName(dbName); err != nil {
		return nil, err
	}

	context := &DatabaseContext{
		Name:       dbName,
		Bucket:     bucket,
		StartTime:  time.Now(),
		RevsLimit:  DefaultRevsLimit,
		autoImport: autoImport,
		Options:    options,
	}
	context.revisionCache = NewRevisionCache(int(options.RevisionCacheCapacity), context.revCacheLoader)

	context.EventMgr = NewEventManager()

	var err error
	context.sequences, err = newSequenceAllocator(bucket)
	if err != nil {
		return nil, err
	}
	lastSeq, err := context.sequences.lastSequence()
	if err != nil {
		return nil, err
	}

	if options.IndexOptions == nil {
		// In-memory channel cache
		context.SequenceType = IntSequenceType
		context.changeCache = &changeCache{}
	} else {
		// KV channel index
		context.SequenceType = ClockSequenceType
		context.changeCache = &kvChangeIndex{}
		context.SequenceHasher, err = NewSequenceHasher(options.SequenceHashOptions)
		if err != nil {
			return nil, err
		}
	}

	context.changeCache.Init(context, SequenceID{Seq: lastSeq}, func(changedChannels base.Set) {
		context.tapListener.Notify(changedChannels)
	}, options.CacheOptions, options.IndexOptions)
	context.SetOnChangeCallback(context.changeCache.DocChanged)

	// Initialize the tap Listener for notify handling
	context.tapListener.Init(bucket.GetName())

	// If not using channel index, start the tap feed
	if options.IndexOptions == nil {
		if err = context.tapListener.Start(bucket, options.TrackDocs, func(bucket string, err error) {
			context.TakeDbOffline("Lost TAP Feed")
		}); err != nil {
			return nil, err
		}
	}

	// Load providers into provider map.  Does basic validation on the provider definition, and identifies the default provider.
	if options.OIDCOptions != nil {
		context.OIDCProviders = make(auth.OIDCProviderMap)

		for name, provider := range options.OIDCOptions.Providers {
			if provider.Issuer == "" || provider.ClientID == nil {
				base.Warn("Issuer and ClientID required for OIDC Provider - skipping provider %q", name)
				continue
			}

			if provider.ValidationKey == nil {
				base.Warn("Validation Key not defined in config for provider %q - auth code flow will not be supported for this provider", name)
			}

			if strings.Contains(name, "_") {
				return nil, fmt.Errorf("OpenID Connect provider names cannot contain underscore:%s", name)
			}
			provider.Name = name
			if _, ok := context.OIDCProviders[provider.Issuer]; ok {
				base.Warn("Multiple OIDC providers defined for issuer %v", provider.Issuer)
				return nil, fmt.Errorf("Multiple OIDC providers defined for issuer %v", provider.Issuer)
			}

			// If this is the default provider, or there's only one provider defined, set IsDefault
			if (options.OIDCOptions.DefaultProvider != nil && name == *options.OIDCOptions.DefaultProvider) || len(options.OIDCOptions.Providers) == 1 {
				provider.IsDefault = true
			}

			// If this isn't the default provider, add the provider to the callback URL (needed to identify provider to _oidc_callback)
			if !provider.IsDefault && provider.CallbackURL != nil {
				var updatedCallback string
				if strings.Contains(*provider.CallbackURL, "?") {
					updatedCallback = fmt.Sprintf("%s&provider=%s", *provider.CallbackURL, name)
				} else {
					updatedCallback = fmt.Sprintf("%s?provider=%s", *provider.CallbackURL, name)
				}
				provider.CallbackURL = &updatedCallback
			}

			context.OIDCProviders[name] = provider
		}
		if len(context.OIDCProviders) == 0 {
			return nil, errors.New("OpenID Connect defined in config, but no valid OpenID Connect providers specified.")
		}

	}

	go context.watchDocChanges()
	return context, nil
}
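
A minimal construction sketch. The option values are assumptions; only the constructor signature comes from the example above, and lifecycle management (closing the context and its bucket) is not shown:

func newContextExample(bucket base.Bucket) (*DatabaseContext, error) {
	// Zero-value options: no IndexOptions (in-memory channel cache) and no OIDC providers.
	return NewDatabaseContext("db", bucket, false /* autoImport */, DatabaseContextOptions{})
}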
Code example #11
func (d *DenseBlock) loadBlock(bucket base.Bucket) (err error) {
	d.value, d.cas, err = bucket.GetRaw(d.Key)
	d.clock = nil
	return err
}