Example #1
// NewCfgCBEx is a more advanced version of NewCfgCB(), with more
// initialization options via the options map.  Allowed options:
// "keyPrefix" - an optional string prefix that's prepended to any
// keys that are written to or read from the couchbase bucket.
func NewCfgCBEx(urlStr, bucket string,
	options map[string]interface{}) (*CfgCB, error) {
	url, err := couchbase.ParseURL(urlStr)
	if err != nil {
		return nil, err
	}

	keyPrefix := ""
	if options != nil {
		v, exists := options["keyPrefix"]
		if exists {
			keyPrefix = v.(string)
		}
	}

	c := &CfgCB{
		urlStr:  urlStr,
		url:     url,
		bucket:  bucket,
		cfgMem:  NewCfgMem(),
		cfgKey:  keyPrefix + "cfg",
		options: options,
	}

	b, err := c.getBucket() // TODO: Need to b.Close()?
	if err != nil {
		return nil, err
	}

	bucketUUID := ""
	vbucketIDs := []uint16{uint16(b.VBHash(c.cfgKey))}

	bds, err := cbdatasource.NewBucketDataSource(
		[]string{urlStr},
		"default",
		bucket, bucketUUID, vbucketIDs, c, c, cfgCBOptions)
	if err != nil {
		return nil, err
	}
	c.bds = bds

	err = bds.Start()
	if err != nil {
		return nil, err
	}

	return c, nil
}
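
A call site for the function above might look like the following sketch; the URL, bucket name, and prefix are placeholder values, not taken from the original code.

// Hypothetical call site (same package); all literal values are placeholders.
func exampleNewCfgCBEx() (*CfgCB, error) {
	return NewCfgCBEx("http://127.0.0.1:8091", "my-bucket",
		map[string]interface{}{
			// Every key read from or written to the bucket gets this prefix,
			// so the config document key becomes "myApp-cfg".
			"keyPrefix": "myApp-",
		})
}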
Example #2
// NewDCPFeed creates a new, ready-to-be-started DCP feed.
func NewDCPFeed(name, indexName, url, poolName,
	bucketName, bucketUUID, paramsStr string,
	pf DestPartitionFunc, dests map[string]Dest,
	disable bool) (*DCPFeed, error) {
	params := NewDCPFeedParams()
	var stopAfter map[string]UUIDSeq

	if paramsStr != "" {
		err := json.Unmarshal([]byte(paramsStr), params)
		if err != nil {
			return nil, err
		}

		stopAfterSourceParams := StopAfterSourceParams{}
		err = json.Unmarshal([]byte(paramsStr), &stopAfterSourceParams)
		if err != nil {
			return nil, err
		}

		if stopAfterSourceParams.StopAfter == "markReached" {
			stopAfter = stopAfterSourceParams.MarkPartitionSeqs
		}
	}

	vbucketIds, err := ParsePartitionsToVBucketIds(dests)
	if err != nil {
		return nil, err
	}
	if len(vbucketIds) <= 0 {
		vbucketIds = nil
	}

	urls := strings.Split(url, ";")

	var auth couchbase.AuthHandler = params

	if params.AuthUser == "" &&
		params.AuthSaslUser == "" {
		for _, serverUrl := range urls {
			cbAuthHandler, err := NewCbAuthHandler(serverUrl)
			if err != nil {
				continue
			}
			params.AuthUser, params.AuthPassword, err =
				cbAuthHandler.GetCredentials()
			if err != nil {
				continue
			}
			params.AuthSaslUser, params.AuthSaslPassword, err =
				cbAuthHandler.GetSaslCredentials()
			if err != nil {
				continue
			}
			break
		}
	} else if params.AuthSaslUser != "" {
		auth = &DCPFeedParamsSasl{*params}
	}

	options := &cbdatasource.BucketDataSourceOptions{
		Name: fmt.Sprintf("%s-%x", name, rand.Int31()),
		ClusterManagerBackoffFactor: params.ClusterManagerBackoffFactor,
		ClusterManagerSleepInitMS:   params.ClusterManagerSleepInitMS,
		ClusterManagerSleepMaxMS:    params.ClusterManagerSleepMaxMS,
		DataManagerBackoffFactor:    params.DataManagerBackoffFactor,
		DataManagerSleepInitMS:      params.DataManagerSleepInitMS,
		DataManagerSleepMaxMS:       params.DataManagerSleepMaxMS,
		FeedBufferSizeBytes:         params.FeedBufferSizeBytes,
		FeedBufferAckThreshold:      params.FeedBufferAckThreshold,
	}

	feed := &DCPFeed{
		name:       name,
		indexName:  indexName,
		url:        url,
		poolName:   poolName,
		bucketName: bucketName,
		bucketUUID: bucketUUID,
		params:     params,
		pf:         pf,
		dests:      dests,
		disable:    disable,
		stopAfter:  stopAfter,
		stats:      NewDestStats(),
	}

	feed.bds, err = cbdatasource.NewBucketDataSource(
		urls, poolName, bucketName, bucketUUID,
		vbucketIds, auth, feed, options)
	if err != nil {
		return nil, err
	}

	return feed, nil
}
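
Wiring the feed up might look like the sketch below; the feed name, URL, and partition-to-Dest mapping are placeholders, and it assumes DCPFeed exposes the usual Start() error method like the other feed types in this package.

// Hypothetical caller (same package); myPF and myDest stand in for real
// DestPartitionFunc and Dest values supplied by the application.
func exampleNewDCPFeed(myPF DestPartitionFunc, myDest Dest) error {
	feed, err := NewDCPFeed("myFeed", "myIndex",
		"http://127.0.0.1:8091", "default", "my-bucket",
		"", // bucketUUID: empty means don't pin to a specific bucket UUID
		"", // paramsStr: empty means default DCPFeedParams
		myPF, map[string]Dest{"0": myDest}, false)
	if err != nil {
		return err
	}
	return feed.Start() // assumed Start() error method on DCPFeed
}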
Example #3
// Start cbdatasource-based DCP feed, using DCPReceiver.
func (bucket CouchbaseBucket) StartDCPFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error) {

	// Recommended usage of cbdatasource is to let it manage its own dedicated connection, so we're not
	// reusing the bucket connection we've already established.
	urls := []string{bucket.spec.Server}
	poolName := bucket.spec.PoolName
	if poolName == "" {
		poolName = "default"
	}
	bucketName := bucket.spec.BucketName

	vbucketIdsArr := []uint16(nil) // nil means get all the vbuckets.

	dcpReceiver := NewDCPReceiver()

	maxVbno, err := bucket.getMaxVbno()
	if err != nil {
		return nil, err
	}

	startSeqnos := make(map[uint16]uint64, maxVbno)
	vbuuids := make(map[uint16]uint64, maxVbno)

	// GetStatsVbSeqno retrieves the high sequence number for each vbucket, to enable starting
	// the DCP stream from that position.  It also serves as a check on whether the server
	// supports DCP.
	statsUuids, highSeqnos, err := bucket.GetStatsVbSeqno(maxVbno)
	if err != nil {
		return nil, errors.New("Error retrieving stats-vbseqno - DCP not supported")
	}

	if args.Backfill == sgbucket.TapNoBackfill {
		// For non-backfill, use vbucket uuids, high sequence numbers
		LogTo("Feed+", "Seeding seqnos: %v", highSeqnos)
		vbuuids = statsUuids
		startSeqnos = highSeqnos
	}
	dcpReceiver.SeedSeqnos(vbuuids, startSeqnos)

	auth := bucket.getDcpAuthHandler()

	LogTo("Feed+", "Connecting to new bucket datasource.  URLs:%s, pool:%s, name:%s, auth:%s", urls, poolName, bucketName, auth)
	bds, err := cbdatasource.NewBucketDataSource(
		urls,
		poolName,
		bucketName,
		"",
		vbucketIdsArr,
		auth,
		dcpReceiver,
		nil,
	)
	if err != nil {
		return nil, err
	}

	events := make(chan sgbucket.TapEvent)
	dcpFeed := couchbaseDCPFeedImpl{bds, events}

	if err = bds.Start(); err != nil {
		return nil, err
	}

	go func() {
		for dcpEvent := range dcpReceiver.GetEventFeed() {
			events <- dcpEvent
		}
	}()

	return &dcpFeed, nil
}
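
A consumer of the returned feed might look like the sketch below; it assumes the sgbucket.TapFeed interface exposes Events() and Close() accessors, and the bucket value and event handling are placeholders.

// Hypothetical consumer; event processing is elided.
func consumeDCPFeed(bucket CouchbaseBucket) error {
	feed, err := bucket.StartDCPFeed(sgbucket.TapArguments{
		Backfill: sgbucket.TapNoBackfill, // start from the current high seqnos
	})
	if err != nil {
		return err
	}
	defer feed.Close()

	for event := range feed.Events() {
		_ = event // handle mutation / deletion events here
	}
	return nil
}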
Example #4
// NewCfgCBEx is a more advanced version of NewCfgCB(), with more
// initialization options via the options map.  Allowed options:
// "keyPrefix" - an optional string prefix that's prepended to any
// keys that are written to or read from the couchbase bucket.
func NewCfgCBEx(urlStr, bucket string,
	options map[string]interface{}) (*CfgCB, error) {
	url, err := couchbase.ParseURL(urlStr)
	if err != nil {
		return nil, err
	}

	keyPrefix := ""
	logger := func(format string, args ...interface{}) {}

	if options != nil {
		v, exists := options["keyPrefix"]
		if exists {
			keyPrefix = v.(string)
		}

		loggerDebug, exists := options["loggerDebug"]
		if exists && loggerDebug.(bool) {
			logger = func(format string, args ...interface{}) {
				log.Printf(format, args...)
			}
		}

		loggerFunc, exists := options["loggerFunc"]
		if exists && loggerFunc != nil {
			logger = loggerFunc.(func(format string, args ...interface{}))
		}
	}

	c := &CfgCB{
		urlStr:  urlStr,
		url:     url,
		bucket:  bucket,
		cfgKey:  keyPrefix + "cfg",
		options: options,
		logger:  logger,

		subscriptions: make(map[string][]chan<- CfgEvent),
	}

	b, err := c.getBucket() // TODO: Need to b.Close()?
	if err != nil {
		return nil, err
	}

	// The following Add() ensures that we don't see CAS number 0, to
	// avoid situations where CAS number 0's alternate meaning of
	// "don't care" can lead to startup race issues.
	b.Add(c.cfgKey, 0, NewCfgMem())

	bucketUUID := ""
	vbucketIDs := []uint16{uint16(b.VBHash(c.cfgKey))}

	bds, err := cbdatasource.NewBucketDataSource(
		[]string{urlStr},
		"default",
		bucket, bucketUUID, vbucketIDs, c, c, cfgCBOptions)
	if err != nil {
		return nil, err
	}
	c.bds = bds

	err = bds.Start()
	if err != nil {
		return nil, err
	}

	return c, nil
}
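
The extra logging options can be exercised as in this sketch; the connection details are placeholders again, and normally only one of loggerDebug / loggerFunc is supplied.

// Hypothetical call site (same package); all literal values are placeholders.
func exampleNewCfgCBExLogging() (*CfgCB, error) {
	return NewCfgCBEx("http://127.0.0.1:8091", "my-bucket",
		map[string]interface{}{
			"keyPrefix":   "myApp-",
			"loggerDebug": true, // route debug output through log.Printf
			// Alternatively, supply a custom sink:
			// "loggerFunc": func(format string, args ...interface{}) { myLog(format, args...) },
		})
}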
Example #5
func main() {
	flag.Parse()

	go dumpOnSignalForPlatform()

	if *verbose > 0 {
		log.Printf("%s started", os.Args[0])
		flag.VisitAll(func(f *flag.Flag) { log.Printf("  -%s=%s\n", f.Name, f.Value) })
		log.Printf("  GOMAXPROCS=%d", runtime.GOMAXPROCS(-1))
	}

	serverURLs := []string{*serverURL}

	vbucketIdsArr := []uint16(nil) // A nil means get all the vbuckets.
	if *vbucketIds != "" {
		vbucketIdsArr = []uint16{}
		for _, vbucketIdStr := range strings.Split(*vbucketIds, ",") {
			if vbucketIdStr != "" {
				vbucketId, err := strconv.Atoi(vbucketIdStr)
				if err != nil {
					log.Fatalf("error: could not parse vbucketId: %s", vbucketIdStr)
				}
				vbucketIdsArr = append(vbucketIdsArr, uint16(vbucketId))
			}
		}
		if len(vbucketIdsArr) <= 0 {
			vbucketIdsArr = nil
		}
	}

	if *optionFeedBufferSizeBytes < 0 {
		log.Fatalf("error: optionFeedBufferSizeBytes must be >= 0")
	}

	options := &cbdatasource.BucketDataSourceOptions{
		ClusterManagerBackoffFactor: float32(*optionClusterManagerBackoffFactor),
		ClusterManagerSleepInitMS:   *optionClusterManagerSleepInitMS,
		ClusterManagerSleepMaxMS:    *optionClusterManagerSleepMaxMS,

		DataManagerBackoffFactor: float32(*optionDataManagerBackoffFactor),
		DataManagerSleepInitMS:   *optionDataManagerSleepInitMS,
		DataManagerSleepMaxMS:    *optionDataManagerSleepMaxMS,

		FeedBufferSizeBytes:    uint32(*optionFeedBufferSizeBytes),
		FeedBufferAckThreshold: float32(*optionFeedBufferAckThreshold),
	}

	var auth couchbase.AuthHandler = nil
	if *authUser != "" {
		auth = &authUserPswd{}
	}

	receiver := &ExampleReceiver{}

	var err error

	bds, err = cbdatasource.NewBucketDataSource(serverURLs,
		*poolName, *bucketName, *bucketUUID, vbucketIdsArr, auth, receiver, options)
	if err != nil {
		log.Fatalf("error: NewBucketDataSource, err: %v", err)
	}

	if err = bds.Start(); err != nil {
		log.Fatalf("error: Start, err: %v", err)
	}

	if *verbose > 0 {
		log.Printf("started bucket data source: %v", bds)
	}

	for {
		time.Sleep(1000 * time.Millisecond)
		reportStats(bds, false)
	}
}
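
The ExampleReceiver used above isn't shown; a bare-bones cbdatasource.Receiver might look like the sketch below. The method set follows my understanding of the go-couchbase cbdatasource Receiver interface, so treat the signatures as an assumption and check them against the copy you actually build against.

// minimalReceiver is a hypothetical, bare-bones cbdatasource.Receiver: it
// logs mutations and keeps per-vbucket stream metadata in memory.  It needs
// "log", "sync", and "github.com/couchbase/gomemcached" imported.
type minimalReceiver struct {
	m    sync.Mutex
	meta map[uint16][]byte
}

func (r *minimalReceiver) OnError(err error) {
	log.Printf("receiver error: %v", err)
}

func (r *minimalReceiver) DataUpdate(vbucketID uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error {
	log.Printf("update, vb: %d, key: %s, seq: %d", vbucketID, key, seq)
	return nil
}

func (r *minimalReceiver) DataDelete(vbucketID uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error {
	log.Printf("delete, vb: %d, key: %s, seq: %d", vbucketID, key, seq)
	return nil
}

func (r *minimalReceiver) SnapshotStart(vbucketID uint16,
	snapStart, snapEnd uint64, snapType uint32) error {
	return nil
}

func (r *minimalReceiver) SetMetaData(vbucketID uint16, value []byte) error {
	r.m.Lock()
	defer r.m.Unlock()
	if r.meta == nil {
		r.meta = map[uint16][]byte{}
	}
	r.meta[vbucketID] = value
	return nil
}

func (r *minimalReceiver) GetMetaData(vbucketID uint16) ([]byte, uint64, error) {
	r.m.Lock()
	defer r.m.Unlock()
	return r.meta[vbucketID], 0, nil
}

func (r *minimalReceiver) Rollback(vbucketID uint16, rollbackSeq uint64) error {
	return nil
}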
Example #6
// NewDCPFeed creates a new, ready-to-be-started DCP feed.
func NewDCPFeed(name, indexName, url, poolName,
	bucketName, bucketUUID, paramsStr string,
	pf DestPartitionFunc, dests map[string]Dest,
	disable bool, mgr *Manager) (*DCPFeed, error) {
	log.Printf("feed_dcp: NewDCPFeed, name: %s, indexName: %s",
		name, indexName)

	var optionsMgr map[string]string
	if mgr != nil {
		optionsMgr = mgr.Options()
	}

	auth, err := CBAuth(bucketName, paramsStr, optionsMgr)
	if err != nil {
		return nil, fmt.Errorf("feed_dcp: NewDCPFeed CBAuth, err: %v", err)
	}

	var stopAfter map[string]UUIDSeq

	params := NewDCPFeedParams()

	if paramsStr != "" {
		err := json.Unmarshal([]byte(paramsStr), params)
		if err != nil {
			return nil, err
		}

		stopAfterSourceParams := StopAfterSourceParams{}
		err = json.Unmarshal([]byte(paramsStr), &stopAfterSourceParams)
		if err != nil {
			return nil, err
		}

		if stopAfterSourceParams.StopAfter == "markReached" {
			stopAfter = stopAfterSourceParams.MarkPartitionSeqs
		}
	}

	vbucketIds, err := ParsePartitionsToVBucketIds(dests)
	if err != nil {
		return nil, err
	}
	if len(vbucketIds) <= 0 {
		vbucketIds = nil
	}

	urls := strings.Split(url, ";")

	options := &cbdatasource.BucketDataSourceOptions{
		Name: fmt.Sprintf("%s%s-%x", DCPFeedPrefix, name, rand.Int31()),
		ClusterManagerBackoffFactor: params.ClusterManagerBackoffFactor,
		ClusterManagerSleepInitMS:   params.ClusterManagerSleepInitMS,
		ClusterManagerSleepMaxMS:    params.ClusterManagerSleepMaxMS,
		DataManagerBackoffFactor:    params.DataManagerBackoffFactor,
		DataManagerSleepInitMS:      params.DataManagerSleepInitMS,
		DataManagerSleepMaxMS:       params.DataManagerSleepMaxMS,
		FeedBufferSizeBytes:         params.FeedBufferSizeBytes,
		FeedBufferAckThreshold:      params.FeedBufferAckThreshold,
		Logf:          log.Printf,
		TraceCapacity: 20,
	}

	feed := &DCPFeed{
		name:       name,
		indexName:  indexName,
		url:        url,
		poolName:   poolName,
		bucketName: bucketName,
		bucketUUID: bucketUUID,
		paramsStr:  paramsStr,
		params:     params,
		pf:         pf,
		dests:      dests,
		disable:    disable,
		stopAfter:  stopAfter,
		mgr:        mgr,
		auth:       auth,
		stats:      NewDestStats(),
	}

	feed.bds, err = cbdatasource.NewBucketDataSource(
		urls, poolName, bucketName, bucketUUID,
		vbucketIds, auth, feed, options)
	if err != nil {
		return nil, err
	}

	return feed, nil
}