Example #1
// Starts and runs the server given its configuration. (This function never returns.)
func RunServer(config *ServerConfig) {
	PrettyPrint = config.Pretty

	base.Logf("==== %s ====", LongVersionString)

	if os.Getenv("GOMAXPROCS") == "" && runtime.GOMAXPROCS(0) == 1 {
		cpus := runtime.NumCPU()
		if cpus > 1 {
			runtime.GOMAXPROCS(cpus)
			base.Logf("Configured Go to use all %d CPUs; setenv GOMAXPROCS to override this", cpus)
		}
	}

	SetMaxFileDescriptors(config.MaxFileDescriptors)

	sc := NewServerContext(config)
	for _, dbConfig := range config.Databases {
		if _, err := sc.AddDatabaseFromConfig(dbConfig); err != nil {
			base.LogFatal("Error opening database: %v", err)
		}
	}

	if config.ProfileInterface != nil {
		//runtime.MemProfileRate = 10 * 1024
		base.Logf("Starting profile server on %s", *config.ProfileInterface)
		go func() {
			http.ListenAndServe(*config.ProfileInterface, nil)
		}()
	}

	base.Logf("Starting admin server on %s", *config.AdminInterface)
	go config.Serve(*config.AdminInterface, CreateAdminHandler(sc))
	base.Logf("Starting server on %s ...", *config.Interface)
	config.Serve(*config.Interface, CreatePublicHandler(sc))
}
func (k *kvChangeIndexWriter) Init(context *DatabaseContext, options *CacheOptions, indexOptions *ChangeIndexOptions, indexPartitionsCallback IndexPartitionsFunc) (err error) {

	k.context = context
	k.pending = make(chan *LogEntry, maxCacheUpdate)

	k.indexPartitionsCallback = indexPartitionsCallback

	// Start a goroutine to process pending sequences
	go func() {
		if err := k.indexPending(); err != nil {
			base.LogFatal("Indexer failed with unrecoverable error: %v", err)
		}
	}()

	k.channelIndexWriters = make(map[string]*kvChannelIndex)
	k.indexWriteBucket, err = base.GetBucket(indexOptions.Spec, nil)
	if err != nil {
		base.Logf("Error opening index bucket %q, pool %q, server <%s>",
			indexOptions.Spec.BucketName, indexOptions.Spec.PoolName, indexOptions.Spec.Server)
		// TODO: revert to local index?
		return err
	}
	cbBucket, ok := k.indexWriteBucket.(base.CouchbaseBucket)
	var maxVbNo uint16
	if ok {
		maxVbNo, _ = cbBucket.GetMaxVbno()
	} else {
		// Not a Couchbase bucket (e.g. Walrus, used for unit testing) - default to the standard 1024 vbuckets
		maxVbNo = 1024
	}

	// Buffered work queue of incoming entries, drained by the unmarshal worker goroutines below
	k.unmarshalWorkQueue = make(chan *unmarshalEntry, 500)

	// Start fixed set of goroutines to work the unmarshal work queue
	for i := 0; i < maxUnmarshalProcesses; i++ {
		go func() {
			for {
				select {
				case unmarshalEntry := <-k.unmarshalWorkQueue:
					unmarshalEntry.process()
				case <-k.terminator:
					return
				}
			}
		}()
	}

	// Initialize unmarshalWorkers
	k.unmarshalWorkers = make([]*unmarshalWorker, maxVbNo)

	return nil
}
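The Init function above starts a fixed pool of goroutines that drain a buffered work queue and exit when a terminator channel fires. The snippet below is a minimal, standalone sketch of that same worker-pool pattern, using only the standard library and hypothetical task names; it is an illustration, not code from this project.

package main

import (
	"fmt"
	"sync"
)

// task stands in for *unmarshalEntry: any unit of work the pool should process.
type task struct{ id int }

func (t task) process() { fmt.Println("processed task", t.id) }

func main() {
	const numWorkers = 4
	workQueue := make(chan task, 500) // buffered queue, like unmarshalWorkQueue
	terminator := make(chan struct{}) // closed to stop all workers
	var wg sync.WaitGroup

	// Fixed set of goroutines working the queue, mirroring the loop in Init.
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case t := <-workQueue:
					t.process()
				case <-terminator:
					return
				}
			}
		}()
	}

	for i := 0; i < 10; i++ {
		workQueue <- task{id: i}
	}
	// Note: as in Init, closing the terminator does not drain work still sitting in the queue.
	close(terminator)
	wg.Wait()
}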
Example #3
func (config *ServerConfig) serve(addr string, handler http.Handler) {
	maxConns := DefaultMaxIncomingConnections
	if config.MaxIncomingConnections != nil {
		maxConns = *config.MaxIncomingConnections
	}

	err := base.ListenAndServeHTTP(addr, maxConns, config.SSLCert, config.SSLKey, handler, config.ServerReadTimeout, config.ServerWriteTimeout)
	if err != nil {
		base.LogFatal("Failed to start HTTP server on %s: %v", addr, err)
	}
}
// Returns the stable sequence used for index writes.  If it hasn't been initialized yet, it is loaded
// from the bucket.
func (k *kvChangeIndexWriter) getWriterStableSequence() *base.ShardedClock {
	var err error
	if k.writerStableSequence == nil {
		k.writerStableSequence, err = k.initWriterStableSequence()
		if err != nil {
			base.LogFatal("Unable to initialize writer stable sequence: %v", err)
		}
	}
	return k.writerStableSequence
}
func (k *kvChangeIndex) retrieveCBGTPartitions() (partitionDef base.PartitionStorageSet, err error) {

	var manager *cbgt.Manager
	if k.context != nil {
		manager = k.context.BucketSpec.CbgtContext.Manager
	} else {
		return nil, errors.New("Unable to retrieve CBGT partitions - no database context")
	}

	if manager == nil {
		return nil, errors.New("Unable to retrieve CBGT partitions - no CBGT manager")
	}

	_, planPIndexesByName, _ := manager.GetPlanPIndexes(true)
	indexName := k.context.GetCBGTIndexNameForBucket(k.context.Bucket)
	pindexes := planPIndexesByName[indexName]

	for _, pIndex := range pindexes {
		vbStrings := strings.Split(pIndex.SourcePartitions, ",")
		// convert string vbNos to uint16
		vbNos := make([]uint16, len(vbStrings))
		for i := 0; i < len(vbStrings); i++ {
			vbNumber, err := strconv.ParseUint(vbStrings[i], 10, 16)
			if err != nil {
				base.LogFatal("Error creating index partition definition - unable to parse vbucket number %s as integer:%v", vbStrings[i], err)
			}
			vbNos[i] = uint16(vbNumber)
		}
		entry := base.PartitionStorage{
			Index: uint16(0), // see below for index assignment
			Uuid:  pIndex.UUID,
			VbNos: vbNos,
		}
		partitionDef = append(partitionDef, entry)
	}

	// NOTE: the ordering of pindexes returned by manager.GetPlanPIndexes isn't fixed (it does a map iteration
	// somewhere), but the mapping from UUID to VbNos is always consistent.  Sort by UUID to get a consistent
	// partition ordering, then assign index values from the sorted position.
	partitionDef.Sort()
	for i := 0; i < len(partitionDef); i++ {
		partitionDef[i].Index = uint16(i)
	}
	return partitionDef, nil
}
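The NOTE above exists because Go randomizes map iteration order, so any slice built while iterating a map of pindexes can come back in a different order on every run. Below is a small self-contained sketch (with hypothetical names, not the real cbgt or base types) of the sort-by-UUID-then-number approach used above.

package main

import (
	"fmt"
	"sort"
)

// partition stands in for base.PartitionStorage.
type partition struct {
	Index uint16
	Uuid  string
	VbNos []uint16
}

func main() {
	// Arbitrary order, e.g. the result of iterating a map of pindexes.
	defs := []partition{
		{Uuid: "c3", VbNos: []uint16{32, 33}},
		{Uuid: "a1", VbNos: []uint16{0, 1}},
		{Uuid: "b2", VbNos: []uint16{16, 17}},
	}

	// Sort by UUID so every run (and every node) derives the same ordering,
	// then assign the partition index from the sorted position.
	sort.Slice(defs, func(i, j int) bool { return defs[i].Uuid < defs[j].Uuid })
	for i := range defs {
		defs[i].Index = uint16(i)
	}

	for _, p := range defs {
		fmt.Printf("index=%d uuid=%s vbs=%v\n", p.Index, p.Uuid, p.VbNos)
	}
}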
func NewBitFlagStorage(bucket base.Bucket, channelName string, partitions *base.IndexPartitions) *BitFlagStorage {

	storage := &BitFlagStorage{
		bucket:      bucket,
		channelName: channelName,
		partitions:  partitions,
	}

	// The theoretical maximum block cache capacity is 1024 - that would require this index writer to be indexing
	// every vbucket, with each vbucket's sequences landing in a different block.  The more common case is this
	// index writer owning at most 512 vbuckets, with the vbuckets in each partition (16 vbs per partition) working
	// the same block index == 32 blocks.  The default of 50 leaves headroom for temporary spikes.
	var err error
	storage.indexBlockCache, err = base.NewLRUCache(50)
	if err != nil {
		base.LogFatal("Error creating LRU cache for index blocks: %v", err)
	}
	return storage
}
func (k *kvChangeIndexWriter) indexPending() error {

	// Read entries from the pending list into array
	entries := k.readFromPending()

	// Initialize partition map (lazy init)
	indexPartitions, err := k.indexPartitionsCallback()
	if err != nil {
		base.LogFatal("Unable to load index partition map - cannot write incoming entry to index")
	}

	// Generic channelStorage for log entry storage (if needed)
	channelStorage := NewChannelStorage(k.indexWriteBucket, "", indexPartitions)

	indexRetryCount := 0
	maxRetries := 15

	// Continual processing of arriving entries from the feed.
	var sleeper base.RetrySleeper
	for {
		latestWriteBatch.Set(int64(len(entries)))
		err := k.indexEntries(entries, indexPartitions.VbMap, channelStorage)
		if err != nil {
			if indexRetryCount == 0 {
				sleeper = base.CreateDoublingSleeperFunc(maxRetries, 5)
			}
			indexRetryCount++
			shouldContinue, sleepMs := sleeper(indexRetryCount)
			if !shouldContinue {
				return fmt.Errorf("Unable to successfully write to index after %d attempts", maxRetries)
			}
			<-time.After(time.Millisecond * time.Duration(sleepMs))
		} else {
			// Successful indexing, read next entries
			indexRetryCount = 0
			entries = k.readFromPending()
		}
	}
}
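indexPending gets its backoff from base.CreateDoublingSleeperFunc. The sketch below is not that package's implementation; it is a hypothetical stand-in matching only what the call sites above imply: a sleeper built from a maximum retry count and an initial delay, returning (shouldContinue, sleepMs) with the delay doubling on each attempt.

package main

import (
	"fmt"
	"time"
)

// retrySleeper mirrors how the sleeper is used in indexPending: it takes the retry
// count and returns whether to keep retrying plus how long to sleep in milliseconds.
type retrySleeper func(retryCount int) (shouldContinue bool, sleepMs int)

// createDoublingSleeper is a hypothetical equivalent of base.CreateDoublingSleeperFunc:
// the delay starts at initialSleepMs and doubles on every attempt, up to maxRetries attempts.
func createDoublingSleeper(maxRetries, initialSleepMs int) retrySleeper {
	return func(retryCount int) (bool, int) {
		if retryCount > maxRetries {
			return false, 0
		}
		sleepMs := initialSleepMs
		for i := 1; i < retryCount; i++ {
			sleepMs *= 2
		}
		return true, sleepMs
	}
}

func main() {
	sleeper := createDoublingSleeper(5, 5)
	for attempt := 1; ; attempt++ {
		ok, sleepMs := sleeper(attempt)
		if !ok {
			fmt.Println("giving up after", attempt-1, "attempts")
			return
		}
		fmt.Printf("attempt %d: sleeping %dms\n", attempt, sleepMs)
		<-time.After(time.Duration(sleepMs) * time.Millisecond)
	}
}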
Example #8
func (config *ServerConfig) Serve(addr string, handler http.Handler) {
	maxConns := DefaultMaxIncomingConnections
	if config.MaxIncomingConnections != nil {
		maxConns = *config.MaxIncomingConnections
	}

	http2Enabled := false
	if config.Unsupported != nil && config.Unsupported.Http2Config != nil {
		http2Enabled = *config.Unsupported.Http2Config.Enabled
	}
	err := base.ListenAndServeHTTP(
		addr,
		maxConns,
		config.SSLCert,
		config.SSLKey,
		handler,
		config.ServerReadTimeout,
		config.ServerWriteTimeout,
		http2Enabled,
	)
	if err != nil {
		base.LogFatal("Failed to start HTTP server on %s: %v", addr, err)
	}
}
Example #9
// Reads the command line flags and the optional config file.
func ParseCommandLine() {

	siteURL := flag.String("personaOrigin", "", "Base URL that clients use to connect to the server")
	addr := flag.String("interface", DefaultInterface, "Address to bind to")
	authAddr := flag.String("adminInterface", DefaultAdminInterface, "Address to bind admin interface to")
	profAddr := flag.String("profileInterface", "", "Address to bind profile interface to")
	configServer := flag.String("configServer", "", "URL of server that can return database configs")
	deploymentID := flag.String("deploymentID", "", "Customer/project identifier for stats reporting")
	couchbaseURL := flag.String("url", DefaultServer, "Address of Couchbase server")
	poolName := flag.String("pool", DefaultPool, "Name of pool")
	bucketName := flag.String("bucket", "sync_gateway", "Name of bucket")
	dbName := flag.String("dbname", "", "Name of Couchbase Server database (defaults to name of bucket)")
	pretty := flag.Bool("pretty", false, "Pretty-print JSON responses")
	verbose := flag.Bool("verbose", false, "Log more info about requests")
	logKeys := flag.String("log", "", "Log keywords, comma separated")
	logFilePath := flag.String("logFilePath", "", "Path to log file")
	skipRunModeValidation := flag.Bool("skipRunModeValidation", false, "Skip config validation for runmode (accel vs normal sg)")

	flag.Parse()

	if flag.NArg() > 0 {
		// Read the configuration file(s), if any:
		for i := 0; i < flag.NArg(); i++ {
			filename := flag.Arg(i)
			c, err := ReadServerConfig(filename)
			if err != nil {
				base.LogFatal("Error reading config file %s: %v", filename, err)
			}
			if config == nil {
				config = c
			} else {
				if err := config.MergeWith(c); err != nil {
					base.LogFatal("Error reading config file %s: %v", filename, err)
				}
			}
		}

		// Override the config file with global settings from command line flags:
		if *addr != DefaultInterface {
			config.Interface = addr
		}
		if *authAddr != DefaultAdminInterface {
			config.AdminInterface = authAddr
		}
		if *profAddr != "" {
			config.ProfileInterface = profAddr
		}
		if *configServer != "" {
			config.ConfigServer = configServer
		}
		if *deploymentID != "" {
			config.DeploymentID = deploymentID
		}
		if *pretty {
			config.Pretty = *pretty
		}
		if config.Log != nil {
			base.ParseLogFlags(config.Log)
		}

		// If the interfaces were not specified in either the config file or
		// on the command line, set them to the default values
		if config.Interface == nil {
			config.Interface = &DefaultInterface
		}
		if config.AdminInterface == nil {
			config.AdminInterface = &DefaultAdminInterface
		}

		if *logFilePath != "" {
			config.LogFilePath = logFilePath
		}

		if *skipRunModeValidation {
			config.SkipRunmodeValidation = *skipRunModeValidation
		}

	} else {
		// If no config file is given, create a default config, filled in from command line flags:
		if *dbName == "" {
			*dbName = *bucketName
		}

		// At this point the addr is either:
		//   - A value provided by the user, in which case we want to leave it as is
		//   - The default value (":4984"), which is actually _not_ the default value we
		//     want for this case, since we are enabling insecure mode.  We want "localhost:4984" instead.
		// See #708 for more details
		if *addr == DefaultInterface {
			*addr = "localhost:4984"
		}

		config = &ServerConfig{
			Interface:        addr,
			AdminInterface:   authAddr,
			ProfileInterface: profAddr,
			Pretty:           *pretty,
			Databases: map[string]*DbConfig{
				*dbName: {
					Name: *dbName,
					BucketConfig: BucketConfig{
						Server: couchbaseURL,
						Bucket: bucketName,
						Pool:   poolName,
					},
					Users: map[string]*db.PrincipalConfig{
						base.GuestUsername: &db.PrincipalConfig{
							Disabled:         false,
							ExplicitChannels: base.SetFromArray([]string{"*"}),
						},
					},
				},
			},
		}
	}

	if *siteURL != "" {
		if config.Persona == nil {
			config.Persona = new(PersonaConfig)
		}
		config.Persona.Origin = *siteURL
	}

	base.EnableLogKey("HTTP")
	if *verbose {
		base.EnableLogKey("HTTP+")
	}
	base.ParseLogFlag(*logKeys)

	//return config
}
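ParseCommandLine populates the package-level config rather than returning it (note the commented-out return). Below is a hedged sketch of how the entry points shown in this section could be tied together; this main function is an assumption for illustration, not the project's actual main.

func main() {
	// Fill the package-level config from command line flags and/or config files.
	ParseCommandLine()

	// RunServer blocks, serving the admin and public interfaces; it never returns.
	RunServer(config)
}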
func (k *kvChangeIndex) initIndexPartitions() (*base.IndexPartitions, error) {

	k.indexPartitionsLock.Lock()
	defer k.indexPartitionsLock.Unlock()

	// Check if it's been initialized while we waited for the lock
	if k.indexPartitions != nil {
		return k.indexPartitions, nil
	}

	var partitionDef []base.PartitionStorage
	// First attempt to load from the bucket
	value, _, err := k.reader.indexReadBucket.GetRaw(base.KIndexPartitionKey)
	indexExpvars.Add("get_indexPartitionMap", 1)
	if err == nil {
		if err = json.Unmarshal(value, &partitionDef); err != nil {
			return nil, err
		}
	}

	// If unable to load from index bucket - attempt to initialize based on cbgt partitions
	if partitionDef == nil {
		var manager *cbgt.Manager
		if k.context != nil {
			manager = k.context.BucketSpec.CbgtContext.Manager
		} else {
			return nil, errors.New("Unable to determine partition map for index - not found in index, and no database context")
		}

		if manager == nil {
			return nil, errors.New("Unable to determine partition map for index - not found in index, and no CBGT manager")
		}

		_, planPIndexesByName, _ := manager.GetPlanPIndexes(true)
		indexName := k.context.GetCBGTIndexNameForBucket(k.context.Bucket)
		pindexes := planPIndexesByName[indexName]

		for index, pIndex := range pindexes {
			vbStrings := strings.Split(pIndex.SourcePartitions, ",")
			// convert string vbNos to uint16
			vbNos := make([]uint16, len(vbStrings))
			for i := 0; i < len(vbStrings); i++ {
				vbNumber, err := strconv.ParseUint(vbStrings[i], 10, 16)
				if err != nil {
					base.LogFatal("Error creating index partition definition - unable to parse vbucket number %s as integer:%v", vbStrings[i], err)
				}
				vbNos[i] = uint16(vbNumber)
			}
			entry := base.PartitionStorage{
				Index: uint16(index),
				Uuid:  pIndex.UUID,
				VbNos: vbNos,
			}
			partitionDef = append(partitionDef, entry)
		}

		// Persist to bucket
		value, err = json.Marshal(partitionDef)
		if err != nil {
			return nil, err
		}
		k.reader.indexReadBucket.SetRaw(base.KIndexPartitionKey, 0, value)
	}

	// Create k.indexPartitions based on partitionDef
	k.indexPartitions = base.NewIndexPartitions(partitionDef)
	return k.indexPartitions, nil
}