Example #1
func newMCops() *mcops {
	return &mcops{
		moved:   platform.NewAlignedUint64(0),
		success: platform.NewAlignedUint64(0),
		errored: platform.NewAlignedUint64(0),
	}
}
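
A minimal sketch of what these aligned counters are used for, assuming platform.NewAlignedUint64 exists mainly to guarantee the 64-bit alignment that sync/atomic requires on 32-bit platforms (the platform.AddUint32/LoadUint32 calls in Example #4 suggest the package is a thin wrapper over sync/atomic); the type and field names below are illustrative, not the real mcops definition:

package main

import (
	"fmt"
	"sync/atomic"
)

// mcopsCounters mirrors the moved/success/errored counters above. Placing
// the uint64 fields first keeps them 64-bit aligned, which sync/atomic
// requires on 32-bit platforms.
type mcopsCounters struct {
	moved   uint64
	success uint64
	errored uint64
}

func main() {
	c := &mcopsCounters{}
	atomic.AddUint64(&c.success, 1)            // record one successful op
	fmt.Println(atomic.LoadUint64(&c.success)) // prints 1
}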
Example #2
func NewStatsManager(supvCmdch MsgChannel,
	supvMsgch MsgChannel, config common.Config) (statsManager, Message) {
	s := statsManager{
		supvCmdch:    supvCmdch,
		supvMsgch:    supvMsgch,
		lastStatTime: time.Unix(0, 0),
		statsLogDumpInterval: platform.NewAlignedUint64(
			config["settings.statsLogDumpInterval"].Uint64()),
	}

	s.config.Store(config)

	http.HandleFunc("/stats", s.handleStatsReq)
	http.HandleFunc("/stats/mem", s.handleMemStatsReq)
	http.HandleFunc("/stats/reset", s.handleStatsResetReq)
	go s.run()
	go s.runStatsDumpLogger()
	return s, &MsgSuccess{}
}
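
The constructor stores the dump interval as an aligned uint64 and then starts runStatsDumpLogger in its own goroutine. A rough sketch of such a logger loop, using plain sync/atomic in place of the platform package; the field name and log message are illustrative, not the real implementation:

package main

import (
	"log"
	"sync/atomic"
	"time"
)

type statsDumper struct {
	// Dump interval in seconds; read atomically so a settings update from
	// another goroutine is picked up on the next iteration.
	statsLogDumpInterval uint64
}

func (s *statsDumper) runStatsDumpLogger() {
	for {
		interval := time.Duration(atomic.LoadUint64(&s.statsLogDumpInterval)) * time.Second
		log.Printf("periodic stats dump") // stand-in for logging real stats
		time.Sleep(interval)
	}
}

func main() {
	s := &statsDumper{statsLogDumpInterval: 1}
	go s.runStatsDumpLogger()
	time.Sleep(1500 * time.Millisecond) // let the goroutine emit at least one line
}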
Example #3
// NewScanCoordinator returns an instance of scanCoordinator or an error message.
// It listens on supvCmdch for commands and every command is followed
// by a synchronous response on supvCmdch.
// Any async message to the supervisor is sent on supvMsgch.
// If supvCmdch gets closed, ScanCoordinator will shut itself down.
func NewScanCoordinator(supvCmdch MsgChannel, supvMsgch MsgChannel,
	config common.Config, snapshotNotifych chan IndexSnapshot) (ScanCoordinator, Message) {
	var err error

	s := &scanCoordinator{
		supvCmdch:        supvCmdch,
		supvMsgch:        supvMsgch,
		lastSnapshot:     make(map[common.IndexInstId]IndexSnapshot),
		snapshotNotifych: snapshotNotifych,
		logPrefix:        "ScanCoordinator",
		reqCounter:       platform.NewAlignedUint64(0),
	}

	s.config.Store(config)

	addr := net.JoinHostPort("", config["scanPort"].String())
	queryportCfg := config.SectionConfig("queryport.", true)
	s.serv, err = queryport.NewServer(addr, s.serverCallback, queryportCfg)

	if err != nil {
		errMsg := &MsgError{err: Error{code: ERROR_SCAN_COORD_QUERYPORT_FAIL,
			severity: FATAL,
			category: SCAN_COORD,
			cause:    err,
		},
		}
		return nil, errMsg
	}

	// main loop
	go s.run()
	go s.listenSnapshot()

	return s, &MsgSuccess{}
}
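
The comment above describes the supervisor-channel protocol rather than implementing it here. A minimal, self-contained sketch of that protocol, with Message reduced to an empty interface and the command handling stubbed out:

package main

import "fmt"

type Message interface{}

// run replies synchronously on supvCmdch for every command it receives,
// sends async notifications on supvMsgch, and shuts down when supvCmdch
// is closed.
func run(supvCmdch, supvMsgch chan Message) {
	for cmd := range supvCmdch { // loop ends when supvCmdch is closed
		fmt.Println("handling", cmd)
		supvCmdch <- "ok" // synchronous response to the command just handled
	}
	supvMsgch <- "shutdown" // async message to the supervisor
}

func main() {
	supvCmdch := make(chan Message)
	supvMsgch := make(chan Message, 1)
	go run(supvCmdch, supvMsgch)

	supvCmdch <- "open_stream"         // issue a command ...
	fmt.Println("reply:", <-supvCmdch) // ... and wait for its response

	close(supvCmdch) // closing the command channel triggers shutdown
	fmt.Println("async:", <-supvMsgch)
}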
Example #4
func RunCommands(cluster string, cfg *Config, statsW io.Writer) (*Result, error) {
	t0 := time.Now()
	var result Result

	var clients []*qclient.GsiClient
	var jobQ chan *Job
	var aggrQ chan *JobResult
	var wg1, wg2 sync.WaitGroup

	if len(cfg.LatencyBuckets) == 0 {
		cfg.LatencyBuckets = defaultLatencyBuckets
	}

	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	config.SetValue("settings.poolSize", int(cfg.Concurrency))
	client, err := qclient.NewGsiClient(cluster, config)
	if err != nil {
		return nil, err
	}
	defer client.Close()

	indexes, err := client.Refresh()
	if err != nil {
		return nil, err
	}

	clients = make([]*qclient.GsiClient, cfg.Clients)
	for i := 0; i < cfg.Clients; i++ {
		c, err := qclient.NewGsiClient(cluster, config)
		if err != nil {
			return nil, err
		}

		defer c.Close()
		clients[i] = c
	}

	jobQ = make(chan *Job, cfg.Concurrency*1000)
	aggrQ = make(chan *JobResult, cfg.Concurrency*1000)
	for i := 0; i < cfg.Concurrency; i++ {
		wg1.Add(1)
		go Worker(jobQ, clients[i%cfg.Clients], aggrQ, &wg1)
	}

	wg2.Add(1)
	go ResultAggregator(aggrQ, statsW, &wg2)

	for i, spec := range cfg.ScanSpecs {
		if spec.Id == 0 {
			spec.Id = uint64(i)
		}

		for _, index := range indexes {
			if index.Definition.Bucket == spec.Bucket &&
				index.Definition.Name == spec.Index {
				spec.DefnId = uint64(index.Definition.DefnId)
			}
		}

		hFn := func(v int64) string {
			if v == math.MinInt64 {
				return "0"
			} else if v == math.MaxInt64 {
				return "inf"
			}
			return fmt.Sprint(time.Nanosecond * time.Duration(v))
		}

		res := new(ScanResult)
		res.ErrorCount = platform.NewAlignedUint64(0)
		res.LatencyHisto.Init(cfg.LatencyBuckets, hFn)
		res.Id = spec.Id
		result.ScanResults = append(result.ScanResults, res)
	}

	// warming up GsiClient
	for _, client := range clients {
		for _, spec := range cfg.ScanSpecs {
			job := &Job{spec: spec, result: nil}
			RunJob(client, job, nil)
			break
		}
	}

	fmt.Println("GsiClients warmed up ...")
	result.WarmupDuration = float64(time.Since(t0).Nanoseconds()) / float64(time.Second)

	// Round robin scheduling of jobs
	var allFinished bool

loop:
	for {
		allFinished = true
		for i, spec := range cfg.ScanSpecs {
			if iter := platform.LoadUint32(&spec.iteration); iter < spec.Repeat+1 {
				j := &Job{
					spec:   spec,
					result: result.ScanResults[i],
				}

				jobQ <- j
				platform.AddUint32(&spec.iteration, 1)
				allFinished = false
			}
		}

		if allFinished {
			break loop
		}
	}

	close(jobQ)
	wg1.Wait()
	close(aggrQ)
	wg2.Wait()

	return &result, err
}
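
RunCommands relies on an ordered shutdown: close jobQ so the workers drain and exit, wait on wg1, then close aggrQ and wait on wg2 so the aggregator sees every result. A stripped-down sketch of that two-stage pipeline, with Job and JobResult reduced to placeholder types:

package main

import (
	"fmt"
	"sync"
)

type Job struct{ id int }
type JobResult struct{ id int }

func worker(jobQ <-chan *Job, aggrQ chan<- *JobResult, wg *sync.WaitGroup) {
	defer wg.Done()
	for job := range jobQ { // drains until jobQ is closed
		aggrQ <- &JobResult{id: job.id}
	}
}

func aggregator(aggrQ <-chan *JobResult, wg *sync.WaitGroup) {
	defer wg.Done()
	count := 0
	for range aggrQ { // drains until aggrQ is closed
		count++
	}
	fmt.Println("aggregated", count, "results")
}

func main() {
	jobQ := make(chan *Job, 16)
	aggrQ := make(chan *JobResult, 16)
	var wg1, wg2 sync.WaitGroup

	for i := 0; i < 4; i++ {
		wg1.Add(1)
		go worker(jobQ, aggrQ, &wg1)
	}
	wg2.Add(1)
	go aggregator(aggrQ, &wg2)

	for i := 0; i < 100; i++ {
		jobQ <- &Job{id: i}
	}

	close(jobQ)  // no more jobs: workers drain and exit
	wg1.Wait()   // all workers done, so no more sends on aggrQ
	close(aggrQ) // aggregator drains and exits
	wg2.Wait()
}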
Example #5
//NewForestDBSlice initializes a new slice with a forestdb backend.
//Both the main and back indexes get initialized with the default config.
//Slice methods are not thread-safe and the application needs to
//handle the synchronization. The only exception is that Insert and
//Delete can be called concurrently.
//Returns an error in case the slice cannot be initialized.
func NewForestDBSlice(path string, sliceId SliceId, idxDefnId common.IndexDefnId,
	idxInstId common.IndexInstId, isPrimary bool,
	sysconf common.Config, idxStats *IndexStats) (*fdbSlice, error) {

	info, err := os.Stat(path)
	if err != nil || err == nil && info.IsDir() {
		os.Mkdir(path, 0777)
	}

	filepath := newFdbFile(path, false)
	slice := &fdbSlice{}
	slice.idxStats = idxStats

	slice.get_bytes = platform.NewAlignedInt64(0)
	slice.insert_bytes = platform.NewAlignedInt64(0)
	slice.delete_bytes = platform.NewAlignedInt64(0)
	slice.extraSnapDataSize = platform.NewAlignedInt64(0)
	slice.flushedCount = platform.NewAlignedUint64(0)
	slice.committedCount = platform.NewAlignedUint64(0)

	config := forestdb.DefaultConfig()
	config.SetDurabilityOpt(forestdb.DRB_ASYNC)

	memQuota := sysconf["settings.memory_quota"].Uint64()
	logging.Debugf("NewForestDBSlice(): buffer cache size %d", memQuota)
	config.SetBufferCacheSize(memQuota)
	logging.Debugf("NewForestDBSlice(): buffer cache size %d", memQuota)

	prob := sysconf["settings.max_writer_lock_prob"].Int()
	config.SetMaxWriterLockProb(uint8(prob))
	logging.Debugf("NewForestDBSlice(): max writer lock prob %d", prob)

	kvconfig := forestdb.DefaultKVStoreConfig()

retry:
	if slice.dbfile, err = forestdb.Open(filepath, config); err != nil {
		if err == forestdb.RESULT_NO_DB_HEADERS {
			logging.Warnf("NewForestDBSlice(): Open failed with no_db_header error...Resetting the forestdb file")
			os.Remove(filepath)
			goto retry
		}
		return nil, err
	}

	slice.config = config
	slice.sysconf = sysconf

	//open a separate file handle for compaction
	if slice.compactFd, err = forestdb.Open(filepath, config); err != nil {
		return nil, err
	}

	config.SetOpenFlags(forestdb.OPEN_FLAG_RDONLY)
	if slice.statFd, err = forestdb.Open(filepath, config); err != nil {
		return nil, err
	}

	slice.numWriters = sysconf["numSliceWriters"].Int()
	slice.main = make([]*forestdb.KVStore, slice.numWriters)
	for i := 0; i < slice.numWriters; i++ {
		if slice.main[i], err = slice.dbfile.OpenKVStore("main", kvconfig); err != nil {
			return nil, err
		}
	}

	//create a separate back-index for non-primary indexes
	if !isPrimary {
		slice.back = make([]*forestdb.KVStore, slice.numWriters)
		for i := 0; i < slice.numWriters; i++ {
			if slice.back[i], err = slice.dbfile.OpenKVStore("back", kvconfig); err != nil {
				return nil, err
			}
		}
	}

	// Make use of default kvstore provided by forestdb
	if slice.meta, err = slice.dbfile.OpenKVStore("default", kvconfig); err != nil {
		return nil, err
	}

	slice.path = path
	slice.currfile = filepath
	slice.idxInstId = idxInstId
	slice.idxDefnId = idxDefnId
	slice.id = sliceId

	sliceBufSize := sysconf["settings.sliceBufSize"].Uint64()
	slice.cmdCh = make(chan interface{}, sliceBufSize)
	slice.workerDone = make([]chan bool, slice.numWriters)
	slice.stopCh = make([]DoneChannel, slice.numWriters)

	slice.isPrimary = isPrimary

	for i := 0; i < slice.numWriters; i++ {
		slice.stopCh[i] = make(DoneChannel)
		slice.workerDone[i] = make(chan bool)
		go slice.handleCommandsWorker(i)
	}

	logging.Debugf("ForestDBSlice:NewForestDBSlice \n\t Created New Slice Id %v IndexInstId %v "+
		"WriterThreads %v", sliceId, idxInstId, slice.numWriters)

	slice.setCommittedCount()

	return slice, nil
}
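
The last loop above fans writes out to numSliceWriters command workers, each with its own stop and done channels. A bare-bones sketch of that worker setup; the command handling and shutdown handshake are simplified stand-ins for the real handleCommandsWorker:

package main

import "fmt"

type sliceWorkers struct {
	cmdCh      chan interface{} // shared, buffered command channel
	stopCh     []chan struct{}  // one stop channel per writer
	workerDone []chan bool      // one done channel per writer
}

func (s *sliceWorkers) handleCommandsWorker(id int) {
	for {
		select {
		case cmd := <-s.cmdCh:
			fmt.Printf("writer %d handled %v\n", id, cmd)
		case <-s.stopCh[id]:
			s.workerDone[id] <- true // acknowledge shutdown
			return
		}
	}
}

func main() {
	numWriters := 2
	s := &sliceWorkers{
		cmdCh:      make(chan interface{}, 8),
		stopCh:     make([]chan struct{}, numWriters),
		workerDone: make([]chan bool, numWriters),
	}
	for i := 0; i < numWriters; i++ {
		s.stopCh[i] = make(chan struct{})
		s.workerDone[i] = make(chan bool)
		go s.handleCommandsWorker(i)
	}

	s.cmdCh <- "insert k1"
	s.cmdCh <- "delete k2"

	// Stop each writer and wait for its acknowledgement. Commands still
	// queued in cmdCh when a stop arrives are simply dropped in this sketch.
	for i := 0; i < numWriters; i++ {
		close(s.stopCh[i])
		<-s.workerDone[i]
	}
}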