// NewGroupStore creates a GroupStore for use in storing []byte values
// referenced by 128 bit keys; the store and a restart channel (chan error)
// are returned.
//
// The restart channel (chan error) should be read from continually during the
// life of the store and, upon any error from the channel, the store should be
// restarted with Shutdown and Startup. This restart procedure is needed when
// data on disk is detected as corrupted and cannot be easily recovered from; a
// restart will cause only the good entries to be loaded, thereby discarding
// any entries damaged by the corruption. A restart may also be requested if
// the store reaches an unrecoverable state, such as no longer being able to
// open new files.
//
// Note that the store makes heavy use of buffering, multiple cores, and
// background goroutines, so Shutdown should be called before the process exits
// to ensure all processing is done and the buffers are flushed.
func NewGroupStore(c *GroupStoreConfig) (GroupStore, chan error) {
	cfg := resolveGroupStoreConfig(c)
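	// Best-effort creation of the data and TOC directories; this variant has no
	// error return, so any failure here will surface later when the store tries
	// to create files in these paths.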
	_ = os.MkdirAll(cfg.Path, 0755)
	_ = os.MkdirAll(cfg.PathTOC, 0755)
	lcmap := cfg.GroupLocMap
	if lcmap == nil {
		lcmap = locmap.NewGroupLocMap(nil)
	}
	lcmap.SetInactiveMask(_TSB_INACTIVE)
	store := &defaultGroupStore{
		logCritical:             cfg.LogCritical,
		logError:                cfg.LogError,
		logDebug:                cfg.LogDebug,
		logDebugOn:              cfg.LogDebug != nil,
		rand:                    cfg.Rand,
		path:                    cfg.Path,
		pathtoc:                 cfg.PathTOC,
		locmap:                  lcmap,
		workers:                 cfg.Workers,
		recoveryBatchSize:       cfg.RecoveryBatchSize,
		replicationIgnoreRecent: (uint64(cfg.ReplicationIgnoreRecent) * uint64(time.Second) / 1000) << _TSB_UTIL_BITS,
		valueCap:                uint32(cfg.ValueCap),
		pageSize:                uint32(cfg.PageSize),
		minValueAlloc:           cfg.minValueAlloc,
		writePagesPerWorker:     cfg.WritePagesPerWorker,
		fileCap:                 uint32(cfg.FileCap),
		fileReaders:             cfg.FileReaders,
		checksumInterval:        uint32(cfg.ChecksumInterval),
		msgRing:                 cfg.MsgRing,
		restartChan:             make(chan error),
		openReadSeeker:          cfg.OpenReadSeeker,
		openWriteSeeker:         cfg.OpenWriteSeeker,
		readdirnames:            cfg.Readdirnames,
		createWriteCloser:       cfg.CreateWriteCloser,
		stat:                    cfg.Stat,
		remove:                  cfg.Remove,
		rename:                  cfg.Rename,
		isNotExist:              cfg.IsNotExist,
	}
	if store.logCritical == nil {
		store.logCritical = flog.Default.CriticalPrintf
	}
	if store.logError == nil {
		store.logError = flog.Default.ErrorPrintf
	}
	if store.logDebug == nil {
		store.logDebug = func(string, ...interface{}) {}
	}
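	// Wire up the background subsystems (tombstone discard, compaction, audit,
	// pull/push replication, bulk set/ack, flushing, and watching) from the
	// resolved config.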
	store.tombstoneDiscardConfig(cfg)
	store.compactionConfig(cfg)
	store.auditConfig(cfg)
	store.pullReplicationConfig(cfg)
	store.pushReplicationConfig(cfg)
	store.bulkSetConfig(cfg)
	store.bulkSetAckConfig(cfg)
	store.flusherConfig(cfg)
	store.watcherConfig(cfg)
	return store, store.restartChan
}
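
// Usage sketch (not from the original source): one way to consume the restart
// channel returned by the NewGroupStore variant above. The restarter interface
// is a hypothetical stand-in for the store; the doc comment only states that
// Shutdown and Startup exist, so the real method signatures (context
// parameters, error returns) may differ.
type restarter interface {
	Shutdown()
	Startup()
}

func watchRestarts(store restarter, restartChan chan error) {
	// Read continually for the life of the store; each error means the store
	// detected something unrecoverable (such as on-disk corruption) and wants
	// a restart, which reloads only the good entries.
	for err := range restartChan {
		log.Printf("group store requested restart: %s", err)
		store.Shutdown()
		store.Startup()
	}
}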
func lowMemGroupStoreConfig() *GroupStoreConfig {
	locmap := locmap.NewGroupLocMap(&locmap.GroupLocMapConfig{
		Roots:    1,
		PageSize: 1,
	})
	return &GroupStoreConfig{
		ValueCap:                  1024,
		Workers:                   2,
		ChecksumInterval:          1024,
		PageSize:                  1,
		WritePagesPerWorker:       1,
		GroupLocMap:               locmap,
		MsgCap:                    1,
		FileCap:                   1024 * 1024,
		FileReaders:               2,
		RecoveryBatchSize:         1024,
		TombstoneDiscardBatchSize: 1024,
		OutPullReplicationBloomN:  1000,
	}
}
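
// Usage sketch (not from the original source): constructing a store from the
// low-memory configuration above, assuming the two-value NewGroupStore variant
// shown earlier. lowMemGroupStoreConfig leaves Path and PathTOC unset, so this
// hypothetical helper points both at a caller-supplied directory (for example
// a test temp dir) before constructing the store.
func newLowMemGroupStore(path string) (GroupStore, chan error) {
	cfg := lowMemGroupStoreConfig()
	cfg.Path = path
	cfg.PathTOC = path
	return NewGroupStore(cfg)
}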
Example #3
// NewGroupStore creates a DefaultGroupStore for use in storing []byte values
// referenced by 128 bit keys; the store, a restart channel (chan error), and
// any error encountered during construction are returned.
//
// The restart channel should be read from continually during the life of the
// store and, upon any error from the channel, the store should be discarded
// and a new one created in its place. This restart procedure is needed when
// data on disk is detected as corrupted and cannot be easily recovered from; a
// restart will cause only the good entries to be loaded, thereby discarding
// any entries damaged by the corruption.
//
// Note that the store makes heavy use of buffering, multiple cores, and
// background goroutines, so DisableAll() and Flush() should be called before
// the process exits to ensure all processing is done and the buffers are
// flushed.
func NewGroupStore(c *GroupStoreConfig) (*DefaultGroupStore, chan error, error) {
	cfg := resolveGroupStoreConfig(c)
	lcmap := cfg.GroupLocMap
	if lcmap == nil {
		lcmap = locmap.NewGroupLocMap(nil)
	}
	lcmap.SetInactiveMask(_TSB_INACTIVE)
	store := &DefaultGroupStore{
		logCritical:             cfg.LogCritical,
		logError:                cfg.LogError,
		logWarning:              cfg.LogWarning,
		logInfo:                 cfg.LogInfo,
		logDebug:                cfg.LogDebug,
		rand:                    cfg.Rand,
		locBlocks:               make([]groupLocBlock, math.MaxUint16),
		path:                    cfg.Path,
		pathtoc:                 cfg.PathTOC,
		locmap:                  lcmap,
		workers:                 cfg.Workers,
		recoveryBatchSize:       cfg.RecoveryBatchSize,
		replicationIgnoreRecent: (uint64(cfg.ReplicationIgnoreRecent) * uint64(time.Second) / 1000) << _TSB_UTIL_BITS,
		valueCap:                uint32(cfg.ValueCap),
		pageSize:                uint32(cfg.PageSize),
		minValueAlloc:           cfg.minValueAlloc,
		writePagesPerWorker:     cfg.WritePagesPerWorker,
		fileCap:                 uint32(cfg.FileCap),
		fileReaders:             cfg.FileReaders,
		checksumInterval:        uint32(cfg.ChecksumInterval),
		msgRing:                 cfg.MsgRing,
		restartChan:             make(chan error),
	}
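	// Channel plumbing: per-worker channels for recycling memory blocks and
	// write requests, plus shared channels for TOC blocks, file-bound memory
	// blocks, and flush signaling.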
	store.freeableMemBlockChans = make([]chan *groupMemBlock, store.workers)
	for i := 0; i < cap(store.freeableMemBlockChans); i++ {
		store.freeableMemBlockChans[i] = make(chan *groupMemBlock, store.workers)
	}
	store.freeMemBlockChan = make(chan *groupMemBlock, store.workers*store.writePagesPerWorker)
	store.freeWriteReqChans = make([]chan *groupWriteReq, store.workers)
	store.pendingWriteReqChans = make([]chan *groupWriteReq, store.workers)
	store.fileMemBlockChan = make(chan *groupMemBlock, store.workers)
	store.freeTOCBlockChan = make(chan []byte, store.workers*2)
	store.pendingTOCBlockChan = make(chan []byte, store.workers)
	store.flushedChan = make(chan struct{}, 1)
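	// Preallocate the write-path memory blocks, registering each with
	// addLocBlock to obtain its location-block id.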
	for i := 0; i < cap(store.freeMemBlockChan); i++ {
		memBlock := &groupMemBlock{
			store:  store,
			toc:    make([]byte, 0, store.pageSize),
			values: make([]byte, 0, store.pageSize),
		}
		var err error
		memBlock.id, err = store.addLocBlock(memBlock)
		if err != nil {
			return nil, nil, err
		}
		store.freeMemBlockChan <- memBlock
	}
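	// Preallocate write requests, two per worker, each carrying a buffered
	// error channel.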
	for i := 0; i < len(store.freeWriteReqChans); i++ {
		store.freeWriteReqChans[i] = make(chan *groupWriteReq, store.workers*2)
		for j := 0; j < store.workers*2; j++ {
			store.freeWriteReqChans[i] <- &groupWriteReq{errChan: make(chan error, 1)}
		}
	}
	for i := 0; i < len(store.pendingWriteReqChans); i++ {
		store.pendingWriteReqChans[i] = make(chan *groupWriteReq)
	}
	for i := 0; i < cap(store.freeTOCBlockChan); i++ {
		store.freeTOCBlockChan <- make([]byte, 0, store.pageSize)
	}
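	// Start the background goroutines: one TOC writer, one file writer, and a
	// memory clearer plus memory writer per worker.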
	go store.tocWriter()
	go store.fileWriter()
	for i := 0; i < len(store.freeableMemBlockChans); i++ {
		go store.memClearer(store.freeableMemBlockChans[i])
	}
	for i := 0; i < len(store.pendingWriteReqChans); i++ {
		go store.memWriter(store.pendingWriteReqChans[i])
	}
	store.tombstoneDiscardConfig(cfg)
	store.compactionConfig(cfg)
	store.auditConfig(cfg)
	store.pullReplicationConfig(cfg)
	store.pushReplicationConfig(cfg)
	store.bulkSetConfig(cfg)
	store.bulkSetAckConfig(cfg)
	store.flusherConfig(cfg)
	store.diskWatcherConfig(cfg)
	err := store.recovery()
	if err != nil {
		return nil, nil, err
	}
	return store, store.restartChan, nil
}
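
// Usage sketch (not from the original source): the lifecycle described in the
// doc comment of the three-value NewGroupStore variant above. On any error
// from the restart channel the store is discarded and recreated; before exit
// the store is quiesced with DisableAll and Flush. The quit channel and the
// use of the standard log package are illustrative additions, and the
// DisableAll/Flush call forms are taken from the doc comment and may differ in
// other versions.
func runGroupStore(cfg *GroupStoreConfig, quit chan struct{}) error {
	store, restartChan, err := NewGroupStore(cfg)
	if err != nil {
		return err
	}
	for {
		select {
		case restartErr := <-restartChan:
			// Discard the old store and build a fresh one; only the good
			// entries will be reloaded from disk, dropping the corrupted ones.
			log.Printf("recreating group store: %s", restartErr)
			if store, restartChan, err = NewGroupStore(cfg); err != nil {
				return err
			}
		case <-quit:
			// Quiesce before exit so buffered work is completed and flushed.
			store.DisableAll()
			store.Flush()
			return nil
		}
	}
}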