// NewValueStore creates a ValueStore for use in storing []byte values
// referenced by 128 bit keys; the store and a restart channel (chan error)
// are returned.
//
// The restart channel (chan error) should be read from continually during the
// life of the store and, upon any error from the channel, the store should be
// restarted with Shutdown and Startup. This restart procedure is needed when
// data on disk is detected as corrupted and cannot easily be recovered; a
// restart causes only the good entries to be loaded, thereby discarding any
// entries damaged by the corruption. A restart may also be requested if the
// store reaches an unrecoverable state, such as no longer being able to open
// new files.
//
// Note that the store makes heavy use of buffering, multiple cores, and
// background processes, so Shutdown should be called before the process
// exits to ensure all processing completes and the buffers are flushed.
func NewValueStore(c *ValueStoreConfig) (ValueStore, chan error) {
	cfg := resolveValueStoreConfig(c)
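	// Errors from MkdirAll are discarded here; if the paths are unusable,
	// later file operations against them will surface the failure.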
	_ = os.MkdirAll(cfg.Path, 0755)
	_ = os.MkdirAll(cfg.PathTOC, 0755)
	lcmap := cfg.ValueLocMap
	if lcmap == nil {
		lcmap = locmap.NewValueLocMap(nil)
	}
	lcmap.SetInactiveMask(_TSB_INACTIVE)
	store := &defaultValueStore{
		logCritical:             cfg.LogCritical,
		logError:                cfg.LogError,
		logDebug:                cfg.LogDebug,
		logDebugOn:              cfg.LogDebug != nil,
		rand:                    cfg.Rand,
		path:                    cfg.Path,
		pathtoc:                 cfg.PathTOC,
		locmap:                  lcmap,
		workers:                 cfg.Workers,
		recoveryBatchSize:       cfg.RecoveryBatchSize,
		replicationIgnoreRecent: (uint64(cfg.ReplicationIgnoreRecent) * uint64(time.Second) / 1000) << _TSB_UTIL_BITS,
		valueCap:                uint32(cfg.ValueCap),
		pageSize:                uint32(cfg.PageSize),
		minValueAlloc:           cfg.minValueAlloc,
		writePagesPerWorker:     cfg.WritePagesPerWorker,
		fileCap:                 uint32(cfg.FileCap),
		fileReaders:             cfg.FileReaders,
		checksumInterval:        uint32(cfg.ChecksumInterval),
		msgRing:                 cfg.MsgRing,
		restartChan:             make(chan error),
		openReadSeeker:          cfg.OpenReadSeeker,
		openWriteSeeker:         cfg.OpenWriteSeeker,
		readdirnames:            cfg.Readdirnames,
		createWriteCloser:       cfg.CreateWriteCloser,
		stat:                    cfg.Stat,
		remove:                  cfg.Remove,
		rename:                  cfg.Rename,
		isNotExist:              cfg.IsNotExist,
	}
	if store.logCritical == nil {
		store.logCritical = flog.Default.CriticalPrintf
	}
	if store.logError == nil {
		store.logError = flog.Default.ErrorPrintf
	}
	if store.logDebug == nil {
		store.logDebug = func(string, ...interface{}) {}
	}
	store.tombstoneDiscardConfig(cfg)
	store.compactionConfig(cfg)
	store.auditConfig(cfg)
	store.pullReplicationConfig(cfg)
	store.pushReplicationConfig(cfg)
	store.bulkSetConfig(cfg)
	store.bulkSetAckConfig(cfg)
	store.flusherConfig(cfg)
	store.watcherConfig(cfg)
	return store, store.restartChan
}
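// A minimal supervision sketch for this variant (the caller-side names here
// are hypothetical, and the exact Startup/Shutdown signatures are not shown
// in this listing, so the sketch assumes Startup returns an error and
// Shutdown takes no arguments; it also uses the standard "log" package).
func superviseValueStore(c *ValueStoreConfig, stop <-chan struct{}) error {
	store, restartChan := NewValueStore(c)
	if err := store.Startup(); err != nil {
		return err
	}
	for {
		select {
		case err := <-restartChan:
			// Corruption or an unrecoverable state was reported; cycling
			// Shutdown/Startup reloads only the good entries.
			log.Printf("restarting value store: %v", err)
			store.Shutdown()
			if err = store.Startup(); err != nil {
				return err
			}
		case <-stop:
			// Finish background work and flush buffers before exiting.
			store.Shutdown()
			return nil
		}
	}
}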
Example #2
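// lowMemValueStoreConfig returns a ValueStoreConfig tuned for minimal memory
// use (single-root location map, tiny pages and caps, few workers), such as
// for tests.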
func lowMemValueStoreConfig() *ValueStoreConfig {
	locmap := locmap.NewValueLocMap(&locmap.ValueLocMapConfig{
		Roots:    1,
		PageSize: 1,
	})
	return &ValueStoreConfig{
		ValueCap:                  1024,
		Workers:                   2,
		ChecksumInterval:          1024,
		PageSize:                  1,
		WritePagesPerWorker:       1,
		ValueLocMap:               locmap,
		MsgCap:                    1,
		FileCap:                   1024 * 1024,
		FileReaders:               2,
		RecoveryBatchSize:         1024,
		TombstoneDiscardBatchSize: 1024,
		OutPullReplicationBloomN:  1000,
	}
}
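// A usage sketch (hypothetical caller): the low-memory config plugs directly
// into NewValueStore; with the two-result variant shown above it looks like
// this.
func newSmallValueStore() (ValueStore, chan error) {
	return NewValueStore(lowMemValueStoreConfig())
}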
Example #3
// NewValueStore creates a DefaultValueStore for use in storing []byte values
// referenced by 128 bit keys; it returns the store and a restart channel
// (chan error), or an error if construction fails.
//
// The restart channel should be read from continually during the life of the
// store and, upon any error from the channel, the store should be discarded
// and a new one created in its place. This restart procedure is needed when
// data on disk is detected as corrupted and cannot easily be recovered; a
// restart causes only the good entries to be loaded, thereby discarding any
// entries damaged by the corruption.
//
// Note that the store makes heavy use of buffering, multiple cores, and
// background processes, so DisableAll() and Flush() should be called before
// the process exits to ensure all processing completes and the buffers are
// flushed.
func NewValueStore(c *ValueStoreConfig) (*DefaultValueStore, chan error, error) {
	cfg := resolveValueStoreConfig(c)
	lcmap := cfg.ValueLocMap
	if lcmap == nil {
		lcmap = locmap.NewValueLocMap(nil)
	}
	lcmap.SetInactiveMask(_TSB_INACTIVE)
	store := &DefaultValueStore{
		logCritical:             cfg.LogCritical,
		logError:                cfg.LogError,
		logWarning:              cfg.LogWarning,
		logInfo:                 cfg.LogInfo,
		logDebug:                cfg.LogDebug,
		rand:                    cfg.Rand,
		locBlocks:               make([]valueLocBlock, math.MaxUint16),
		path:                    cfg.Path,
		pathtoc:                 cfg.PathTOC,
		locmap:                  lcmap,
		workers:                 cfg.Workers,
		recoveryBatchSize:       cfg.RecoveryBatchSize,
		replicationIgnoreRecent: (uint64(cfg.ReplicationIgnoreRecent) * uint64(time.Second) / 1000) << _TSB_UTIL_BITS,
		valueCap:                uint32(cfg.ValueCap),
		pageSize:                uint32(cfg.PageSize),
		minValueAlloc:           cfg.minValueAlloc,
		writePagesPerWorker:     cfg.WritePagesPerWorker,
		fileCap:                 uint32(cfg.FileCap),
		fileReaders:             cfg.FileReaders,
		checksumInterval:        uint32(cfg.ChecksumInterval),
		msgRing:                 cfg.MsgRing,
		restartChan:             make(chan error),
	}
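	// Wire up the channels that move memory blocks, TOC buffers, and write
	// requests between the worker goroutines started below.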
	store.freeableMemBlockChans = make([]chan *valueMemBlock, store.workers)
	for i := 0; i < cap(store.freeableMemBlockChans); i++ {
		store.freeableMemBlockChans[i] = make(chan *valueMemBlock, store.workers)
	}
	store.freeMemBlockChan = make(chan *valueMemBlock, store.workers*store.writePagesPerWorker)
	store.freeWriteReqChans = make([]chan *valueWriteReq, store.workers)
	store.pendingWriteReqChans = make([]chan *valueWriteReq, store.workers)
	store.fileMemBlockChan = make(chan *valueMemBlock, store.workers)
	store.freeTOCBlockChan = make(chan []byte, store.workers*2)
	store.pendingTOCBlockChan = make(chan []byte, store.workers)
	store.flushedChan = make(chan struct{}, 1)
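	// Preallocate the writable memory blocks, registering each one in the
	// location-block table so stored entries can reference it by id.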
	for i := 0; i < cap(store.freeMemBlockChan); i++ {
		memBlock := &valueMemBlock{
			store:  store,
			toc:    make([]byte, 0, store.pageSize),
			values: make([]byte, 0, store.pageSize),
		}
		var err error
		memBlock.id, err = store.addLocBlock(memBlock)
		if err != nil {
			return nil, nil, err
		}
		store.freeMemBlockChan <- memBlock
	}
	for i := 0; i < len(store.freeWriteReqChans); i++ {
		store.freeWriteReqChans[i] = make(chan *valueWriteReq, store.workers*2)
		for j := 0; j < store.workers*2; j++ {
			store.freeWriteReqChans[i] <- &valueWriteReq{errChan: make(chan error, 1)}
		}
	}
	for i := 0; i < len(store.pendingWriteReqChans); i++ {
		store.pendingWriteReqChans[i] = make(chan *valueWriteReq)
	}
	for i := 0; i < cap(store.freeTOCBlockChan); i++ {
		store.freeTOCBlockChan <- make([]byte, 0, store.pageSize)
	}
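	// Start the background goroutines: one TOC writer, one file writer, and,
	// per worker, one memory clearer and one memory writer.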
	go store.tocWriter()
	go store.fileWriter()
	for i := 0; i < len(store.freeableMemBlockChans); i++ {
		go store.memClearer(store.freeableMemBlockChans[i])
	}
	for i := 0; i < len(store.pendingWriteReqChans); i++ {
		go store.memWriter(store.pendingWriteReqChans[i])
	}
	store.tombstoneDiscardConfig(cfg)
	store.compactionConfig(cfg)
	store.auditConfig(cfg)
	store.pullReplicationConfig(cfg)
	store.pushReplicationConfig(cfg)
	store.bulkSetConfig(cfg)
	store.bulkSetAckConfig(cfg)
	store.flusherConfig(cfg)
	store.diskWatcherConfig(cfg)
	err := store.recovery()
	if err != nil {
		return nil, nil, err
	}
	return store, store.restartChan, nil
}
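// A minimal supervision sketch for this variant (caller-side names are
// hypothetical; uses the standard "log" package). On any error from the
// restart channel the old store is discarded and a fresh one is constructed,
// and DisableAll() and Flush() run before exit, per the doc comment above.
func superviseDefaultValueStore(c *ValueStoreConfig, stop <-chan struct{}) error {
	store, restartChan, err := NewValueStore(c)
	if err != nil {
		return err
	}
	for {
		select {
		case rerr := <-restartChan:
			// Corrupt entries were detected; rebuilding the store loads
			// only the good entries from disk.
			log.Printf("recreating value store: %v", rerr)
			if store, restartChan, err = NewValueStore(c); err != nil {
				return err
			}
		case <-stop:
			// Quiesce background processing and flush all buffers before
			// the process exits.
			store.DisableAll()
			store.Flush()
			return nil
		}
	}
}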