Example No. 1
// initQueue loads the persisted head/tail counters from the store and, when
// tailing is enabled, opens the long-lived tail iterator over the queue's
// column family.
func (q *Queue) initQueue() {
	q.head = q.getIndexId("head", 1)
	q.tail = q.getIndexId("tail", 0)
	if q.useTailing {
		store := q.store
		q.tailIterator = store.NewIteratorCF(store.ro, q.cfHandle)
	}
	log.Debugf("[Queue] init queue from store, name=%s, head=%d, tail=%d", q.name, q.head, q.tail)
}
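The asymmetric starting values (head = 1, tail = 0) mean an empty queue satisfies head == tail+1. A minimal sketch of the implied length arithmetic; approxLen is a hypothetical helper, not part of the package:

// approxLen is a hypothetical illustration of the head/tail invariant:
// head is the next id to dequeue and tail the last id enqueued, so the
// approximate length is tail+1-head (0 for a fresh queue: 0 + 1 - 1).
func approxLen(head, tail uint64) uint64 {
	return tail + 1 - head
}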
Example No. 2
// Enqueue appends data to the queue and returns the id it was assigned.
// The tail counter is bumped atomically in memory and persisted through a
// merge operation in the same write batch as the payload itself.
func (q *Queue) Enqueue(data []byte) (uint64, error) {
	id := atomic.AddUint64(&q.tail, 1)
	wb := rocks.NewWriteBatch()
	defer wb.Destroy()
	wb.MergeCF(q.cfHandle, q.metaKey("tail"), oneBinary)
	wb.PutCF(q.cfHandle, q.key(id), data)
	err := q.store.Write(q.store.wo, wb)

	log.Debugf("[Queue] Enqueued data id=%d, err=%v", id, err)
	return id, err
}
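A hypothetical usage sketch; GetQueue is an assumed accessor for the store's queues map shown in Example No. 6, not confirmed API:

// Hypothetical wiring: open a store, grab a named queue, enqueue a payload.
store, err := NewStore(StoreOptions{Directory: "/tmp/demo-queue"})
if err != nil {
	panic(err)
}
q := store.GetQueue("jobs") // assumed accessor, see Example No. 6
id, err := q.Enqueue([]byte(`{"task":"resize"}`))
fmt.Printf("enqueued id=%d err=%v\n", id, err)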
Example No. 3
// loadJsonAssetsMapping loads the optional JSON file that maps logical asset
// names to their served paths. Read and decode failures are silently
// ignored, leaving any previous mapping untouched.
func (s *Server) loadJsonAssetsMapping() {
	if s.extraAssetsJson == "" {
		return
	}
	if data, err := ioutil.ReadFile(s.extraAssetsJson); err == nil {
		mapping := make(map[string]string)
		if err := json.Unmarshal(data, &mapping); err == nil {
			s.extraAssetsMapping = mapping
			log.Debugf("Server extra assets loaded from json file: %s", s.extraAssetsJson)
		}
	}
}
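The file is expected to decode into a flat map[string]string. A self-contained sketch of that decode step, with hypothetical file contents:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical contents: logical asset name -> fingerprinted path.
	data := []byte(`{"app.js": "static/app.3f2a91.js", "app.css": "static/app.77bd.css"}`)
	mapping := make(map[string]string)
	if err := json.Unmarshal(data, &mapping); err == nil {
		fmt.Println(mapping["app.js"]) // static/app.3f2a91.js
	}
}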
Example No. 4
// compile parses every registered template set, joining file names onto the
// configured directory and applying the configured delimiters and funcs.
func (r *Render) compile() {
	for _, ts := range r.templates {
		fileList := make([]string, len(ts.fileList))
		for i, f := range ts.fileList {
			fileList[i] = path.Join(r.opt.Directory, f)
		}
		ts.template = template.New(ts.entry)
		ts.template.Delims(r.opt.Delims.Left, r.opt.Delims.Right)
		for _, funcs := range r.opt.Funcs {
			ts.template.Funcs(funcs)
		}
		ts.template = template.Must(ts.template.ParseFiles(fileList...))
	}
	log.Debugf("Templates have been compiled, count=%d", len(r.templates))
}
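The same New → Delims → Funcs → Must(Parse…) pattern, reduced to a standalone standard-library sketch; the delimiters and helper func here are illustrative, not the library's defaults:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	t := template.New("entry")
	t.Delims("{%", "%}") // custom delimiters, as Render.opt.Delims configures
	t.Funcs(template.FuncMap{"upper": strings.ToUpper})
	// The real code uses ParseFiles(fileList...); Parse keeps this runnable.
	t = template.Must(t.Parse(`{% upper "hello" %}` + "\n"))
	if err := t.Execute(os.Stdout, nil); err != nil { // prints HELLO
		panic(err)
	}
}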
Example No. 5
// Dequeue removes and returns the first entry at or after the optional
// startId (which defaults to 1), or EmptyQueue if nothing is available.
func (q *Queue) Dequeue(startId ...uint64) (uint64, []byte, error) {
	store := q.store

	var seekId uint64 = 1
	if len(startId) > 0 {
		seekId = startId[0]
	}
	if seekId < 1 {
		seekId = 1
	}

	var it *rocks.Iterator
	if q.useTailing {
		it = q.tailIterator
		if !it.Valid() {
			// FIXME?(mijia): When Dequeue happens faster than Enqueue, the tail iterator
			// becomes exhausted, so we have to seek it again.
			it.Seek(q.key(seekId))
		}
	} else {
		it = store.NewIteratorCF(store.ro, q.cfHandle)
		defer it.Close()
		it.Seek(q.key(seekId))
	}

	if !it.Valid() {
		return 0, nil, EmptyQueue
	}

	wb := rocks.NewWriteBatch()
	defer wb.Destroy()
	key := makeSlice(it.Key())
	value := makeSlice(it.Value())
	wb.DeleteCF(q.cfHandle, key)
	wb.MergeCF(q.cfHandle, q.metaKey("head"), oneBinary)
	err := store.Write(store.wo, wb)
	if err == nil {
		atomic.AddUint64(&q.head, 1)
		if q.useTailing {
			it.Next()
		}
	}

	id := q.id(key)
	log.Debugf("[Queue] Dequeued data id=%d, err=%v", id, err)
	return id, value, err
}
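A hypothetical consumer loop; it relies only on the EmptyQueue sentinel visible above, and drain itself is not part of the package:

// drain is a hypothetical consumer that empties the queue, stopping at the
// EmptyQueue sentinel returned above when no entry is available.
func drain(q *Queue) error {
	for {
		id, data, err := q.Dequeue()
		if err == EmptyQueue {
			return nil // caller can poll or block until more data arrives
		}
		if err != nil {
			return err
		}
		fmt.Printf("job %d: %s\n", id, data)
	}
}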
Example No. 6
// NewStore opens (creating it if necessary) a Store, the rocksdb wrapper,
// rooted at options.Directory.
func NewStore(options StoreOptions) (*Store, error) {
	options.SetDefaults()
	if options.Directory == "" {
		return nil, fmt.Errorf("store options: Directory must not be empty")
	}
	if options.IsDebug {
		log.EnableDebug()
	}

	s := &Store{
		directory:  options.Directory,
		useTailing: !options.DisableTailing,
		cfHandles:  make(map[string]*rocks.ColumnFamilyHandle),
		queues:     make(map[string]*Queue),
	}

	opts := rocks.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.IncreaseParallelism(options.Parallel)
	opts.SetMergeOperator(&_CountMerger{})
	opts.SetMaxSuccessiveMerges(64)

	opts.SetWriteBufferSize(options.WriteBufferSize)
	opts.SetMaxWriteBufferNumber(options.WriteBufferNumber)
	opts.SetTargetFileSizeBase(options.FileSizeBase)
	opts.SetLevel0FileNumCompactionTrigger(8)
	opts.SetLevel0SlowdownWritesTrigger(16)
	opts.SetLevel0StopWritesTrigger(24)
	opts.SetNumLevels(4)
	opts.SetMaxBytesForLevelBase(512 * 1024 * 1024)
	opts.SetMaxBytesForLevelMultiplier(8)
	opts.SetCompression(options.Compression)
	opts.SetDisableAutoCompactions(options.DisableAutoCompaction)

	bbto := rocks.NewDefaultBlockBasedTableOptions()
	bbto.SetBlockCache(rocks.NewLRUCache(options.MemorySize))
	bbto.SetFilterPolicy(rocks.NewBloomFilter(10))
	opts.SetBlockBasedTableFactory(bbto)

	opts.SetMaxOpenFiles(-1)
	opts.SetMemtablePrefixBloomBits(8 * 1024 * 1024)

	var err error
	if err = os.MkdirAll(options.Directory, 0755); err != nil {
		log.Errorf("Failed to mkdir %q, %s", options.Directory, err)
		return nil, err
	}

	cfNames, err := rocks.ListColumnFamilies(opts, options.Directory)
	if err != nil {
		// FIXME: confirm that this error can only mean the db does not exist
		// yet, which is why the column families cannot be listed.
		log.Errorf("Failed to collect the column family names, %s", err)
	} else {
		log.Debugf("Got column family names for the existing db, %+v", cfNames)
	}

	if len(cfNames) == 0 {
		// We create the default column family to get the column family handle
		cfNames = []string{"default"}
	}
	cfOpts := make([]*rocks.Options, len(cfNames))
	for i := range cfNames {
		cfOpts[i] = opts
	}
	db, cfHandles, err := rocks.OpenDbColumnFamilies(opts, options.Directory, cfNames, cfOpts)
	if err != nil {
		log.Errorf("Failed to open rocks database, %s", err)
		return nil, err
	}

	s.DB = db
	s.dbOpts = opts
	s.ro = rocks.NewDefaultReadOptions()
	s.ro.SetFillCache(false)
	s.ro.SetTailing(!options.DisableTailing)
	s.wo = rocks.NewDefaultWriteOptions()
	s.wo.DisableWAL(options.DisableWAL)
	s.wo.SetSync(options.Sync)

	// cfNames is non-empty here (it falls back to "default" above).
	for i := range cfNames {
		s.cfHandles[cfNames[i]] = cfHandles[i]
	}
	return s, nil
}
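A hypothetical construction sketch; only option fields that NewStore actually reads above are set, and SetDefaults is assumed to fill in the rest:

// Hypothetical wiring; every field below is referenced by NewStore above.
opts := StoreOptions{
	Directory:       "/var/lib/demo-store",
	Parallel:        4,
	MemorySize:      128 << 20, // block-cache size in bytes
	WriteBufferSize: 64 << 20,
	Sync:            false, // trade durability for write throughput
}
store, err := NewStore(opts)
if err != nil {
	panic(err)
}
_ = store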