// SsPut writes one member of a sorted set. Each member is stored under two
// keys: a score-indexed key (for range scans in score order) and an entry
// key mapping member -> score (for direct lookups).
func (db *DB) SsPut(key, member []byte, score uint64) *skv.Reply {

	batch := new(leveldb.Batch)

	// drop the old score index when the score changed; count new members
	if prev := db.SsGet(key, member); prev.Status == skv.ReplyOK && prev.Uint64() != score {
		batch.Delete(skv.SortSetsNsScoreKey(key, member, prev.Uint64()))
	} else if prev.Status == skv.ReplyNotFound {
		db.RawIncrby(skv.SortSetsNsLengthKey(key), 1)
	}

	// score-indexed key; the value is unused
	batch.Put(skv.SortSetsNsScoreKey(key, member, score), []byte{})

	// member -> score entry
	batch.Put(skv.SortSetsNsEntryKey(key, member), []byte(strconv.FormatUint(score, 10)))

	rpl := skv.NewReply("")

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
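// Usage sketch (not part of the original source): a minimal walk through the
// sorted-set API, assuming an already-opened *DB. The set name "ranking" and
// member "alpha" are hypothetical.
func exampleSsPut(db *DB) {

	if rpl := db.SsPut([]byte("ranking"), []byte("alpha"), 42); rpl.Status != skv.ReplyOK {
		// handle the write error carried in rpl.Status
	}

	// direct lookup: member -> score
	_ = db.SsGet([]byte("ranking"), []byte("alpha"))

	// iterate members whose scores fall between 0 and 100, at most 10 entries
	_ = db.SsRange([]byte("ranking"), 0, 100, 10)
}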
// RawPut stores a raw key/value pair. ttl is in milliseconds; a ttl between
// 1 and 999 is rejected (the key is not written and an OK reply is returned),
// and only keys in the NsKvEntry namespace may carry a ttl.
func (db *DB) RawPut(key, value []byte, ttl int64) *skv.Reply {

	rpl := skv.NewReply("")

	if len(key) < 2 {
		rpl.Status = skv.ReplyBadArgument
		return rpl
	}

	if ttl > 0 {

		if ttl < 1000 {
			return rpl
		}

		switch key[0] {

		case skv.NsKvEntry:
			if ok := db._raw_ssttlat_put(key[0], key[1:], dbutil.MetaTimeNowAddMS(ttl)); !ok {
				rpl.Status = skv.ReplyBadArgument
				return rpl
			}

		default:
			rpl.Status = skv.ReplyBadArgument
			return rpl
		}
	}

	if err := db.ldb.Put(key, value, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
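// Usage sketch (key and values are hypothetical): per the guard above, a ttl
// between 1 and 999 makes RawPut return OK without writing anything, so
// callers that need expiry should pass at least 1000 (one second).
func exampleRawPut(db *DB) {

	key := skv.RawNsKeyConcat(skv.NsKvEntry, []byte("session/abc"))

	// write with an expiry of roughly one minute
	_ = db.RawPut(key, []byte("token"), 60000)

	// ttl == 0 stores the key without any expiry
	_ = db.RawPut(key, []byte("token"), 0)
}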
func (db *DB) _raw_put_json(key []byte, value interface{}, ttl int64) *skv.Reply {

	bvalue, err := dbutil.JsonEncode(value)
	if err != nil {
		return skv.NewReply(err.Error())
	}

	return db.RawPut(key, bvalue, ttl)
}
func (db *DB) HashDel(key, field []byte) *skv.Reply {

	bkey := skv.HashNsEntryKey(key, field)

	if rs := db.RawGet(bkey); rs.Status == skv.ReplyOK {
		db.RawIncrby(skv.HashNsLengthKey(key), -1)
		return db.RawDel(bkey)
	}

	return skv.NewReply("")
}
func (db *DB) ObjectDocDel(fold, key string) *skv.Reply {

	var (
		rpl     = skv.NewReply("")
		opath   = skv.NewObjectPathKey(fold, key)
		bkey    = opath.EntryIndex()
		prevobj *skv.Object
		previdx = map[uint8]skv.ObjectDocSchemaIndexEntryBytes{}
	)

	if rs := db.RawGet(bkey); rs.Status == skv.ReplyNotFound {
		return rpl
	} else if rs.Status != skv.ReplyOK {
		return rs
	} else {

		prevobj = rs.Object()

		var prev map[string]interface{}
		if err := prevobj.Data.JsonDecode(&prev); err == nil {
			previdx = skv.ObjectDocIndexDataExport(_obj_doc_indexes, opath.Fold, prev)
		}
	}

	batch := new(leveldb.Batch)

	for piKey, piEntry := range previdx {
		batch.Delete(append(append(skv.ObjectDocIndexFieldPrefix(opath.Fold, piKey), piEntry.Data...), opath.Field...))
	}

	batch.Delete(bkey)

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	} else {

		db._obj_meta_sync(prevobj.Meta.Type, &prevobj.Meta, opath, -1, 0, _obj_options_def)

		// if _obj_event_handler != nil {
		// 	_obj_event_handler(opath, skv.ObjectEventDeleted, 0)
		// }
	}

	return rpl
}
func (db *DB) RawDel(keys ...[]byte) *skv.Reply {

	rpl := skv.NewReply("")

	batch := new(leveldb.Batch)

	for _, key := range keys {
		batch.Delete(key)
	}

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
func (db *DB) RawGet(key []byte) *skv.Reply {

	rpl := skv.NewReply("")

	if data, err := db.ldb.Get(key, nil); err != nil {

		// compare against the exported sentinel instead of the error string
		if err == leveldb.ErrNotFound {
			rpl.Status = skv.ReplyNotFound
		} else {
			rpl.Status = err.Error()
		}

	} else {
		rpl.Data = append(rpl.Data, data)
	}

	return rpl
}
func (db *DB) _raw_ssttl_get(ns byte, key []byte) *skv.Reply {

	key = skv.RawNsKeyConcat(ns, key)

	rpl, ttl := skv.NewReply(""), int64(0)

	if ttlat := dbutil.BytesToUint64(db.RawGet(skv.RawTtlEntry(key)).Bytes()); ttlat > 0 {
		ttl = (dbutil.MetaTimeParse(ttlat).UnixNano() - time.Now().UTC().UnixNano()) / 1e6
	}

	if ttl < 0 {
		ttl = 0
	}

	rpl.Data = append(rpl.Data, []byte(strconv.FormatInt(ttl, 10)))

	return rpl
}
// HashScan iterates the fields of a hash between cursor and end, returning up
// to limit field/value pairs interleaved in Reply.Data.
func (db *DB) HashScan(key, cursor, end []byte, limit uint32) *skv.Reply {

	var (
		prefix = skv.RawNsKeyEncode(skv.NsHashEntry, key)
		prelen = len(prefix)
		// concatenate into fresh slices so the two bounds cannot share a
		// backing array (appending to the same prefix twice could otherwise
		// clobber cstart while building cend)
		cstart = dbutil.BytesConcat(prefix, cursor)
		cend   = dbutil.BytesConcat(prefix, end)
		rpl    = skv.NewReply("")
	)

	for i := len(cend); i < 256; i++ {
		cend = append(cend, 0xff)
	}

	if limit > skv.ScanLimitMax {
		limit = skv.ScanLimitMax
	}

	iter := db.ldb.NewIterator(&util.Range{Start: cstart, Limit: cend}, nil)

	for iter.Next() {

		if limit < 1 {
			break
		}

		if len(iter.Key()) < prelen {
			continue
		}

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()[prelen:]))
		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Value()))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
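// Usage sketch: HashScan interleaves field/value pairs in Reply.Data, so the
// result is consumed two entries at a time. The hash key is hypothetical.
func exampleHashScan(db *DB) {

	rpl := db.HashScan([]byte("profile:1"), []byte{}, []byte{}, 100)

	for i := 0; i+1 < len(rpl.Data); i += 2 {
		field, value := rpl.Data[i], rpl.Data[i+1]
		_, _ = field, value // process one field/value pair
	}
}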
func (db *DB) SsDel(key, member []byte) *skv.Reply {

	batch := new(leveldb.Batch)

	batch.Delete(skv.SortSetsNsEntryKey(key, member))

	if prev := db.SsGet(key, member); prev.Status == skv.ReplyOK {
		db.RawIncrby(skv.SortSetsNsLengthKey(key), -1)
		batch.Delete(skv.SortSetsNsScoreKey(key, member, prev.Uint64()))
	}

	rpl := skv.NewReply("")

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
// RawIncrby atomically adjusts an unsigned decimal counter stored at key.
// Decrements past zero clamp the counter at zero instead of wrapping.
func (db *DB) RawIncrby(key []byte, step int64) *skv.Reply {

	if step == 0 {
		return skv.NewReply("")
	}

	_raw_incr_locker.Lock()
	defer _raw_incr_locker.Unlock()

	num := uint64(0)

	rpl := db.RawGet(key)
	if rpl.Status == skv.ReplyOK {
		num = rpl.Uint64()
	}

	if step < 0 {
		if uint64(-step) > num {
			num = 0
		} else {
			num = num - uint64(-step)
		}
	} else {
		num += uint64(step)
	}

	bnum := []byte(strconv.FormatUint(num, 10))

	rpl = db.RawPut(key, bnum, 0)
	if rpl.Status == skv.ReplyOK {
		rpl.Data = append(rpl.Data, bnum)
	}

	return rpl
}
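// Usage sketch (counter key is hypothetical): the counter is stored as an
// unsigned decimal string, so stepping below zero clamps at zero.
func exampleRawIncrby(db *DB) {

	key := skv.RawNsKeyConcat(skv.NsKvEntry, []byte("hits"))

	_ = db.RawIncrby(key, 5)   // counter becomes 5
	_ = db.RawIncrby(key, -10) // counter clamps at 0
}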
func (db *DB) RawRevScan(cursor, end []byte, limit uint32) *skv.Reply {

	rpl := skv.NewReply("")

	if len(cursor) < 1 {
		cursor = end
	}

	for i := len(cursor); i < 256; i++ {
		cursor = append(cursor, 0x00)
	}

	for i := len(end); i < 256; i++ {
		end = append(end, 0xff)
	}

	if limit > skv.ScanLimitMax {
		limit = skv.ScanLimitMax
	}

	iter := db.ldb.NewIterator(&util.Range{Start: cursor, Limit: end}, nil)

	for ok := iter.Last(); ok; ok = iter.Prev() {

		if limit < 1 {
			break
		}

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()))
		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Value()))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
func (db *DB) _raw_ssttlat_range(score_start, score_end, limit uint64) *skv.Reply {

	var (
		bs_start = skv.RawTtlQueuePrefix(score_start)
		bs_end   = skv.RawTtlQueuePrefix(score_end)
		rpl      = skv.NewReply("")
	)

	for i := len(bs_end); i < 256; i++ {
		bs_end = append(bs_end, 0xff)
	}

	iter := db.ldb.NewIterator(&util.Range{Start: bs_start, Limit: bs_end}, nil)

	for iter.Next() {

		if limit < 1 {
			break
		}

		if len(iter.Key()) < 10 {
			db.RawDel(iter.Key())
			continue
		}

		ui64 := binary.BigEndian.Uint64(iter.Key()[1:9])

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()))
		rpl.Data = append(rpl.Data, []byte(strconv.FormatUint(ui64, 10)))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
// SsRange scans a sorted set in score order, returning up to limit
// member/score pairs interleaved in Reply.Data.
func (db *DB) SsRange(key []byte, score_start, score_end uint64, limit uint32) *skv.Reply {

	var (
		bs_start = skv.SortSetsNsScorePrefix(key, score_start)
		bs_end   = skv.SortSetsNsScorePrefix(key, score_end)
		rpl      = skv.NewReply("")
	)

	for i := len(bs_end); i < 256; i++ {
		bs_end = append(bs_end, 0xff)
	}

	iter := db.ldb.NewIterator(&util.Range{Start: bs_start, Limit: bs_end}, nil)

	for iter.Next() {

		if limit < 1 {
			break
		}

		// too short to hold the 8-byte score plus a member: clean it up
		if len(iter.Key()) < (len(key) + 10) {
			db.RawDel(iter.Key())
			continue
		}

		// key layout: 2-byte namespace header, set key, 8-byte big-endian
		// score, then the member
		ui64 := binary.BigEndian.Uint64(iter.Key()[len(key)+2 : (len(key) + 10)])

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()[(len(key)+10):]))
		rpl.Data = append(rpl.Data, []byte(strconv.FormatUint(ui64, 10)))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
// ObjectDocSchemaSync registers (or updates) the index schema of a document
// fold. Indexes that were removed or changed are swept from disk, and new or
// changed indexes are rebuilt by rescanning every document in the fold.
func (db *DB) ObjectDocSchemaSync(fold string, schema skv.ObjectDocSchema) *skv.Reply {

	_obj_doc_global_locker.Lock()
	defer _obj_doc_global_locker.Unlock()

	var (
		rpl  = skv.NewReply("")
		key  = skv.ObjectDocFoldKey(fold)
		prev skv.ObjectDocSchema
	)

	if len(schema.Indexes) > skv.ObjectDocSchemaMaxIndex {
		rpl.Status = skv.ReplyBadArgument
		return rpl
	}

	if rs := db.RawGet(skv.ObjectDocSchemaKey(key)); rs.Status == skv.ReplyOK {
		rs.JsonDecode(&prev)
	}

	for i, ei := range schema.Indexes {
		ei.Column = skv.ObjectDocIndexStringFilter(ei.Column)
		schema.Indexes[i] = ei
	}

	for _, pi := range prev.Indexes {

		removed, changed := true, false

		for j, ei := range schema.Indexes {

			if pi.Column == ei.Column {

				removed = false

				if pi.Type != ei.Type ||
					pi.Length != ei.Length ||
					pi.Unique != ei.Unique ||
					pi.AutoIncr != ei.AutoIncr {
					changed = true
				}

				ei.Seq = pi.Seq
				schema.Indexes[j] = ei

				break
			}
		}

		if removed || changed {

			objIdxKeyPrefix, limit := skv.ObjectDocIndexFieldPrefix(key, pi.Seq), 1000

			// fmt.Println("WARN CLEAN prev INDEXES", pi.Column)

			// sweep the stale index entries in batches
			for {

				rs := db.RawScan(objIdxKeyPrefix, objIdxKeyPrefix, uint32(limit)).Hash()

				if len(rs) > 0 {

					batch := new(leveldb.Batch)

					for _, entry := range rs {
						batch.Delete(entry.Key)
					}

					db.ldb.Write(batch, nil)
				}

				if len(rs) < limit {
					break
				}
			}
		}
	}

	for i, ei := range schema.Indexes {

		reidx := true

		for _, pi := range prev.Indexes {

			if pi.Column == ei.Column {

				ei.Seq = pi.Seq
				schema.Indexes[i] = ei

				if pi.Type == ei.Type &&
					pi.Length == ei.Length &&
					pi.Unique == ei.Unique &&
					pi.AutoIncr == ei.AutoIncr &&
					pi.Seq > 0 {
					reidx = false
				}

				break
			}
		}

		// reset the sequence when another column already claims it
		for _, ci := range schema.Indexes {
			if ei.Seq == ci.Seq && ei.Column != ci.Column {
				ei.Seq = 0
				schema.Indexes[i] = ei
				break
			}
		}

		if ei.Seq == 0 {

			reidx = true

			// pick the first free sequence in 1..255; the condition j > 0
			// stops the loop once the uint8 wraps, instead of spinning
			// forever as the original j <= 255 condition would
			for j := uint8(1); j > 0; j++ {

				dup := false

				for _, ci := range schema.Indexes {
					if j == ci.Seq && ei.Column != ci.Column {
						dup = true
						break
					}
				}

				if !dup {
					ei.Seq = j
					schema.Indexes[i] = ei
					break
				}
			}
		}

		if reidx && ei.Seq > 0 {

			// fmt.Println("WARN NEW INDEXES", ei.Column, ei.Seq)

			offset, limit := "", 1000

			for {

				rs := db.ObjectScan(fold, offset, "", uint32(limit)).ObjectList()

				batch := new(leveldb.Batch)

				for _, entry := range rs {

					var obj map[string]interface{}

					if err := entry.Data.JsonDecode(&obj); err == nil {

						for mk, mv := range obj {

							mk = skv.ObjectDocIndexStringFilter(mk)

							if mk != ei.Column {
								continue
							}

							if bs, ok := skv.ObjectDocIndexValue(&ei, reflect.ValueOf(mv)); ok {
								batch.Put(dbutil.BytesConcat(skv.ObjectDocIndexFieldPrefix(key, ei.Seq), bs, entry.Key), []byte{})
							}

							break
						}
					}

					// advance the scan offset for every entry, even when the
					// document failed to decode, so the loop cannot stall
					offset = dbutil.BytesToHexString(entry.Key)
				}

				db.ldb.Write(batch, nil)

				if len(rs) < limit {
					break
				}
			}
		}
	}

	skey := string(key)

	rpl = db._raw_put_json(skv.ObjectDocSchemaKey(key), schema, 0)
	if rpl.Status == skv.ReplyOK {
		_obj_doc_indexes[skey] = schema
	}

	return rpl
}
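// Usage sketch (fold and column names are hypothetical; the literal shape is
// inferred from the fields this file reads): registering a schema with a
// unique auto-increment "id" index and a plain "age" index. Re-running the
// sync with a changed index definition triggers the sweep-and-reindex passes
// above.
func exampleObjectDocSchemaSync(db *DB) {

	_ = db.ObjectDocSchemaSync("users", skv.ObjectDocSchema{
		Indexes: []skv.ObjectDocSchemaIndexEntry{
			{Column: "id", Type: skv.ObjectDocSchemaIndexTypeUint, Length: 8, Unique: true, AutoIncr: true},
			{Column: "age", Type: skv.ObjectDocSchemaIndexTypeUint, Length: 4},
		},
	})
}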
// ObjectDocPut stores a document (a struct or a string-keyed map), maintains
// its secondary indexes, and fills in auto-increment columns.
func (db *DB) ObjectDocPut(fold, key string, obj interface{}, opts *skv.ObjectWriteOptions) *skv.Reply {

	_obj_doc_global_locker.Lock()
	defer _obj_doc_global_locker.Unlock()

	var (
		opath = skv.NewObjectPathKey(fold, key)
		rpl   = skv.NewReply("")
	)

	if len(opath.Fold) > skv.ObjectDocKeyLenMax ||
		len(opath.Field) > skv.ObjectDocPriLenMax ||
		obj == nil {
		rpl.Status = skv.ReplyBadArgument
		return rpl
	}

	var (
		bkey    = opath.EntryIndex()
		objt    = reflect.TypeOf(obj)
		objv    = reflect.ValueOf(obj)
		prev    = map[string]interface{}{}
		previdx = map[uint8]skv.ObjectDocSchemaIndexEntryBytes{}
		set     = map[string]interface{}{}
	)

	if opts == nil {
		opts = _obj_options_def
	}

	prevobj := db.RawGet(bkey).Object()
	if prevobj.Status == skv.ReplyOK {
		if err := prevobj.Data.JsonDecode(&prev); err == nil {
			previdx = skv.ObjectDocIndexDataExport(_obj_doc_indexes, opath.Fold, prev)
		}
	}

	if objt.Kind() == reflect.Struct {

		for i := 0; i < objt.NumField(); i++ {
			set[skv.ObjectDocIndexStringFilter(objt.Field(i).Name)] = objv.Field(i).Interface()
		}

	} else if objt.Kind() == reflect.Map {

		for _, mkv := range objv.MapKeys() {
			if mkv.Kind() == reflect.String {
				set[skv.ObjectDocIndexStringFilter(mkv.String())] = objv.MapIndex(mkv).Interface()
			}
		}

	} else {
		rpl.Status = skv.ReplyBadArgument
		return rpl
	}

	setidx, idxnew, idxdup := skv.ObjectDocIndexDataExport(_obj_doc_indexes, opath.Fold, set), [][]byte{}, [][]byte{}

	// fmt.Println("\tsetidx", setidx)
	// fmt.Println("\tprevidx", previdx)

	for siKey, siEntry := range setidx {

		var incr_set, incr_prev uint64

		if siEntry.AutoIncr {
			incr_set = dbutil.BytesToUint64(siEntry.Data)
		}

		// diff against the previous index entry for this column
		if piEntry, ok := previdx[siKey]; ok {

			if siEntry.AutoIncr && incr_set == 0 {

				if incr_prev = dbutil.BytesToUint64(piEntry.Data); incr_prev > 0 {
					siEntry.Data, incr_set = piEntry.Data, incr_prev
					set[siEntry.FieldName] = incr_set
					continue
				}

			} else if bytes.Equal(piEntry.Data, siEntry.Data) {
				continue
			}

			idxdup = append(idxdup, append(append(skv.ObjectDocIndexFieldPrefix(opath.Fold, siKey), piEntry.Data...), opath.Field...))
		}

		// allocate or fast-forward the auto-increment counter
		if siEntry.AutoIncr {

			if incr_set == 0 {

				incr_set = db.RawIncrby(skv.ObjectDocIndexIncrKey(opath.Fold, siEntry.Seq), 1).Uint64()

				ibs := make([]byte, 8)
				binary.BigEndian.PutUint64(ibs, incr_set)

				siEntry.Data = ibs[(8 - len(siEntry.Data)):]

				set[siEntry.FieldName] = incr_set

			} else if incr_set > 0 && incr_set > incr_prev {

				if db.RawGet(skv.ObjectDocIndexIncrKey(opath.Fold, siEntry.Seq)).Uint64() < incr_set {
					db.RawPut(skv.ObjectDocIndexIncrKey(opath.Fold, siEntry.Seq), []byte(strconv.FormatUint(incr_set, 10)), 0)
				}
			}
		}

		// unique and auto-increment columns must not collide with an
		// existing index entry
		if siEntry.Unique || siEntry.AutoIncr {

			objIdxKeyPrefix := append(skv.ObjectDocIndexFieldPrefix(opath.Fold, siKey), siEntry.Data...)

			if rs := db.RawScan(objIdxKeyPrefix, []byte{}, 1).Hash(); len(rs) > 0 {
				rpl.Status = skv.ReplyBadArgument
				return rpl
			}
		}

		idxnew = append(idxnew, append(append(skv.ObjectDocIndexFieldPrefix(opath.Fold, siKey), siEntry.Data...), opath.Field...))
	}

	// apply index changes and the document itself in one batch
	batch := new(leveldb.Batch)

	for _, idxkey := range idxdup {
		batch.Delete(idxkey)
	}

	for _, idxkey := range idxnew {
		batch.Put(idxkey, []byte{})
	}

	bvalue, _ := dbutil.JsonEncode(set)
	sum := crc32.ChecksumIEEE(bvalue)

	if prevobj.Meta.Sum == sum {
		return skv.NewReply(skv.ReplyOK)
	}

	db._obj_meta_sync(skv.ObjectTypeDocument, &prevobj.Meta, opath, int64(len(bvalue)), sum, opts)

	batch.Put(bkey, append(prevobj.Meta.Export(), bvalue...))

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
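// Usage sketch: documents can be string-keyed maps (as here) or structs. The
// fold, primary key, and field values are hypothetical; with the schema from
// the previous sketch, the "id" column would be auto-assigned on first write.
func exampleObjectDocPut(db *DB) {

	_ = db.ObjectDocPut("users", "a1b2c3", map[string]interface{}{
		"name": "alice",
		"age":  30,
	}, nil)
}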
// TODO btree
//	https://github.com/petar/GoLLRB
//	https://github.com/google/btree
func (db *DB) ObjectDocQuery(fold string, qry *skv.ObjectDocQuerySet) *skv.Reply {

	var (
		rpl  = skv.NewReply(skv.ReplyBadArgument)
		key  = skv.ObjectDocFoldKey(fold)
		skey = string(key)
	)

	schema, ok := _obj_doc_indexes[skey]
	if !ok {
		return rpl
	}

	idxs := map[string]skv.ObjectDocSchemaIndexEntry{}

	for _, idx := range schema.Indexes {

		// only uint indexes can drive sorting
		if qry.SortField == idx.Column && idx.Type != skv.ObjectDocSchemaIndexTypeUint {
			return rpl
		}

		idxs[idx.Column] = idx
	}

	for _, filter := range qry.Filters {
		if _, ok := idxs[filter.Field]; !ok {
			return rpl
		}
	}

	sls := [][]byte{}

	if idx, ok := idxs[qry.SortField]; ok {

		start, end := skv.ObjectDocIndexFieldPrefix(key, idx.Seq), skv.ObjectDocIndexFieldPrefix(key, idx.Seq)

		rs := []skv.ReplyEntry{}

		for {

			if qry.SortMode == skv.ObjectDocQuerySortAttrDesc {
				rs = db.RawRevScan(start, end, skv.ObjectDocScanMax).Hash()
			} else {
				rs = db.RawScan(start, end, skv.ObjectDocScanMax).Hash()
			}

			for _, v := range rs {

				if _, bkey, ok := skv.ObjectDocIndexRawKeyExport(v.Key, idx.Length); ok {
					sls = append(sls, dbutil.BytesClone(bkey))
				}

				if qry.SortMode == skv.ObjectDocQuerySortAttrDesc {
					end = skv.ObjectDocBytesDecr(v.Key)
				} else {
					start = skv.ObjectDocBytesIncr(v.Key)
				}
			}

			if uint32(len(rs)) < skv.ObjectDocScanMax {
				break
			}
		}
	}

	sls_ok := false
	if len(sls) > 0 {
		sls_ok = true
	}

	for _, filter := range qry.Filters {

		idx, ok := idxs[filter.Field]
		if !ok {
			continue
		}

		if idx.Type != skv.ObjectDocSchemaIndexTypeUint {
			continue
		}

		vstart, vend, values := []byte{}, []byte{}, [][]byte{}

		for _, v := range filter.Values {

			vb := dbutil.SintToBytes(v, idx.Length)

			dup := false
			for _, pvb := range values {
				if bytes.Equal(pvb, vb) {
					dup = true
					break
				}
			}

			if !dup {

				values = append(values, vb)

				if (filter.Type == skv.ObjectDocQueryFilterValues && !filter.Exclude) ||
					filter.Type == skv.ObjectDocQueryFilterRange {

					if len(vstart) < 1 {
						vstart = vb
					} else if bytes.Compare(vb, vstart) < 1 {
						vstart = vb
					}

					if bytes.Compare(vb, vend) > 0 {
						vend = vb
					}
				}
			}
		}

		var (
			kpre = skv.ObjectDocIndexFieldPrefix(key, idx.Seq)
			// build the bounds as fresh slices so they cannot share a
			// backing array via append on kpre
			start   = dbutil.BytesConcat(kpre, vstart)
			end     = dbutil.BytesConcat(kpre, vend)
			fitkeys = map[string]empty{}
		)

		for {

			rs := db.RawScan(start, end, skv.ObjectDocScanMax).Hash()

			for _, v := range rs {

				if _, bkey, ok := skv.ObjectDocIndexRawKeyExport(v.Key, idx.Length); ok {
					if sls_ok {
						fitkeys[string(bkey)] = empty{}
					} else {
						sls = append(sls, dbutil.BytesClone(bkey))
					}
				}

				start = skv.ObjectDocBytesIncr(v.Key)
			}

			if uint32(len(rs)) < skv.ObjectDocScanMax {
				break
			}
		}

		if sls_ok {

			// intersect the sorted key list with this filter's hits
			sls_buf := sls
			sls = [][]byte{}

			for _, gv := range sls_buf {
				if _, ok := fitkeys[string(gv)]; ok {
					sls = append(sls, gv)
				}
			}
		}

		sls_ok = true
	}

	if !sls_ok {

		// TODO
		tls := db.ObjectScan(fold, "", "", uint32(qry.Offset+qry.Limit)).Hash()

		for i := qry.Offset; i < len(tls); i++ {
			rpl.Data = append(rpl.Data, tls[i].Key, tls[i].Value)
		}

		return rpl
	}

	if len(sls) <= qry.Offset {
		return rpl
	}

	cutoff := qry.Offset + qry.Limit
	if cutoff > len(sls) {
		cutoff = len(sls)
	}

	for i := qry.Offset; i < cutoff; i++ {

		if rs := db.ObjectDocGet(fold, dbutil.BytesToHexString(sls[i])); rs.Status == skv.ReplyOK {
			rpl.Data = append(rpl.Data, sls[i], rs.Bytes())
		}
	}

	rpl.Status = skv.ReplyOK

	return rpl
}
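// Usage sketch (the ObjectDocQuerySet literal shape is an assumption based on
// the fields this method reads: SortField, SortMode, Filters, Offset, Limit):
// list up to 10 documents from "users", sorted by "age" descending.
func exampleObjectDocQuery(db *DB) {

	rpl := db.ObjectDocQuery("users", &skv.ObjectDocQuerySet{
		SortField: "age",
		SortMode:  skv.ObjectDocQuerySortAttrDesc,
		Offset:    0,
		Limit:     10,
	})

	// Reply.Data interleaves document keys and encoded documents
	for i := 0; i+1 < len(rpl.Data); i += 2 {
		_, _ = rpl.Data[i], rpl.Data[i+1]
	}
}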