// TODO btree
// https://github.com/petar/GoLLRB
// https://github.com/google/btree

// ObjectDocQuery runs an indexed query against a document fold: it loads
// the fold's cached schema, walks the sort index (if one is requested),
// intersects the result with each uint filter index, and pages the
// matching documents into the reply.
func (db *DB) ObjectDocQuery(fold string, qry *skv.ObjectDocQuerySet) *skv.Reply {

	var (
		rpl  = skv.NewReply(skv.ReplyBadArgument)
		key  = skv.ObjectDocFoldKey(fold)
		skey = string(key)
	)

	schema, ok := _obj_doc_indexes[skey]
	if !ok {
		return rpl
	}

	// sorting is only supported on uint-typed index columns
	idxs := map[string]skv.ObjectDocSchemaIndexEntry{}
	for _, idx := range schema.Indexes {

		if qry.SortField == idx.Column && idx.Type != skv.ObjectDocSchemaIndexTypeUint {
			return rpl
		}

		idxs[idx.Column] = idx
	}

	// every filter must reference an indexed column
	for _, filter := range qry.Filters {
		if _, ok := idxs[filter.Field]; !ok {
			return rpl
		}
	}

	sls := [][]byte{}

	// phase 1: collect document keys in sort order from the sort index
	if idx, ok := idxs[qry.SortField]; ok {

		start, end := skv.ObjectDocIndexFieldPrefix(key, idx.Seq), skv.ObjectDocIndexFieldPrefix(key, idx.Seq)

		rs := []skv.ReplyEntry{}

		for {

			if qry.SortMode == skv.ObjectDocQuerySortAttrDesc {
				rs = db.RawRevScan(start, end, skv.ObjectDocScanMax).Hash()
			} else {
				rs = db.RawScan(start, end, skv.ObjectDocScanMax).Hash()
			}

			for _, v := range rs {

				if _, bkey, ok := skv.ObjectDocIndexRawKeyExport(v.Key, idx.Length); ok {
					sls = append(sls, dbutil.BytesClone(bkey))
				}

				// move the scan window past the last seen index entry
				if qry.SortMode == skv.ObjectDocQuerySortAttrDesc {
					end = skv.ObjectDocBytesDecr(v.Key)
				} else {
					start = skv.ObjectDocBytesIncr(v.Key)
				}
			}

			if uint32(len(rs)) < skv.ObjectDocScanMax {
				break
			}
		}
	}

	sls_ok := len(sls) > 0

	// phase 2: narrow the candidate set with each uint filter index
	for _, filter := range qry.Filters {

		idx, ok := idxs[filter.Field]
		if !ok {
			continue
		}

		if idx.Type != skv.ObjectDocSchemaIndexTypeUint {
			continue
		}

		// dedupe the filter values and track the min/max scan bounds
		vstart, vend, values := []byte{}, []byte{}, [][]byte{}

		for _, v := range filter.Values {

			vb := dbutil.SintToBytes(v, idx.Length)

			dup := false
			for _, pvb := range values {

				if bytes.Equal(pvb, vb) {
					dup = true
					break
				}
			}

			if !dup {

				values = append(values, vb)

				if (filter.Type == skv.ObjectDocQueryFilterValues && !filter.Exclude) ||
					filter.Type == skv.ObjectDocQueryFilterRange {

					if len(vstart) < 1 || bytes.Compare(vb, vstart) <= 0 {
						vstart = vb
					}

					if bytes.Compare(vb, vend) > 0 {
						vend = vb
					}
				}
			}
		}

		var (
			kpre = skv.ObjectDocIndexFieldPrefix(key, idx.Seq)
			// concat instead of append so start and end never share
			// kpre's backing array
			start   = dbutil.BytesConcat(kpre, vstart)
			end     = dbutil.BytesConcat(kpre, vend)
			fitkeys = map[string]empty{}
		)

		for {

			rs := db.RawScan(start, end, skv.ObjectDocScanMax).Hash()

			for _, v := range rs {

				if _, bkey, ok := skv.ObjectDocIndexRawKeyExport(v.Key, idx.Length); ok {

					if sls_ok {
						fitkeys[string(bkey)] = empty{}
					} else {
						sls = append(sls, dbutil.BytesClone(bkey))
					}
				}

				start = skv.ObjectDocBytesIncr(v.Key)
			}

			if uint32(len(rs)) < skv.ObjectDocScanMax {
				break
			}
		}

		// keep only previously collected keys that this filter also matched
		if sls_ok {

			sls_buf := sls
			sls = [][]byte{}

			for _, gv := range sls_buf {

				if _, ok := fitkeys[string(gv)]; ok {
					sls = append(sls, gv)
				}
			}
		}

		sls_ok = true
	}

	if !sls_ok {

		// TODO: no usable index; fall back to a plain object scan
		tls := db.ObjectScan(fold, "", "", uint32(qry.Offset+qry.Limit)).Hash()

		for i := qry.Offset; i < len(tls); i++ {
			rpl.Data = append(rpl.Data, tls[i].Key, tls[i].Value)
		}

		// mark the reply OK so the fallback data is usable by callers
		rpl.Status = skv.ReplyOK

		return rpl
	}

	if len(sls) <= qry.Offset {
		return rpl
	}

	cutoff := qry.Offset + qry.Limit
	if cutoff > len(sls) {
		cutoff = len(sls)
	}

	// phase 3: fetch the documents for the requested page
	for i := qry.Offset; i < cutoff; i++ {

		if rs := db.ObjectDocGet(fold, dbutil.BytesToHexString(sls[i])); rs.Status == skv.ReplyOK {
			rpl.Data = append(rpl.Data, sls[i], rs.Bytes())
		}
	}

	rpl.Status = skv.ReplyOK

	return rpl
}
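// Usage sketch for ObjectDocQuery. Illustrative only: "users" and "age"
// are placeholder fold/column names, and filters are omitted because the
// filter struct type is not shown in this file; only fields referenced
// above (SortField, SortMode, Offset, Limit) are assumed settable here.
//
//	qry := &skv.ObjectDocQuerySet{
//		SortField: "age",
//		SortMode:  skv.ObjectDocQuerySortAttrDesc,
//		Offset:    0,
//		Limit:     10,
//	}
//	if rpl := db.ObjectDocQuery("users", qry); rpl.Status == skv.ReplyOK {
//		// rpl.Data holds alternating document key/value entries
//	}
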
// ObjectDocSchemaSync merges a new index schema into the schema stored
// for the fold: entries of removed or changed indexes are purged, new or
// rebuilt indexes are backfilled by scanning the fold's documents, and
// the resulting schema is persisted and cached.
func (db *DB) ObjectDocSchemaSync(fold string, schema skv.ObjectDocSchema) *skv.Reply {

	_obj_doc_global_locker.Lock()
	defer _obj_doc_global_locker.Unlock()

	var (
		rpl  = skv.NewReply("")
		key  = skv.ObjectDocFoldKey(fold)
		prev skv.ObjectDocSchema
	)

	if len(schema.Indexes) > skv.ObjectDocSchemaMaxIndex {
		rpl.Status = skv.ReplyBadArgument
		return rpl
	}

	if rs := db.RawGet(skv.ObjectDocSchemaKey(key)); rs.Status == skv.ReplyOK {
		rs.JsonDecode(&prev)
	}

	// normalize column names before comparing against the previous schema
	for i, ei := range schema.Indexes {
		ei.Column = skv.ObjectDocIndexStringFilter(ei.Column)
		schema.Indexes[i] = ei
	}

	// purge index entries of columns that were removed or redefined
	for _, pi := range prev.Indexes {

		removed, changed := true, false

		for j, ei := range schema.Indexes {

			if pi.Column == ei.Column {

				removed = false

				if pi.Type != ei.Type ||
					pi.Length != ei.Length ||
					pi.Unique != ei.Unique ||
					pi.AutoIncr != ei.AutoIncr {
					changed = true
				}

				ei.Seq = pi.Seq
				schema.Indexes[j] = ei

				break
			}
		}

		if removed || changed {

			objIdxKeyPrefix, limit := skv.ObjectDocIndexFieldPrefix(key, pi.Seq), 1000

			// fmt.Println("WARN CLEAN prev INDEXES", pi.Column)

			for {

				rs := db.RawScan(objIdxKeyPrefix, objIdxKeyPrefix, uint32(limit)).Hash()

				if len(rs) > 0 {

					batch := new(leveldb.Batch)

					for _, entry := range rs {
						batch.Delete(entry.Key)
					}

					db.ldb.Write(batch, nil)
				}

				if len(rs) < limit {
					break
				}
			}
		}
	}

	for i, ei := range schema.Indexes {

		reidx := true

		// an unchanged column keeps its seq and needs no re-index
		for _, pi := range prev.Indexes {

			if pi.Column == ei.Column {

				ei.Seq = pi.Seq
				schema.Indexes[i] = ei

				if pi.Type == ei.Type &&
					pi.Length == ei.Length &&
					pi.Unique == ei.Unique &&
					pi.AutoIncr == ei.AutoIncr &&
					pi.Seq > 0 {
					reidx = false
				}

				break
			}
		}

		// a seq collision with another column forces re-allocation
		for _, ci := range schema.Indexes {

			if ei.Seq == ci.Seq && ei.Column != ci.Column {
				ei.Seq = 0
				schema.Indexes[i] = ei
				break
			}
		}

		if ei.Seq == 0 {

			reidx = true

			// find the first unused seq; iterate with an int so the
			// counter cannot wrap around at 255 and spin forever
			for j := 1; j <= 255; j++ {

				dup := false

				for _, ci := range schema.Indexes {

					if uint8(j) == ci.Seq && ei.Column != ci.Column {
						dup = true
						break
					}
				}

				if !dup {
					ei.Seq = uint8(j)
					schema.Indexes[i] = ei
					break
				}
			}
		}

		// backfill the index by scanning every document in the fold
		if reidx && ei.Seq > 0 {

			// fmt.Println("WARN NEW INDEXES", ei.Column, ei.Seq)

			offset, limit := "", 1000

			for {

				rs := db.ObjectScan(fold, offset, "", uint32(limit)).ObjectList()

				batch := new(leveldb.Batch)

				for _, entry := range rs {

					var obj map[string]interface{}

					if err := entry.Data.JsonDecode(&obj); err == nil {

						for mk, mv := range obj {

							mk = skv.ObjectDocIndexStringFilter(mk)

							if mk != ei.Column {
								continue
							}

							if bs, ok := skv.ObjectDocIndexValue(&ei, reflect.ValueOf(mv)); ok {
								batch.Put(dbutil.BytesConcat(skv.ObjectDocIndexFieldPrefix(key, ei.Seq), bs, entry.Key), []byte{})
							}

							break
						}
					}

					// advance the offset for every entry, decodable or
					// not, so a bad entry cannot stall the scan loop
					offset = dbutil.BytesToHexString(entry.Key)
				}

				db.ldb.Write(batch, nil)

				if len(rs) < limit {
					break
				}
			}
		}
	}

	// persist the merged schema and refresh the in-memory cache
	skey := string(key)

	rpl = db._raw_put_json(skv.ObjectDocSchemaKey(key), schema, 0)
	if rpl.Status == skv.ReplyOK {
		_obj_doc_indexes[skey] = schema
	}

	return rpl
}
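// Usage sketch for ObjectDocSchemaSync. Illustrative only: "users" and
// "age" are placeholder names and Length is an assumed byte width; Seq
// is left zero so the function allocates one and backfills the index.
//
//	schema := skv.ObjectDocSchema{
//		Indexes: []skv.ObjectDocSchemaIndexEntry{
//			{Column: "age", Type: skv.ObjectDocSchemaIndexTypeUint, Length: 4},
//		},
//	}
//	if rpl := db.ObjectDocSchemaSync("users", schema); rpl.Status == skv.ReplyOK {
//		// documents under "users" are now indexed on "age"
//	}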