// NsGroupStatusIndex builds the key that stores the status record of object
// group group_number inside this bucket.
func (op *ObjectPath) NsGroupStatusIndex(group_number uint32) []byte {
	return dbutil.BytesConcat([]byte{NsObjectGroupStatus},
		op.BucketBytes(), dbutil.Uint32ToBytes(group_number))
}

// NsLogCounterIndex builds the key that stores the log counter of object
// group group_number inside this bucket.
func (op *ObjectPath) NsLogCounterIndex(group_number uint32) []byte {
	return dbutil.BytesConcat([]byte{NsObjectLogCounter},
		op.BucketBytes(), dbutil.Uint32ToBytes(group_number))
}

// NsLogEntryIndex builds the key of log entry num within object group
// group_number inside this bucket.
func (op *ObjectPath) NsLogEntryIndex(group_number uint32, num uint64) []byte {
	return dbutil.BytesConcat([]byte{NsObjectLogEntry},
		op.BucketBytes(), dbutil.Uint32ToBytes(group_number),
		dbutil.Uint64ToBytes(num))
}
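// Key layout sketch for the three helpers above (byte values are
// illustrative assumptions; the real namespace constants and the bucket
// encoding are defined elsewhere in this package):
//
//	op.NsGroupStatusIndex(2) => {NsObjectGroupStatus} + bucket + {0,0,0,2}
//	op.NsLogCounterIndex(2)  => {NsObjectLogCounter}  + bucket + {0,0,0,2}
//	op.NsLogEntryIndex(2, 9) => {NsObjectLogEntry}    + bucket + {0,0,0,2} + {0,0,0,0,0,0,0,9}
//
// Assuming dbutil.Uint32ToBytes/Uint64ToBytes emit fixed-width big-endian
// bytes, the log entries of one group sort contiguously and in numeric
// order under a prefix scan.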
// ObjectDocSchemaSync validates a document schema against the previously
// stored one, drops index entries whose definitions were removed or changed,
// (re)assigns index sequence ids, rebuilds index entries where needed, and
// finally persists the new schema.
func (db *DB) ObjectDocSchemaSync(fold string, schema skv.ObjectDocSchema) *skv.Reply {

	_obj_doc_global_locker.Lock()
	defer _obj_doc_global_locker.Unlock()

	var (
		rpl  = skv.NewReply("")
		key  = skv.ObjectDocFoldKey(fold)
		prev skv.ObjectDocSchema
	)

	if len(schema.Indexes) > skv.ObjectDocSchemaMaxIndex {
		rpl.Status = skv.ReplyBadArgument
		return rpl
	}

	// Load the previously stored schema, if any, for comparison.
	if rs := db.RawGet(skv.ObjectDocSchemaKey(key)); rs.Status == skv.ReplyOK {
		rs.JsonDecode(&prev)
	}

	// Normalize column names before any comparison.
	for i, ei := range schema.Indexes {
		ei.Column = skv.ObjectDocIndexStringFilter(ei.Column)
		schema.Indexes[i] = ei
	}

	// Pass 1: diff against the previous schema. Carry over sequence ids for
	// columns that persist, and delete all stored index entries for columns
	// that were removed or whose definition changed.
	for _, pi := range prev.Indexes {

		removed, changed := true, false

		for j, ei := range schema.Indexes {

			if pi.Column == ei.Column {

				removed = false

				if pi.Type != ei.Type ||
					pi.Length != ei.Length ||
					pi.Unique != ei.Unique ||
					pi.AutoIncr != ei.AutoIncr {
					changed = true
				}

				ei.Seq = pi.Seq
				schema.Indexes[j] = ei

				break
			}
		}

		if removed || changed {

			objIdxKeyPrefix, limit := skv.ObjectDocIndexFieldPrefix(key, pi.Seq), 1000

			// fmt.Println("WARN CLEAN prev INDEXES", pi.Column)

			// Scan and delete the stale index entries in batches until the
			// prefix range is empty.
			for {

				rs := db.RawScan(objIdxKeyPrefix, objIdxKeyPrefix, uint32(limit)).Hash()

				if len(rs) > 0 {

					batch := new(leveldb.Batch)

					for _, entry := range rs {
						batch.Delete(entry.Key)
					}

					db.ldb.Write(batch, nil)
				}

				if len(rs) < limit {
					break
				}
			}
		}
	}

	// Pass 2: assign sequence ids to new columns and rebuild index entries
	// where the definition is new or changed.
	for i, ei := range schema.Indexes {

		reidx := true

		for _, pi := range prev.Indexes {

			if pi.Column == ei.Column {

				ei.Seq = pi.Seq
				schema.Indexes[i] = ei

				if pi.Type == ei.Type &&
					pi.Length == ei.Length &&
					pi.Unique == ei.Unique &&
					pi.AutoIncr == ei.AutoIncr &&
					pi.Seq > 0 {
					reidx = false
				}

				break
			}
		}

		// Reset the sequence id if another column already claims it.
		for _, ci := range schema.Indexes {

			if ei.Seq == ci.Seq && ei.Column != ci.Column {
				ei.Seq = 0
				schema.Indexes[i] = ei
				break
			}
		}

		// Allocate the lowest free sequence id (1..255). The counter is an
		// int, not a uint8: with a uint8 counter the condition j <= 255 is
		// always true and the loop could never terminate.
		if ei.Seq == 0 {

			reidx = true

			for j := 1; j <= 255; j++ {

				dup := false

				for _, ci := range schema.Indexes {

					if uint8(j) == ci.Seq && ei.Column != ci.Column {
						dup = true
						break
					}
				}

				if !dup {
					ei.Seq = uint8(j)
					schema.Indexes[i] = ei
					break
				}
			}
		}

		if reidx && ei.Seq > 0 {

			// fmt.Println("WARN NEW INDEXES", ei.Column, ei.Seq)

			// Walk every object in the fold and write its index entry.
			offset, limit := "", 1000

			for {

				rs := db.ObjectScan(fold, offset, "", uint32(limit)).ObjectList()

				batch := new(leveldb.Batch)

				for _, entry := range rs {

					var obj map[string]interface{}
					if err := entry.Data.JsonDecode(&obj); err == nil {

						for mk, mv := range obj {

							mk = skv.ObjectDocIndexStringFilter(mk)

							if mk != ei.Column {
								continue
							}

							if bs, ok := skv.ObjectDocIndexValue(&ei, reflect.ValueOf(mv)); ok {
								batch.Put(dbutil.BytesConcat(
									skv.ObjectDocIndexFieldPrefix(key, ei.Seq), bs, entry.Key),
									[]byte{})
							}

							break
						}
					}

					// Advance the scan offset for every entry, not only the
					// ones that decode cleanly, so one bad record cannot
					// stall the loop on a full page.
					offset = dbutil.BytesToHexString(entry.Key)
				}

				db.ldb.Write(batch, nil)

				if len(rs) < limit {
					break
				}
			}
		}
	}

	skey := string(key)

	// Persist the synced schema and refresh the in-memory index cache.
	rpl = db._raw_put_json(skv.ObjectDocSchemaKey(key), schema, 0)
	if rpl.Status == skv.ReplyOK {
		_obj_doc_indexes[skey] = schema
	}

	return rpl
}
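// Usage sketch for ObjectDocSchemaSync. The index element type name and the
// field values below are assumptions for illustration (the entry fields
// Column, Type, Length, Unique, AutoIncr, Seq are the ones compared above);
// check the skv package for the exact type:
//
//	var schema skv.ObjectDocSchema
//	schema.Indexes = append(schema.Indexes, skv.ObjectDocSchemaIndex{
//		Column: "name",
//		Length: 32,
//		Unique: true,
//	})
//
//	if rpl := db.ObjectDocSchemaSync("/user/list", schema); rpl.Status != skv.ReplyOK {
//		// schema rejected (e.g. more than ObjectDocSchemaMaxIndex entries)
//		// or failed to persist
//	}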