func (x *fileIndex) Delete(indexedValues []interface{}, h int64) error { for i, indexedValue := range indexedValues { chunk, ok := indexedValue.(chunk) if ok { indexedValues[i] = chunk.b } } t := x.t var k []byte var err error switch { case !x.unique: k, err = lldb.EncodeScalars(append(indexedValues, h)...) case isIndexNull(indexedValues): // unique, NULL k, err = lldb.EncodeScalars(nil, h) default: // unique, non NULL k, err = lldb.EncodeScalars(append(indexedValues, int64(0))...) } if err != nil { return err } return t.Delete(k) }
func (t *fileTemp) Set(k, v []interface{}) (err error) { if err = expand(k); err != nil { return } if err = expand(v); err != nil { return } infer(k, &t.colsK) infer(v, &t.colsV) if err = t.flatten(k); err != nil { return } bk, err := lldb.EncodeScalars(k...) if err != nil { return } if err = t.flatten(v); err != nil { return } bv, err := lldb.EncodeScalars(v...) if err != nil { return } return t.t.Set(bk, bv) }
func encVal(val interface{}) (r []byte, err error) { switch x := val.(type) { case []interface{}: return lldb.EncodeScalars(x...) default: return lldb.EncodeScalars(x) } }
func (c *ColumnCoordinate) String() string { b, err := lldb.EncodeScalars(c.Table, c.Row, c.Family, c.Qual) if err != nil { log.Fatal(err) } return string(b) }
func (c *Column) String() string { b, err := lldb.EncodeScalars(c.Family, c.Qual) if err != nil { log.Fatal(err) } return string(b) }
func (t *fileTemp) Set(k, v []interface{}) (err error) { infer(k, &t.colsK) infer(v, &t.colsV) bk, err := lldb.EncodeScalars(k...) if err != nil { return } bv, err := lldb.EncodeScalars(v...) if err != nil { return } return t.t.Set(bk, bv) }
func create2(a *lldb.Allocator, data ...interface{}) (h int64, err error) { b, err := lldb.EncodeScalars(data...) if err != nil { return } return a.Alloc(b) }
// The []byte version of the key in the BTree shares chunks, if any, with // the value stored in the record. func (x *fileIndex) Create(indexedValues []interface{}, h int64) error { for i, indexedValue := range indexedValues { chunk, ok := indexedValue.(chunk) if ok { indexedValues[i] = chunk.b } } t := x.t switch { case !x.unique: k, err := lldb.EncodeScalars(append(indexedValues, h)...) if err != nil { return err } return t.Set(k, gbZeroInt64) case isIndexNull(indexedValues): // unique, NULL k, err := lldb.EncodeScalars(nil, h) if err != nil { return err } return t.Set(k, gbZeroInt64) default: // unique, non NULL k, err := lldb.EncodeScalars(append(indexedValues, int64(0))...) if err != nil { return err } v, err := lldb.EncodeScalars(h) if err != nil { return err } _, _, err = t.Put(nil, k, func(key, old []byte) (new []byte, write bool, err error) { if old == nil { return v, true, nil } return nil, false, fmt.Errorf("(file-018) cannot insert into unique index: duplicate value(s): %v", indexedValues) }) return err } }
func (s *file) Update(h int64, data ...interface{}) (err error) { b, err := lldb.EncodeScalars(data...) if err != nil { return } defer s.lock()() return s.a.Realloc(h, b) }
func (x *fileIndex) Seek(indexedValues []interface{}) (indexIterator, bool, error) { //TODO(indices) blobs: +test k, err := lldb.EncodeScalars(append(indexedValues, 0)...) if err != nil { return nil, false, err } en, hit, err := x.t.Seek(k) if err != nil { return nil, false, err } return &fileIndexIterator{x.f, en, x.unique}, hit, nil }
func (t *fileTemp) Get(k []interface{}) (v []interface{}, err error) { bk, err := lldb.EncodeScalars(k...) if err != nil { return } bv, err := t.t.Get(nil, bk) if err != nil { return } return lldb.DecodeScalars(bv) }
// The []byte version of the key in the BTree shares chunks, if any, with // the value stored in the record. func (x *fileIndex) Create(indexedValue interface{}, h int64) error { t := x.t switch { case !x.unique: k, err := lldb.EncodeScalars(indexedValue, h) if err != nil { return err } return t.Set(k, gbZeroInt64) case indexedValue == nil: // unique, NULL k, err := lldb.EncodeScalars(nil, h) if err != nil { return err } return t.Set(k, gbZeroInt64) default: // unique, non NULL k, err := lldb.EncodeScalars(indexedValue, int64(0)) if err != nil { return err } v, err := lldb.EncodeScalars(h) if err != nil { return err } _, _, err = t.Put(nil, k, func(key, old []byte) (new []byte, write bool, err error) { if old == nil { return v, true, nil } return nil, false, fmt.Errorf("(file-018) cannot insert into unique index: duplicate value: %v", indexedValue) }) return err } }
func (x *fileIndex) Delete(indexedValue interface{}, h int64) error { chunk, ok := indexedValue.(chunk) if ok { indexedValue = chunk.b } t := x.t var k []byte var err error switch { case !x.unique: k, err = lldb.EncodeScalars(indexedValue, h) case indexedValue == nil: // unique, NULL k, err = lldb.EncodeScalars(nil, h) default: // unique, non NULL k, err = lldb.EncodeScalars(indexedValue, int64(0)) } if err != nil { return err } return t.Delete(k) }
func (s *file) Create(data ...interface{}) (h int64, err error) { if err = expand(data); err != nil { return } if err = s.flatten(data); err != nil { return } b, err := lldb.EncodeScalars(data...) if err != nil { return } defer s.lock()() return s.a.Alloc(b) }
func (t *fileTemp) Get(k []interface{}) (v []interface{}, err error) { if err = expand(k); err != nil { return } if err = t.flatten(k); err != nil { return nil, err } bk, err := lldb.EncodeScalars(k...) if err != nil { return } bv, err := t.t.Get(nil, bk) if err != nil { return } return lldb.DecodeScalars(bv) }
// Do calls f for every subscripts-value pair in s in ascending collation order
// of the subscripts. Do returns non nil error for general errors (eg. file
// read error). If f returns false or a non nil error then Do terminates and
// returns the value of error from f.
//
// Note: f can get called with a subscripts-value pair which actually may no
// longer exist - if some other goroutine introduces such data race.
// Coordination required to avoid this situation, if applicable/desirable, must
// be provided by the client of dbm.
func (s *Slice) Do(f func(subscripts, value []interface{}) (bool, error)) (err error) {
	var (
		db    = s.a.db
		noVal bool
	)
	if err = db.enter(); err != nil {
		return
	}

	// The DB lock is released around every call of f and re-acquired after
	// it returns; doLeave tracks whether the deferred leave is still owed.
	doLeave := true
	defer func() {
		if doLeave {
			db.leave(&err)
		}
	}()
	ok, err := s.a.validate(false)
	if !ok {
		return err
	}

	tree := s.a.tree
	// NOTE(review): presumably tree handle 1 marks a value-less system tree;
	// when set, non-nil values are replaced by a placeholder below — confirm.
	if !tree.IsMem() && tree.Handle() == 1 {
		noVal = true
	}

	// Four enumeration cases, selected by which of the optional from/to
	// bounds are present. All share the same leave/f/enter loop body.
	switch {
	case s.from == nil && s.to == nil:
		// Unbounded: seek to the prefix and stop when keys leave the prefix.
		bprefix, err := lldb.EncodeScalars(s.prefix...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return noEof(err)
			}

			// Stop (success) once the key no longer carries the prefix.
			if n := len(s.prefix); n != 0 {
				if len(k) < len(s.prefix) {
					return nil
				}

				c, err := lldb.Collate(k[:n], s.prefix, nil)
				if err != nil {
					return err
				}

				if c > 0 {
					return nil
				}
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return err
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			// f sees the subscripts with the slice prefix stripped.
			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	case s.from == nil && s.to != nil:
		// Upper bound only: stop when the key collates above prefix+to.
		bprefix, err := lldb.EncodeScalars(s.prefix...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		to := append(append([]interface{}(nil), s.prefix...), s.to...)
		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return err
			}

			c, err := lldb.Collate(k, to, nil)
			if err != nil {
				return err
			}

			// NOTE(review): err is necessarily nil here, so this returns nil
			// (normal termination) — the unbounded case writes `return nil`.
			if c > 0 {
				return err
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return noEof(err)
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	case s.from != nil && s.to == nil:
		// Lower bound only: seek to prefix+from, stop when keys leave the prefix.
		bprefix, err := lldb.EncodeScalars(append(s.prefix, s.from...)...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return noEof(err)
			}

			if n := len(s.prefix); n != 0 {
				if len(k) < len(s.prefix) {
					return nil
				}

				c, err := lldb.Collate(k[:n], s.prefix, nil)
				if err != nil {
					return err
				}

				if c > 0 {
					return nil
				}
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return err
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	case s.from != nil && s.to != nil:
		// Both bounds: seek to prefix+from, stop above prefix+to.
		bprefix, err := lldb.EncodeScalars(append(s.prefix, s.from...)...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		to := append(append([]interface{}(nil), s.prefix...), s.to...)
		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return noEof(err)
			}

			c, err := lldb.Collate(k, to, nil)
			if err != nil {
				return err
			}

			// NOTE(review): as above, err is nil here — normal termination.
			if c > 0 {
				return err
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return err
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	default:
		panic("slice.go: internal error")
	}
}
// []interface{}{qltype, ...}->[]interface{}{lldb scalar type, ...} // + long blobs are (pre)written to a chain of chunks. func (s *file) flatten(data []interface{}) (err error) { for i, v := range data { tag := 0 var b []byte switch x := v.(type) { case []byte: tag = qBlob b = x case *big.Int: tag = qBigInt b, err = s.codec.encode(x) case *big.Rat: tag = qBigRat b, err = s.codec.encode(x) case time.Time: tag = qTime b, err = s.codec.encode(x) case time.Duration: tag = qDuration b, err = s.codec.encode(x) default: continue } if err != nil { return } const chunk = 1 << 16 chunks := 0 var next int64 var buf []byte for rem := len(b); rem > shortBlob; { n := mathutil.Min(rem, chunk) part := b[rem-n:] b = b[:rem-n] rem -= n switch next { case 0: // last chunk buf, err = lldb.EncodeScalars([]interface{}{part}...) default: // middle chunk buf, err = lldb.EncodeScalars([]interface{}{next, part}...) } if err != nil { return } h, err := s.a.Alloc(buf) if err != nil { return err } next = h chunks++ } switch next { case 0: // single chunk buf, err = lldb.EncodeScalars([]interface{}{tag, b}...) default: // multi chunks buf, err = lldb.EncodeScalars([]interface{}{tag, next, b}...) } if err != nil { return } data[i] = buf } return }
func init() { var err error if gbZeroInt64, err = lldb.EncodeScalars(int64(0)); err != nil { panic(err) } }