func (s *file) loadChunks(enc []byte) (v interface{}, err error) {
	items, err := lldb.DecodeScalars(enc)
	if err != nil {
		return
	}

	var ok bool
	var next int64
	switch len(items) {
	case 2:
		// nop
	case 3:
		if next, ok = items[1].(int64); !ok || next == 0 {
			return nil, fmt.Errorf("corrupted DB: first chunk link")
		}
	default:
		return nil, fmt.Errorf("corrupted DB: first chunk")
	}

	typ, ok := items[0].(int64)
	if !ok {
		return nil, fmt.Errorf("corrupted DB: first chunk tag")
	}

	buf, ok := items[len(items)-1].([]byte)
	if !ok {
		return nil, fmt.Errorf("corrupted DB: first chunk data")
	}

	for next != 0 {
		b, err := s.a.Get(nil, next)
		if err != nil {
			return nil, err
		}

		if items, err = lldb.DecodeScalars(b); err != nil {
			return nil, err
		}

		switch len(items) {
		case 1:
			next = 0
		case 2:
			if next, ok = items[0].(int64); !ok {
				return nil, fmt.Errorf("corrupted DB: chunk link")
			}

			items = items[1:] // drop the link so items[0] is the chunk data
		default:
			return nil, fmt.Errorf("corrupted DB: chunk items %d (%v)", len(items), items)
		}

		if b, ok = items[0].([]byte); !ok {
			return nil, fmt.Errorf("corrupted DB: chunk data")
		}

		buf = append(buf, b...)
	}
	return s.codec.decode(buf, int(typ))
}
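The chunk records loadChunks walks are plain lldb scalar lists, so their shape can be exercised in isolation. Below is a minimal, standalone sketch of the self-contained first-chunk form (type tag plus data, the len(items) == 2 case above); the tag value 42, the payload, and the github.com/cznic/lldb import path are assumptions for illustration only.

package main

import (
	"fmt"
	"log"

	"github.com/cznic/lldb"
)

func main() {
	// Encode a self-contained first chunk: a type tag and the payload,
	// with no continuation link (the len(items) == 2 case in loadChunks).
	enc, err := lldb.EncodeScalars(int64(42), []byte("payload"))
	if err != nil {
		log.Fatal(err)
	}

	items, err := lldb.DecodeScalars(enc)
	if err != nil {
		log.Fatal(err)
	}

	typ := items[0].(int64)   // type tag
	data := items[1].([]byte) // chunk data
	fmt.Println(typ, string(data)) // 42 payload
}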
func (s *file) CreateTemp(asc bool) (bt temp, err error) {
	f, err := s.tempFile("", "ql-tmp-")
	if err != nil {
		return nil, err
	}

	fn := f.Name()
	filer := lldb.NewOSFiler(f)
	a, err := lldb.NewAllocator(filer, &lldb.Options{})
	if err != nil {
		f.Close()
		os.Remove(fn)
		return nil, err
	}

	k := 1
	if !asc {
		k = -1
	}

	t, _, err := lldb.CreateBTree(a, func(a, b []byte) int { //TODO w/ error return
		da, err := lldb.DecodeScalars(a)
		if err != nil {
			log.Panic(err)
		}

		if err = s.expandBytes(da); err != nil {
			log.Panic(err)
		}

		db, err := lldb.DecodeScalars(b)
		if err != nil {
			log.Panic(err)
		}

		if err = s.expandBytes(db); err != nil {
			log.Panic(err)
		}

		return k * collate(da, db)
	})
	if err != nil {
		f.Close()
		if fn != "" {
			os.Remove(fn)
		}
		return nil, err
	}

	x := &fileTemp{
		file: &file{
			a:     a,
			codec: newGobCoder(),
			f0:    f,
		},
		t: t,
	}
	return x, nil
}
func (s *file) freeChunks(enc []byte) (err error) {
	items, err := lldb.DecodeScalars(enc)
	if err != nil {
		return
	}

	var ok bool
	var next int64
	switch len(items) {
	case 2:
		return
	case 3:
		if next, ok = items[1].(int64); !ok || next == 0 {
			return fmt.Errorf("(file-007) corrupted DB: first chunk link")
		}
	default:
		return fmt.Errorf("(file-008) corrupted DB: first chunk")
	}

	for next != 0 {
		s.mu.Lock()
		b, err := s.a.Get(nil, next)
		s.mu.Unlock()
		if err != nil {
			return err
		}

		if items, err = lldb.DecodeScalars(b); err != nil {
			return err
		}

		var h int64
		switch len(items) {
		case 1:
			// nop
		case 2:
			if h, ok = items[0].(int64); !ok {
				return fmt.Errorf("(file-009) corrupted DB: chunk link")
			}
		default:
			return fmt.Errorf("(file-010) corrupted DB: chunk items %d (%v)", len(items), items)
		}

		s.mu.Lock()
		if err = s.a.Free(next); err != nil {
			s.mu.Unlock()
			return err
		}

		s.mu.Unlock()
		next = h
	}
	return
}
func (s *file) free(h int64, blobCols []*col) (err error) {
	b, err := s.a.Get(nil, h) //LATER +bufs
	if err != nil {
		return
	}

	rec, err := lldb.DecodeScalars(b)
	if err != nil {
		return
	}

	for _, col := range blobCols {
		if col.index >= len(rec) {
			return fmt.Errorf("(file-004) file.free: corrupted DB (record len)")
		}

		if col.index+2 >= len(rec) {
			continue
		}

		switch x := rec[col.index+2].(type) {
		case nil:
			// nop
		case []byte:
			if err = s.freeChunks(x); err != nil {
				return
			}
		}
	}
	defer s.lock()()
	return s.a.Free(h)
}
func (s *file) free(h int64, blobCols []*col) (err error) {
	b, err := s.a.Get(nil, h) //LATER +bufs
	if err != nil {
		return
	}

	rec, err := lldb.DecodeScalars(b)
	if err != nil {
		return
	}

	for _, col := range blobCols {
		if col.index >= len(rec) {
			return fmt.Errorf("file.free: corrupted DB (record len)")
		}

		var ok bool
		if b, ok = rec[col.index+2].([]byte); !ok {
			return fmt.Errorf("file.free: corrupted DB (chunk []byte)")
		}

		if err = s.freeChunks(b); err != nil {
			return
		}
	}
	return s.a.Free(h)
}
func (c *Column) ParseFromString(s string) {
	pairs, err := lldb.DecodeScalars([]byte(s))
	if err != nil {
		log.Fatal(err)
	}

	c.Family = pairs[0].([]byte)
	c.Qual = pairs[1].([]byte)
}
func (it *fileBTreeIterator) Next() (k, v []interface{}, err error) {
	bk, bv, err := it.en.Next()
	if err != nil {
		return
	}

	if k, err = lldb.DecodeScalars(bk); err != nil {
		return
	}

	for i, val := range k {
		b, ok := val.([]byte)
		if !ok {
			continue
		}

		c := chunk{it.t.file, b}
		if k[i], err = c.expand(); err != nil {
			return nil, nil, err
		}
	}

	if err = enforce(k, it.t.colsK); err != nil {
		return
	}

	if v, err = lldb.DecodeScalars(bv); err != nil {
		return
	}

	for i, val := range v {
		b, ok := val.([]byte)
		if !ok {
			continue
		}

		c := chunk{it.t.file, b}
		if v[i], err = c.expand(); err != nil {
			return nil, nil, err
		}
	}

	err = enforce(v, it.t.colsV)
	return
}
func collate(a, b []byte) (r int) {
	da, err := lldb.DecodeScalars(a)
	if err != nil {
		panic(err)
	}

	db, err := lldb.DecodeScalars(b)
	if err != nil {
		panic(err)
	}

	r, err = lldb.Collate(da, db, nil)
	if err != nil {
		panic(err)
	}

	return
}
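The collate helpers above ultimately defer to lldb.Collate over decoded scalar slices. A standalone sketch of that ordering follows; the github.com/cznic/lldb import path and the example values are assumptions, and passing nil as the third argument relies on the package's default string collation.

package main

import (
	"fmt"
	"log"

	"github.com/cznic/lldb"
)

func main() {
	a, err := lldb.EncodeScalars(int64(1), "alpha")
	if err != nil {
		log.Fatal(err)
	}

	b, err := lldb.EncodeScalars(int64(1), "beta")
	if err != nil {
		log.Fatal(err)
	}

	da, err := lldb.DecodeScalars(a)
	if err != nil {
		log.Fatal(err)
	}

	db, err := lldb.DecodeScalars(b)
	if err != nil {
		log.Fatal(err)
	}

	// Collate compares the decoded scalar slices element by element.
	c, err := lldb.Collate(da, db, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(c < 0) // true: (1, "alpha") sorts before (1, "beta")
}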
func (s *file) Read(dst []interface{}, h int64, cols ...*col) (data []interface{}, err error) { //NTYPE
	if s.wal != nil {
		defer s.rLock()()
	}

	b, err := s.a.Get(nil, h) //LATER +bufs
	if err != nil {
		return
	}

	rec, err := lldb.DecodeScalars(b)
	if err != nil {
		return
	}

	for _, col := range cols {
		i := col.index + 2
		switch col.typ {
		case 0:
		case qBool:
		case qComplex64:
			rec[i] = complex64(rec[i].(complex128))
		case qComplex128:
		case qFloat32:
			rec[i] = float32(rec[i].(float64))
		case qFloat64:
		case qInt8:
			rec[i] = int8(rec[i].(int64))
		case qInt16:
			rec[i] = int16(rec[i].(int64))
		case qInt32:
			rec[i] = int32(rec[i].(int64))
		case qInt64:
		case qString:
		case qUint8:
			rec[i] = uint8(rec[i].(uint64))
		case qUint16:
			rec[i] = uint16(rec[i].(uint64))
		case qUint32:
			rec[i] = uint32(rec[i].(uint64))
		case qUint64:
		case qBlob, qBigInt, qBigRat, qTime, qDuration:
			b, ok := rec[i].([]byte)
			if !ok {
				return nil, fmt.Errorf("corrupted DB: chunk type is not []byte")
			}

			rec[i] = chunk{f: s, b: b}
		default:
			log.Panic("internal error")
		}
	}
	return rec, nil
}
func (it *fileBTreeIterator) Next() (k, v []interface{}, err error) {
	bk, bv, err := it.en.Next()
	if err != nil {
		return
	}

	if k, err = lldb.DecodeScalars(bk); err != nil {
		return
	}

	if err = enforce(k, it.t.colsK); err != nil {
		return
	}

	if v, err = lldb.DecodeScalars(bv); err != nil {
		return
	}

	err = enforce(v, it.t.colsV)
	return
}
func (s *file) collate(a, b []byte) int { //TODO w/ error return
	da, err := lldb.DecodeScalars(a)
	if err != nil {
		log.Panic(err)
	}

	if err = s.expandBytes(da); err != nil {
		log.Panic(err)
	}

	db, err := lldb.DecodeScalars(b)
	if err != nil {
		log.Panic(err)
	}

	if err = s.expandBytes(db); err != nil {
		log.Panic(err)
	}

	return collate(da, db)
}
func (t *fileTemp) Get(k []interface{}) (v []interface{}, err error) {
	bk, err := lldb.EncodeScalars(k...)
	if err != nil {
		return
	}

	bv, err := t.t.Get(nil, bk)
	if err != nil {
		return
	}

	return lldb.DecodeScalars(bv)
}
func (i *fileIndexIterator) nextPrev(f func() ([]byte, []byte, error)) ([]interface{}, int64, error) { //TODO(indices) blobs: +test
	bk, bv, err := f()
	if err != nil {
		return nil, -1, err
	}

	dk, err := lldb.DecodeScalars(bk)
	if err != nil {
		return nil, -1, err
	}

	b, ok := dk[0].([]byte)
	if ok {
		dk[0] = chunk{i.f, b}
		if err = expand(dk[:1]); err != nil {
			return nil, -1, err
		}
	}

	var k indexKey
	k.value = dk[:len(dk)-1]
	switch i.unique {
	case true:
		if isIndexNull(k.value) {
			return nil, dk[len(dk)-1].(int64), nil
		}

		dv, err := lldb.DecodeScalars(bv)
		if err != nil {
			return nil, -1, err
		}

		return k.value, dv[0].(int64), nil
	default:
		return k.value, dk[len(dk)-1].(int64), nil
	}
}
func read2(a *lldb.Allocator, dst []interface{}, h int64, cols ...*col) (data []interface{}, err error) {
	b, err := a.Get(nil, h)
	if err != nil {
		return
	}

	rec, err := lldb.DecodeScalars(b)
	if err != nil {
		return
	}

	for _, col := range cols {
		i := col.index + 2
		switch col.typ {
		case 0:
		case qBool:
		case qComplex64:
			rec[i] = complex64(rec[i].(complex128))
		case qComplex128:
		case qFloat32:
			rec[i] = float32(rec[i].(float64))
		case qFloat64:
		case qInt8:
			rec[i] = int8(rec[i].(int64))
		case qInt16:
			rec[i] = int16(rec[i].(int64))
		case qInt32:
			rec[i] = int32(rec[i].(int64))
		case qInt64:
		case qString:
		case qUint8:
			rec[i] = uint8(rec[i].(uint64))
		case qUint16:
			rec[i] = uint16(rec[i].(uint64))
		case qUint32:
			rec[i] = uint32(rec[i].(uint64))
		case qUint64:
		default:
			log.Panic("internal error")
		}
	}
	return rec, nil
}
func (t *fileTemp) Get(k []interface{}) (v []interface{}, err error) {
	if err = expand(k); err != nil {
		return
	}

	if err = t.flatten(k); err != nil {
		return nil, err
	}

	bk, err := lldb.EncodeScalars(k...)
	if err != nil {
		return
	}

	bv, err := t.t.Get(nil, bk)
	if err != nil {
		return
	}

	return lldb.DecodeScalars(bv)
}
//NTYPE
func infer(from []interface{}, to *[]*col) {
	if len(*to) == 0 {
		*to = make([]*col, len(from))
		for i := range *to {
			(*to)[i] = &col{}
		}
	}

	for i, c := range *to {
		if f := from[i]; f != nil {
			switch x := f.(type) {
			//case nil:
			case idealComplex:
				c.typ = qComplex128
				from[i] = complex128(x)
			case idealFloat:
				c.typ = qFloat64
				from[i] = float64(x)
			case idealInt:
				c.typ = qInt64
				from[i] = int64(x)
			case idealRune:
				c.typ = qInt32
				from[i] = int32(x)
			case idealUint:
				c.typ = qUint64
				from[i] = uint64(x)
			case bool:
				c.typ = qBool
			case complex128:
				c.typ = qComplex128
			case complex64:
				c.typ = qComplex64
			case float64:
				c.typ = qFloat64
			case float32:
				c.typ = qFloat32
			case int8:
				c.typ = qInt8
			case int16:
				c.typ = qInt16
			case int32:
				c.typ = qInt32
			case int64:
				c.typ = qInt64
			case string:
				c.typ = qString
			case uint8:
				c.typ = qUint8
			case uint16:
				c.typ = qUint16
			case uint32:
				c.typ = qUint32
			case uint64:
				c.typ = qUint64
			case []byte:
				c.typ = qBlob
			case *big.Int:
				c.typ = qBigInt
			case *big.Rat:
				c.typ = qBigRat
			case time.Time:
				c.typ = qTime
			case time.Duration:
				c.typ = qDuration
			case chunk:
				vals, err := lldb.DecodeScalars([]byte(x.b))
				if err != nil {
					log.Panic(err)
				}

				if len(vals) == 0 {
					log.Panic("internal error")
				}

				i, ok := vals[0].(int64)
				if !ok {
					log.Panic("internal error")
				}

				c.typ = int(i)
			default:
				log.Panic("internal error")
			}
		}
	}
}
func (s *file) Read(dst []interface{}, h int64, cols ...*col) (data []interface{}, err error) { //NTYPE
	b, err := s.a.Get(nil, h) //LATER +bufs
	if err != nil {
		return
	}

	rec, err := lldb.DecodeScalars(b)
	if err != nil {
		return
	}

	for _, col := range cols {
		i := col.index + 2
		if i >= len(rec) || rec[i] == nil {
			continue
		}

		switch col.typ {
		case 0:
		case qBool:
		case qComplex64:
			rec[i] = complex64(rec[i].(complex128))
		case qComplex128:
		case qFloat32:
			rec[i] = float32(rec[i].(float64))
		case qFloat64:
		case qInt8:
			rec[i] = int8(rec[i].(int64))
		case qInt16:
			rec[i] = int16(rec[i].(int64))
		case qInt32:
			rec[i] = int32(rec[i].(int64))
		case qInt64:
		case qString:
		case qUint8:
			rec[i] = uint8(rec[i].(uint64))
		case qUint16:
			rec[i] = uint16(rec[i].(uint64))
		case qUint32:
			rec[i] = uint32(rec[i].(uint64))
		case qUint64:
		case qBlob, qBigInt, qBigRat, qTime, qDuration:
			switch x := rec[i].(type) {
			case []byte:
				rec[i] = chunk{f: s, b: x}
			default:
				return nil, fmt.Errorf("(file-006) corrupted DB: non nil chunk type is not []byte")
			}
		default:
			panic("internal error 045")
		}
	}

	if cols != nil {
		for n, dn := len(cols)+2, len(rec); dn < n; dn++ {
			rec = append(rec, nil)
		}
	}
	return rec, nil
}
// Do calls f for every subscripts-value pair in s in ascending collation order
// of the subscripts. Do returns a non-nil error for general errors (e.g. a file
// read error). If f returns false or a non-nil error, Do terminates and
// returns the error value from f.
//
// Note: f can get called with a subscripts-value pair that may no longer
// exist if some other goroutine introduces such a data race. Any coordination
// required to avoid this situation, if applicable/desirable, must be provided
// by the client of dbm.
func (s *Slice) Do(f func(subscripts, value []interface{}) (bool, error)) (err error) {
	var (
		db    = s.a.db
		noVal bool
	)

	if err = db.enter(); err != nil {
		return
	}

	doLeave := true
	defer func() {
		if doLeave {
			db.leave(&err)
		}
	}()

	ok, err := s.a.validate(false)
	if !ok {
		return err
	}

	tree := s.a.tree
	if !tree.IsMem() && tree.Handle() == 1 {
		noVal = true
	}

	switch {
	case s.from == nil && s.to == nil:
		bprefix, err := lldb.EncodeScalars(s.prefix...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return noEof(err)
			}

			if n := len(s.prefix); n != 0 {
				if len(k) < len(s.prefix) {
					return nil
				}

				c, err := lldb.Collate(k[:n], s.prefix, nil)
				if err != nil {
					return err
				}

				if c > 0 {
					return nil
				}
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return err
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	case s.from == nil && s.to != nil:
		bprefix, err := lldb.EncodeScalars(s.prefix...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		to := append(append([]interface{}(nil), s.prefix...), s.to...)
		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return err
			}

			c, err := lldb.Collate(k, to, nil)
			if err != nil {
				return err
			}

			if c > 0 {
				return err
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return noEof(err)
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	case s.from != nil && s.to == nil:
		bprefix, err := lldb.EncodeScalars(append(s.prefix, s.from...)...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return noEof(err)
			}

			if n := len(s.prefix); n != 0 {
				if len(k) < len(s.prefix) {
					return nil
				}

				c, err := lldb.Collate(k[:n], s.prefix, nil)
				if err != nil {
					return err
				}

				if c > 0 {
					return nil
				}
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return err
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	case s.from != nil && s.to != nil:
		bprefix, err := lldb.EncodeScalars(append(s.prefix, s.from...)...)
		if err != nil {
			return err
		}

		enum, _, err := tree.Seek(bprefix)
		if err != nil {
			return noEof(err)
		}

		to := append(append([]interface{}(nil), s.prefix...), s.to...)
		for {
			bk, bv, err := enum.Next()
			if err != nil {
				return noEof(err)
			}

			k, err := lldb.DecodeScalars(bk)
			if err != nil {
				return noEof(err)
			}

			c, err := lldb.Collate(k, to, nil)
			if err != nil {
				return err
			}

			if c > 0 {
				return err
			}

			v, err := lldb.DecodeScalars(bv)
			if err != nil {
				return err
			}

			doLeave = false
			if db.leave(&err) != nil {
				return err
			}

			if noVal && v != nil {
				v = []interface{}{0}
			}

			if more, err := f(k[len(s.prefix):], v); !more || err != nil {
				return noEof(err)
			}

			if err = db.enter(); err != nil {
				return err
			}

			doLeave = true
		}
	default:
		panic("slice.go: internal error")
	}
}
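For reference, the prefix-bound check that Slice.Do repeats in its first and third cases can be read as a small predicate. The helper below is an illustrative sketch, not part of dbm; it assumes only the same lldb import used throughout and mirrors the branches above, where a key shorter than the prefix, or one collating after it, ends the iteration.

// pastPrefix reports whether a decoded key k no longer falls under prefix,
// mirroring the termination checks inside Slice.Do.
func pastPrefix(k, prefix []interface{}) (bool, error) {
	n := len(prefix)
	if n == 0 {
		return false, nil // an empty prefix matches every key
	}

	if len(k) < n {
		return true, nil // key too short to carry the prefix; Do stops here
	}

	c, err := lldb.Collate(k[:n], prefix, nil)
	if err != nil {
		return false, err
	}

	return c > 0, nil // collating after the prefix means the range is exhausted
}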