func (f *wfile) Close() error {
	if rem := f.Chunker.Remaining(); len(rem) > 0 {
		if len(f.chunks) == 0 {
			// Store the file inline. Data is uint32 (metadata length) + len(metadata) + uint32 (chunk count) + rem
			total := 4 + len(f.metadata) + 4 + len(rem)
			data := make([]byte, total)
			out := putMetadata(data, f.metadata)
			// A chunk count of 0 indicates the data is inline
			littleEndian.PutUint32(out, uint32(0))
			copy(out[4:], rem)
			// Capture the fields we still need before returning f to the pool
			id := f.id
			drv := f.drv
			wfilesPool.Put(f)
			return drv.files.Put(internal.StringToBytes(id), data, nil)
		}
		if err := f.Chunker.Flush(); err != nil {
			return err
		}
	}
	if err := f.flushBatch(); err != nil {
		return err
	}
	// Reserve uint32 (metadata length) + len(metadata) + uint32 (chunk count) +
	// n sha1 chunk keys, each prefixed by a uint32 with its length
	total := 4 + len(f.metadata) + (len(f.chunks) * (sha1.Size + 4)) + 4
	data := make([]byte, total)
	out := putMetadata(data, f.metadata)
	littleEndian.PutUint32(out, uint32(len(f.chunks)))
	pos := 4
	for _, chunk := range f.chunks {
		littleEndian.PutUint32(out[pos:], uint32(len(chunk)))
		pos += 4
		n := copy(out[pos:], chunk)
		pos += n
	}
	// Capture the fields we still need before returning f to the pool
	id := f.id
	drv := f.drv
	wfilesPool.Put(f)
	return drv.files.Put(internal.StringToBytes(id), data, nil)
}
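// For context, a minimal sketch of what the putMetadata helper is assumed to
// do (the real helper is defined elsewhere in this package, and littleEndian
// is assumed to alias binary.LittleEndian from encoding/binary): write a
// little-endian uint32 length prefix followed by the metadata bytes, and
// return the slice positioned just past them so the caller can keep encoding.
// This matches the record layout that Close writes above and that the leveldb
// Open further below decodes.
func putMetadataSketch(data, metadata []byte) []byte {
	littleEndian.PutUint32(data, uint32(len(metadata))) // 4-byte metadata length
	n := copy(data[4:], metadata)                       // metadata bytes
	return data[4+n:]                                   // remainder for the caller
}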
func (d *DB) preparedStmt(s string) *sql.Stmt {
	// Don't bother making a prepared statement if
	// the transaction is non-nil, since the current
	// implementation of d.tx.Stmt() (as of the Go 1.8 freeze)
	// will end up parsing the query string again. Given
	// that tx prepared statements won't be valid once
	// the transaction goes away, we'll most likely end
	// up parsing twice for one execution.
	//
	// XXX: Also, sqlite has some issues with prepared stmts
	// and transactions. TestMigrations in gnd.la/orm wouldn't
	// pass because the connection which created the 1st
	// "migration" table was incorrectly in a transaction and when
	// creating the table for the 1st migration (BadMigration1),
	// the inspection would return that no table was present.
	// Eventually, the table would appear and some other part
	// of the test would fail.
	//
	// TLDR: Make sure the sqlite tests pass if you remove the
	// following check.
	if d.tx != nil {
		return nil
	}
	key := crc32.ChecksumIEEE(internal.StringToBytes(s))
	d.mu.RLock()
	cached, ok := d.cache[key]
	d.mu.RUnlock()
	if ok && cached.sql == s {
		// Note: while the early return above is in place, d.tx is always
		// nil past this point, so the tx branches below are inert.
		if d.tx != nil {
			return d.tx.Stmt(cached.stmt)
		}
		return cached.stmt
	}
	stmt, _ := d.sqlDb.Prepare(s)
	if stmt == nil {
		// Let the non-prepared method report the error
		return nil
	}
	d.mu.Lock()
	if d.cache == nil {
		d.cache = make(map[uint32]cacheEntry)
	}
	d.cache[key] = cacheEntry{sql: s, stmt: stmt}
	d.mu.Unlock()
	if d.tx != nil {
		return d.tx.Stmt(stmt)
	}
	return stmt
}
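// Hedged usage sketch (execSketch is a hypothetical caller, not part of the
// original code): preparedStmt returns nil both inside a transaction and when
// Prepare fails, so callers are expected to fall back to the unprepared path,
// which is the one that reports any error to the user.
func (d *DB) execSketch(s string, args ...interface{}) (sql.Result, error) {
	if stmt := d.preparedStmt(s); stmt != nil {
		return stmt.Exec(args...)
	}
	if d.tx != nil {
		return d.tx.Exec(s, args...)
	}
	return d.sqlDb.Exec(s, args...)
}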
func (d *leveldbDriver) Open(id string) (driver.RFile, error) {
	value, err := d.files.Get(internal.StringToBytes(id), nil)
	if err != nil {
		if err == leveldb.ErrNotFound {
			return nil, fmt.Errorf("file %s not found", id)
		}
		return nil, err
	}
	metaLen := int(littleEndian.Uint32(value))
	value = value[4:]
	metadata := value[:metaLen]
	value = value[metaLen:]
	count := int(littleEndian.Uint32(value))
	value = value[4:]
	if count == 0 {
		// Data is inline
		return &rfile{metadata: metadata, chunks: [][]byte{value}}, nil
	}
	pos := 0
	chunks := make([][]byte, count)
	for ii := 0; ii < count; ii++ {
		size := int(littleEndian.Uint32(value[pos:]))
		pos += 4
		key := value[pos : pos+size]
		chunk, err := d.chunks.Get(key, nil)
		if err != nil {
			if err == leveldb.ErrNotFound {
				return nil, fmt.Errorf("chunk %s in file %s not found", hex.EncodeToString(key), id)
			}
			return nil, err
		}
		chunks[ii] = chunk
		pos += size
	}
	return &rfile{metadata: metadata, chunks: chunks}, nil
}
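// Hedged sketch of how the chunk slices could be consumed as a reader (the
// actual rfile type and the driver.RFile interface are defined elsewhere; the
// fields below only mirror how Open constructs the value, and io is assumed
// to be imported). Draining the chunks in order yields the original file
// contents, since Open loads them in the order Close wrote their keys.
type rfileSketch struct {
	metadata []byte
	chunks   [][]byte
}

func (r *rfileSketch) Read(p []byte) (int, error) {
	total := 0
	for len(p) > 0 && len(r.chunks) > 0 {
		n := copy(p, r.chunks[0])
		total += n
		p = p[n:]
		if n == len(r.chunks[0]) {
			r.chunks = r.chunks[1:] // chunk fully consumed, advance to the next
		} else {
			r.chunks[0] = r.chunks[0][n:] // partial read, keep the remainder
		}
	}
	if total == 0 && len(r.chunks) == 0 {
		return 0, io.EOF
	}
	return total, nil
}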