// Sync writes back any dirty data held by the cache entry and flushes the
// underlying cache blob handle.
func (be *CachedBlobEntry) Sync() error {
	be.mu.Lock()
	defer be.mu.Unlock()

	// Wait for invalidation to complete
	for be.state == cacheEntryInvalidating {
		logger.Infof(mylog, "Waiting for cache to be fully invalidated before sync.")
		be.validlenExtended.Wait()
	}

	if !be.state.IsActive() {
		logger.Warningf(mylog, "Attempted to sync already uninitialized/closed entry: %+v", be.infoWithLock())
		return nil
	}
	if be.state == cacheEntryClean {
		return nil
	}

	logger.Infof(mylog, "Sync entry: %+v", be.infoWithLock())

	errC := make(chan error)

	// Write back dirty data and sync the cache blob handle concurrently.
	go func() {
		if err := be.writeBackWithLock(); err != nil {
			errC <- fmt.Errorf("Failed to writeback dirty: %v", err)
		} else {
			errC <- nil
		}
	}()

	go func() {
		if cs, ok := be.cachebh.(util.Syncer); ok {
			if err := cs.Sync(); err != nil {
				errC <- fmt.Errorf("Failed to sync cache blob: %v", err)
			} else {
				errC <- nil
			}
		} else {
			errC <- nil
		}
	}()

	errs := []error{}
	for i := 0; i < 2; i++ {
		if err := <-errC; err != nil {
			errs = append(errs, err)
		}
	}

	be.syncCount++
	be.lastSync = time.Now()

	return util.ToErrors(errs)
}

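// Sync above relies on two small helpers from the util package whose
// definitions are not part of this listing. The following is a minimal
// sketch of what they are assumed to look like, not the actual otaru
// implementation (uses the standard "errors" and "strings" packages).

// Syncer is assumed to be an optional capability interface: blob handles
// that can flush buffered state implement it, and Sync() type-asserts for it.
type Syncer interface {
	Sync() error
}

// ToErrors is assumed to collapse a slice of errors into a single error
// value (nil when the slice is empty), roughly like this:
func ToErrors(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	msgs := make([]string, 0, len(errs))
	for _, e := range errs {
		msgs = append(msgs, e.Error())
	}
	return errors.New(strings.Join(msgs, "; "))
}
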
// OpenFile opens the file node id with the given open flags and returns a
// FileHandle, acquiring an exclusive INodeDB lock when write access is requested.
func (fs *FileSystem) OpenFile(id inodedb.ID, flags int) (*FileHandle, error) {
	logger.Infof(fslog, "OpenFile(id: %v, flags rok: %t wok: %t)", id, fl.IsReadAllowed(flags), fl.IsWriteAllowed(flags))

	tryLock := fl.IsWriteAllowed(flags)
	if tryLock && !fl.IsWriteAllowed(fs.bs.Flags()) {
		return nil, EACCES
	}

	of := fs.getOrCreateOpenFile(id)

	of.mu.Lock()
	defer of.mu.Unlock()

	ofIsInitialized := of.nlock.ID != 0
	if ofIsInitialized && (of.nlock.HasTicket() || !tryLock) {
		// No need to upgrade lock. Just use cached filehandle.
		logger.Infof(fslog, "Using cached of for inode id: %v", id)
		return of.OpenHandleWithoutLock(flags), nil
	}

	// upgrade lock or acquire new lock...
	v, nlock, err := fs.idb.QueryNode(id, tryLock)
	if err != nil {
		return nil, err
	}
	if v.GetType() != inodedb.FileNodeT {
		if err := fs.idb.UnlockNode(nlock); err != nil {
			logger.Warningf(fslog, "Unlock node failed for non-file node: %v", err)
		}

		if v.GetType() == inodedb.DirNodeT {
			return nil, EISDIR
		}
		return nil, fmt.Errorf("Specified node not file but has type %v", v.GetType())
	}

	of.nlock = nlock
	caio := NewINodeDBChunksArrayIO(fs.idb, nlock)
	of.cfio = chunkstore.NewChunkedFileIO(fs.bs, fs.c, caio)
	of.cfio.SetOrigFilename(fs.tryGetOrigPath(nlock.ID))

	if fl.IsWriteTruncate(flags) {
		if err := of.truncateWithLock(0); err != nil {
			return nil, fmt.Errorf("Failed to truncate file: %v", err)
		}
	}

	fh := of.OpenHandleWithoutLock(flags)
	return fh, nil
}

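// The fl.* predicates used above come from otaru's open-flags helper package,
// which is not reproduced here. The sketch below only illustrates, as an
// assumption, how such predicates typically map onto os.O_* style flags; the
// function bodies are hypothetical, not the actual fl package.

// IsReadAllowed reports whether the open flags permit reading.
func IsReadAllowed(flags int) bool {
	return flags&os.O_WRONLY == 0 // i.e. O_RDONLY or O_RDWR
}

// IsWriteAllowed reports whether the open flags permit writing.
func IsWriteAllowed(flags int) bool {
	return flags&(os.O_WRONLY|os.O_RDWR) != 0
}

// IsWriteTruncate reports whether the file should be truncated on open.
func IsWriteTruncate(flags int) bool {
	return flags&os.O_TRUNC != 0
}
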
func SetupFluentLogger(cfg *Config) error {
	if cfg.Fluent.FluentHost == "" {
		logger.Infof(mylog, "The fluentd host is not specified. Skipping fluent logger instantiation.")
		return nil
	}

	logger.Infof(mylog, "Initializing fluent logger based on config: %+v", cfg.Fluent)
	fcli, err := gfluent.New(cfg.Fluent)
	if err != nil {
		return err
	}
	logger.Registry().AddOutput(fluent.FluentLogger{fcli})

	return nil
}

func (f *FileBlobStore) TotalSize() (int64, error) {
	start := time.Now()

	d, err := os.Open(f.base)
	if err != nil {
		return 0, fmt.Errorf("Open dir failed: %v", err)
	}
	defer d.Close()

	fis, err := d.Readdir(-1)
	if err != nil {
		return 0, fmt.Errorf("Readdir failed: %v", err)
	}

	totalSize := int64(0)
	for _, fi := range fis {
		if !fi.Mode().IsRegular() {
			continue
		}
		totalSize += fi.Size()
	}

	logger.Infof(mylog, "FileBlobStore.TotalSize() was %s. took %s.", humanize.Bytes(uint64(totalSize)), time.Since(start))
	return totalSize, nil
}

func (be *CachedBlobEntry) PWrite(p []byte, offset int64) error {
	be.mu.Lock()
	defer be.mu.Unlock()

	// Avoid any write when in invalidating state.
	// FIXME: maybe allow when offset+len(p) < be.validlen
	for be.state == cacheEntryInvalidating {
		logger.Infof(mylog, "Waiting for cache to be fully invalidated before write.")
		be.validlenExtended.Wait()
	}

	if len(p) == 0 {
		return nil
	}
	be.markDirtyWithLock()
	if err := be.cachebh.PWrite(p, offset); err != nil {
		return err
	}

	right := offset + int64(len(p))
	if right > be.bloblen {
		be.bloblen = right
		be.validlen = right
	}
	return nil
}

func (loc *INodeDBSSLocator) tryPutOnce(blobpath string, txid int64) error {
	start := time.Now()
	e := sslocentry{BlobPath: blobpath, TxID: txid, CreatedAt: start}

	cli, err := loc.cfg.getClient(context.TODO())
	if err != nil {
		return err
	}
	dstx, err := cli.NewTransaction(context.TODO(), datastore.Serializable)
	if err != nil {
		return err
	}

	key := datastore.NewKey(ctxNoNamespace, kindINodeDBSS, "", int64(e.TxID), loc.rootKey)
	if _, err := dstx.Put(key, &e); err != nil {
		dstx.Rollback()
		return err
	}

	if _, err := dstx.Commit(); err != nil {
		return err
	}

	logger.Infof(sslog, "Put(%s, %d) took %s.", blobpath, txid, time.Since(start))
	return nil
}

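// The sslocentry value stored above (and read back in tryLocateOnce and
// DeleteAll below) is not defined in this listing. Judging from how it is
// used, it is assumed to be a small datastore entity roughly like the sketch
// below; the datastore field tags are hypothetical.
type sslocentry struct {
	BlobPath  string    `datastore:",noindex"`
	TxID      int64     // also used as the entity's numeric key and ordered on by "-TxID"
	CreatedAt time.Time
}
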
// closeWithLock closes the cache entry, writing back dirty data first unless
// abandon is true. The caller must hold be.mu.
func (be *CachedBlobEntry) closeWithLock(abandon bool) error {
	if len(be.handles) > 0 {
		return fmt.Errorf("Entry has %d handles", len(be.handles))
	}

	logger.Infof(mylog, "Close entry: %+v", be.infoWithLock())

	if !abandon {
		for be.state == cacheEntryInvalidating {
			logger.Warningf(mylog, "Waiting for cache to be fully invalidated before close. (shouldn't come here, as PWrite should block)")
			be.validlenExtended.Wait()
		}

		if err := be.writeBackWithLock(); err != nil {
			return fmt.Errorf("Failed to writeback dirty: %v", err)
		}
		be.syncCount++
		be.lastSync = time.Now()
	}

	if err := be.cachebh.Close(); err != nil {
		return fmt.Errorf("Failed to close cache bh: %v", err)
	}

	be.state = cacheEntryClosed
	return nil
}

func (loc *INodeDBSSLocator) tryLocateOnce(history int) (string, error) {
	start := time.Now()

	cli, err := loc.cfg.getClient(context.TODO())
	if err != nil {
		return "", err
	}
	dstx, err := cli.NewTransaction(context.TODO(), datastore.Serializable)
	if err != nil {
		return "", err
	}

	q := datastore.NewQuery(kindINodeDBSS).Ancestor(loc.rootKey).Order("-TxID").Offset(history).Limit(1).Transaction(dstx)
	it := cli.Run(context.TODO(), q)
	var e sslocentry
	if _, err := it.Next(&e); err != nil {
		dstx.Rollback()
		if err == datastore.Done {
			return "", EEMPTY
		}
		return "", err
	}

	if _, err := dstx.Commit(); err != nil {
		return "", err
	}

	logger.Infof(sslog, "LocateSnapshot(%d) took %s. Found entry: %+v", history, time.Since(start), e)
	return e.BlobPath, nil
}

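// The "Once" suffix suggests tryLocateOnce is wrapped by an exported locate
// routine that retries on transient datastore contention. The wrapper below
// is a hypothetical sketch of such a caller, not the actual otaru code; the
// retry count and backoff values are made up for illustration.
func (loc *INodeDBSSLocator) locateWithRetrySketch(history int) (string, error) {
	var lastErr error
	for attempt := 0; attempt < 4; attempt++ {
		bp, err := loc.tryLocateOnce(history)
		if err == nil || err == EEMPTY {
			// Success, or a definitive "no snapshot" answer: no point retrying.
			return bp, err
		}
		lastErr = err
		time.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond)
	}
	return "", lastErr
}
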
func (db *DB) RestoreVersion(version TxID) error {
	logger.Infof(mylog, "RestoreVersion(%s) start.", version)

	state, err := db.snapshotIO.RestoreSnapshot()
	if err != nil {
		return fmt.Errorf("Failed to restore snapshot: %v", err)
	}

	oldState := db.state
	db.state = state

	ssver := state.version
	logger.Infof(mylog, "Restored snapshot of ver %d.", ssver)

	if state.version > version {
		// Restore the previous in-memory state, as on the other error paths below.
		db.state = oldState
		return fmt.Errorf("Can't rollback to old version %d which is older than snapshot version %d", version, state.version)
	}

	logger.Infof(mylog, "RestoreVersion(%s): restored ver: %s", version, ssver)

	txlog, err := db.txLogIO.QueryTransactions(ssver + 1)
	if txlog == nil || err != nil {
		db.state = oldState
		return fmt.Errorf("Failed to query txlog: %v", err)
	}

	for _, tx := range txlog {
		logger.Debugf(mylog, "RestoreVersion(%s): apply tx ver %s", version, tx.TxID)
		if _, err := db.applyTransactionInternal(tx, skipTxLog); err != nil {
			db.state = oldState
			return fmt.Errorf("Failed to replay tx: %v", err)
		}
	}

	logger.Infof(mylog, "Fast forward txlog from ver %d to %d", ssver, state.version)

	return nil
}

func (be *CachedBlobEntry) PRead(p []byte, offset int64) error {
	// FIXME: maybe we should allow stale reads w/o lock
	be.mu.Lock()
	defer be.mu.Unlock()

	be.lastUsed = time.Now()

	requiredlen := util.Int64Min(offset+int64(len(p)), be.bloblen)
	for be.validlen < requiredlen {
		logger.Infof(mylog, "Waiting for cache to be fulfilled: reqlen: %d, validlen: %d", requiredlen, be.validlen)
		be.validlenExtended.Wait()
	}

	return be.cachebh.PRead(p, offset)
}

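// PRead above blocks on be.validlenExtended, which behaves like a sync.Cond
// tied to be.mu. The background invalidation path is assumed to extend
// be.validlen as data arrives from the backend and then wake waiters; the
// method below is a hypothetical sketch of that signaling, not the actual
// otaru implementation.
func (be *CachedBlobEntry) extendValidLenSketch(newValidLen int64) {
	be.mu.Lock()
	defer be.mu.Unlock()
	if newValidLen > be.validlen {
		be.validlen = newValidLen
		be.validlenExtended.Broadcast() // wake PRead/PWrite/Truncate/Sync waiters
	}
}
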
func (of *OpenFile) downgradeToReadLock() {
	logger.Infof(fslog, "Downgrade %v to read lock.", of)
	// Note: assumes of.mu is Lock()-ed

	if !of.nlock.HasTicket() {
		logger.Warningf(fslog, "Attempt to downgrade node lock, but no excl lock found. of: %v", of)
		return
	}

	if err := of.fs.idb.UnlockNode(of.nlock); err != nil {
		logger.Warningf(fslog, "Unlocking node to downgrade to read lock failed: %v", err)
	}
	of.nlock.Ticket = inodedb.NoTicket

	caio := NewINodeDBChunksArrayIO(of.fs.idb, of.nlock)
	of.cfio = chunkstore.NewChunkedFileIO(of.fs.bs, of.fs.c, caio)
}

func (loc *INodeDBSSLocator) DeleteAll() ([]string, error) {
	start := time.Now()

	cli, err := loc.cfg.getClient(context.TODO())
	if err != nil {
		return nil, err
	}
	dstx, err := cli.NewTransaction(context.TODO(), datastore.Serializable)
	if err != nil {
		return nil, err
	}

	keys := make([]*datastore.Key, 0)
	blobpaths := make([]string, 0)
	q := datastore.NewQuery(kindINodeDBSS).Ancestor(loc.rootKey).Transaction(dstx)
	it := cli.Run(context.TODO(), q)
	for {
		var e sslocentry
		k, err := it.Next(&e)
		if err != nil {
			if err == datastore.Done {
				break
			}
			dstx.Rollback()
			return nil, err
		}

		keys = append(keys, k)
		blobpaths = append(blobpaths, e.BlobPath)
	}

	logger.Debugf(sslog, "keys to delete: %v", keys)
	if err := dstx.DeleteMulti(keys); err != nil {
		dstx.Rollback()
		return nil, err
	}

	if _, err := dstx.Commit(); err != nil {
		return nil, err
	}

	logger.Infof(sslog, "DeleteAll() deleted %d entries. Took %s", len(keys), time.Since(start))
	return blobpaths, nil
}

func ServeFUSE(bucketName string, mountpoint string, ofs *otaru.FileSystem, ready chan<- bool) error {
	fsName := fmt.Sprintf("otaru+gs://%s", bucketName)
	volName := fmt.Sprintf("Otaru %s", bucketName)

	c, err := bfuse.Mount(
		mountpoint,
		bfuse.FSName(fsName),
		bfuse.Subtype("otarufs"),
		bfuse.VolumeName(volName),
	)
	if err != nil {
		return fmt.Errorf("bfuse.Mount failed: %v", err)
	}
	defer c.Close()

	serveC := make(chan error)
	go func() {
		if err := bfs.Serve(c, FileSystem{ofs}); err != nil {
			serveC <- err
			close(serveC)
			return
		}
		close(serveC)
	}()

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		return err
	}
	logger.Infof(mylog, "Mountpoint \"%s\" should be ready now!", mountpoint)
	if ready != nil {
		close(ready)
	}

	if err := <-serveC; err != nil {
		return err
	}

	if err := ofs.Sync(); err != nil {
		return fmt.Errorf("Failed to Sync fs: %v", err)
	}

	return nil
}

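// A caller can use the ready channel above to block until the mountpoint is
// usable before launching dependent work; ServeFUSE closes the channel once
// the mount succeeds. The function below is a hypothetical usage sketch, not
// part of the original code.
func serveFUSEUsageSketch(bucketName, mountpoint string, ofs *otaru.FileSystem) error {
	ready := make(chan bool)
	errC := make(chan error, 1)
	go func() {
		errC <- ServeFUSE(bucketName, mountpoint, ofs, ready)
	}()

	select {
	case <-ready:
		logger.Infof(mylog, "FUSE mount at %s is ready.", mountpoint)
	case err := <-errC:
		return fmt.Errorf("ServeFUSE failed before becoming ready: %v", err)
	}
	return <-errC // block until the serve loop exits (e.g. on unmount)
}
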
func (be *CachedBlobEntry) Truncate(newsize int64) error {
	be.mu.Lock()
	defer be.mu.Unlock()

	// Avoid truncate when in invalidating state.
	// FIXME: maybe allow if newsize < be.validlen
	for be.state == cacheEntryInvalidating {
		logger.Infof(mylog, "Waiting for cache to be fully invalidated before truncate.")
		be.validlenExtended.Wait()
	}

	if be.bloblen == newsize {
		return nil
	}
	be.markDirtyWithLock()
	if err := be.cachebh.Truncate(newsize); err != nil {
		return err
	}
	be.bloblen = newsize
	be.validlen = newsize
	return nil
}

func (fs *FileSystem) SetAttr(id inodedb.ID, a Attr, valid ValidAttrFields) error {
	logger.Infof(fslog, "SetAttr id: %d, a: %+v, valid: %s", id, a, valid)

	ops := make([]inodedb.DBOperation, 0, 4)
	if valid&UidValid != 0 {
		ops = append(ops, &inodedb.UpdateUidOp{ID: id, Uid: a.Uid})
	}
	if valid&GidValid != 0 {
		ops = append(ops, &inodedb.UpdateGidOp{ID: id, Gid: a.Gid})
	}
	if valid&PermModeValid != 0 {
		ops = append(ops, &inodedb.UpdatePermModeOp{ID: id, PermMode: a.PermMode})
	}
	if valid&ModifiedTValid != 0 {
		ops = append(ops, &inodedb.UpdateModifiedTOp{ID: id, ModifiedT: a.ModifiedT})
	}

	if _, err := fs.idb.ApplyTransaction(inodedb.DBTransaction{Ops: ops}); err != nil {
		return err
	}
	return nil
}

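// ValidAttrFields is used as a bitmask above, so a caller updates only the
// fields it flags. A hypothetical usage sketch follows; the uid/gid values
// and the helper name are made up for illustration.
func chownExampleSketch(fs *FileSystem, id inodedb.ID) error {
	a := Attr{Uid: 1000, Gid: 1000}
	// Only Uid and Gid are applied; PermMode and ModifiedT are left untouched.
	return fs.SetAttr(id, a, UidValid|GidValid)
}
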
func (f *FileBlobStore) ListBlobs() ([]string, error) {
	start := time.Now()

	d, err := os.Open(f.base)
	if err != nil {
		return nil, fmt.Errorf("Open dir failed: %v", err)
	}
	defer d.Close()

	fis, err := d.Readdir(-1)
	if err != nil {
		return nil, fmt.Errorf("Readdir failed: %v", err)
	}

	blobs := make([]string, 0, len(fis))
	for _, fi := range fis {
		if !fi.Mode().IsRegular() {
			continue
		}
		blobs = append(blobs, fi.Name())
	}

	logger.Infof(mylog, "FileBlobStore.ListBlobs() found %d blobs, took %s.", len(blobs), time.Since(start))
	return blobs, nil
}

func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	logger.Registry().AddOutput(logger.WriterLogger{os.Stderr})

	flag.Usage = Usage
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		logger.Criticalf(mylog, "%v", err)
		Usage()
		os.Exit(2)
	}
	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	if err := facade.SetupFluentLogger(cfg); err != nil {
		logger.Criticalf(mylog, "Failed to setup fluentd logger: %v", err)
		os.Exit(1)
	}

	o, err := facade.NewOtaru(cfg, &facade.OneshotConfig{Mkfs: *flagMkfs})
	if err != nil {
		logger.Criticalf(mylog, "NewOtaru failed: %v", err)
		os.Exit(1)
	}

	var muClose sync.Mutex
	closeOtaruAndExit := func(exitCode int) {
		muClose.Lock()
		defer muClose.Unlock()

		if err := bfuse.Unmount(mountpoint); err != nil {
			logger.Warningf(mylog, "umount err: %v", err)
		}
		if o != nil {
			if err := o.Close(); err != nil {
				logger.Warningf(mylog, "Otaru.Close() returned errs: %v", err)
			}
			o = nil
		}
		os.Exit(exitCode)
	}
	defer closeOtaruAndExit(0)

	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, os.Interrupt)
	signal.Notify(sigC, syscall.SIGTERM)
	go func() {
		for s := range sigC {
			logger.Warningf(mylog, "Received signal: %v", s)
			closeOtaruAndExit(1)
		}
	}()

	logger.Registry().AddOutput(logger.HandleCritical(func() {
		logger.Warningf(mylog, "Starting shutdown due to critical event.")
		closeOtaruAndExit(1)
	}))

	bfuseLogger := logger.Registry().Category("bfuse")
	bfuse.Debug = func(msg interface{}) { logger.Debugf(bfuseLogger, "%v", msg) }

	if err := fuse.ServeFUSE(cfg.BucketName, mountpoint, o.FS, nil); err != nil {
		logger.Warningf(mylog, "ServeFUSE failed: %v", err)
		closeOtaruAndExit(1)
	}
	logger.Infof(mylog, "ServeFUSE end!")
}

func GC(ctx context.Context, bs GCableBlobStore, idb inodedb.DBFscker, dryrun bool) error {
	start := time.Now()

	logger.Infof(mylog, "GC start. Dryrun: %t. Listing blobs.", dryrun)
	allbs, err := bs.ListBlobs()
	if err != nil {
		return fmt.Errorf("ListBlobs failed: %v", err)
	}
	logger.Infof(mylog, "List blobs done. %d blobs found.", len(allbs))
	if err := ctx.Err(); err != nil {
		logger.Infof(mylog, "Detected cancel. Bailing out.")
		return err
	}

	logger.Infof(mylog, "Starting INodeDB fsck.")
	usedbs, errs := idb.Fsck()
	if len(errs) != 0 {
		return fmt.Errorf("Fsck returned err: %v", errs)
	}
	logger.Infof(mylog, "Fsck done. %d used blobs found.", len(usedbs))
	if err := ctx.Err(); err != nil {
		logger.Infof(mylog, "Detected cancel. Bailing out.")
		return err
	}

	logger.Infof(mylog, "Converting used blob list to a hashset")
	usedbset := make(map[string]struct{})
	for _, b := range usedbs {
		usedbset[b] = struct{}{}
	}
	logger.Infof(mylog, "Convert used blob list to a hashset: Done.")
	if err := ctx.Err(); err != nil {
		logger.Infof(mylog, "Detected cancel. Bailing out.")
		return err
	}

	logger.Infof(mylog, "Listing unused blobpaths.")
	unusedbs := make([]string, 0, util.IntMax(len(allbs)-len(usedbs), 0))
	for _, b := range allbs {
		if _, ok := usedbset[b]; ok {
			continue
		}

		if metadata.IsMetadataBlobpath(b) {
			logger.Infof(mylog, "Marking metadata blobpath as used: %s", b)
			continue
		}

		unusedbs = append(unusedbs, b)
	}

	traceend := time.Now()
	logger.Infof(mylog, "GC Found %d unused blobpaths. (Trace took %v)", len(unusedbs), traceend.Sub(start))

	for _, b := range unusedbs {
		if err := ctx.Err(); err != nil {
			logger.Infof(mylog, "Detected cancel. Bailing out.")
			return err
		}

		if dryrun {
			logger.Infof(mylog, "Dryrun found unused blob: %s", b)
		} else {
			logger.Infof(mylog, "Removing unused blob: %s", b)
			if err := bs.RemoveBlob(b); err != nil {
				return fmt.Errorf("Removing unused blob \"%s\" failed: %v", b, err)
			}
		}
	}
	sweepend := time.Now()
	logger.Infof(mylog, "GC success. Dryrun: %t. (Sweep took %v. The whole GC took %v.)", dryrun, sweepend.Sub(traceend), sweepend.Sub(start))

	return nil
}

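// GC takes a context so a long mark-and-sweep run can be cancelled between
// steps. Below is a hypothetical invocation sketch doing a dry run before the
// real sweep, under a timeout; the caller-side variables and the timeout
// value are assumptions, not part of the original code.
func gcUsageSketch(bs GCableBlobStore, idb inodedb.DBFscker) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()

	// Pass dryrun=true to only log which blobs would be removed.
	if err := GC(ctx, bs, idb, true); err != nil {
		return err
	}
	// Then perform the actual sweep.
	return GC(ctx, bs, idb, false)
}
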
func (txio *DBTransactionLogIO) Sync() error {
	start := time.Now()

	txio.muSync.Lock()
	defer txio.muSync.Unlock()

	txio.mu.Lock()
	if len(txio.committing) != 0 {
		panic("I should be the only one committing.")
	}
	// Move nextbatch to committing so new transactions can keep queueing while
	// this batch is being written out.
	txio.committing = txio.nextbatch
	batch := txio.committing
	txio.nextbatch = make([]inodedb.DBTransaction, 0)
	txio.mu.Unlock()

	rollback := func() {
		txio.mu.Lock()
		txio.nextbatch = append(txio.committing, txio.nextbatch...)
		txio.committing = []inodedb.DBTransaction{}
		txio.mu.Unlock()
	}

	if len(batch) == 0 {
		return nil
	}

	cli, err := txio.cfg.getClient(context.Background())
	if err != nil {
		rollback()
		return err
	}

	keys := make([]*datastore.Key, 0, len(batch))
	stxs := make([]*storedbtx, 0, len(batch))
	for _, tx := range batch {
		key, stx, err := txio.encode(tx)
		if err != nil {
			rollback()
			return err
		}
		keys = append(keys, key)
		stxs = append(stxs, stx)
	}

	dstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)
	if err != nil {
		rollback()
		return err
	}
	if _, err := dstx.PutMulti(keys, stxs); err != nil {
		rollback()
		dstx.Rollback()
		return err
	}
	if _, err := dstx.Commit(); err != nil {
		rollback()
		return err
	}

	txio.mu.Lock()
	txio.committing = []inodedb.DBTransaction{}
	txio.mu.Unlock()

	logger.Infof(txlog, "Sync() took %s. Committed %d txs", time.Since(start), len(stxs))
	return nil
}

func (txio *DBTransactionLogIO) DeleteTransactions(smallerThanID inodedb.TxID) error {
	start := time.Now()

	txio.mu.Lock()
	batch := make([]inodedb.DBTransaction, 0, len(txio.nextbatch))
	for _, tx := range txio.nextbatch {
		if tx.TxID < smallerThanID {
			continue
		}
		batch = append(batch, tx)
	}
	txio.nextbatch = batch
	txio.mu.Unlock()

	cli, err := txio.cfg.getClient(context.Background())
	if err != nil {
		return err
	}

	ndel := 0
	for {
		needAnotherTx := false
		txStart := time.Now()

		dstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)
		if err != nil {
			return err
		}

		keys := []*datastore.Key{}
		ltkey := txio.encodeKey(smallerThanID)
		q := datastore.NewQuery(kindTransaction).Ancestor(txio.rootKey).Filter("__key__ <", ltkey).KeysOnly().Transaction(dstx)
		it := cli.Run(context.Background(), q)
		for {
			k, err := it.Next(nil)
			if err != nil {
				if err == datastore.Done {
					break
				}
				dstx.Rollback()
				return err
			}

			keys = append(keys, k)
			if len(keys) == maxWriteEntriesPerTx {
				needAnotherTx = true
				break
			}
		}

		if err := dstx.DeleteMulti(keys); err != nil {
			dstx.Rollback()
			return err
		}

		if _, err := dstx.Commit(); err != nil {
			return err
		}
		ndel += len(keys)

		if needAnotherTx {
			logger.Infof(txlog, "DeleteTransactions(%v): A tx deleting %d entries took %s. Starting next tx to delete more.", smallerThanID, len(keys), time.Since(txStart))
		} else {
			break
		}
	}

	logger.Infof(txlog, "DeleteTransactions(%v) deleted %d entries. tx took %s", smallerThanID, ndel, time.Since(start))
	return nil
}

func (cbs *CachedBlobStore) ReduceCache(ctx context.Context, desiredSize int64, dryrun bool) error {
	start := time.Now()

	tsizer, ok := cbs.cachebs.(blobstore.TotalSizer)
	if !ok {
		return fmt.Errorf("Cache backend \"%s\" doesn't support TotalSize() method, required to ReduceCache(). aborting.", util.TryGetImplName(cbs.cachebs))
	}
	blobsizer, ok := cbs.cachebs.(blobstore.BlobSizer)
	if !ok {
		return fmt.Errorf("Cache backend \"%s\" doesn't support BlobSize() method, required to ReduceCache(). aborting.", util.TryGetImplName(cbs.cachebs))
	}
	blobremover, ok := cbs.cachebs.(blobstore.BlobRemover)
	if !ok {
		return fmt.Errorf("Cache backend \"%s\" doesn't support RemoveBlob() method, required to ReduceCache(). aborting.", util.TryGetImplName(cbs.cachebs))
	}

	totalSizeBefore, err := tsizer.TotalSize()
	if err != nil {
		return fmt.Errorf("Failed to query current total cache size: %v", err)
	}

	needsReduce := totalSizeBefore - desiredSize
	if needsReduce < 0 {
		logger.Infof(mylog, "ReduceCache: No need to reduce cache as it's already under desired size! No-op.")
		return nil
	}
	logger.Infof(mylog, "ReduceCache: Current cache bs total size: %s. Desired size: %s. Needs to reduce %s.", humanize.IBytes(uint64(totalSizeBefore)), humanize.IBytes(uint64(desiredSize)), humanize.IBytes(uint64(needsReduce)))

	bps := cbs.usagestats.FindLeastUsed()
	for _, bp := range bps {
		size, err := blobsizer.BlobSize(bp)
		if err != nil {
			if os.IsNotExist(err) {
				logger.Infof(mylog, "Attempted to drop blob cache \"%s\", but not found. Maybe it's already removed.", bp)
				continue
			}
			return fmt.Errorf("Failed to query size for cache blob \"%s\": %v", bp, err)
		}

		logger.Infof(mylog, "ReduceCache: Drop entry \"%s\" to release %s", bp, humanize.IBytes(uint64(size)))
		if !dryrun {
			if err := cbs.entriesmgr.DropCacheEntry(bp, blobremover); err != nil {
				return fmt.Errorf("Failed to remove cache blob \"%s\": %v", bp, err)
			}
		}

		needsReduce -= size
		if needsReduce < 0 {
			break
		}
	}

	totalSizeAfter, err := tsizer.TotalSize()
	if err != nil {
		return fmt.Errorf("Failed to query current total cache size: %v", err)
	}

	logger.Infof(mylog, "ReduceCache done. Cache bs total size: %s -> %s. Dryrun: %t. Took: %s", humanize.IBytes(uint64(totalSizeBefore)), humanize.IBytes(uint64(totalSizeAfter)), dryrun, time.Since(start))
	return nil
}

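// ReduceCache discovers optional cache-backend capabilities via the type
// assertions above. The blobstore package interfaces it asserts against are
// not shown in this listing; they are assumed to look roughly like the sketch
// below (FileBlobStore.TotalSize and ListBlobs above would satisfy the
// corresponding interfaces).
type TotalSizer interface {
	TotalSize() (int64, error)
}

type BlobSizer interface {
	BlobSize(blobpath string) (int64, error)
}

type BlobRemover interface {
	RemoveBlob(blobpath string) error
}
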
func (txio *DBTransactionLogIO) queryTransactionsOnce(minID inodedb.TxID) ([]inodedb.DBTransaction, error) {
	start := time.Now()
	txs := []inodedb.DBTransaction{}

	txio.mu.Lock()
	for _, tx := range txio.committing {
		if tx.TxID >= minID {
			txs = append(txs, tx)
		}
	}
	for _, tx := range txio.nextbatch {
		if tx.TxID >= minID {
			txs = append(txs, tx)
		}
	}
	txio.mu.Unlock()

	cli, err := txio.cfg.getClient(context.Background())
	if err != nil {
		return nil, err
	}
	dstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)
	if err != nil {
		return nil, err
	}

	q := datastore.NewQuery(kindTransaction).Ancestor(txio.rootKey).Transaction(dstx)
	if minID != inodedb.AnyVersion {
		minKey := txio.encodeKey(minID)
		q = q.Filter("__key__ >=", minKey)
	}

	it := cli.Run(context.Background(), q)
	for {
		var stx storedbtx
		key, err := it.Next(&stx)
		if err != nil {
			if err == datastore.Done {
				break
			}
			dstx.Commit()
			return []inodedb.DBTransaction{}, err
		}

		tx, err := decode(txio.cfg.c, key, &stx)
		if err != nil {
			dstx.Commit()
			return []inodedb.DBTransaction{}, err
		}

		txs = append(txs, tx)
	}

	// FIXME: not sure if Rollback() is better
	if _, err := dstx.Commit(); err != nil {
		return nil, err
	}

	sort.Sort(txsorter(txs))
	uniqed := make([]inodedb.DBTransaction, 0, len(txs))
	var prevId inodedb.TxID
	for _, tx := range txs {
		if tx.TxID == prevId {
			continue
		}
		uniqed = append(uniqed, tx)
		prevId = tx.TxID
	}

	logger.Infof(txlog, "QueryTransactions(%v) took %s", minID, time.Since(start))
	return uniqed, nil
}

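// txsorter is used via sort.Sort above but is not defined in this listing.
// It is assumed to be a sort.Interface ordering transactions by TxID (which
// the TxID-based dedup loop above relies on), roughly as in this sketch:
type txsorter []inodedb.DBTransaction

func (s txsorter) Len() int           { return len(s) }
func (s txsorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s txsorter) Less(i, j int) bool { return s[i].TxID < s[j].TxID }
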