func (o *Otaru) Close() error {
	errs := []error{}

	if o.S != nil {
		o.S.AbortAllAndStop()
	}

	if o.FS != nil {
		if err := o.FS.Sync(); err != nil {
			errs = append(errs, err)
		}
	}

	if o.IDBSS != nil {
		o.IDBSS.Stop()
	}

	if o.IDBS != nil {
		o.IDBS.Quit()
	}

	if o.IDBBE != nil {
		if err := o.IDBBE.Sync(); err != nil {
			errs = append(errs, err)
		}
	}

	if o.CSS != nil {
		o.CSS.Stop()
	}

	return util.ToErrors(errs)
}
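// Every function in this section funnels its collected errors through
// util.ToErrors. A minimal sketch of that aggregation pattern, under the
// assumption that an empty slice maps to nil (this is NOT the actual util
// implementation, and toErrorsSketch is a hypothetical name; it would need
// the errors and strings imports):
func toErrorsSketch(errs []error) error {
	if len(errs) == 0 {
		return nil // nothing failed
	}
	msgs := make([]string, 0, len(errs))
	for _, e := range errs {
		msgs = append(msgs, e.Error())
	}
	// Collapse all failures into a single error so callers can still
	// return just one value.
	return errors.New(strings.Join(msgs, "; "))
}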
// Restore opens the blob at blobpath, decrypts and decompresses it, and hands
// a gob.Decoder over the decoded stream to cb so the caller can reconstruct
// its saved state.
func Restore(blobpath string, c btncrypt.Cipher, bs blobstore.BlobStore, cb DecodeCallback) error {
	r, err := bs.OpenReader(blobpath)
	if err != nil {
		return err
	}

	cr, err := chunkstore.NewChunkReader(r, c)
	if err != nil {
		return err
	}
	logger.Debugf(mylog, "serialized blob size: %d", cr.Length())

	zr, err := zlib.NewReader(&io.LimitedReader{R: cr, N: int64(cr.Length())})
	if err != nil {
		return err
	}
	logger.Debugf(mylog, "statesnapshot.Restore: zlib init success!")

	dec := gob.NewDecoder(zr)

	es := []error{}
	if err := cb(dec); err != nil {
		es = append(es, fmt.Errorf("Failed to decode state: %v", err))
	}
	if err := zr.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close zlib Reader: %v", err))
	}
	if err := r.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close BlobHandle: %v", err))
	}
	if err := util.ToErrors(es); err != nil {
		return err
	}
	return nil
}
// Save gob-encodes the caller's state (via cb) into an in-memory
// zlib-compressed buffer, then writes that buffer to blobpath through an
// encrypting ChunkWriter.
func Save(blobpath string, c btncrypt.Cipher, bs blobstore.BlobStore, cb EncodeCallback) error {
	var buf bytes.Buffer

	zw := zlib.NewWriter(&buf)
	enc := gob.NewEncoder(zw)

	es := []error{}
	if err := cb(enc); err != nil {
		es = append(es, fmt.Errorf("Failed to encode state: %v", err))
	}
	if err := zw.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close zlib Writer: %v", err))
	}
	if err := util.ToErrors(es); err != nil {
		return err
	}

	w, err := bs.OpenWriter(blobpath)
	if err != nil {
		return err
	}

	cw, err := chunkstore.NewChunkWriter(w, c, chunkstore.ChunkHeader{
		PayloadLen:     uint32(buf.Len()),
		PayloadVersion: 1,
		OrigFilename:   blobpath,
		OrigOffset:     0,
	})
	if err != nil {
		return err
	}

	if _, err := cw.Write(buf.Bytes()); err != nil {
		es = append(es, fmt.Errorf("Failed to write to ChunkWriter: %v", err))
	}
	if err := cw.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close ChunkWriter: %v", err))
	}
	if err := w.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close blobhandle: %v", err))
	}
	if err := util.ToErrors(es); err != nil {
		return err
	}
	return nil
}
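// A minimal usage sketch of the Save/Restore pair above. It assumes
// EncodeCallback and DecodeCallback are func(*gob.Encoder) error and
// func(*gob.Decoder) error respectively, as suggested by how cb is invoked;
// the myState type, the blob path, and saveAndRestoreExample are hypothetical.
type myState struct {
	Version int
	Names   []string
}

func saveAndRestoreExample(c btncrypt.Cipher, bs blobstore.BlobStore) error {
	st := myState{Version: 1, Names: []string{"foo", "bar"}}

	// Serialize st through the gob.Encoder handed to the callback.
	if err := Save("META/mystate", c, bs, func(enc *gob.Encoder) error {
		return enc.Encode(&st)
	}); err != nil {
		return err
	}

	// Decode back into a fresh value through the gob.Decoder.
	var restored myState
	if err := Restore("META/mystate", c, bs, func(dec *gob.Decoder) error {
		return dec.Decode(&restored)
	}); err != nil {
		return err
	}
	return nil
}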
func (cbs *CachedBlobStore) SaveState(c btncrypt.Cipher) error {
	errs := []error{}
	if err := cbs.bever.SaveStateToBlobstore(c, cbs.backendbs); err != nil {
		errs = append(errs, err)
	}
	return util.ToErrors(errs)
}
func (mgr *CachedBlobEntriesManager) doSyncAll() error {
	errs := []error{}
	for blobpath, be := range mgr.entries {
		if err := be.Sync(); err != nil {
			errs = append(errs, fmt.Errorf("Failed to sync \"%s\": %v", blobpath, err))
		}
	}
	return util.ToErrors(errs)
}
// Sync writes back any dirty cached data to the backend and syncs the local
// cache blob handle. The two operations run concurrently; Sync waits for both
// to finish before returning.
func (be *CachedBlobEntry) Sync() error {
	be.mu.Lock()
	defer be.mu.Unlock()

	// Wait for invalidation to complete
	for be.state == cacheEntryInvalidating {
		logger.Infof(mylog, "Waiting for cache to be fully invalidated before sync.")
		be.validlenExtended.Wait()
	}

	if !be.state.IsActive() {
		logger.Warningf(mylog, "Attempted to sync already uninitialized/closed entry: %+v", be.infoWithLock())
		return nil
	}
	if be.state == cacheEntryClean {
		return nil
	}

	logger.Infof(mylog, "Sync entry: %+v", be.infoWithLock())

	errC := make(chan error)

	// Write back dirty data to the backend.
	go func() {
		if err := be.writeBackWithLock(); err != nil {
			errC <- fmt.Errorf("Failed to writeback dirty: %v", err)
		} else {
			errC <- nil
		}
	}()

	// Sync the cache blob handle, if it supports syncing.
	go func() {
		if cs, ok := be.cachebh.(util.Syncer); ok {
			if err := cs.Sync(); err != nil {
				errC <- fmt.Errorf("Failed to sync cache blob: %v", err)
			} else {
				errC <- nil
			}
		} else {
			errC <- nil
		}
	}()

	errs := []error{}
	for i := 0; i < 2; i++ {
		if err := <-errC; err != nil {
			errs = append(errs, err)
		}
	}

	be.syncCount++
	be.lastSync = time.Now()

	return util.ToErrors(errs)
}
func (fs *FileSystem) Sync() error {
	es := []error{}
	if s, ok := fs.idb.(util.Syncer); ok {
		if err := s.Sync(); err != nil {
			es = append(es, fmt.Errorf("Failed to sync INodeDB: %v", err))
		}
	}
	// FIXME: sync active handles
	return util.ToErrors(es)
}
func (sio *BlobStoreDBStateSnapshotIO) SaveSnapshot(s *inodedb.DBState) error {
	currVer := s.Version()
	if sio.snapshotVer > currVer {
		log.Printf("SaveSnapshot: ASSERT fail: snapshot version %d newer than current ver %d", sio.snapshotVer, currVer)
	} else if sio.snapshotVer == currVer {
		log.Printf("SaveSnapshot: Current ver %d is already snapshotted. No-op.", sio.snapshotVer)
		return nil
	}

	raw, err := sio.bs.Open(metadata.INodeDBSnapshotBlobpath, fl.O_RDWR|fl.O_CREATE)
	if err != nil {
		return err
	}
	if err := raw.Truncate(0); err != nil {
		return err
	}

	cio := chunkstore.NewChunkIOWithMetadata(raw, sio.c, chunkstore.ChunkHeader{
		OrigFilename: metadata.INodeDBSnapshotBlobpath,
		OrigOffset:   0,
	})
	bw := bufio.NewWriter(&blobstore.OffsetWriter{cio, 0})
	zw := zlib.NewWriter(bw)
	enc := gob.NewEncoder(zw)

	es := []error{}
	if err := s.EncodeToGob(enc); err != nil {
		es = append(es, fmt.Errorf("Failed to encode DBState: %v", err))
	}
	if err := zw.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close zlib Writer: %v", err))
	}
	if err := bw.Flush(); err != nil {
		es = append(es, fmt.Errorf("Failed to flush bufio: %v", err))
	}
	if err := cio.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close ChunkIO: %v", err))
	}
	if err := raw.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close blobhandle: %v", err))
	}
	if err := util.ToErrors(es); err != nil {
		return err
	}

	sio.snapshotVer = s.Version()
	return nil
}
// SyncAll schedules a sync of all cached entries on the manager goroutine via
// reqC and blocks until it has completed.
func (mgr *CachedBlobEntriesManager) SyncAll() (err error) {
	ch := make(chan struct{})
	mgr.reqC <- func() {
		defer close(ch)

		errs := []error{}
		for blobpath, be := range mgr.entries {
			if err := be.Sync(); err != nil {
				errs = append(errs, fmt.Errorf("Failed to sync \"%s\": %v", blobpath, err))
			}
		}
		err = util.ToErrors(errs)
	}
	<-ch
	return
}
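// SyncAll above hands a closure to mgr.reqC and waits for it, which implies a
// single manager goroutine serializing all access to mgr.entries. A minimal
// sketch of such a run loop, under that assumption (the actual
// CachedBlobEntriesManager loop may differ, and runSketch is a hypothetical
// name; it assumes reqC is a chan func()):
func (mgr *CachedBlobEntriesManager) runSketch() {
	for req := range mgr.reqC {
		// Run each request serially so no other goroutine touches
		// mgr.entries concurrently.
		req()
	}
}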
func (sio *BlobStoreDBStateSnapshotIO) RestoreSnapshot() (*inodedb.DBState, error) {
	raw, err := sio.bs.Open(metadata.INodeDBSnapshotBlobpath, fl.O_RDONLY)
	if err != nil {
		return nil, err
	}

	cio := chunkstore.NewChunkIO(raw, sio.c)
	log.Printf("serialized blob size: %d", cio.Size())

	zr, err := zlib.NewReader(&io.LimitedReader{R: &blobstore.OffsetReader{cio, 0}, N: cio.Size()})
	if err != nil {
		return nil, err
	}
	log.Printf("LoadINodeDBFromBlobStore: zlib init success!")

	dec := gob.NewDecoder(zr)

	es := []error{}
	state, err := inodedb.DecodeDBStateFromGob(dec)
	if err != nil {
		es = append(es, fmt.Errorf("Failed to decode dbstate: %v", err))
	}
	if err := zr.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close zlib Reader: %v", err))
	}
	if err := cio.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close ChunkIO: %v", err))
	}
	if err := raw.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close BlobHandle: %v", err))
	}
	if err := util.ToErrors(es); err != nil {
		return nil, err
	}

	sio.snapshotVer = state.Version()
	return state, nil
}