Example #1
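// applyTransactionInternal applies the ops in tx against the in-memory DB
// state. tx.TxID must be AnyVersion or the next expected version number.
// If an op or the transaction log append fails, the DB state is restored via
// RestoreVersion before the error is returned.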
func (db *DB) applyTransactionInternal(tx DBTransaction, writeTxLogFlag bool) (TxID, error) {
	logger.Debugf(mylog, "applyTransactionInternal(%+v, writeTxLog: %t)", tx, writeTxLogFlag)

	if tx.TxID == AnyVersion {
		tx.TxID = db.state.version + 1
	} else if tx.TxID != db.state.version+1 {
		return 0, fmt.Errorf("Attempted to apply tx %d to dbver %d. Next accepted tx is %d", tx.TxID, db.state.version, db.state.version+1)
	}

	for _, op := range tx.Ops {
		if err := op.Apply(db.state); err != nil {
			if rerr := db.RestoreVersion(db.state.version); rerr != nil {
				logger.Panicf(mylog, "Following Error: %v. DB rollback failed!!!: %v", err, rerr)
			}
			return 0, err
		}
	}
	if writeTxLogFlag == writeTxLog {
		if err := db.txLogIO.AppendTransaction(tx); err != nil {
			if rerr := db.RestoreVersion(db.state.version); rerr != nil {
				logger.Panicf(mylog, "Failed to write txlog: %v. DB rollback failed!!!: %v", err, rerr)
			}
			return 0, fmt.Errorf("Failed to write txlog: %v", err)
		}
	}

	db.state.version = tx.TxID
	db.stats.LastTx = time.Now()
	return tx.TxID, nil
}
Example #2
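// GenHostName returns a host identifier of the form "<hostname>-<pid>".
// It panics via logger.Panicf if the local hostname cannot be queried.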
func GenHostName() string {
	hostname, err := os.Hostname()
	if err != nil {
		logger.Panicf(mylog, "Failed to query local hostname: %v", err)
	}
	pid := os.Getpid()
	return fmt.Sprintf("%s-%d", hostname, pid)
}
Example #3
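// Size returns the size of the underlying file in bytes, panicking via
// logger.Panicf if Stat fails.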
func (h FileBlobHandle) Size() int64 {
	fi, err := h.Fp.Stat()
	if err != nil {
		logger.Panicf(mylog, "Stat failed: %v", err)
	}

	return fi.Size()
}
Example #4
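// WriteTo writes the chunk header to w: two signature magic bytes, a format
// byte, and a gob-encoded header encrypted with c and zero-padded to a fixed
// frame length.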
func (h ChunkHeader) WriteTo(w io.Writer, c btncrypt.Cipher) error {
	h.FrameEncapsulation = CurrentFrameEncapsulation

	if h.PayloadLen > MaxChunkPayloadLen {
		return fmt.Errorf("payload length too big: %d", h.PayloadLen)
	}

	if len(h.OrigFilename) > MaxOrigFilenameLen {
		h.OrigFilename = filepath.Base(h.OrigFilename)
		if len(h.OrigFilename) > MaxOrigFilenameLen {
			h.OrigFilename = "<filename_too_long>"
		}
	}

	if _, err := w.Write([]byte{ChunkSignatureMagic1, ChunkSignatureMagic2}); err != nil {
		return fmt.Errorf("Failed to write signature magic: %v", err)
	}
	if _, err := w.Write([]byte{CurrentFormat}); err != nil {
		return fmt.Errorf("Failed to write format byte: %v", err)
	}

	var b bytes.Buffer
	enc := gob.NewEncoder(&b)
	if err := enc.Encode(h); err != nil {
		return err
	}
	framelen := ChunkHeaderLength - c.FrameOverhead() - SignatureLength - 1
	paddinglen := framelen - b.Len()
	if paddinglen < 0 {
		logger.Panicf(mylog, "SHOULD NOT BE REACHED: Marshaled ChunkHeader size too large")
	}

	bew, err := btncrypt.NewWriteCloser(w, c, framelen)
	if err != nil {
		return fmt.Errorf("Failed to initialize frame encryptor: %v", err)
	}
	if _, err := b.WriteTo(bew); err != nil {
		return fmt.Errorf("Header frame gob payload write failed: %v", err)
	}
	// zero padding
	if _, err := bew.Write(make([]byte, paddinglen)); err != nil {
		return fmt.Errorf("Header frame zero padding write failed: %v", err)
	}
	if err := bew.Close(); err != nil {
		return fmt.Errorf("Header frame close failed: %v", err)
	}
	return nil
}
Example #5
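// markDirtyWithLock transitions a clean cache entry to cacheEntryDirty and
// updates its lastUsed/lastWrite timestamps. The caller must hold the
// entry's lock.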
func (be *CachedBlobEntry) markDirtyWithLock() {
	now := time.Now()
	be.lastUsed = now
	be.lastWrite = now

	if be.state == cacheEntryDirty {
		return
	}
	if be.state != cacheEntryClean {
		logger.Panicf(mylog, "markDirty called from unexpected state: %+v", be.infoWithLock())
	}
	be.state = cacheEntryDirty

	if be.lastSync.IsZero() {
		be.lastSync = time.Now()
	}
}
Example #6
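// Sync seals the buffered plaintext with a fresh random nonce prepended to
// the ciphertext, resets the buffer, and returns the encrypted frame.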
func (f *frameEncryptor) Sync() ([]byte, error) {
	if f.Written() > BtnFrameMaxPayload {
		return nil, fmt.Errorf("frame payload size exceeding max len: %d > %d", f.Written(), BtnFrameMaxPayload)
	}

	nonce := util.RandomBytes(f.c.gcm.NonceSize())

	f.encrypted = f.encrypted[:len(nonce)]
	copy(f.encrypted, nonce)

	f.encrypted = f.c.gcm.Seal(f.encrypted, nonce, f.b.Bytes(), nil)
	if len(f.encrypted) != f.c.EncryptedFrameSize(f.Written()) {
		logger.Panicf(mylog, "EncryptedFrameSize mismatch. expected: %d, actual: %v", f.c.EncryptedFrameSize(f.Written()), len(f.encrypted))
	}
	f.b.Reset()
	return f.encrypted, nil
}
Example #7
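// NewOtaru constructs an Otaru instance from cfg: cipher, scheduler, blob
// stores (GCS-backed or local when cfg.LocalDebug is set), transaction log IO,
// the inodedb, the filesystem and the management server. On failure the
// partially constructed instance is closed and an error is returned.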
func NewOtaru(cfg *Config, oneshotcfg *OneshotConfig) (*Otaru, error) {
	o := &Otaru{}

	var err error

	key := btncrypt.KeyFromPassword(cfg.Password)
	o.C, err = btncrypt.NewCipher(key)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init Cipher: %v", err)
	}

	o.S = scheduler.NewScheduler()

	if !cfg.LocalDebug {
		o.Tsrc, err = auth.GetGCloudTokenSource(context.TODO(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCloudClientSource: %v", err)
		}
		o.DSCfg = datastore.NewConfig(cfg.ProjectName, cfg.BucketName, o.C, o.Tsrc)
		o.GL = datastore.NewGlobalLocker(o.DSCfg, GenHostName(), "FIXME: fill info")
		if err := o.GL.Lock(); err != nil {
			return nil, err
		}
	}

	o.CacheTgtBS, err = blobstore.NewFileBlobStore(cfg.CacheDir, oflags.O_RDWRCREATE)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init FileBlobStore: %v", err)
	}

	if !cfg.LocalDebug {
		o.DefaultBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, cfg.BucketName, o.Tsrc, oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCSBlobStore: %v", err)
		}
		if !cfg.UseSeparateBucketForMetadata {
			o.BackendBS = o.DefaultBS
		} else {
			metabucketname := fmt.Sprintf("%s-meta", cfg.BucketName)
			o.MetadataBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, metabucketname, o.Tsrc, oflags.O_RDWRCREATE)
			if err != nil {
				o.Close()
				return nil, fmt.Errorf("Failed to init GCSBlobStore (metadata): %v", err)
			}

			o.BackendBS = blobstore.Mux{
				blobstore.MuxEntry{metadata.IsMetadataBlobpath, o.MetadataBS},
				blobstore.MuxEntry{nil, o.DefaultBS},
			}
		}
	} else {
		o.BackendBS, err = blobstore.NewFileBlobStore(path.Join(os.Getenv("HOME"), ".otaru", "bbs"), oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init FileBlobStore (backend): %v", err)
		}
	}

	queryFn := chunkstore.NewQueryChunkVersion(o.C)
	o.CBS, err = cachedblobstore.New(o.BackendBS, o.CacheTgtBS, o.S, oflags.O_RDWRCREATE /* FIXME */, queryFn)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init CachedBlobStore: %v", err)
	}
	if err := o.CBS.RestoreState(o.C); err != nil {
		logger.Warningf(mylog, "Attempted to restore cachedblobstore state but failed: %v", err)
	}
	o.CSS = cachedblobstore.NewCacheSyncScheduler(o.CBS)

	if !cfg.LocalDebug {
		o.SSLoc = datastore.NewINodeDBSSLocator(o.DSCfg)
	} else {
		logger.Panicf(mylog, "Implement mock sslocator that doesn't depend on gcloud/datastore")
	}
	o.SIO = blobstoredbstatesnapshotio.New(o.CBS, o.C, o.SSLoc)

	if !cfg.LocalDebug {
		txio := datastore.NewDBTransactionLogIO(o.DSCfg)
		o.TxIO = txio
		o.TxIOSS = util.NewSyncScheduler(txio, 300*time.Millisecond)
	} else {
		o.TxIO = inodedb.NewSimpleDBTransactionLogIO()
	}
	o.CTxIO = inodedb.NewCachedDBTransactionLogIO(o.TxIO)

	if oneshotcfg.Mkfs {
		o.IDBBE, err = inodedb.NewEmptyDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewEmptyDB failed: %v", err)
		}
	} else {
		o.IDBBE, err = inodedb.NewDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewDB failed: %v", err)
		}
	}

	o.IDBS = inodedb.NewDBService(o.IDBBE)
	o.IDBSS = util.NewSyncScheduler(o.IDBS, 30*time.Second)

	o.FS = otaru.NewFileSystem(o.IDBS, o.CBS, o.C)
	o.MGMT = mgmt.NewServer()
	if err := o.runMgmtServer(); err != nil {
		o.Close()
		return nil, fmt.Errorf("Mgmt server run failed: %v", err)
	}

	return o, nil
}