func TestGlobalLocker_ActAsMutex(t *testing.T) {
	l1 := datastore.NewGlobalLocker(authtu.TestDSConfig(authtu.TestBucketName()), "otaru-unittest-1", "hogefuga")
	l2 := datastore.NewGlobalLocker(authtu.TestDSConfig(authtu.TestBucketName()), "otaru-unittest-2", "foobar")

	if err := l1.ForceUnlock(); err != nil {
		t.Errorf("ForceUnlock() failed: %v", err)
		return
	}

	// l1 takes the lock. A subsequent l2.Lock() should fail with ErrLockTaken.
	if err := l1.Lock(); err != nil {
		t.Errorf("l1.Lock() failed: %v", err)
	}
	err := l2.Lock()
	if _, ok := err.(*datastore.ErrLockTaken); !ok {
		t.Errorf("l2.Lock() should have failed with ErrLockTaken, but got: %v", err)
	}

	if err := l1.Unlock(); err != nil {
		t.Errorf("l1.Unlock() failed: %v", err)
	}
}
func TestGlobalLocker_LockUnlock(t *testing.T) {
	l := datastore.NewGlobalLocker(authtu.TestDSConfig(authtu.TestBucketName()), "otaru-unittest", "unittest desuyo-")

	if err := l.ForceUnlock(); err != nil {
		t.Errorf("ForceUnlock() failed: %v", err)
		return
	}

	if err := l.Lock(); err != nil {
		t.Errorf("Lock() failed: %v", err)
	}
	if err := l.Unlock(); err != nil {
		t.Errorf("Unlock() failed: %v", err)
	}
}
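// A minimal sketch, not part of the original tests: both tests above talk to a real
// Cloud Datastore instance via authtu.TestDSConfig, so a guard like the hypothetical
// helper below (relying on the standard `go test -short` convention) could be called
// at the top of each test to skip them in offline runs. The helper name is an
// assumption for illustration, not existing otaru API.
func skipUnlessDatastoreTests(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping GlobalLocker integration test in -short mode")
	}
}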
func NewOtaru(cfg *Config, oneshotcfg *OneshotConfig) (*Otaru, error) {
	o := &Otaru{}

	var err error

	// Derive the encryption key from the configured password and set up the cipher.
	key := btncrypt.KeyFromPassword(cfg.Password)
	o.C, err = btncrypt.NewCipher(key)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init Cipher: %v", err)
	}

	o.S = scheduler.NewScheduler()

	if !cfg.LocalDebug {
		o.Tsrc, err = auth.GetGCloudTokenSource(context.TODO(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCloudClientSource: %v", err)
		}
		o.DSCfg = datastore.NewConfig(cfg.ProjectName, cfg.BucketName, o.C, o.Tsrc)
		o.GL = datastore.NewGlobalLocker(o.DSCfg, GenHostName(), "FIXME: fill info")
		if err := o.GL.Lock(); err != nil {
			return nil, err
		}
	}

	o.CacheTgtBS, err = blobstore.NewFileBlobStore(cfg.CacheDir, oflags.O_RDWRCREATE)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init FileBlobStore: %v", err)
	}

	if !cfg.LocalDebug {
		o.DefaultBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, cfg.BucketName, o.Tsrc, oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCSBlobStore: %v", err)
		}
		if !cfg.UseSeparateBucketForMetadata {
			o.BackendBS = o.DefaultBS
		} else {
			metabucketname := fmt.Sprintf("%s-meta", cfg.BucketName)
			o.MetadataBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, metabucketname, o.Tsrc, oflags.O_RDWRCREATE)
			if err != nil {
				o.Close()
				return nil, fmt.Errorf("Failed to init GCSBlobStore (metadata): %v", err)
			}

			// Route metadata blobpaths to the metadata bucket, everything else to the default bucket.
			o.BackendBS = blobstore.Mux{
				blobstore.MuxEntry{metadata.IsMetadataBlobpath, o.MetadataBS},
				blobstore.MuxEntry{nil, o.DefaultBS},
			}
		}
	} else {
		o.BackendBS, err = blobstore.NewFileBlobStore(path.Join(os.Getenv("HOME"), ".otaru", "bbs"), oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init FileBlobStore (backend): %v", err)
		}
	}

	queryFn := chunkstore.NewQueryChunkVersion(o.C)
	o.CBS, err = cachedblobstore.New(o.BackendBS, o.CacheTgtBS, o.S, oflags.O_RDWRCREATE /* FIXME */, queryFn)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init CachedBlobStore: %v", err)
	}
	if err := o.CBS.RestoreState(o.C); err != nil {
		logger.Warningf(mylog, "Attempted to restore cachedblobstore state but failed: %v", err)
	}
	o.CSS = cachedblobstore.NewCacheSyncScheduler(o.CBS)

	if !cfg.LocalDebug {
		o.SSLoc = datastore.NewINodeDBSSLocator(o.DSCfg)
	} else {
		logger.Panicf(mylog, "Implement mock sslocator that doesn't depend on gcloud/datastore")
	}
	o.SIO = blobstoredbstatesnapshotio.New(o.CBS, o.C, o.SSLoc)

	if !cfg.LocalDebug {
		txio := datastore.NewDBTransactionLogIO(o.DSCfg)
		o.TxIO = txio
		o.TxIOSS = util.NewSyncScheduler(txio, 300*time.Millisecond)
	} else {
		o.TxIO = inodedb.NewSimpleDBTransactionLogIO()
	}
	o.CTxIO = inodedb.NewCachedDBTransactionLogIO(o.TxIO)

	if oneshotcfg.Mkfs {
		o.IDBBE, err = inodedb.NewEmptyDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewEmptyDB failed: %v", err)
		}
	} else {
		o.IDBBE, err = inodedb.NewDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewDB failed: %v", err)
		}
	}

	o.IDBS = inodedb.NewDBService(o.IDBBE)
	o.IDBSS = util.NewSyncScheduler(o.IDBS, 30*time.Second)

	o.FS = otaru.NewFileSystem(o.IDBS, o.CBS, o.C)
	o.MGMT = mgmt.NewServer()
	if err := o.runMgmtServer(); err != nil {
		o.Close()
		return nil, fmt.Errorf("Mgmt server run failed: %v", err)
	}

	return o, nil
}
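// A minimal usage sketch, not part of the original source: it only assumes the
// NewOtaru/Close pair and the OneshotConfig.Mkfs field shown above. The function
// name runOnce and the placeholder comment are illustrative only.
func runOnce(cfg *Config, mkfs bool) error {
	o, err := NewOtaru(cfg, &OneshotConfig{Mkfs: mkfs})
	if err != nil {
		return err
	}
	defer o.Close()

	// ... mount the filesystem / serve mgmt requests here ...
	return nil
}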
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	flag.Usage = Usage
	flag.Parse()
	if flag.NArg() != 0 {
		Usage()
		os.Exit(1)
	}

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		Usage()
		os.Exit(1)
	}

	tsrc, err := auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to init GCloudClientSource: %v", err)
	}
	key := btncrypt.KeyFromPassword(cfg.Password)
	c, err := btncrypt.NewCipher(key)
	if err != nil {
		log.Fatalf("Failed to init btncrypt.Cipher: %v", err)
	}

	fmt.Printf("Do you really want to proceed with deleting all blobs in gs://%s{,-meta} and its cache in %s?\n", cfg.BucketName, cfg.CacheDir)
	fmt.Printf("Type \"deleteall\" to proceed: ")
	sc := bufio.NewScanner(os.Stdin)
	if !sc.Scan() {
		return
	}
	if sc.Text() != "deleteall" {
		log.Printf("Cancelled.\n")
		os.Exit(1)
	}

	dscfg := datastore.NewConfig(cfg.ProjectName, cfg.BucketName, c, tsrc)
	l := datastore.NewGlobalLocker(dscfg, "otaru-deleteallblobs", facade.GenHostName())
	if err := l.Lock(); err != nil {
		log.Printf("Failed to acquire global lock: %v", err)
		return
	}
	defer l.Unlock()

	if err := clearGCS(cfg.ProjectName, cfg.BucketName, tsrc); err != nil {
		log.Printf("Failed to clear bucket \"%s\": %v", cfg.BucketName, err)
		return
	}
	if cfg.UseSeparateBucketForMetadata {
		metabucketname := fmt.Sprintf("%s-meta", cfg.BucketName)
		if err := clearGCS(cfg.ProjectName, metabucketname, tsrc); err != nil {
			log.Printf("Failed to clear metadata bucket \"%s\": %v", metabucketname, err)
			return
		}
	}
	if err := clearCache(cfg.CacheDir); err != nil {
		log.Printf("Failed to clear cache \"%s\": %v", cfg.CacheDir, err)
		return
	}

	log.Printf("otaru-deleteallblobs: Successfully completed!")
	log.Printf("Hint: You might also want to run \"otaru-txlogio purge\" to delete inodedb txlogs.")
}
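// A minimal sketch of what the clearCache helper used above might look like (its actual
// implementation is not shown in this section): remove everything under the cache
// directory while keeping the directory itself. The name clearCacheSketch and its use
// of io/ioutil, os, and path/filepath are illustrative assumptions only.
func clearCacheSketch(cacheDir string) error {
	entries, err := ioutil.ReadDir(cacheDir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if err := os.RemoveAll(filepath.Join(cacheDir, e.Name())); err != nil {
			return err
		}
	}
	return nil
}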
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	flag.Usage = Usage
	flag.Parse()
	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		Usage()
		os.Exit(2)
	}
	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	switch flag.Arg(0) {
	case "lock", "unlock", "query":
		break
	default:
		log.Printf("Unknown cmd: %v", flag.Arg(0))
		os.Exit(1)
	}

	tsrc, err := auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to init GCloudClientSource: %v", err)
	}

	c := btncrypt.Cipher{} // A null cipher is fine here, as GlobalLocker doesn't make use of it.
	dscfg := datastore.NewConfig(cfg.ProjectName, cfg.BucketName, c, tsrc)

	info := *flagInfoStr
	if info == "" {
		info = "otaru-globallock-cli cmdline debug tool"
	}
	l := datastore.NewGlobalLocker(dscfg, "otaru-globallock-cli", info)

	switch flag.Arg(0) {
	case "lock":
		if err := l.Lock(); err != nil {
			log.Printf("Lock failed: %v", err)
		}

	case "unlock":
		if *flagForce {
			if err := l.ForceUnlock(); err != nil {
				log.Printf("ForceUnlock failed: %v", err)
				os.Exit(1)
			}
		} else {
			if err := l.UnlockIgnoreCreatedAt(); err != nil {
				log.Printf("Unlock failed: %v", err)
				os.Exit(1)
			}
		}

	case "query":
		entry, err := l.Query()
		if err != nil {
			log.Printf("Query failed: %v", err)
			os.Exit(1)
		}
		fmt.Printf("%+v\n", entry)

	default:
		panic("should not be reached")
	}
}