// TestTokenSource returns an oauth2.TokenSource for use in tests, built from the
// credentials and token-cache files referenced by TestConfig().
func TestTokenSource() oauth2.TokenSource {
	cfg := TestConfig()
	clisrc, err := auth.GetGCloudTokenSource(context.TODO(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to create TestTokenSource: %v", err)
	}
	return clisrc
}
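// Illustrative sketch (not part of the original code): the returned TokenSource
// can be exercised directly through the standard oauth2.TokenSource interface,
// e.g. to confirm that the cached credentials still yield a usable token.
// The function name below is hypothetical.
func exampleUseTestTokenSource() {
	tsrc := TestTokenSource()
	tok, err := tsrc.Token()
	if err != nil {
		log.Fatalf("Token() failed: %v", err)
	}
	log.Printf("got token; expires at %v", tok.Expiry)
}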
func main() {
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		os.Exit(2)
	}

	_, err = auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, true)
	if err != nil {
		log.Fatalf("Failed: %v", err)
	}
	log.Printf("credentials valid!")
}
// otaru-txlogio queries ("query") or deletes ("purge") inodedb transaction log entries.
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	flag.Usage = Usage
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		Usage()
		os.Exit(2)
	}

	minID := inodedb.LatestVersion
	if flag.NArg() < 1 {
		Usage()
		os.Exit(2)
	}
	switch flag.Arg(0) {
	case "purge":
		if flag.NArg() != 1 {
			Usage()
			os.Exit(2)
		}
	case "query":
		switch flag.NArg() {
		case 1:
			// No explicit TxID given; query from inodedb.LatestVersion.
		case 2:
			n, err := strconv.ParseInt(flag.Arg(1), 10, 64)
			if err != nil {
				Usage()
				os.Exit(2)
			}
			minID = inodedb.TxID(n)
		}
	default:
		log.Printf("Unknown cmd: %v", flag.Arg(0))
		Usage()
		os.Exit(2)
	}

	tsrc, err := auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to init GCloudTokenSource: %v", err)
	}
	key := btncrypt.KeyFromPassword(cfg.Password)
	c, err := btncrypt.NewCipher(key)
	if err != nil {
		log.Fatalf("Failed to init btncrypt.Cipher: %v", err)
	}

	dscfg := datastore.NewConfig(cfg.ProjectName, cfg.BucketName, c, tsrc)
	txlogio := datastore.NewDBTransactionLogIO(dscfg)

	switch flag.Arg(0) {
	case "purge":
		if err := txlogio.DeleteAllTransactions(); err != nil {
			log.Printf("DeleteAllTransactions() failed: %v", err)
		}
	case "query":
		txs, err := txlogio.QueryTransactions(minID)
		if err != nil {
			log.Printf("QueryTransactions() failed: %v", err)
		}
		for _, tx := range txs {
			fmt.Printf("%s\n", tx)
		}
	default:
		log.Printf("Unknown cmd: %v", flag.Arg(0))
		os.Exit(1)
	}
}
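// Example invocations (illustrative; the "otaru-txlogio" binary name is taken from
// the hint printed by otaru-deleteallblobs further below):
//
//	otaru-txlogio query      # list txlog entries from inodedb.LatestVersion
//	otaru-txlogio query 42   # list txlog entries starting at TxID 42
//	otaru-txlogio purge      # delete all transaction log entries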
// Command-line tool to list or purge INodeDB snapshot locator ("inodedbsslocator") entries.
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	flag.Usage = Usage
	flag.Parse()

	if flag.NArg() != 1 {
		Usage()
		os.Exit(1)
	}
	switch flag.Arg(0) {
	case "list", "purge":
		// recognized command
	default:
		log.Printf("Unknown cmd: %v", flag.Arg(0))
		Usage()
		os.Exit(2)
	}

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		Usage()
		os.Exit(1)
	}

	tsrc, err := auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to init GCloudTokenSource: %v", err)
	}
	key := btncrypt.KeyFromPassword(cfg.Password)
	c, err := btncrypt.NewCipher(key)
	if err != nil {
		log.Fatalf("Failed to init btncrypt.Cipher: %v", err)
	}

	dscfg := datastore.NewConfig(cfg.ProjectName, cfg.BucketName, c, tsrc)
	ssloc := datastore.NewINodeDBSSLocator(dscfg)

	switch flag.Arg(0) {
	case "purge":
		fmt.Printf("Do you really want to proceed with deleting all inodedbsslocator entries for %s?\n", cfg.BucketName)
		fmt.Printf("Type \"deleteall\" to proceed: ")
		sc := bufio.NewScanner(os.Stdin)
		if !sc.Scan() {
			return
		}
		if sc.Text() != "deleteall" {
			log.Printf("Cancelled.\n")
			os.Exit(1)
		}

		es, err := ssloc.DeleteAll()
		if err != nil {
			log.Printf("DeleteAll failed: %v", err)
		}
		log.Printf("DeleteAll deleted entries for blobpath: %v", es)
		// FIXME: delete the entries from blobpath too

	case "list":
		history := 0
	histloop:
		for {
			bp, err := ssloc.Locate(history)
			if err != nil {
				if err == datastore.EEMPTY {
					log.Printf("Locate(%d): no entry", history)
				} else {
					log.Printf("Locate(%d) err: %v", history, err)
				}
				break histloop
			}
			log.Printf("Locate(%d): %v", history, bp)
			history++
		}

	default:
		panic("NOT REACHED")
	}
}
// NewOtaru initializes all otaru subsystems (cipher, scheduler, blobstores, inodedb,
// filesystem, mgmt server) according to cfg and oneshotcfg, and returns the assembled instance.
func NewOtaru(cfg *Config, oneshotcfg *OneshotConfig) (*Otaru, error) {
	o := &Otaru{}

	var err error

	key := btncrypt.KeyFromPassword(cfg.Password)
	o.C, err = btncrypt.NewCipher(key)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init Cipher: %v", err)
	}

	o.S = scheduler.NewScheduler()

	if !cfg.LocalDebug {
		o.Tsrc, err = auth.GetGCloudTokenSource(context.TODO(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCloudTokenSource: %v", err)
		}
		o.DSCfg = datastore.NewConfig(cfg.ProjectName, cfg.BucketName, o.C, o.Tsrc)
		o.GL = datastore.NewGlobalLocker(o.DSCfg, GenHostName(), "FIXME: fill info")
		if err := o.GL.Lock(); err != nil {
			return nil, err
		}
	}

	o.CacheTgtBS, err = blobstore.NewFileBlobStore(cfg.CacheDir, oflags.O_RDWRCREATE)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init FileBlobStore: %v", err)
	}

	if !cfg.LocalDebug {
		o.DefaultBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, cfg.BucketName, o.Tsrc, oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCSBlobStore: %v", err)
		}
		if !cfg.UseSeparateBucketForMetadata {
			o.BackendBS = o.DefaultBS
		} else {
			metabucketname := fmt.Sprintf("%s-meta", cfg.BucketName)
			o.MetadataBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, metabucketname, o.Tsrc, oflags.O_RDWRCREATE)
			if err != nil {
				o.Close()
				return nil, fmt.Errorf("Failed to init GCSBlobStore (metadata): %v", err)
			}

			// Route metadata blobpaths to the metadata bucket; everything else to the default bucket.
			o.BackendBS = blobstore.Mux{
				blobstore.MuxEntry{metadata.IsMetadataBlobpath, o.MetadataBS},
				blobstore.MuxEntry{nil, o.DefaultBS},
			}
		}
	} else {
		o.BackendBS, err = blobstore.NewFileBlobStore(path.Join(os.Getenv("HOME"), ".otaru", "bbs"), oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init FileBlobStore (backend): %v", err)
		}
	}

	queryFn := chunkstore.NewQueryChunkVersion(o.C)
	o.CBS, err = cachedblobstore.New(o.BackendBS, o.CacheTgtBS, o.S, oflags.O_RDWRCREATE /* FIXME */, queryFn)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init CachedBlobStore: %v", err)
	}
	if err := o.CBS.RestoreState(o.C); err != nil {
		logger.Warningf(mylog, "Attempted to restore cachedblobstore state but failed: %v", err)
	}
	o.CSS = cachedblobstore.NewCacheSyncScheduler(o.CBS)

	if !cfg.LocalDebug {
		o.SSLoc = datastore.NewINodeDBSSLocator(o.DSCfg)
	} else {
		logger.Panicf(mylog, "Implement mock sslocator that doesn't depend on gcloud/datastore")
	}
	o.SIO = blobstoredbstatesnapshotio.New(o.CBS, o.C, o.SSLoc)

	if !cfg.LocalDebug {
		txio := datastore.NewDBTransactionLogIO(o.DSCfg)
		o.TxIO = txio
		o.TxIOSS = util.NewSyncScheduler(txio, 300*time.Millisecond)
	} else {
		o.TxIO = inodedb.NewSimpleDBTransactionLogIO()
	}
	o.CTxIO = inodedb.NewCachedDBTransactionLogIO(o.TxIO)

	if oneshotcfg.Mkfs {
		o.IDBBE, err = inodedb.NewEmptyDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewEmptyDB failed: %v", err)
		}
	} else {
		o.IDBBE, err = inodedb.NewDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewDB failed: %v", err)
		}
	}

	o.IDBS = inodedb.NewDBService(o.IDBBE)
	o.IDBSS = util.NewSyncScheduler(o.IDBS, 30*time.Second)

	o.FS = otaru.NewFileSystem(o.IDBS, o.CBS, o.C)
	o.MGMT = mgmt.NewServer()
	if err := o.runMgmtServer(); err != nil {
		o.Close()
		return nil, fmt.Errorf("Mgmt server run failed: %v", err)
	}

	return o, nil
}
// otaru-deleteallblobs wipes all otaru blobs in the configured GCS bucket(s) and the local cache.
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	flag.Usage = Usage
	flag.Parse()

	if flag.NArg() != 0 {
		Usage()
		os.Exit(1)
	}

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		Usage()
		os.Exit(1)
	}

	tsrc, err := auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to init GCloudTokenSource: %v", err)
	}
	key := btncrypt.KeyFromPassword(cfg.Password)
	c, err := btncrypt.NewCipher(key)
	if err != nil {
		log.Fatalf("Failed to init btncrypt.Cipher: %v", err)
	}

	fmt.Printf("Do you really want to proceed with deleting all blobs in gs://%s{,-meta} and the local cache in %s?\n", cfg.BucketName, cfg.CacheDir)
	fmt.Printf("Type \"deleteall\" to proceed: ")
	sc := bufio.NewScanner(os.Stdin)
	if !sc.Scan() {
		return
	}
	if sc.Text() != "deleteall" {
		log.Printf("Cancelled.\n")
		os.Exit(1)
	}

	dscfg := datastore.NewConfig(cfg.ProjectName, cfg.BucketName, c, tsrc)
	l := datastore.NewGlobalLocker(dscfg, "otaru-deleteallblobs", facade.GenHostName())
	if err := l.Lock(); err != nil {
		log.Printf("Failed to acquire global lock: %v", err)
		return
	}
	defer l.Unlock()

	if err := clearGCS(cfg.ProjectName, cfg.BucketName, tsrc); err != nil {
		log.Printf("Failed to clear bucket \"%s\": %v", cfg.BucketName, err)
		return
	}
	if cfg.UseSeparateBucketForMetadata {
		metabucketname := fmt.Sprintf("%s-meta", cfg.BucketName)
		if err := clearGCS(cfg.ProjectName, metabucketname, tsrc); err != nil {
			log.Printf("Failed to clear metadata bucket \"%s\": %v", metabucketname, err)
			return
		}
	}
	if err := clearCache(cfg.CacheDir); err != nil {
		log.Printf("Failed to clear cache \"%s\": %v", cfg.CacheDir, err)
		return
	}

	log.Printf("otaru-deleteallblobs: Successfully completed!")
	log.Printf("Hint: You might also want to run \"otaru-txlogio purge\" to delete inodedb txlogs.")
}
// otaru-globallock-cli is a command-line debug tool to lock, unlock, or query the otaru global lock.
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	flag.Usage = Usage
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		log.Printf("%v", err)
		Usage()
		os.Exit(2)
	}

	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	switch flag.Arg(0) {
	case "lock", "unlock", "query":
		// recognized command
	default:
		log.Printf("Unknown cmd: %v", flag.Arg(0))
		os.Exit(1)
	}

	tsrc, err := auth.GetGCloudTokenSource(context.Background(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
	if err != nil {
		log.Fatalf("Failed to init GCloudTokenSource: %v", err)
	}
	c := btncrypt.Cipher{} // A null cipher is fine here, as GlobalLocker doesn't make use of it.

	dscfg := datastore.NewConfig(cfg.ProjectName, cfg.BucketName, c, tsrc)
	info := *flagInfoStr
	if info == "" {
		info = "otaru-globallock-cli cmdline debug tool"
	}
	l := datastore.NewGlobalLocker(dscfg, "otaru-globallock-cli", info)

	switch flag.Arg(0) {
	case "lock":
		if err := l.Lock(); err != nil {
			log.Printf("Lock failed: %v", err)
		}
	case "unlock":
		if *flagForce {
			if err := l.ForceUnlock(); err != nil {
				log.Printf("ForceUnlock failed: %v", err)
				os.Exit(1)
			}
		} else {
			if err := l.UnlockIgnoreCreatedAt(); err != nil {
				log.Printf("Unlock failed: %v", err)
				os.Exit(1)
			}
		}
	case "query":
		entry, err := l.Query()
		if err != nil {
			log.Printf("Query failed: %v", err)
			os.Exit(1)
		}
		fmt.Printf("%+v\n", entry)
	default:
		panic("should not be reached")
	}
}
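// Example invocations (illustrative; the binary name is assumed from the
// "otaru-globallock-cli" lock name above, and "-force" is an assumed name for the
// flag backing *flagForce):
//
//	otaru-globallock-cli lock
//	otaru-globallock-cli -force unlock
//	otaru-globallock-cli query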