func main() {
	var err error
	var dbType string
	var datadir string
	var shastring string

	flag.StringVar(&dbType, "dbtype", "", "Database backend to use for the Block Chain")
	flag.StringVar(&datadir, "datadir", "", "Directory to store data")
	flag.StringVar(&shastring, "s", "", "Block sha to process")
	flag.Parse()

	// Set up logging for this tool and the btcdb package.
	log, err = seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.TraceLvl)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v", err)
		return
	}
	defer log.Flush()
	btcdb.UseLogger(log)

	// Apply defaults for any options that were not specified.
	if len(dbType) == 0 {
		dbType = "sqlite"
	}
	if len(datadir) == 0 {
		datadir = filepath.Join(btcdHomeDir(), "data")
	}
	datadir = filepath.Join(datadir, "mainnet")

	// Construct the database path from the backend type.
	blockDbNamePrefix := "blocks"
	dbName := blockDbNamePrefix + "_" + dbType
	if dbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(datadir, dbName)

	log.Infof("loading db")
	db, err := btcdb.OpenDB(dbType, dbPath)
	if err != nil {
		log.Warnf("db open failed: %v", err)
		return
	}
	defer db.Close()
	log.Infof("db load complete")

	_, height, err := db.NewestSha()
	if err != nil {
		log.Warnf("unable to fetch newest block: %v", err)
		return
	}
	log.Infof("loaded block height %v", height)

	// Remove all blocks after the requested block sha.
	sha, err := getSha(db, shastring)
	if err != nil {
		log.Infof("Invalid block %v", shastring)
		return
	}
	err = db.DropAfterBlockBySha(&sha)
	if err != nil {
		log.Warnf("failed %v", err)
	}
}
// NewLoggerFromWriter creates a logger for use with non-btclog based systems.
func NewLoggerFromWriter(w io.Writer, minLevel LogLevel) (Logger, error) {
	l, err := seelog.LoggerFromWriterWithMinLevel(w, seelog.LogLevel(minLevel))
	if err != nil {
		return nil, err
	}

	return NewSubsystemLogger(l, ""), nil
}
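// The following is an illustrative sketch, not part of the original source:
// it shows wrapping an arbitrary io.Writer in a Logger for code that does not
// otherwise use btclog.  The InfoLvl constant is assumed to exist alongside
// the LogLevel type; substitute whatever level values the package actually
// defines.
func newStderrLogger() (Logger, error) {
	// InfoLvl is an assumed LogLevel constant used only for illustration.
	return NewLoggerFromWriter(os.Stderr, InfoLvl)
}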
// SetLogWriter uses a specified io.Writer to output package logging info.
// This allows a caller to direct package logging output without needing a
// dependency on seelog.  If the caller is also using seelog, UseLogger should
// be used instead.
func SetLogWriter(w io.Writer) error {
	if w == nil {
		return errors.New("nil writer")
	}

	l, err := seelog.LoggerFromWriterWithMinLevel(w, seelog.TraceLvl)
	if err != nil {
		return err
	}

	UseLogger(l)
	return nil
}
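// The following is an illustrative sketch, not part of the original source:
// a caller that wants package log output written to a file can pass any
// io.Writer to SetLogWriter without importing seelog itself.  The btcdb
// package qualifier and the log file location are assumptions made only for
// this example.
func directLoggingToFile() error {
	logFile, err := os.Create(filepath.Join(os.TempDir(), "btcdb.log")) // hypothetical path
	if err != nil {
		return err
	}
	// The file must remain open for as long as logging is in use.
	return btcdb.SetLogWriter(logFile)
}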
func main() {
	cfg := config{
		DbType:  "leveldb",
		DataDir: filepath.Join(btcdHomeDir(), "data"),
	}
	parser := flags.NewParser(&cfg, flags.Default)
	_, err := parser.Parse()
	if err != nil {
		if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
			parser.WriteHelp(os.Stderr)
		}
		return
	}

	// Set up logging for this tool and the btcdb package.
	log, err = seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.TraceLvl)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v", err)
		return
	}
	defer log.Flush()
	btcdb.UseLogger(log)

	// Select the network-specific data directory.
	var testnet string
	if cfg.TestNet3 {
		testnet = "testnet"
	} else {
		testnet = "mainnet"
	}
	cfg.DataDir = filepath.Join(cfg.DataDir, testnet)

	// Construct the database path from the backend type.
	blockDbNamePrefix := "blocks"
	dbName := blockDbNamePrefix + "_" + cfg.DbType
	if cfg.DbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(cfg.DataDir, dbName)

	log.Infof("loading db")
	db, err := btcdb.OpenDB(cfg.DbType, dbPath)
	if err != nil {
		log.Warnf("db open failed: %v", err)
		return
	}
	defer db.Close()
	log.Infof("db load complete")

	_, height, err := db.NewestSha()
	if err != nil {
		log.Warnf("unable to fetch newest block: %v", err)
		return
	}
	log.Infof("loaded block height %v", height)

	// Remove all blocks after the requested block hash.
	sha, err := getSha(db, cfg.ShaString)
	if err != nil {
		log.Infof("Invalid block hash %v", cfg.ShaString)
		return
	}
	err = db.DropAfterBlockBySha(&sha)
	if err != nil {
		log.Warnf("failed %v", err)
	}
}
func main() {
	cfg := config{
		DbType:  "leveldb",
		DataDir: filepath.Join(btcdHomeDir(), "data"),
	}
	parser := flags.NewParser(&cfg, flags.Default)
	_, err := parser.Parse()
	if err != nil {
		if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
			parser.WriteHelp(os.Stderr)
		}
		return
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	// Set up logging for this tool and the btcdb package.
	log, err = seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.InfoLvl)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v", err)
		return
	}
	defer log.Flush()
	btcdb.UseLogger(log)

	// Select the network-specific data directory and make sure it exists.
	var testnet string
	if cfg.TestNet3 {
		testnet = "testnet"
	} else {
		testnet = "mainnet"
	}
	cfg.DataDir = filepath.Join(cfg.DataDir, testnet)

	err = os.MkdirAll(cfg.DataDir, 0700)
	if err != nil {
		fmt.Printf("unable to create db repo area %v, %v\n", cfg.DataDir, err)
		return
	}

	// Construct the database path from the backend type.
	blockDbNamePrefix := "blocks"
	dbName := blockDbNamePrefix + "_" + cfg.DbType
	if cfg.DbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(cfg.DataDir, dbName)

	log.Infof("loading db")
	db, err := btcdb.CreateDB(cfg.DbType, dbPath)
	if err != nil {
		log.Warnf("db open failed: %v", err)
		return
	}
	defer db.Close()
	log.Infof("db created")

	var fi io.ReadCloser
	fi, err = os.Open(cfg.InFile)
	if err != nil {
		log.Warnf("failed to open file %v, err %v", cfg.InFile, err)
		return
	}
	defer func() {
		if err := fi.Close(); err != nil {
			log.Warnf("failed to close file %v %v", cfg.InFile, err)
		}
	}()

	bufqueue := make(chan *bufQueue, 2)
	blkqueue := make(chan *blkQueue, 2)

	// Start one block processor per CPU; results are reassembled in height
	// order below before being inserted into the database.
	for i := 0; i < runtime.NumCPU(); i++ {
		go processBuf(i, bufqueue, blkqueue)
	}
	go readBlocks(fi, bufqueue)

	var eheight int64
	doneMap := map[int64]*blkQueue{}
	for {
		select {
		case blkM := <-blkqueue:
			doneMap[blkM.height] = blkM

			for {
				if blkP, ok := doneMap[eheight]; ok {
					delete(doneMap, eheight)
					blkP.complete <- true
					db.InsertBlock(blkP.blk)

					if cfg.Progress {
						log.Infof("Processing block %v", eheight)
					}
					eheight++

					// Periodically dump a heap profile for memory analysis.
					if eheight%2000 == 0 {
						f, err := os.Create(fmt.Sprintf("profile.%d", eheight))
						if err == nil {
							pprof.WriteHeapProfile(f)
							f.Close()
						} else {
							log.Warnf("profile failed %v", err)
						}
					}
				} else {
					break
				}
			}
		}
	}
}
func main() {
	var err error
	var dbType string
	var datadir string
	var shastring, eshastring, outfile string
	var rflag, fflag, tflag bool
	var progress int
	end := int64(-1)

	flag.StringVar(&dbType, "dbtype", "", "Database backend to use for the Block Chain")
	flag.StringVar(&datadir, "datadir", ".", "Directory to store data")
	flag.StringVar(&shastring, "s", "", "Block sha to process")
	flag.StringVar(&eshastring, "e", "", "End block sha to process")
	flag.StringVar(&outfile, "o", "", "outfile")
	flag.BoolVar(&rflag, "r", false, "raw block")
	flag.BoolVar(&fflag, "f", false, "fmt block")
	flag.BoolVar(&tflag, "t", false, "show transactions")
	flag.IntVar(&progress, "p", 0, "show progress")
	flag.Parse()

	// Set up logging for this tool and the btcdb package.
	log, err = seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.InfoLvl)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v", err)
		return
	}
	defer log.Flush()
	btcdb.UseLogger(log)

	// Apply defaults for any options that were not specified.
	if len(dbType) == 0 {
		dbType = "sqlite"
	}
	if len(datadir) == 0 {
		datadir = filepath.Join(btcdHomeDir(), "data")
	}
	datadir = filepath.Join(datadir, "mainnet")

	// Construct the database path from the backend type.
	blockDbNamePrefix := "blocks"
	dbName := blockDbNamePrefix + "_" + dbType
	if dbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(datadir, dbName)

	log.Infof("loading db %v", dbType)
	db, err := btcdb.OpenDB(dbType, dbPath)
	if err != nil {
		log.Warnf("db open failed: %v", err)
		return
	}
	defer db.Close()
	log.Infof("db load complete")

	// Determine the range of block heights to dump.
	height, err := getHeight(db, shastring)
	if err != nil {
		log.Infof("Invalid block %v", shastring)
		return
	}
	if eshastring != "" {
		end, err = getHeight(db, eshastring)
		if err != nil {
			log.Infof("Invalid end block %v", eshastring)
			return
		}
	} else {
		end = height + 1
	}
	log.Infof("height %v end %v", height, end)

	var fo io.WriteCloser
	if outfile != "" {
		fo, err = os.Create(outfile)
		if err != nil {
			log.Warnf("failed to open file %v, err %v", outfile, err)
			return
		}
		defer func() {
			if err := fo.Close(); err != nil {
				log.Warnf("failed to close file %v %v", outfile, err)
			}
		}()
	}

	for ; height < end; height++ {
		if progress != 0 && height%int64(progress) == 0 {
			log.Infof("Processing block %v", height)
		}
		err = DumpBlock(db, height, fo, rflag, fflag, tflag)
		if err != nil {
			break
		}
	}
	if progress != 0 {
		// Log the last block actually processed.
		height--
		log.Infof("Processing block %v", height)
	}
}
func main() {
	var err error
	var dbType string
	var datadir string
	var infile string
	var progress int

	flag.StringVar(&dbType, "dbtype", "", "Database backend to use for the Block Chain")
	flag.StringVar(&datadir, "datadir", "", "Directory to store data")
	flag.StringVar(&infile, "i", "", "infile")
	flag.IntVar(&progress, "p", 0, "show progress")
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	if len(infile) == 0 {
		fmt.Printf("Must specify input file\n")
		return
	}

	// Set up logging for this tool and the btcdb package.
	log, err = seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.InfoLvl)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v", err)
		return
	}
	defer log.Flush()
	btcdb.UseLogger(log)

	// Apply defaults for any options that were not specified.
	if len(dbType) == 0 {
		dbType = "sqlite"
	}
	if len(datadir) == 0 {
		datadir = filepath.Join(btcdHomeDir(), "data")
	}
	datadir = filepath.Join(datadir, "mainnet")

	err = os.MkdirAll(datadir, 0700)
	if err != nil {
		fmt.Printf("unable to create db repo area %v, %v\n", datadir, err)
		return
	}

	// Construct the database path from the backend type.
	blockDbNamePrefix := "blocks"
	dbName := blockDbNamePrefix + "_" + dbType
	if dbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(datadir, dbName)

	log.Infof("loading db")
	db, err := btcdb.CreateDB(dbType, dbPath)
	if err != nil {
		log.Warnf("db open failed: %v", err)
		return
	}
	defer db.Close()
	log.Infof("db created")

	var fi io.ReadCloser
	fi, err = os.Open(infile)
	if err != nil {
		log.Warnf("failed to open file %v, err %v", infile, err)
		return
	}
	defer func() {
		if err := fi.Close(); err != nil {
			log.Warnf("failed to close file %v %v", infile, err)
		}
	}()

	bufqueue := make(chan *bufQueue, 2)
	blkqueue := make(chan *blkQueue, 2)

	// Start one block processor per CPU; results are reassembled in height
	// order below before being inserted into the database.
	for i := 0; i < runtime.NumCPU(); i++ {
		go processBuf(i, bufqueue, blkqueue)
	}
	go readBlocks(fi, bufqueue)

	var eheight int64
	doneMap := map[int64]*blkQueue{}
	for {
		select {
		case blkM := <-blkqueue:
			doneMap[blkM.height] = blkM

			for {
				if blkP, ok := doneMap[eheight]; ok {
					delete(doneMap, eheight)
					blkP.complete <- true
					db.InsertBlock(blkP.blk)

					if progress != 0 && eheight%int64(progress) == 0 {
						log.Infof("Processing block %v", eheight)
					}
					eheight++

					// Periodically dump a heap profile for memory analysis.
					if eheight%2000 == 0 {
						f, err := os.Create(fmt.Sprintf("profile.%d", eheight))
						if err == nil {
							pprof.WriteHeapProfile(f)
							f.Close()
						} else {
							log.Warnf("profile failed %v", err)
						}
					}
				} else {
					break
				}
			}
		}
	}
}
func main() {
	end := int64(-1)
	cfg := config{
		DbType:  "leveldb",
		DataDir: filepath.Join(btcdHomeDir(), "data"),
	}
	parser := flags.NewParser(&cfg, flags.Default)
	_, err := parser.Parse()
	if err != nil {
		if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
			parser.WriteHelp(os.Stderr)
		}
		return
	}

	// Set up logging for this tool and the btcdb package.
	log, err = seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.InfoLvl)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v", err)
		return
	}
	defer log.Flush()
	btcdb.UseLogger(log)

	// Select the network-specific data directory.
	var testnet string
	if cfg.TestNet3 {
		testnet = "testnet"
	} else {
		testnet = "mainnet"
	}
	cfg.DataDir = filepath.Join(cfg.DataDir, testnet)

	// Construct the database path from the backend type.
	blockDbNamePrefix := "blocks"
	dbName := blockDbNamePrefix + "_" + cfg.DbType
	if cfg.DbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(cfg.DataDir, dbName)

	log.Infof("loading db %v", cfg.DbType)
	db, err := btcdb.OpenDB(cfg.DbType, dbPath)
	if err != nil {
		log.Warnf("db open failed: %v", err)
		return
	}
	defer db.Close()
	log.Infof("db load complete")

	// Determine the range of block heights to dump.
	height, err := getHeight(db, cfg.ShaString)
	if err != nil {
		log.Infof("Invalid block %v", cfg.ShaString)
		return
	}
	if cfg.EShaString != "" {
		end, err = getHeight(db, cfg.EShaString)
		if err != nil {
			log.Infof("Invalid end block %v", cfg.EShaString)
			return
		}
	} else {
		end = height + 1
	}
	log.Infof("height %v end %v", height, end)

	var fo io.WriteCloser
	if cfg.OutFile != "" {
		fo, err = os.Create(cfg.OutFile)
		if err != nil {
			log.Warnf("failed to open file %v, err %v", cfg.OutFile, err)
			return
		}
		defer func() {
			if err := fo.Close(); err != nil {
				log.Warnf("failed to close file %v %v", cfg.OutFile, err)
			}
		}()
	}

	for ; height < end; height++ {
		if cfg.Progress {
			log.Infof("Processing block %v", height)
		}
		err = DumpBlock(db, height, fo, cfg.RawBlock, cfg.FmtBlock, cfg.ShowTx)
		if err != nil {
			break
		}
	}
	if cfg.Progress {
		// Log the last block actually processed.
		height--
		log.Infof("Processing block %v", height)
	}
}