func RunTransactionTestsWithReader(r io.Reader, skipTests []string) error { skipTest := make(map[string]bool, len(skipTests)) for _, name := range skipTests { skipTest[name] = true } bt := make(map[string]TransactionTest) if err := readJson(r, &bt); err != nil { return err } for name, test := range bt { // if the test should be skipped, return if skipTest[name] { glog.Infoln("Skipping transaction test", name) return nil } // test the block if err := runTransactionTest(test); err != nil { return err } glog.Infoln("Transaction test passed: ", name) } return nil }
// upgradeDB migrates an old blockchain database to the current
// core.BlockChainVersion: it exports the chain to a timestamped backup file,
// deletes the chaindata directory, and re-imports the backup. On import
// failure the backup file is kept on disk so the user can retry manually.
func upgradeDB(ctx *cli.Context) {
	glog.Infoln("Upgrading blockchain database")

	chain, chainDb := utils.MakeChain(ctx)
	// Read the stored version; a missing/zero entry is treated as the
	// current version for naming the backup file.
	v, _ := chainDb.Get([]byte("BlockchainVersion"))
	bcVersion := int(common.NewValue(v).Uint())
	if bcVersion == 0 {
		bcVersion = core.BlockChainVersion
	}

	// Export the current chain.
	filename := fmt.Sprintf("blockchain_%d_%s.chain", bcVersion, time.Now().Format("20060102_150405"))
	exportFile := filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), filename)
	if err := utils.ExportChain(chain, exportFile); err != nil {
		utils.Fatalf("Unable to export chain for reimport %s", err)
	}
	// Close the database before wiping the directory that backs it.
	chainDb.Close()
	os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "chaindata"))

	// Import the chain file.
	chain, chainDb = utils.MakeChain(ctx)
	// Stamp the fresh database with the current version before importing.
	chainDb.Put([]byte("BlockchainVersion"), common.NewValue(core.BlockChainVersion).Bytes())
	err := utils.ImportChain(chain, exportFile)
	chainDb.Close()
	if err != nil {
		utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile)
	} else {
		// Import succeeded; the backup is no longer needed.
		os.Remove(exportFile)
		glog.Infoln("Import finished")
	}
}
func getFiles(path string) ([]string, error) { glog.Infoln("getFiles", path) var files []string f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() fi, err := f.Stat() if err != nil { return nil, err } switch mode := fi.Mode(); { case mode.IsDir(): fi, _ := ioutil.ReadDir(path) files = make([]string, len(fi)) for i, v := range fi { // only go 1 depth and leave directory entires blank if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension { files[i] = filepath.Join(path, v.Name()) glog.Infoln("Found file", files[i]) } } case mode.IsRegular(): files = make([]string, 1) files[0] = path } return files, nil }
func ExportChain(blockchain *core.BlockChain, fn string) error { glog.Infoln("Exporting blockchain to ", fn) fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) if err != nil { return err } defer fh.Close() if err := blockchain.Export(fh); err != nil { return err } glog.Infoln("Exported blockchain to ", fn) return nil }
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { glog.Infoln("Exporting blockchain to ", fn) // TODO verify mode perms fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) if err != nil { return err } defer fh.Close() if err := blockchain.ExportN(fh, first, last); err != nil { return err } glog.Infoln("Exported blockchain to ", fn) return nil }
// push hands the new sealing task to every registered agent. It is a no-op
// unless mining is currently enabled, and aborts (delivering nothing) when
// the canary check trips.
func (self *worker) push(work *Work) {
	if atomic.LoadInt32(&self.mining) == 1 {
		// Abort mining entirely if the canary signals danger.
		if core.Canary(work.state) {
			glog.Infoln("Toxicity levels rising to deadly levels. Your canary has died. You can go back or continue down the mineshaft --more--")
			glog.Infoln("You turn back and abort mining")
			return
		}

		// push new work to agents
		for agent := range self.agents {
			// NOTE(review): atWork is incremented for every agent, even one
			// whose Work() channel is nil and therefore never receives the
			// task — confirm the counter is decremented symmetrically.
			atomic.AddInt32(&self.atWork, 1)

			if agent.Work() != nil {
				agent.Work() <- work
			}
		}
	}
}
func runTestWithReader(test string, r io.Reader) error { glog.Infoln("runTest", test) var err error switch strings.ToLower(test) { case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests": err = tests.RunBlockTestWithReader(params.MainNetHomesteadBlock, params.MainNetDAOForkBlock, r, skipTests) case "st", "state", "statetest", "statetests": rs := tests.RuleSet{HomesteadBlock: params.MainNetHomesteadBlock, DAOForkBlock: params.MainNetDAOForkBlock, DAOForkSupport: true} err = tests.RunStateTestWithReader(rs, r, skipTests) case "tx", "transactiontest", "transactiontests": err = tests.RunTransactionTestsWithReader(r, skipTests) case "vm", "vmtest", "vmtests": err = tests.RunVmTestWithReader(r, skipTests) case "rlp", "rlptest", "rlptests": err = tests.RunRLPTestWithReader(r, skipTests) default: err = fmt.Errorf("Invalid test type specified: %v", test) } if err != nil { return err } return nil }
func runSuite(test, file string) { var tests []string if test == defaultTest { tests = allTests } else { tests = []string{test} } for _, curTest := range tests { glog.Infoln("runSuite", curTest, file) var err error var files []string if test == defaultTest { // check if we have an explicit directory mapping for the test if _, ok := testDirMapping[curTest]; ok { files, err = getFiles(filepath.Join(file, testDirMapping[curTest])) } else { // otherwise assume test name files, err = getFiles(filepath.Join(file, curTest)) } } else { files, err = getFiles(file) } if err != nil { glog.Fatalln(err) } if len(files) == 0 { glog.Warningln("No files matched path") } for _, curFile := range files { // Skip blank entries if len(curFile) == 0 { continue } r, err := os.Open(curFile) if err != nil { glog.Fatalln(err) } defer r.Close() err = runTestWithReader(curTest, r) if err != nil { if continueOnError { glog.Errorln(err) } else { glog.Fatalln(err) } } } } }
func runBlockTests(homesteadBlock, daoForkBlock *big.Int, bt map[string]*BlockTest, skipTests []string) error { skipTest := make(map[string]bool, len(skipTests)) for _, name := range skipTests { skipTest[name] = true } for name, test := range bt { if skipTest[name] { glog.Infoln("Skipping block test", name) continue } // test the block if err := runBlockTest(homesteadBlock, daoForkBlock, test); err != nil { return fmt.Errorf("%s: %v", name, err) } glog.Infoln("Block test passed: ", name) } return nil }
func runVmTests(tests map[string]VmTest, skipTests []string) error { skipTest := make(map[string]bool, len(skipTests)) for _, name := range skipTests { skipTest[name] = true } for name, test := range tests { if skipTest[name] { glog.Infoln("Skipping VM test", name) return nil } if err := runVmTest(test); err != nil { return fmt.Errorf("%s %s", name, err.Error()) } glog.Infoln("VM test passed: ", name) //fmt.Println(string(statedb.Dump())) } return nil }
func runBlockTests(bt map[string]*BlockTest, skipTests []string) error { skipTest := make(map[string]bool, len(skipTests)) for _, name := range skipTests { skipTest[name] = true } for name, test := range bt { // if the test should be skipped, return if skipTest[name] { glog.Infoln("Skipping block test", name) continue } // test the block if err := runBlockTest(test); err != nil { return fmt.Errorf("%s: %v", name, err) } glog.Infoln("Block test passed: ", name) } return nil }
func runTransactionTests(tests map[string]TransactionTest, skipTests []string) error { skipTest := make(map[string]bool, len(skipTests)) for _, name := range skipTests { skipTest[name] = true } for name, test := range tests { // if the test should be skipped, return if skipTest[name] { glog.Infoln("Skipping transaction test", name) return nil } // test the block if err := runTransactionTest(test); err != nil { return fmt.Errorf("%s: %v", name, err) } glog.Infoln("Transaction test passed: ", name) } return nil }
// ReportBlock reports the block to the block reporting tool found at // badblocks.ethdev.com func ReportBlock(block *types.Block, err error) { if DisableBadBlockReporting { return } const url = "https://badblocks.ethdev.com" blockRlp, _ := rlp.EncodeToBytes(block) data := map[string]interface{}{ "block": common.Bytes2Hex(blockRlp), "errortype": err.Error(), "hints": map[string]interface{}{ "receipts": "NYI", "vmtrace": "NYI", }, } jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "params": []interface{}{data}, "id": "1", "jsonrpc": "2.0"}) req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) req.Header.Set("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) if err != nil { glog.V(logger.Error).Infoln("POST err:", err) return } defer resp.Body.Close() if glog.V(logger.Debug) { glog.Infoln("response Status:", resp.Status) glog.Infoln("response Headers:", resp.Header) body, _ := ioutil.ReadAll(resp.Body) glog.Infoln("response Body:", string(body)) } }
// VerifyUncles verifies the given block's uncles and applies the Ethereum
// consensus rules to the various block headers included; it will return an
// error if any of the included uncle headers were invalid. It returns an error
// if the validation failed.
func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
	// validate that there at most 2 uncles included in this block
	if len(block.Uncles()) > 2 {
		return ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
	}

	uncles := set.New()
	ancestors := make(map[common.Hash]*types.Block)
	// Collect the 7 most recent ancestors, and the hashes of the uncles
	// those ancestors already included (an uncle may only be used once).
	for _, ancestor := range v.bc.GetBlocksFromHash(block.ParentHash(), 7) {
		ancestors[ancestor.Hash()] = ancestor
		// Include ancestors uncles in the uncle set. Uncles must be unique.
		for _, uncle := range ancestor.Uncles() {
			uncles.Add(uncle.Hash())
		}
	}
	// The block itself may not appear as its own uncle.
	ancestors[block.Hash()] = block
	uncles.Add(block.Hash())

	for i, uncle := range block.Uncles() {
		hash := uncle.Hash()
		if uncles.Has(hash) {
			// Error not unique
			return UncleError("uncle[%d](%x) not unique", i, hash[:4])
		}
		uncles.Add(hash)

		if ancestors[hash] != nil {
			// An uncle that is itself a direct ancestor is invalid; log the
			// ancestry branch to aid debugging before rejecting.
			branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
			for h := range ancestors {
				branch += fmt.Sprintf(" O - %x\n |\n", h)
			}
			glog.Infoln(branch)
			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
		}

		// The uncle's parent must be a known ancestor, and must not be the
		// block's own parent (which would put the uncle at the block's own
		// height).
		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
			return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
		}

		// Finally run full header validation against the uncle's parent.
		if err := ValidateHeader(v.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
			return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
		}
	}

	return nil
}
// VerifyUncles validates the uncles attached to block against the Ethereum
// consensus rules: uncles must be unique, must not be ancestors of the block,
// and must have a known ancestor (other than the block's parent) as parent.
func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
	uncles := set.New()
	ancestors := make(map[common.Hash]*types.Block)
	// Collect the 7 most recent ancestors, and the hashes of the uncles
	// those ancestors already included (an uncle may only be used once).
	for _, ancestor := range sm.bc.GetBlocksFromHash(block.ParentHash(), 7) {
		ancestors[ancestor.Hash()] = ancestor
		// Include ancestors uncles in the uncle set. Uncles must be unique.
		for _, uncle := range ancestor.Uncles() {
			uncles.Add(uncle.Hash())
		}
	}
	// The block itself may not appear as its own uncle.
	ancestors[block.Hash()] = block
	uncles.Add(block.Hash())

	for i, uncle := range block.Uncles() {
		hash := uncle.Hash()
		if uncles.Has(hash) {
			// Error not unique
			return UncleError("uncle[%d](%x) not unique", i, hash[:4])
		}
		uncles.Add(hash)

		if ancestors[hash] != nil {
			// An uncle that is itself a direct ancestor is invalid; log the
			// ancestry branch to aid debugging before rejecting.
			branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
			for h := range ancestors {
				branch += fmt.Sprintf(" O - %x\n |\n", h)
			}
			glog.Infoln(branch)
			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
		}

		// The uncle's parent must be a known ancestor, and must not be the
		// block's own parent (which would put the uncle at the block's own
		// height).
		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
			return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
		}

		// Finally run full header validation against the uncle's parent.
		if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash], true, true); err != nil {
			return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
		}
	}

	return nil
}
func (self *LDBDatabase) Close() { // Stop the metrics collection to avoid internal database races self.quitLock.Lock() defer self.quitLock.Unlock() if self.quitChan != nil { errc := make(chan error) self.quitChan <- errc if err := <-errc; err != nil { glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err) } } err := self.db.Close() if glog.V(logger.Error) { if err == nil { glog.Infoln("closed db:", self.fn) } else { glog.Errorf("error closing db %s: %v", self.fn, err) } } }
func runStateTests(tests map[string]VmTest, skipTests []string) error { skipTest := make(map[string]bool, len(skipTests)) for _, name := range skipTests { skipTest[name] = true } for name, test := range tests { if skipTest[name] /*|| name != "callcodecallcode_11" */ { glog.Infoln("Skipping state test", name) continue } //fmt.Println("StateTest:", name) if err := runStateTest(test); err != nil { return fmt.Errorf("%s: %s\n", name, err.Error()) } //glog.Infoln("State test passed: ", name) //fmt.Println(string(statedb.Dump())) } return nil }
func runTestWithReader(test string, r io.Reader) error { glog.Infoln("runTest", test) var err error switch strings.ToLower(test) { case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests": err = tests.RunBlockTestWithReader(r, skipTests) case "st", "state", "statetest", "statetests": err = tests.RunStateTestWithReader(r, skipTests) case "tx", "transactiontest", "transactiontests": err = tests.RunTransactionTestsWithReader(r, skipTests) case "vm", "vmtest", "vmtests": err = tests.RunVmTestWithReader(r, skipTests) case "rlp", "rlptest", "rlptests": err = tests.RunRLPTestWithReader(r, skipTests) default: err = fmt.Errorf("Invalid test type specified: %v", test) } if err != nil { return err } return nil }
// mergeDatabases when required merge old database layout to one single database func mergeDatabases(datadir string, newdb func(path string) (common.Database, error)) error { // Check if already upgraded data := filepath.Join(datadir, "chaindata") if _, err := os.Stat(data); !os.IsNotExist(err) { return nil } // make sure it's not just a clean path chainPath := filepath.Join(datadir, "blockchain") if _, err := os.Stat(chainPath); os.IsNotExist(err) { return nil } glog.Infoln("Database upgrade required. Upgrading...") database, err := newdb(data) if err != nil { return fmt.Errorf("creating data db err: %v", err) } defer database.Close() // Migrate blocks chainDb, err := newdb(chainPath) if err != nil { return fmt.Errorf("state db err: %v", err) } defer chainDb.Close() if chain, ok := chainDb.(*ethdb.LDBDatabase); ok { glog.Infoln("Merging blockchain database...") it := chain.NewIterator() for it.Next() { database.Put(it.Key(), it.Value()) } it.Release() } // Migrate state stateDb, err := newdb(filepath.Join(datadir, "state")) if err != nil { return fmt.Errorf("state db err: %v", err) } defer stateDb.Close() if state, ok := stateDb.(*ethdb.LDBDatabase); ok { glog.Infoln("Merging state database...") it := state.NewIterator() for it.Next() { database.Put(it.Key(), it.Value()) } it.Release() } // Migrate transaction / receipts extraDb, err := newdb(filepath.Join(datadir, "extra")) if err != nil { return fmt.Errorf("state db err: %v", err) } defer extraDb.Close() if extra, ok := extraDb.(*ethdb.LDBDatabase); ok { glog.Infoln("Merging transaction database...") it := extra.NewIterator() for it.Next() { database.Put(it.Key(), it.Value()) } it.Release() } return nil }
// ImportChain decodes RLP-encoded blocks from the file named fn and inserts
// them into chain in batches of importBatchSize. A received os.Interrupt
// stops the import at the next batch boundary. Batches whose blocks are all
// already known are skipped.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, os.Interrupt)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// ok is false when the channel is closed by the deferred close
		// above, i.e. on normal return; only a real signal logs.
		if _, ok := <-interrupt; ok {
			glog.Info("caught interrupt during import, will stop at next batch")
		}
		close(stop)
	}()
	// checkInterrupt reports (without blocking) whether stop was closed.
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	glog.Infoln("Importing blockchain ", fn)
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()
	stream := rlp.NewStream(fh, 0)

	// Run actual the import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import first block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			// EOF with nothing decoded: the import is complete.
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		if hasAllBlocks(chain, blocks[:i]) {
			glog.Infof("skipping batch %d, all blocks present [%x / %x]", batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4])
			continue
		}

		if _, err := chain.InsertChain(blocks[:i]); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}