// Validate the new blocks in mem pool and store them in db func validateBlocksFromMemPool(b *common.DirectoryBlock, fMemPool *ftmMemPool, db database.Db) bool { // Validate the genesis block if b.Header.DBHeight == 0 { h, _ := common.CreateHash(b) if h.String() != common.GENESIS_DIR_BLOCK_HASH { // panic for milestone 1 panic("\nGenesis block hash expected: " + common.GENESIS_DIR_BLOCK_HASH + "\nGenesis block hash found: " + h.String() + "\n") //procLog.Errorf("Genesis dir block is not as expected: " + h.String()) } } fMemPool.RLock() defer fMemPool.RUnlock() for _, dbEntry := range b.DBEntries { switch dbEntry.ChainID.String() { case ecchain.ChainID.String(): if _, ok := fMemPool.blockpool[dbEntry.KeyMR.String()]; !ok { return false } case achain.ChainID.String(): if msg, ok := fMemPool.blockpool[dbEntry.KeyMR.String()]; !ok { return false } else { // validate signature of the previous dir block aBlkMsg, _ := msg.(*wire.MsgABlock) if !validateDBSignature(aBlkMsg.ABlk, dchain) { return false } } case fchain.ChainID.String(): if _, ok := fMemPool.blockpool[dbEntry.KeyMR.String()]; !ok { return false } default: if msg, ok := fMemPool.blockpool[dbEntry.KeyMR.String()]; !ok { return false } else { eBlkMsg, _ := msg.(*wire.MsgEBlock) // validate every entry in EBlock for _, ebEntry := range eBlkMsg.EBlk.Body.EBEntries { if _, foundInMemPool := fMemPool.blockpool[ebEntry.String()]; !foundInMemPool { if !bytes.Equal(ebEntry.Bytes()[:31], common.ZERO_HASH[:31]) { // continue if the entry arleady exists in db entry, _ := db.FetchEntryByHash(ebEntry) if entry == nil { return false } } } } } } } return true }
// Validate the new blocks in mem pool and store them in db func validateAndStoreBlocks(fMemPool *ftmMemPool, db database.Db, dchain *common.DChain, outCtlMsgQ chan wire.FtmInternalMsg) { var myDBHeight int64 var dbhash *wire.ShaHash var sleeptime int var dblk *common.DirectoryBlock for true { dblk = nil dbhash, myDBHeight, _ = db.FetchBlockHeightCache() adj := (len(dchain.Blocks) - int(myDBHeight)) if adj <= 0 { adj = 1 } // in milliseconds sleeptime = 100 + 1000/adj if len(dchain.Blocks) > int(myDBHeight+1) { dblk = dchain.Blocks[myDBHeight+1] } if dblk != nil { if validateBlocksFromMemPool(dblk, fMemPool, db) { err := storeBlocksFromMemPool(dblk, fMemPool, db) if err == nil { deleteBlocksFromMemPool(dblk, fMemPool) } else { panic("error in storeBlocksFromMemPool. " + err.Error()) } } else { time.Sleep(time.Duration(sleeptime * 1000000)) // Nanoseconds for duration } } else { //TODO: send an internal msg to sync up with peers now := time.Now().Unix() // the block is up-to-date if now-int64(lastDirBlockTimestamp) < 600 { time.Sleep(11 * time.Minute) } else { time.Sleep(time.Duration(sleeptime * 1000000)) // Nanoseconds for duration // this means, there could be a syncup breakage happened, and let's renew syncup. //startHash, _ := wire.NewShaHash(dbhash.Bytes()) if dbhash != nil { outMsgQueue <- &wire.MsgInt_ReSyncup{ StartHash: dbhash, } } } } } }
// Validate the new blocks in mem pool and store them in db func validateAndStoreBlocks(fMemPool *ftmMemPool, db database.Db, dchain *common.DChain, outCtlMsgQ chan wire.FtmInternalMsg) { var myDBHeight int64 var sleeptime int var dblk *common.DirectoryBlock for true { dblk = nil _, myDBHeight, _ = db.FetchBlockHeightCache() adj := (len(dchain.Blocks) - int(myDBHeight)) if adj <= 0 { adj = 1 } // in milliseconds sleeptime = 100 + 1000/adj if len(dchain.Blocks) > int(myDBHeight+1) { dblk = dchain.Blocks[myDBHeight+1] } if dblk != nil { if validateBlocksFromMemPool(dblk, fMemPool, db) { err := storeBlocksFromMemPool(dblk, fMemPool, db) if err == nil { deleteBlocksFromMemPool(dblk, fMemPool) } else { panic("error in deleteBlocksFromMemPool.") } } else { time.Sleep(time.Duration(sleeptime * 1000000)) // Nanoseconds for duration } } else { time.Sleep(time.Duration(sleeptime * 1000000)) // Nanoseconds for duration //TODO: send an internal msg to sync up with peers } } }
// storeBlocksFromMemPool persists the directory block b and every child block
// it references (entry-credit, admin, factoid, and entry blocks) from the mem
// pool into the db, then updates the block-height cache and, when it detects
// a gap between the previously cached height and b's height, requests a
// re-syncup from peers via outMsgQueue.
// Precondition: validateBlocksFromMemPool(b, ...) returned true, so every
// KeyMR referenced by b is assumed present in the pool (lookups/type
// assertions below would panic otherwise).
// Need to make a batch insert in db in milestone 2
func storeBlocksFromMemPool(b *common.DirectoryBlock, fMemPool *ftmMemPool, db database.Db) error {
	// Hold the pool read lock for the whole walk over blockpool.
	fMemPool.RLock()
	defer fMemPool.RUnlock()

	for _, dbEntry := range b.DBEntries {
		// Dispatch on the chain the child block belongs to.
		switch dbEntry.ChainID.String() {
		case ecchain.ChainID.String():
			ecBlkMsg := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgECBlock)
			err := db.ProcessECBlockBatch(ecBlkMsg.ECBlock)
			if err != nil {
				return err
			}
			// needs to be improved??
			initializeECreditMap(ecBlkMsg.ECBlock)
			// for debugging
			exportECBlock(ecBlkMsg.ECBlock)
		case achain.ChainID.String():
			aBlkMsg := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgABlock)
			err := db.ProcessABlockBatch(aBlkMsg.ABlk)
			if err != nil {
				return err
			}
			// for debugging
			exportABlock(aBlkMsg.ABlk)
		case fchain.ChainID.String():
			fBlkMsg := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgFBlock)
			err := db.ProcessFBlockBatch(fBlkMsg.SC)
			if err != nil {
				return err
			}
			// Initialize the Factoid State
			// NOTE(review): FactoshisPerCredit is updated even when
			// AddTransactionBlock fails (the err check comes after the
			// assignment) — confirm this ordering is intentional.
			err = common.FactoidState.AddTransactionBlock(fBlkMsg.SC)
			FactoshisPerCredit = fBlkMsg.SC.GetExchRate()
			if err != nil {
				return err
			}
			// for debugging
			exportFctBlock(fBlkMsg.SC)
		default:
			// handle Entry Block
			eBlkMsg, _ := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgEBlock)
			// store entry in db first; entries absent from the pool are
			// skipped (they are expected to already be in the db).
			for _, ebEntry := range eBlkMsg.EBlk.Body.EBEntries {
				if msg, foundInMemPool := fMemPool.blockpool[ebEntry.String()]; foundInMemPool {
					err := db.InsertEntry(msg.(*wire.MsgEntry).Entry)
					if err != nil {
						return err
					}
				}
			}
			// Store Entry Block in db
			err := db.ProcessEBlockBatch(eBlkMsg.EBlk)
			if err != nil {
				return err
			}
			// create a chain when it's the first block of the entry chain
			if eBlkMsg.EBlk.Header.EBSequence == 0 {
				chain := new(common.EChain)
				chain.ChainID = eBlkMsg.EBlk.Header.ChainID
				chain.FirstEntry, _ = db.FetchEntryByHash(eBlkMsg.EBlk.Body.EBEntries[0])
				if chain.FirstEntry == nil {
					return errors.New("First entry not found for chain:" + eBlkMsg.EBlk.Header.ChainID.String())
				}
				// NOTE(review): InsertChain's error is ignored here — confirm
				// that is acceptable.
				db.InsertChain(chain)
				chainIDMap[chain.ChainID.String()] = chain
			}
			// for debugging
			exportEBlock(eBlkMsg.EBlk)
		}
	}

	// Snapshot the previously cached height/hash BEFORE storing b, so the
	// gap check below compares against the pre-store state.
	dbhash, dbHeight, _ := db.FetchBlockHeightCache()
	//fmt.Printf("last block height is %d, to-be-saved block height is %d\n", dbHeight, b.Header.DBHeight)

	// Store the dir block
	err := db.ProcessDBlockBatch(b)
	if err != nil {
		return err
	}
	// Remember when the latest stored dir block was produced (read by the
	// sync loop to decide whether the chain is up to date).
	lastDirBlockTimestamp = b.Header.Timestamp

	// Update dir block height cache in db
	commonHash, _ := common.CreateHash(b)
	db.UpdateBlockHeightCache(b.Header.DBHeight, commonHash)

	// for debugging
	exportDBlock(b)

	// this means, there's syncup breakage happened, and let's renew syncup.
	if uint32(dbHeight) < b.Header.DBHeight-1 {
		startHash, _ := wire.NewShaHash(dbhash.Bytes())
		stopHash, _ := wire.NewShaHash(commonHash.Bytes())
		outMsgQueue <- &wire.MsgInt_ReSyncup{
			StartHash: startHash,
			StopHash:  stopHash,
		}
	}
	return nil
}
// Validate the new blocks in mem pool and store them in db // Need to make a batch insert in db in milestone 2 func storeBlocksFromMemPool(b *common.DirectoryBlock, fMemPool *ftmMemPool, db database.Db) error { for _, dbEntry := range b.DBEntries { switch dbEntry.ChainID.String() { case ecchain.ChainID.String(): ecBlkMsg := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgECBlock) err := db.ProcessECBlockBatch(ecBlkMsg.ECBlock) if err != nil { return err } // needs to be improved?? initializeECreditMap(ecBlkMsg.ECBlock) // for debugging exportECBlock(ecBlkMsg.ECBlock) case achain.ChainID.String(): aBlkMsg := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgABlock) err := db.ProcessABlockBatch(aBlkMsg.ABlk) if err != nil { return err } // for debugging exportABlock(aBlkMsg.ABlk) case fchain.ChainID.String(): fBlkMsg := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgFBlock) err := db.ProcessFBlockBatch(fBlkMsg.SC) if err != nil { return err } // Initialize the Factoid State err = common.FactoidState.AddTransactionBlock(fBlkMsg.SC) FactoshisPerCredit = fBlkMsg.SC.GetExchRate() if err != nil { return err } // for debugging exportFctBlock(fBlkMsg.SC) default: // handle Entry Block eBlkMsg, _ := fMemPool.blockpool[dbEntry.KeyMR.String()].(*wire.MsgEBlock) // store entry in db first for _, ebEntry := range eBlkMsg.EBlk.Body.EBEntries { if msg, foundInMemPool := fMemPool.blockpool[ebEntry.String()]; foundInMemPool { err := db.InsertEntry(msg.(*wire.MsgEntry).Entry) if err != nil { return err } } } // Store Entry Block in db err := db.ProcessEBlockBatch(eBlkMsg.EBlk) if err != nil { return err } // create a chain when it's the first block of the entry chain if eBlkMsg.EBlk.Header.EBSequence == 0 { chain := new(common.EChain) chain.ChainID = eBlkMsg.EBlk.Header.ChainID chain.FirstEntry, _ = db.FetchEntryByHash(eBlkMsg.EBlk.Body.EBEntries[0]) if chain.FirstEntry == nil { return errors.New("First entry not found for chain:" + 
eBlkMsg.EBlk.Header.ChainID.String()) } db.InsertChain(chain) chainIDMap[chain.ChainID.String()] = chain } // for debugging exportEBlock(eBlkMsg.EBlk) } } // Store the dir block err := db.ProcessDBlockBatch(b) if err != nil { return err } // Update dir block height cache in db commonHash, _ := common.CreateHash(b) db.UpdateBlockHeightCache(b.Header.DBHeight, commonHash) // for debugging exportDBlock(b) return nil }