// generate creates the actual cache. it can be called from multiple // goroutines. the first call will generate the cache, subsequent // calls wait until it is generated. func (cache *cache) generate() { cache.gen.Do(func() { started := time.Now() seedHash := makeSeedHash(cache.epoch) glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash) size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength)) if cache.test { size = cacheSizeForTesting } cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0]))) runtime.SetFinalizer(cache, freeCache) glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started)) }) }
func (l *Light) getCache(blockNum uint64) *cache { var c *cache epoch := blockNum / epochLength // If we have a PoW for that epoch, use that l.mu.Lock() if l.caches == nil { l.caches = make(map[uint64]*cache) } if l.NumCaches == 0 { l.NumCaches = 3 } c = l.caches[epoch] if c == nil { // No cached DAG, evict the oldest if the cache limit was reached if len(l.caches) >= l.NumCaches { var evict *cache for _, cache := range l.caches { if evict == nil || evict.used.After(cache.used) { evict = cache } } glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch) delete(l.caches, evict.epoch) } // If we have the new DAG pre-generated, use that, otherwise create a new one if l.future != nil && l.future.epoch == epoch { glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch) c, l.future = l.future, nil } else { glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch) c = &cache{epoch: epoch, test: l.test} } l.caches[epoch] = c // If we just used up the future cache, or need a refresh, regenerate if l.future == nil || l.future.epoch <= epoch { glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1) l.future = &cache{epoch: epoch + 1, test: l.test} go l.future.generate() } } c.used = time.Now() l.mu.Unlock() // Wait for generation finish and return the cache c.generate() return c }
// Verify checks whether the block's nonce is valid. func (l *Light) Verify(block pow.Block) bool { // TODO: do ethash_quick_verify before getCache in order // to prevent DOS attacks. blockNum := block.NumberU64() if blockNum >= epochLength*2048 { glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048) return false } difficulty := block.Difficulty() /* Cannot happen if block header diff is validated prior to PoW, but can happen if PoW is checked first due to parallel PoW checking. We could check the minimum valid difficulty but for SoC we avoid (duplicating) Ethereum protocol consensus rules here which are not in scope of Ethash */ if difficulty.Cmp(common.Big0) == 0 { glog.V(logger.Debug).Infof("invalid block difficulty") return false } cache := l.getCache(blockNum) dagSize := C.ethash_get_datasize(C.uint64_t(blockNum)) if l.test { dagSize = dagSizeForTesting } // Recompute the hash using the cache. hash := hashToH256(block.HashNoNonce()) ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce())) if !ret.success { return false } // avoid mixdigest malleability as it's not included in a block's "hashNononce" if block.MixDigest() != h256ToHash(ret.mix_hash) { return false } // Make sure cache is live until after the C call. // This is important because a GC might happen and execute // the finalizer before the call completes. _ = cache // The actual check. target := new(big.Int).Div(maxUint256, difficulty) return h256ToHash(ret.result).Big().Cmp(target) <= 0 }
// generate creates the actual DAG. it can be called from multiple
// goroutines. the first call will generate the DAG, subsequent
// calls wait until it is generated.
func (d *dag) generate() {
	d.gen.Do(func() {
		var (
			started   = time.Now()
			seedHash  = makeSeedHash(d.epoch)
			blockNum  = C.uint64_t(d.epoch * epochLength)
			cacheSize = C.ethash_get_cachesize(blockNum)
			dagSize   = C.ethash_get_datasize(blockNum)
		)
		// Tests substitute tiny fixed sizes so generation is fast.
		if d.test {
			cacheSize = cacheSizeForTesting
			dagSize = dagSizeForTesting
		}
		if d.dir == "" {
			d.dir = DefaultDir
		}
		glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
		// Generate a temporary cache.
		// TODO: this could share the cache with Light
		cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		// The temporary cache is only needed while the DAG is being built.
		defer C.ethash_light_delete(cache)
		// Generate the actual DAG. Progress is reported through the
		// exported ethashGoCallback via the cgo trampoline.
		// NOTE(review): C.CString allocates C memory that is never freed
		// here — this leaks d.dir on every generation. Fixing it needs
		// C.free (and <stdlib.h> in the cgo preamble); confirm the preamble
		// before adding the free.
		d.ptr = C.ethash_full_new_internal(
			C.CString(d.dir),
			hashToH256(seedHash),
			dagSize,
			cache,
			(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
		)
		if d.ptr == nil {
			// IO or allocation failure in the C library is unrecoverable here.
			panic("ethash_full_new IO or memory error")
		}
		// Release the C-side DAG when the Go wrapper is collected.
		runtime.SetFinalizer(d, freeDAG)
		glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
	})
}
// ethashGoCallback is invoked from C during DAG generation to report
// progress. Returning 0 tells the C library to continue; the name and
// signature are part of the cgo export contract and must not change.
//export ethashGoCallback
func ethashGoCallback(percent C.unsigned) C.int {
	glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
	return 0
}