Example no. 1
func (pow *Ethash) Verify(block pow.Block) bool {
	// blockNum (defined in Example no. 2) decodes the block's nonce bytes into a uint64.
	nonceInt, err := blockNum(block)
	if err != nil {
		log.Println("nonce to int err:", err)
		return false
	}
	return pow.verify(block.HashNoNonce(), block.Difficulty(), nonceInt)
}
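All of these examples program against a pow.Block interface. Below is a hedged reconstruction of that interface, inferred only from the methods the examples call; the real definition may differ, and older revisions appear to expose the nonce as raw bytes via N() (see Example no. 2) rather than as a uint64.

package pow

import "math/big"

// Hash stands in for the 32-byte header hash type (common.Hash in
// go-ethereum); it is an assumption here, not taken from the source.
type Hash [32]byte

// Block is reconstructed from usage across the examples and may not
// match the original interface exactly.
type Block interface {
	NumberU64() uint64    // block height, selects the cache/DAG epoch
	HashNoNonce() Hash    // header hash excluding nonce and mix digest
	Difficulty() *big.Int // difficulty the PoW result is checked against
	Nonce() uint64        // the nonce under verification
	MixDigest() Hash      // mix hash committed in the sealed header
}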
Example no. 2
// blockNum decodes the block's nonce bytes into a uint64; despite the
// name, it returns the nonce rather than the block number.
func blockNum(block pow.Block) (uint64, error) {
	nonce := block.N()
	nonceBuf := bytes.NewBuffer(nonce)
	nonceInt, err := binary.ReadUvarint(nonceBuf)
	if err != nil {
		return 0, err
	}

	return nonceInt, nil
}
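Note that binary.ReadUvarint decodes Go's variable-length varint encoding, not a fixed-width integer, so this helper only round-trips nonces that were varint-encoded. A minimal runnable sketch of the distinction (it assumes nothing about how block.N() actually encodes the nonce in this revision):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// binary.ReadUvarint expects varint-encoded bytes: round-trip a
	// nonce through PutUvarint and read it back.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 0xdeadbeef)
	nonce, err := binary.ReadUvarint(bytes.NewBuffer(buf[:n]))
	fmt.Println(nonce, err) // 3735928559 <nil>

	// A fixed 8-byte big-endian nonce would instead be decoded with:
	fixed := make([]byte, 8)
	binary.BigEndian.PutUint64(fixed, 0xdeadbeef)
	fmt.Println(binary.BigEndian.Uint64(fixed)) // 3735928559
}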
Example no. 3
func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) []byte {
	//r := rand.New(rand.NewSource(time.Now().UnixNano()))
	miningHash := block.HashNoNonce()
	diff := block.Difficulty()
	//diff = big.NewInt(10000)
	log.Println("difficulty", diff)
	i := int64(0)
	start := time.Now().UnixNano()
	t := time.Now()

	nonce := uint64(0) //uint64(r.Int63())

	for {
		select {
		case <-stop:
			powlogger.Infoln("Breaking from mining")
			pow.HashRate = 0
			return nil
		default:
			i++

			if time.Since(t) > (1 * time.Second) {
				elapsed := time.Now().UnixNano() - start
				hashes := ((float64(1e9) / float64(elapsed)) * float64(i)) / 1000
				pow.HashRate = int64(hashes)
				powlogger.Infoln("Hashing @", pow.HashRate, "khash")

				t = time.Now()
			}

			cMiningHash := (*C.uint8_t)(unsafe.Pointer(&miningHash[0])) // point C at the first byte of the hash
			cnonce := C.uint64_t(nonce)
			log.Println("mining hash, nonce:", miningHash, nonce)
			// pow.hash is the output/return of ethash_full
			C.ethash_full(pow.hash, pow.cache.mem, pow.params, cMiningHash, cnonce)
			ghash := C.GoBytes(unsafe.Pointer(pow.hash), 32)
			log.Println("ethhash full (on nonce):", ghash, nonce)

			if pow.verify(miningHash, diff, nonce) {
				return ghash
			}
			nonce += 1
		}

		if !pow.turbo {
			time.Sleep(20 * time.Microsecond)
		}
	}

	return nil
}
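The hashrate bookkeeping above boils down to hashes per second scaled to khash. A self-contained sketch of the same arithmetic, with illustrative names not taken from the source:

package main

import "fmt"

// khashPerSec mirrors the expression in Search: hashesTried nonces
// over elapsedNs nanoseconds, reported in khash/s.
func khashPerSec(hashesTried, elapsedNs int64) int64 {
	hashesPerSec := (1e9 / float64(elapsedNs)) * float64(hashesTried)
	return int64(hashesPerSec / 1000)
}

func main() {
	// 500,000 hashes in half a second -> 1,000,000 H/s -> 1000 khash/s.
	fmt.Println(khashPerSec(500000, 5e8)) // 1000
}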
Example no. 4
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
	// TODO: do ethash_quick_verify before getCache in order
	// to prevent DOS attacks.
	var (
		blockNum   = block.NumberU64()
		difficulty = block.Difficulty()
		cache      = l.getCache(blockNum)
		dagSize    = C.ethash_get_datasize(C.uint64_t(blockNum))
	)
	if l.test {
		dagSize = dagSizeForTesting
	}
	if blockNum >= epochLength*2048 {
		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
		return false
	}
	// Recompute the hash using the cache.
	hash := hashToH256(block.HashNoNonce())
	ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
	if !ret.success {
		return false
	}
	// Make sure cache is live until after the C call.
	// This is important because a GC might happen and execute
	// the finalizer before the call completes.
	_ = cache
	// The actual check.
	target := new(big.Int).Div(minDifficulty, difficulty)
	return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
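Despite its name, minDifficulty here appears to hold 2^256 (Example no. 7's later variant calls the same numerator maxUint256), so the final comparison implements result <= 2^256 / difficulty. A runnable sketch of that check, with the numerator spelled out as an assumption:

package main

import (
	"fmt"
	"math/big"
)

// validPoW sketches the final comparison in Verify: a result hash is
// a valid solution iff result <= 2^256 / difficulty. The 2^256
// numerator is an assumption about what minDifficulty holds.
func validPoW(result, difficulty *big.Int) bool {
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)
	target := new(big.Int).Div(two256, difficulty)
	return result.Cmp(target) <= 0
}

func main() {
	diff := big.NewInt(131072) // 2^17, so the target is 2^239
	fmt.Println(validPoW(new(big.Int).Lsh(big.NewInt(1), 239), diff)) // true
	fmt.Println(validPoW(new(big.Int).Lsh(big.NewInt(1), 240), diff)) // false
}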
Example no. 5
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
	// TODO: do ethash_quick_verify before getCache in order
	// to prevent DOS attacks.
	blockNum := block.NumberU64()
	if blockNum >= epochLength*2048 {
		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
		return false
	}

	difficulty := block.Difficulty()
	/* Cannot happen if the block header's difficulty is validated prior
	   to the PoW check, but can happen if PoW is checked first due to
	   parallel PoW checking. We could check the minimum valid difficulty,
	   but for separation of concerns we avoid duplicating Ethereum
	   protocol consensus rules here, which are not in scope for Ethash. */
	if difficulty.Cmp(common.Big0) == 0 {
		glog.V(logger.Debug).Infof("invalid block difficulty")
		return false
	}

	cache := l.getCache(blockNum)
	dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))

	if l.test {
		dagSize = dagSizeForTesting
	}
	// Recompute the hash using the cache.
	hash := hashToH256(block.HashNoNonce())
	ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
	if !ret.success {
		return false
	}

	// Avoid mix-digest malleability: the mix digest is not included in the block's hashNoNonce.
	if block.MixDigest() != h256ToHash(ret.mix_hash) {
		return false
	}

	// Make sure cache is live until after the C call.
	// This is important because a GC might happen and execute
	// the finalizer before the call completes.
	_ = cache
	// The actual check.
	target := new(big.Int).Div(minDifficulty, difficulty)
	return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
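The "_ = cache" line tries to keep the Go-side cache (and the C memory its finalizer manages) alive across the cgo call, but a blank assignment is not a reliable liveness guarantee. Since Go 1.7 the idiomatic pin is runtime.KeepAlive; the following self-contained sketch uses an illustrative finalizer to show the hazard and the fix:

package main

import (
	"fmt"
	"runtime"
)

// cache is illustrative only; the real cache wraps C memory that its
// finalizer frees.
type cache struct{ ptr *int }

func compute(p *int) int { return *p + 1 }

func main() {
	c := &cache{ptr: new(int)}
	runtime.SetFinalizer(c, func(*cache) { fmt.Println("finalized") })

	// Only c.ptr escapes into the call, so without a pin the GC may
	// finalize c (freeing the memory, in the real code) mid-call.
	out := compute(c.ptr)

	// Reliable pin: c stays reachable until this point (Go 1.7+).
	runtime.KeepAlive(c)
	fmt.Println(out)
}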
Example no. 6
func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	hash := block.HashNoNonce()
	diff := block.Difficulty()
	//i := int64(0)
	// TODO fix offset
	i := rand.Int63()
	starti := i
	start := time.Now().UnixNano()

	defer func() { pow.HashRate = 0 }()

	// Make sure stop is empty
empty:
	for {
		select {
		case <-stop:
		default:
			break empty
		}
	}

	for {
		select {
		case <-stop:
			return 0, nil
		default:
			i++

			elapsed := time.Now().UnixNano() - start
			hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000
			pow.HashRate = int64(hashes)

			sha := uint64(r.Int63())
			if verify(hash, diff, sha) {
				return sha, nil
			}
		}

		if !pow.turbo {
			time.Sleep(20 * time.Microsecond)
		}
	}

	return 0, nil
}
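The labeled empty: loop is a standard non-blocking drain: keep receiving from the channel until a receive would block, then break out. Extracted as a self-contained helper (the function name is mine, not the source's):

package main

import "fmt"

// drain consumes any pending values on stop and returns as soon as a
// receive would block, mirroring the "empty:" loop in Search above.
func drain(stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			// consumed a stale stop signal; keep draining
		default:
			return
		}
	}
}

func main() {
	stop := make(chan struct{}, 2)
	stop <- struct{}{}
	stop <- struct{}{}
	drain(stop)
	fmt.Println(len(stop)) // 0
}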
Example no. 7
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
	// TODO: do ethash_quick_verify before getCache in order
	// to prevent DOS attacks.
	blockNum := block.NumberU64()
	if blockNum >= epochLength*2048 {
		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
		return false
	}

	difficulty := block.Difficulty()
	/* Cannot happen if the block header's difficulty is validated prior
	   to the PoW check, but can happen if PoW is checked first due to
	   parallel PoW checking. We could check the minimum valid difficulty,
	   but for separation of concerns we avoid duplicating Ethereum
	   protocol consensus rules here, which are not in scope for Ethash. */
	if difficulty.Cmp(common.Big0) == 0 {
		glog.V(logger.Debug).Infof("invalid block difficulty")
		return false
	}

	cache := l.getCache(blockNum)
	dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
	if l.test {
		dagSize = dagSizeForTesting
	}
	// Recompute the hash using the cache.
	ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
	if !ok {
		return false
	}

	// Avoid mix-digest malleability: the mix digest is not included in the block's hashNoNonce.
	if block.MixDigest() != mixDigest {
		return false
	}

	// The actual check.
	target := new(big.Int).Div(maxUint256, difficulty)
	return result.Big().Cmp(target) <= 0
}
Example no. 8
func (pow *Full) Search(block pow.Block, stop <-chan struct{}) (nonce uint64, mixDigest []byte) {
	dag := pow.getDAG(block.NumberU64())

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	diff := block.Difficulty()

	i := int64(0)
	starti := i
	start := time.Now().UnixNano()
	previousHashrate := int32(0)

	nonce = uint64(r.Int63())
	hash := hashToH256(block.HashNoNonce())
	target := new(big.Int).Div(minDifficulty, diff)
	for {
		select {
		case <-stop:
			atomic.AddInt32(&pow.hashRate, -previousHashrate)
			return 0, nil
		default:
			i++

			// we don't have to update hash rate on every nonce, so update after
			// first nonce check and then after 2^X nonces
			if i == 2 || ((i % (1 << 16)) == 0) {
				elapsed := time.Now().UnixNano() - start
				hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
				hashrateDiff := int32(hashes) - previousHashrate
				previousHashrate = int32(hashes)
				atomic.AddInt32(&pow.hashRate, hashrateDiff)
			}

			ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
			result := h256ToHash(ret.result).Big()

			// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
			if ret.success && result.Cmp(target) <= 0 {
				mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
				atomic.AddInt32(&pow.hashRate, -previousHashrate)
				return nonce, mixDigest
			}
			nonce += 1
		}

		if !pow.turbo {
			time.Sleep(20 * time.Microsecond)
		}
	}
}
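Unlike the simpler variant in Example no. 9 below, this Search lets several concurrent searchers share one hash-rate counter: each goroutine adds only the delta against its previously reported figure, and withdraws its contribution when it stops. A minimal sketch of that accounting, with illustrative types and names:

package main

import (
	"fmt"
	"sync/atomic"
)

// miner and report are illustrative, not from the source.
type miner struct{ hashRate int32 }

// report folds a goroutine's new rate estimate into the shared
// counter by adding only the change since its last report.
func (m *miner) report(prev *int32, current int32) {
	atomic.AddInt32(&m.hashRate, current-*prev)
	*prev = current
}

func main() {
	m := &miner{}
	var prevA, prevB int32
	m.report(&prevA, 1000) // searcher A reports 1000 H/s
	m.report(&prevB, 500)  // searcher B reports 500 H/s
	m.report(&prevA, 1200) // A revises its estimate upward
	fmt.Println(atomic.LoadInt32(&m.hashRate)) // 1700

	atomic.AddInt32(&m.hashRate, -prevA) // A stops, withdrawing its share
	fmt.Println(atomic.LoadInt32(&m.hashRate)) // 500
}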
Example no. 9
func (pow *Full) Search(block pow.Block, stop <-chan struct{}) (nonce uint64, mixDigest []byte) {
	dag := pow.getDAG(block.NumberU64())

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	diff := block.Difficulty()

	i := int64(0)
	starti := i
	start := time.Now().UnixNano()

	nonce = uint64(r.Int63())
	hash := hashToH256(block.HashNoNonce())
	target := new(big.Int).Div(minDifficulty, diff)
	for {
		select {
		case <-stop:
			pow.hashRate = 0
			return 0, nil
		default:
			i++

			elapsed := time.Now().UnixNano() - start
			hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000
			pow.hashRate = int64(hashes)

			ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
			result := h256ToHash(ret.result).Big()

			// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
			if ret.success && result.Cmp(target) <= 0 {
				mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
				return nonce, mixDigest
			}
			nonce += 1
		}

		if !pow.turbo {
			time.Sleep(20 * time.Microsecond)
		}
	}
}
Example no. 10
func (pow failpow) Verify(b pow.Block) bool {
	return b.NumberU64() != pow.num
}
Example no. 11
func (pow failPow) Verify(block pow.Block) bool { return block.NumberU64() != pow.failing }
Example no. 12
func Verify(block pow.Block) bool {
	return verify(block.HashNoNonce(), block.Difficulty(), block.Nonce())
}
Example no. 13
func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
	c.mu.Lock()
	defer c.mu.Unlock() // registered before any early return below so the mutex cannot leak

	newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64())))
	if newDagSize > c.dagSize {
		// TODO: clean up buffers from previous DAG?
		err := InitCL(block.NumberU64(), c)
		if err != nil {
			fmt.Println("OpenCL init error: ", err)
			return 0, []byte{0}
		}
	}

	// Avoid unneeded OpenCL initialisation if we received stop while running InitCL
	select {
	case <-stop:
		return 0, []byte{0}
	default:
	}

	headerHash := block.HashNoNonce()
	diff := block.Difficulty()
	target256 := new(big.Int).Div(maxUint256, diff)
	target64 := new(big.Int).Rsh(target256, 192).Uint64()
	var zero uint32 = 0

	d := c.devices[index]

	_, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil)
	if err != nil {
		fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
		return 0, []byte{0}
	}

	for i := 0; i < searchBufSize; i++ {
		_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil)
		if err != nil {
			fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
			return 0, []byte{0}
		}
	}

	// wait for all search buffers to complete
	err = d.queue.Finish()
	if err != nil {
		fmt.Println("Error in Search clFinish : ", err)
		return 0, []byte{0}
	}

	err = d.searchKernel.SetArg(1, d.headerBuf)
	if err != nil {
		fmt.Println("Error in Search clSetKernelArg : ", err)
		return 0, []byte{0}
	}

	err = d.searchKernel.SetArg(2, d.dagBuf)
	if err != nil {
		fmt.Println("Error in Search clSetKernelArg : ", err)
		return 0, []byte{0}
	}

	err = d.searchKernel.SetArg(4, target64)
	if err != nil {
		fmt.Println("Error in Search clSetKernelArg : ", err)
		return 0, []byte{0}
	}
	err = d.searchKernel.SetArg(5, uint32(math.MaxUint32))
	if err != nil {
		fmt.Println("Error in Search clSetKernelArg : ", err)
		return 0, []byte{0}
	}

	// wait on this before returning
	var preReturnEvent *cl.Event
	if d.openCL12 {
		preReturnEvent, err = d.ctx.CreateUserEvent()
		if err != nil {
			fmt.Println("Error in Search create CL user event : ", err)
			return 0, []byte{0}
		}
	}

	pending := make([]pendingSearch, 0, searchBufSize)
	var p *pendingSearch
	searchBufIndex := uint32(0)
	var checkNonce uint64
	loops := int64(0)
	prevHashRate := int32(0)
	start := time.Now().UnixNano()
	// We grab a single random nonce and set it as an argument to the kernel
	// search function; the device then adds each local thread's gid to the
	// nonce, creating a unique nonce per compute unit executing in parallel.
	initNonce := uint64(d.nonceRand.Int63())
	for nonce := initNonce; ; nonce += uint64(globalWorkSize) {
		select {
		case <-stop:

			/*
				if d.openCL12 {
					err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
					if err != nil {
						fmt.Println("Error in Search WaitForEvents: ", err)
					}
				}
			*/

			atomic.AddInt32(&c.hashRate, -prevHashRate)
			return 0, []byte{0}
		default:
		}

		if (loops % (1 << 7)) == 0 {
			elapsed := time.Now().UnixNano() - start
			// TODO: verify if this is correct hash rate calculation
			hashes := (float64(1e9) / float64(elapsed)) * float64(loops*1024*256)
			hashrateDiff := int32(hashes) - prevHashRate
			prevHashRate = int32(hashes)
			atomic.AddInt32(&c.hashRate, hashrateDiff)
		}
		loops++

		err = d.searchKernel.SetArg(0, d.searchBuffers[searchBufIndex])
		if err != nil {
			fmt.Println("Error in Search clSetKernelArg : ", err)
			return 0, []byte{0}
		}
		err = d.searchKernel.SetArg(3, nonce)
		if err != nil {
			fmt.Println("Error in Search clSetKernelArg : ", err)
			return 0, []byte{0}
		}

		// execute kernel
		_, err := d.queue.EnqueueNDRangeKernel(
			d.searchKernel,
			[]int{0},
			[]int{globalWorkSize},
			[]int{d.workGroupSize},
			nil)
		if err != nil {
			fmt.Println("Error in Search clEnqueueNDRangeKernel : ", err)
			return 0, []byte{0}
		}

		pending = append(pending, pendingSearch{bufIndex: searchBufIndex, startNonce: nonce})
		searchBufIndex = (searchBufIndex + 1) % searchBufSize

		if len(pending) == searchBufSize {
			p = &(pending[searchBufIndex])
			cres, _, err := d.queue.EnqueueMapBuffer(d.searchBuffers[p.bufIndex], true,
				cl.MapFlagRead, 0, (1+maxSearchResults)*SIZEOF_UINT32,
				nil)
			if err != nil {
				fmt.Println("Error in Search clEnqueueMapBuffer: ", err)
				return 0, []byte{0}
			}

			results := cres.ByteSlice()
			nfound := binary.LittleEndian.Uint32(results)
			nfound = uint32(math.Min(float64(nfound), float64(maxSearchResults)))
			// OpenCL returns the offsets from the start nonce
			for i := uint32(0); i < nfound; i++ {
				lo := (i + 1) * SIZEOF_UINT32
				hi := (i + 2) * SIZEOF_UINT32
				upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi]))
				checkNonce = p.startNonce + upperNonce
				if checkNonce != 0 {
					// We verify that the nonce is indeed a solution by
					// executing the Ethash verification function (on the CPU).
					cache := c.ethash.Light.getCache(block.NumberU64())
					ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce)

					// TODO: return result first
					if ok && result.Big().Cmp(target256) <= 0 {
						_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
						if err != nil {
							fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err)
						}
						if d.openCL12 {
							err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
							if err != nil {
								fmt.Println("Error in Search WaitForEvents: ", err)
							}
						}
						return checkNonce, mixDigest.Bytes()
					}
					_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil)
					if err != nil {
						fmt.Println("Error in Search cl: EnqueueWriteBuffer", err)
						return 0, []byte{0}
					}
				}
			}
			_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
			if err != nil {
				fmt.Println("Error in Search clEnqueueUnMapMemObject: ", err)
				return 0, []byte{0}
			}
			pending = append(pending[:searchBufIndex], pending[searchBufIndex+1:]...)
		}
	}
	if d.openCL12 {
		err := cl.WaitForEvents([]*cl.Event{preReturnEvent})
		if err != nil {
			fmt.Println("Error in Search clWaitForEvents: ", err)
			return 0, []byte{0}
		}
	}
	return 0, []byte{0}
}
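The OpenCL kernel compares only the upper 64 bits of each candidate hash, so Search collapses the 256-bit target into target64 by shifting right by 192 bits. A runnable sketch of that derivation, assuming maxUint256 holds 2^256 as in the CPU paths above:

package main

import (
	"fmt"
	"math/big"
)

// target64 mirrors the computation at the top of Search: the 256-bit
// target 2^256/difficulty, truncated to its most significant 64 bits.
func target64(difficulty *big.Int) uint64 {
	maxUint256 := new(big.Int).Lsh(big.NewInt(1), 256) // assumed value of the constant
	target256 := new(big.Int).Div(maxUint256, difficulty)
	return new(big.Int).Rsh(target256, 192).Uint64()
}

func main() {
	// With difficulty 2^32, target256 = 2^224 and its top 64 bits are 2^32.
	fmt.Println(target64(new(big.Int).Lsh(big.NewInt(1), 32))) // 4294967296
}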