// validateHeader performs some early, low-cost verification on the header to
// determine whether the corresponding block should be downloaded. Callers
// should not assume that validation will happen in a particular order.
func (cs *ConsensusSet) validateHeader(tx dbTx, h types.BlockHeader) error {
	// Check if the block is a DoS block - a known invalid block that is
	// expensive to validate.
	id := h.ID()
	_, exists := cs.dosBlocks[id]
	if exists {
		return errDoSBlock
	}

	// Check if the block is already known.
	blockMap := tx.Bucket(BlockMap)
	if blockMap == nil {
		return errNoBlockMap
	}
	if blockMap.Get(id[:]) != nil {
		return modules.ErrBlockKnown
	}

	// Check for the parent.
	parentID := h.ParentID
	parentBytes := blockMap.Get(parentID[:])
	if parentBytes == nil {
		return errOrphan
	}
	var parent processedBlock
	err := cs.marshaler.Unmarshal(parentBytes, &parent)
	if err != nil {
		return err
	}

	// Check that the target of the new block is sufficient.
	if !checkHeaderTarget(h, parent.ChildTarget) {
		return modules.ErrBlockUnsolved
	}

	// TODO: check if the block is a non extending block once headers-first
	// downloads are implemented.

	// Check that the timestamp is not too far in the past to be acceptable.
	minTimestamp := cs.blockRuleHelper.minimumValidChildTimestamp(blockMap, &parent)
	if minTimestamp > h.Timestamp {
		return errEarlyTimestamp
	}

	// Check if the block is in the extreme future. We make a distinction
	// between future and extreme future because there is an assumption that by
	// the time the extreme future arrives, this block will no longer be a part
	// of the longest fork because it will have been ignored by all of the
	// miners.
	if h.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold {
		return errExtremeFutureTimestamp
	}

	// We do not check if the header is in the near future here, because we
	// want to get the corresponding block as soon as possible, even if the
	// block is in the near future.
	return nil
}
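// For context, a minimal sketch of the blockRuleHelper used above. It assumes
// the usual median-of-past-timestamps rule; the stdBlockRuleHelper name, the
// dbBucket type, and types.MedianTimestampWindow are assumptions about the
// surrounding package, and error handling is elided for brevity.
type stdBlockRuleHelper struct{}

func (rh stdBlockRuleHelper) minimumValidChildTimestamp(blockMap dbBucket, pb *processedBlock) types.Timestamp {
	// Collect the timestamps of the previous MedianTimestampWindow blocks by
	// walking the parent pointers stored in the block map.
	windowTimes := make(types.TimestampSlice, types.MedianTimestampWindow)
	windowTimes[0] = pb.Block.Timestamp
	parentID := pb.Block.ParentID
	var parent processedBlock
	for i := uint64(1); i < types.MedianTimestampWindow; i++ {
		// Once the genesis block is reached, reuse the earliest timestamp for
		// the remainder of the window.
		if parentID == (types.BlockID{}) {
			windowTimes[i] = windowTimes[i-1]
			continue
		}
		_ = encoding.Unmarshal(blockMap.Get(parentID[:]), &parent)
		windowTimes[i] = parent.Block.Timestamp
		parentID = parent.Block.ParentID
	}
	sort.Sort(windowTimes)

	// A child block's timestamp must be at least the median of the window.
	return windowTimes[len(windowTimes)/2]
}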
// SubmitHeader accepts a block header.
func (m *Miner) SubmitHeader(bh types.BlockHeader) error {
	if err := m.tg.Add(); err != nil {
		return err
	}
	defer m.tg.Done()

	// Because a call to managedSubmitBlock is required at the end of this
	// function, the first part needs to be wrapped in an anonymous function
	// for lock safety.
	var b types.Block
	err := func() error {
		m.mu.Lock()
		defer m.mu.Unlock()

		// Lookup the block that corresponds to the provided header.
		nonce := bh.Nonce
		bh.Nonce = [8]byte{}
		bPointer, bExists := m.blockMem[bh]
		arbData, arbExists := m.arbDataMem[bh]
		if !bExists || !arbExists {
			return errLateHeader
		}

		// Block is going to be passed to external memory, but the memory
		// pointed to by the transactions slice is still being modified -
		// needs to be copied. Same with the memory being pointed to by the
		// arb data slice.
		b = *bPointer
		txns := make([]types.Transaction, len(b.Transactions))
		copy(txns, b.Transactions)
		b.Transactions = txns
		b.Transactions[0].ArbitraryData = [][]byte{arbData[:]}
		b.Nonce = nonce

		// Sanity check - block should have same id as header.
		bh.Nonce = nonce
		if types.BlockID(crypto.HashObject(bh)) != b.ID() {
			m.log.Critical("block reconstruction failed")
		}
		return nil
	}()
	if err != nil {
		m.log.Println("ERROR during call to SubmitHeader, pre SubmitBlock:", err)
		return err
	}
	err = m.managedSubmitBlock(b)
	if err != nil {
		m.log.Println("ERROR returned by managedSubmitBlock:", err)
		return err
	}
	return nil
}
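// Illustrative sketch of the intended call pattern, assuming the miner
// exposes HeaderForWork() (types.BlockHeader, types.Target, error) as
// elsewhere in this package. solveAndSubmit is a hypothetical helper; a real
// miner would grind nonces on dedicated hardware rather than in a Go loop.
func solveAndSubmit(m *Miner) error {
	header, target, err := m.HeaderForWork()
	if err != nil {
		return err
	}
	// Grind the nonce until the header's ID meets the target.
	for n := uint64(0); ; n++ {
		binary.LittleEndian.PutUint64(header.Nonce[:], n)
		id := header.ID()
		if bytes.Compare(target[:], id[:]) >= 0 {
			break
		}
	}
	// SubmitHeader reconstructs the full block from blockMem/arbDataMem and
	// submits it to the consensus set.
	return m.SubmitHeader(header)
}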
// TestCheckHeaderTarget probes the checkHeaderTarget function and checks that
// its result matches the result of checkTarget.
func TestCheckHeaderTarget(t *testing.T) {
	var b types.Block
	var h types.BlockHeader

	tests := []struct {
		target   types.Target
		expected bool
		msg      string
	}{
		{types.RootDepth, true, "checkHeaderTarget failed for a low target"},
		{types.Target{}, false, "checkHeaderTarget passed for a high target"},
		{types.Target(h.ID()), true, "checkHeaderTarget failed for a same target"},
	}
	for _, tt := range tests {
		if checkHeaderTarget(h, tt.target) != tt.expected {
			t.Error(tt.msg)
		}
		if checkHeaderTarget(h, tt.target) != checkTarget(b, tt.target) {
			t.Errorf("checkHeaderTarget and checkTarget do not match for target %v", tt.target)
		}
	}
}
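// For reference, a sketch of checkTarget, the block-level analogue the test
// compares against. Its two-argument signature is implied by the test above;
// since a block's ID is the hash of its header, the two functions should
// agree whenever the block and header correspond.
func checkTarget(b types.Block, target types.Target) bool {
	blockHash := b.ID()
	return bytes.Compare(target[:], blockHash[:]) >= 0
}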
// SubmitHeader accepts a block header.
func (m *Miner) SubmitHeader(bh types.BlockHeader) error {
	m.mu.Lock()

	// Lookup the block that corresponds to the provided header.
	var b types.Block
	nonce := bh.Nonce
	bh.Nonce = [8]byte{}
	bPointer, bExists := m.blockMem[bh]
	arbData, arbExists := m.arbDataMem[bh]
	if !bExists || !arbExists {
		m.log.Println("ERROR:", errLateHeader)
		m.mu.Unlock()
		return errLateHeader
	}

	// Block is going to be passed to external memory, but the memory pointed
	// to by the transactions slice is still being modified - needs to be
	// copied. Same with the memory being pointed to by the arb data slice.
	b = *bPointer
	txns := make([]types.Transaction, len(b.Transactions))
	copy(txns, b.Transactions)
	b.Transactions = txns
	b.Transactions[0].ArbitraryData = [][]byte{arbData[:]}
	b.Nonce = nonce

	// Sanity check - block should have same id as header.
	if build.DEBUG {
		bh.Nonce = nonce
		if types.BlockID(crypto.HashObject(bh)) != b.ID() {
			panic("block reconstruction failed")
		}
	}

	m.mu.Unlock()
	return m.SubmitBlock(b)
}
// threadedRPCRelayHeader is an RPC that accepts a block header from a peer.
func (cs *ConsensusSet) threadedRPCRelayHeader(conn modules.PeerConn) error {
	err := cs.tg.Add()
	if err != nil {
		return err
	}
	wg := new(sync.WaitGroup)
	defer func() {
		go func() {
			wg.Wait()
			cs.tg.Done()
		}()
	}()

	// Decode the block header from the connection.
	var h types.BlockHeader
	err = encoding.ReadObject(conn, &h, types.BlockHeaderSize)
	if err != nil {
		return err
	}

	// Start verification inside of a bolt View tx.
	cs.mu.RLock()
	err = cs.db.View(func(tx *bolt.Tx) error {
		// Do some relatively inexpensive checks to validate the header.
		return cs.validateHeader(boltTxWrapper{tx}, h)
	})
	cs.mu.RUnlock()
	if err == errOrphan {
		// If the header is an orphan, try to find the parents. The call needs
		// to be made in a separate goroutine, as execution requires calling an
		// exported gateway method - threadedRPCRelayHeader was likely called
		// from an exported gateway function.
		//
		// NOTE: In general this is bad design. Rather than recycling other
		// calls, the whole protocol should have been kept in a single RPC.
		// Because it is not, we have to do weird threading to prevent
		// deadlocks, and we also have to be concerned every time the code in
		// managedReceiveBlocks is adjusted.
		wg.Add(1)
		go func() {
			err := cs.gateway.RPC(conn.RPCAddr(), "SendBlocks", cs.managedReceiveBlocks)
			if err != nil {
				cs.log.Debugln("WARN: failed to get parents of orphan header:", err)
			}
			wg.Done()
		}()
		return nil
	} else if err != nil {
		return err
	}

	// If the header is valid and extends the heaviest chain, fetch the
	// corresponding block. The call needs to be made in a separate goroutine
	// because an exported call to the gateway is used, which is a deadlock
	// risk given that rpcRelayHeader is called from the gateway.
	//
	// NOTE: In general this is bad design. Rather than recycling other calls,
	// the whole protocol should have been kept in a single RPC. Because it is
	// not, we have to do weird threading to prevent deadlocks, and we also
	// have to be concerned every time the code in managedReceiveBlock is
	// adjusted.
	wg.Add(1)
	go func() {
		err := cs.gateway.RPC(conn.RPCAddr(), "SendBlk", cs.managedReceiveBlock(h.ID()))
		if err != nil {
			cs.log.Debugln("WARN: failed to get header's corresponding block:", err)
		}
		wg.Done()
	}()
	return nil
}
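// A plausible sketch of the managedReceiveBlock closure used above: it
// returns an RPC function that requests a single block by ID and hands it to
// the consensus set. The managedAcceptBlock name and the types.BlockSizeLimit
// read bound are assumptions about the surrounding package.
func (cs *ConsensusSet) managedReceiveBlock(id types.BlockID) modules.RPCFunc {
	return func(conn modules.PeerConn) error {
		// Tell the peer which block is wanted, then read the block back.
		if err := encoding.WriteObject(conn, id); err != nil {
			return err
		}
		var block types.Block
		if err := encoding.ReadObject(conn, &block, types.BlockSizeLimit); err != nil {
			return err
		}
		return cs.managedAcceptBlock(block)
	}
}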
// checkHeaderTarget returns true if the header's ID meets the given target.
func checkHeaderTarget(h types.BlockHeader, target types.Target) bool {
	blockHash := h.ID()
	return bytes.Compare(target[:], blockHash[:]) >= 0
}