// Receive waits for the response promised by the future and returns the
// block header requested from the server given its hash.
func (r FutureGetBlockHeaderResult) Receive() (*wire.BlockHeader, error) {
	res, err := receiveFuture(r)
	if err != nil {
		return nil, err
	}

	// Unmarshal result as a string.
	var bhHex string
	err = json.Unmarshal(res, &bhHex)
	if err != nil {
		return nil, err
	}

	serializedBH, err := hex.DecodeString(bhHex)
	if err != nil {
		return nil, err
	}

	// Deserialize the block header and return it.
	var bh wire.BlockHeader
	err = bh.Deserialize(bytes.NewReader(serializedBH))
	if err != nil {
		return nil, err
	}

	return &bh, nil
}
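// A minimal sketch (not part of the snippet above) of the inverse operation:
// re-encoding a wire.BlockHeader into the hex string form that a
// getblockheader RPC response carries. headerToHex is a hypothetical helper;
// it only assumes the standard bytes and encoding/hex packages plus
// wire.BlockHeader.Serialize.
func headerToHex(hdr *wire.BlockHeader) (string, error) {
	var buf bytes.Buffer
	// Serialize writes the fixed 80-byte wire encoding of the header.
	if err := hdr.Serialize(&buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf.Bytes()), nil
}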
// AskForMerkBlocks reads each header between the current and last heights
// from the header file and queues a getdata request for the corresponding
// filtered (merkle) block.
func (s *SPVCon) AskForMerkBlocks(current, last uint32) error {
	var hdr wire.BlockHeader

	_, err := s.headerFile.Seek(int64(current*80), os.SEEK_SET)
	if err != nil {
		return err
	}

	for current < last {
		err = hdr.Deserialize(s.headerFile)
		if err != nil {
			return err
		}
		current++

		bHash := hdr.BlockSha()
		iv1 := wire.NewInvVect(wire.InvTypeFilteredBlock, &bHash)
		gdataMsg := wire.NewMsgGetData()
		err = gdataMsg.AddInvVect(iv1)
		if err != nil {
			return err
		}
		s.outMsgQueue <- gdataMsg
	}
	return nil
}
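// A minimal sketch (hypothetical helper, not from the code above) of the flat
// header-file layout that AskForMerkBlocks and the other SPVCon methods rely
// on: headers are fixed 80-byte records, so the header at height h starts at
// byte offset h*80. Assumes only the os and io packages and wire.BlockHeader.
func readHeaderAtHeight(r io.ReadSeeker, height int64) (*wire.BlockHeader, error) {
	// jump straight to the record for this height
	if _, err := r.Seek(height*80, os.SEEK_SET); err != nil {
		return nil, err
	}
	var hdr wire.BlockHeader
	// Deserialize consumes exactly the 80 header bytes
	if err := hdr.Deserialize(r); err != nil {
		return nil, err
	}
	return &hdr, nil
}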
// dbFetchHeaderByHash uses an existing database transaction to retrieve the
// block header for the provided hash.
func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) {
	headerBytes, err := dbTx.FetchBlockHeader(hash)
	if err != nil {
		return nil, err
	}

	var header wire.BlockHeader
	err = header.Deserialize(bytes.NewReader(headerBytes))
	if err != nil {
		return nil, err
	}

	return &header, nil
}
// AskForHeaders builds a getheaders message from the last header in the
// header file and queues it for the remote peer.
func (s *SPVCon) AskForHeaders() error {
	var hdr wire.BlockHeader
	ghdr := wire.NewMsgGetHeaders()
	ghdr.ProtocolVersion = s.localVersion

	s.headerMutex.Lock() // start header file ops
	info, err := s.headerFile.Stat()
	if err != nil {
		// unlock before returning so an error doesn't leave the file locked
		s.headerMutex.Unlock()
		return err
	}
	headerFileSize := info.Size()
	if headerFileSize == 0 || headerFileSize%80 != 0 { // header file broken
		s.headerMutex.Unlock()
		return fmt.Errorf("header file not a multiple of 80 bytes")
	}

	// seek to 80 bytes from end of file
	ns, err := s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		s.headerMutex.Unlock()
		log.Printf("can't seek\n")
		return err
	}
	log.Printf("seeked to offset %d (should be near the end)\n", ns)

	// get header from last 80 bytes of file
	err = hdr.Deserialize(s.headerFile)
	if err != nil {
		s.headerMutex.Unlock()
		log.Printf("can't deserialize header\n")
		return err
	}
	s.headerMutex.Unlock() // done with header file

	cHash := hdr.BlockSha()
	err = ghdr.AddBlockLocatorHash(&cHash)
	if err != nil {
		return err
	}

	fmt.Printf("getheaders message has %d header hashes, first one is %s\n",
		len(ghdr.BlockLocatorHashes), ghdr.BlockLocatorHashes[0].String())

	s.outMsgQueue <- ghdr
	return nil
}
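// A minimal sketch (hypothetical, not lit's code) of building a denser block
// locator than the single tip hash AskForHeaders sends: walk back through the
// 80-byte header records with exponentially growing steps, which is what
// bitcoin peers expect for recovering from reorgs. It reuses only APIs that
// appear in the snippets above (Seek, Deserialize, BlockSha); a full
// implementation would also always append the genesis hash at the end.
func buildLocator(r io.ReadSeeker, tipHeight int64) ([]*wire.ShaHash, error) {
	var locator []*wire.ShaHash
	step := int64(1)
	for h := tipHeight; h >= 0; h -= step {
		if _, err := r.Seek(h*80, os.SEEK_SET); err != nil {
			return nil, err
		}
		var hdr wire.BlockHeader
		if err := hdr.Deserialize(r); err != nil {
			return nil, err
		}
		hash := hdr.BlockSha()
		locator = append(locator, &hash)
		// after the first 10 hashes, double the step each time
		if len(locator) > 10 {
			step *= 2
		}
	}
	return locator, nil
}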
// FetchBlockHeaderBySha returns the block header for the provided sha.
func (db *LevelDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (bh *wire.BlockHeader, err error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	// Read the raw block from the database.
	buf, _, err := db.fetchSha(sha)
	if err != nil {
		return nil, err
	}

	// Only deserialize the header portion; Deserialize stops after the
	// 80 header bytes even though buf holds the whole raw block.
	var blockHeader wire.BlockHeader
	err = blockHeader.Deserialize(bytes.NewReader(buf))
	if err != nil {
		return nil, err
	}
	bh = &blockHeader

	return bh, nil
}
// TestBlockHeaderSerialize tests BlockHeader serialize and deserialize.
func TestBlockHeaderSerialize(t *testing.T) {
	nonce := uint32(123123) // 0x1e0f3

	// baseBlockHdr is used in the various tests as a baseline BlockHeader.
	bits := uint32(0x1d00ffff)
	baseBlockHdr := &wire.BlockHeader{
		Version:    1,
		PrevBlock:  mainNetGenesisHash,
		MerkleRoot: mainNetGenesisMerkleRoot,
		Timestamp:  time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
		Bits:       bits,
		Nonce:      nonce,
	}

	// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
	baseBlockHdrEncoded := []byte{
		0x01, 0x00, 0x00, 0x00, // Version 1
		0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
		0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
		0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
		0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
		0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2,
		0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
		0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
		0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
		0x29, 0xab, 0x5f, 0x49, // Timestamp
		0xff, 0xff, 0x00, 0x1d, // Bits
		0xf3, 0xe0, 0x01, 0x00, // Nonce
	}

	tests := []struct {
		in  *wire.BlockHeader // Data to encode
		out *wire.BlockHeader // Expected decoded data
		buf []byte            // Serialized data
	}{
		{
			baseBlockHdr,
			baseBlockHdr,
			baseBlockHdrEncoded,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block header.
		var buf bytes.Buffer
		err := test.in.Serialize(&buf)
		if err != nil {
			t.Errorf("Serialize #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("Serialize #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Deserialize the block header.
		var bh wire.BlockHeader
		rbuf := bytes.NewReader(test.buf)
		err = bh.Deserialize(rbuf)
		if err != nil {
			t.Errorf("Deserialize #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&bh, test.out) {
			t.Errorf("Deserialize #%d\n got: %s want: %s", i,
				spew.Sdump(&bh), spew.Sdump(test.out))
			continue
		}
	}
}
// CheckHeader verifies the header at the given height against its
// predecessor and the difficulty rules for the supplied chain parameters.
func CheckHeader(r io.ReadSeeker, height int64, p *chaincfg.Params) bool {
	var err error
	var cur, prev, epochStart wire.BlockHeader
	// don't try to verify the genesis block.  That way madness lies.
	if height == 0 {
		return true
	}

	// initial load of headers
	// load epochStart, previous and current.
	// get the header from the epoch start, up to 2016 blocks ago
	_, err = r.Seek(80*(height-(height%epochLength)), os.SEEK_SET)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	err = epochStart.Deserialize(r)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	// log.Printf("start epoch at height %d ", height-(height%epochLength))

	// seek to n-1 header
	_, err = r.Seek(80*(height-1), os.SEEK_SET)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	// read in n-1
	err = prev.Deserialize(r)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	// seek to curHeight header and read in
	_, err = r.Seek(80*height, os.SEEK_SET)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	err = cur.Deserialize(r)
	if err != nil {
		log.Printf(err.Error())
		return false
	}

	// get hash of n-1 header
	prevHash := prev.BlockSha()
	// check if headers link together.  That whole 'blockchain' thing.
	if !prevHash.IsEqual(&cur.PrevBlock) {
		log.Printf("Headers %d and %d don't link.\n", height-1, height)
		log.Printf("%s - %s", prev.BlockSha().String(), cur.BlockSha().String())
		return false
	}

	rightBits := epochStart.Bits // normal, no adjustment; Dn = Dn-1
	// see if we're on a difficulty adjustment block
	if height%epochLength == 0 {
		// if so, check if difficulty adjustment is valid.
		// That whole "controlled supply" thing.
		// calculate diff n based on n-2016 ... n-1
		rightBits = calcDiffAdjust(epochStart, prev, p)
		// done with adjustment, save new epochStart header
		epochStart = cur
		log.Printf("Update epoch at height %d", height)
	} else { // not a new epoch
		// if on testnet, check for difficulty nerfing
		if p.ResetMinDifficulty && cur.Timestamp.After(
			prev.Timestamp.Add(targetSpacing*2)) {
			// fmt.Printf("nerf %d ", curHeight)
			rightBits = p.PowLimitBits // difficulty 1
		}
		if cur.Bits != rightBits {
			log.Printf("Block %d %s incorrect difficulty.  Read %x, expect %x\n",
				height, cur.BlockSha().String(), cur.Bits, rightBits)
			return false
		}
	}

	// check if there's a valid proof of work.  That whole "Bitcoin" thing.
	if !checkProofOfWork(cur, p) {
		log.Printf("Block %d Bad proof of work.\n", height)
		return false
	}

	return true // it must have worked if there were no errors and we got to the end.
}
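// A minimal sketch of what a proof-of-work check like checkProofOfWork above
// typically does, assuming the current btcsuite APIs
// (wire.BlockHeader.BlockHash, blockchain.CompactToBig, blockchain.HashToBig)
// rather than the older BlockSha-era API used in the snippet; this is not the
// lit implementation itself.
func headerPoWValid(hdr *wire.BlockHeader, p *chaincfg.Params) bool {
	// expand the compact "Bits" field into the full 256-bit target
	target := blockchain.CompactToBig(hdr.Bits)
	// non-positive targets or targets above the chain's proof-of-work
	// limit are invalid
	if target.Sign() <= 0 || target.Cmp(p.PowLimit) > 0 {
		return false
	}
	// the double-SHA256 header hash, read as a number, must not exceed
	// the target
	hash := hdr.BlockHash()
	return blockchain.HashToBig(&hash).Cmp(target) <= 0
}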
// IngestHeaders takes in a headers message, checks that it extends the local
// header file, appends and verifies each header, and returns true if the
// caller should ask for more headers.
func (s *SPVCon) IngestHeaders(m *wire.MsgHeaders) (bool, error) {
	var err error
	_, err = s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		return false, err
	}
	var last wire.BlockHeader
	err = last.Deserialize(s.headerFile)
	if err != nil {
		return false, err
	}
	prevHash := last.BlockSha()

	gotNum := int64(len(m.Headers))
	if gotNum > 0 {
		fmt.Printf("got %d headers. Range:\n%s - %s\n",
			gotNum, m.Headers[0].BlockSha().String(),
			m.Headers[len(m.Headers)-1].BlockSha().String())
	} else {
		log.Printf("got 0 headers, we're probably synced up")
		return false, nil
	}

	endPos, err := s.headerFile.Seek(0, os.SEEK_END)
	if err != nil {
		return false, err
	}

	// check first header returned to make sure it fits on the end
	// of our header file
	if !m.Headers[0].PrevBlock.IsEqual(&prevHash) {
		// delete 100 headers if this happens!  Dumb reorg.
		log.Printf("possible reorg; header msg doesn't fit. points to %s, expect %s",
			m.Headers[0].PrevBlock.String(), prevHash.String())
		if endPos < 8080 {
			// jeez I give up, back to genesis
			s.headerFile.Truncate(80)
		} else {
			err = s.headerFile.Truncate(endPos - 8000)
			if err != nil {
				return false, fmt.Errorf("couldn't truncate header file")
			}
		}
		return false, fmt.Errorf("truncated header file to try again")
	}

	tip := endPos / 80
	tip-- // move back header length so it can read last header

	for _, resphdr := range m.Headers {
		// write to end of file
		err = resphdr.Serialize(s.headerFile)
		if err != nil {
			return false, err
		}
		// advance chain tip
		tip++
		// check last header
		worked := CheckHeader(s.headerFile, tip, params)
		if !worked {
			if endPos < 8080 {
				// jeez I give up, back to genesis
				s.headerFile.Truncate(80)
			} else {
				err = s.headerFile.Truncate(endPos - 8000)
				if err != nil {
					return false, fmt.Errorf("couldn't truncate header file")
				}
			}
			// probably should disconnect from spv node at this point,
			// since they're giving us invalid headers.
			return false, fmt.Errorf(
				"header %d - %s doesn't fit, dropping 100 headers",
				tip, resphdr.BlockSha().String())
		}
	}
	log.Printf("Headers to height %d OK.", tip)
	return true, nil
}
// AskForBlocks requests blocks from the db sync height up to the header tip.
// Right now this asks for 1 block per getData message.
// Maybe it's faster to ask for many in each message?
func (s *SPVCon) AskForBlocks() error {
	var hdr wire.BlockHeader

	s.headerMutex.Lock() // lock just to check filesize
	stat, err := os.Stat(headerFileName)
	s.headerMutex.Unlock() // checked, unlock
	if err != nil {
		return err
	}
	endPos := stat.Size()

	headerTip := int32(endPos/80) - 1 // move back 1 header length to read

	dbTip, err := s.TS.GetDBSyncHeight()
	if err != nil {
		return err
	}
	fmt.Printf("dbTip %d headerTip %d\n", dbTip, headerTip)
	if dbTip > headerTip {
		return fmt.Errorf("db longer than headers, shouldn't happen")
	}
	if dbTip == headerTip {
		// nothing to ask for; set wait state and return
		fmt.Printf("no blocks to request, entering wait state\n")
		fmt.Printf("%d bytes received\n", s.RBytes)
		s.inWaitState <- true
		// also advertise any unconfirmed txs here
		s.Rebroadcast()
		return nil
	}

	fmt.Printf("will request merkleblocks %d to %d\n", dbTip, headerTip)

	if !s.HardMode { // don't send this in hardmode! that's the whole point
		// create initial filter
		filt, err := s.TS.GimmeFilter()
		if err != nil {
			return err
		}
		// send filter
		s.SendFilter(filt)
		fmt.Printf("sent filter %x\n", filt.MsgFilterLoad().Filter)
	}

	// loop through all heights where we want merkleblocks.
	for dbTip <= headerTip {
		// load header from file
		s.headerMutex.Lock() // seek to header we need
		_, err = s.headerFile.Seek(int64((dbTip-1)*80), os.SEEK_SET)
		if err != nil {
			s.headerMutex.Unlock()
			return err
		}
		err = hdr.Deserialize(s.headerFile) // read header, done w/ file for now
		s.headerMutex.Unlock()              // unlock after reading 1 header
		if err != nil {
			log.Printf("header deserialize error!\n")
			return err
		}

		bHash := hdr.BlockSha()
		// create inventory we're asking for
		var iv1 *wire.InvVect
		// if hardmode, ask for legit blocks, none of this ralphy stuff
		if s.HardMode {
			iv1 = wire.NewInvVect(wire.InvTypeBlock, &bHash)
		} else { // ah well
			iv1 = wire.NewInvVect(wire.InvTypeFilteredBlock, &bHash)
		}
		gdataMsg := wire.NewMsgGetData()
		// add inventory
		err = gdataMsg.AddInvVect(iv1)
		if err != nil {
			return err
		}

		hah := NewRootAndHeight(hdr.BlockSha(), dbTip)
		if dbTip == headerTip { // if this is the last block, indicate finality
			hah.final = true
		}
		// waits here most of the time for the queue to empty out
		s.outMsgQueue <- gdataMsg
		// push height and mroot of requested block on queue
		s.blockQueue <- hah
		dbTip++
	}
	return nil
}
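// A minimal sketch, assuming btcsuite's btcutil/bloom package, of building the
// kind of filter that GimmeFilter/SendFilter deal with in non-hard mode. The
// element added here is a placeholder; a real wallet would add its pubkey
// hashes, scripts, and watched outpoints.
func buildExampleFilter() *wire.MsgFilterLoad {
	// 10 expected elements, tweak 0, 0.01% false-positive rate
	filt := bloom.NewFilter(10, 0, 0.0001, wire.BloomUpdateAll)
	// placeholder element; stands in for a 20-byte pubkey hash
	filt.Add(make([]byte, 20))
	// MsgFilterLoad is the wire message a peer expects before serving
	// merkleblocks (filtered blocks)
	return filt.MsgFilterLoad()
}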