// AskForOneBlock is for testing only, so you can ask for a specific block height
// and see what goes wrong
func (s *SPVCon) AskForOneBlock(h int32) error {
	var hdr wire.BlockHeader
	var err error

	dbTip := int32(h)
	s.headerMutex.Lock()
	// seek to header we need
	_, err = s.headerFile.Seek(int64((dbTip)*80), os.SEEK_SET)
	if err != nil {
		s.headerMutex.Unlock()
		return err
	}
	err = hdr.Deserialize(s.headerFile) // read header, done w/ file for now
	s.headerMutex.Unlock()              // unlock after reading 1 header
	if err != nil {
		log.Printf("header deserialize error!\n")
		return err
	}

	bHash := hdr.BlockSha()
	// create inventory we're asking for
	iv1 := wire.NewInvVect(wire.InvTypeWitnessBlock, &bHash)
	gdataMsg := wire.NewMsgGetData()
	// add inventory
	err = gdataMsg.AddInvVect(iv1)
	if err != nil {
		return err
	}
	hah := NewRootAndHeight(bHash, h)
	s.outMsgQueue <- gdataMsg
	s.blockQueue <- hah // push height and mroot of requested block on queue

	return nil
}
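// AskForHeaders sends out a getheaders message, using the last header in
// the local header file as the block locator.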
func (s *SPVCon) AskForHeaders() error {
	var hdr wire.BlockHeader
	ghdr := wire.NewMsgGetHeaders()
	ghdr.ProtocolVersion = s.localVersion

	s.headerMutex.Lock() // start header file ops
	info, err := s.headerFile.Stat()
	if err != nil {
		s.headerMutex.Unlock()
		return err
	}
	headerFileSize := info.Size()
	if headerFileSize == 0 || headerFileSize%80 != 0 { // header file broken
		s.headerMutex.Unlock()
		return fmt.Errorf("Header file not a multiple of 80 bytes")
	}

	// seek to 80 bytes from end of file
	ns, err := s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		log.Printf("can't seek\n")
		s.headerMutex.Unlock()
		return err
	}
	log.Printf("sought to offset %d (should be near the end)\n", ns)

	// get header from last 80 bytes of file
	err = hdr.Deserialize(s.headerFile)
	if err != nil {
		log.Printf("can't Deserialize")
		s.headerMutex.Unlock()
		return err
	}
	s.headerMutex.Unlock() // done with header file

	cHash := hdr.BlockSha()
	err = ghdr.AddBlockLocatorHash(&cHash)
	if err != nil {
		return err
	}

	fmt.Printf("get headers message has %d header hashes, first one is %s\n",
		len(ghdr.BlockLocatorHashes), ghdr.BlockLocatorHashes[0].String())

	s.outMsgQueue <- ghdr
	return nil
}
// AskForBlocks requests blocks from current to last.
// Right now this asks for 1 block per getData message.
// Maybe it's faster to ask for many in each message?
func (s *SPVCon) AskForBlocks() error {
	var hdr wire.BlockHeader

	s.headerMutex.Lock() // lock just to check filesize
	stat, err := os.Stat(headerFileName)
	s.headerMutex.Unlock() // checked, unlock
	if err != nil {
		return err
	}
	endPos := stat.Size()

	headerTip := int32(endPos/80) - 1 // move back 1 header length to read

	dbTip, err := s.TS.GetDBSyncHeight()
	if err != nil {
		return err
	}
	fmt.Printf("dbTip %d headerTip %d\n", dbTip, headerTip)
	if dbTip > headerTip {
		return fmt.Errorf("error- db longer than headers! shouldn't happen.")
	}
	if dbTip == headerTip {
		// nothing to ask for; set wait state and return
		fmt.Printf("no blocks to request, entering wait state\n")
		fmt.Printf("%d bytes received\n", s.RBytes)
		s.inWaitState <- true
		// also advertise any unconfirmed txs here
		s.Rebroadcast()
		return nil
	}

	fmt.Printf("will request blocks %d to %d\n", dbTip+1, headerTip)

	if !s.HardMode { // don't send this in hardmode! that's the whole point
		// create initial filter
		filt, err := s.TS.GimmeFilter()
		if err != nil {
			return err
		}
		// send filter
		s.SendFilter(filt)
		fmt.Printf("sent filter %x\n", filt.MsgFilterLoad().Filter)
	}

	// loop through all heights where we want merkleblocks.
	for dbTip < headerTip {
		dbTip++ // we're requesting the next header

		// load header from file
		s.headerMutex.Lock()
		// seek to header we need
		_, err = s.headerFile.Seek(int64((dbTip)*80), os.SEEK_SET)
		if err != nil {
			s.headerMutex.Unlock()
			return err
		}
		err = hdr.Deserialize(s.headerFile) // read header, done w/ file for now
		s.headerMutex.Unlock()              // unlock after reading 1 header
		if err != nil {
			log.Printf("header deserialize error!\n")
			return err
		}

		bHash := hdr.BlockSha()
		// create inventory we're asking for
		iv1 := new(wire.InvVect)
		// if hardmode, ask for legit blocks, none of this ralphy stuff
		if s.HardMode {
			iv1 = wire.NewInvVect(wire.InvTypeWitnessBlock, &bHash)
		} else { // ah well
			iv1 = wire.NewInvVect(wire.InvTypeFilteredWitnessBlock, &bHash)
		}
		gdataMsg := wire.NewMsgGetData()
		// add inventory
		err = gdataMsg.AddInvVect(iv1)
		if err != nil {
			return err
		}

		hah := NewRootAndHeight(hdr.BlockSha(), dbTip)
		if dbTip == headerTip { // if this is the last block, indicate finality
			hah.final = true
		}
		// waits here most of the time for the queue to empty out
		s.blockQueue <- hah // push height and mroot of requested block on queue
		s.outMsgQueue <- gdataMsg
	}
	return nil
}
// IngestHeaders takes in a bunch of headers and appends them to the
// local header file, checking that they fit.  If there are no headers,
// it assumes we're done and returns false.  If it worked it assumes there's
// more to request and returns true.
func (s *SPVCon) IngestHeaders(m *wire.MsgHeaders) (bool, error) {
	gotNum := int64(len(m.Headers))
	if gotNum > 0 {
		fmt.Printf("got %d headers. Range:\n%s - %s\n",
			gotNum, m.Headers[0].BlockSha().String(),
			m.Headers[len(m.Headers)-1].BlockSha().String())
	} else {
		log.Printf("got 0 headers, we're probably synced up")
		return false, nil
	}

	s.headerMutex.Lock()
	defer s.headerMutex.Unlock()

	var err error
	// seek to last header
	_, err = s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		return false, err
	}
	var last wire.BlockHeader
	err = last.Deserialize(s.headerFile)
	if err != nil {
		return false, err
	}
	prevHash := last.BlockSha()

	endPos, err := s.headerFile.Seek(0, os.SEEK_END)
	if err != nil {
		return false, err
	}
	tip := int32(endPos/80) - 1 // move back 1 header length to read

	// check first header returned to make sure it fits on the end
	// of our header file
	if !m.Headers[0].PrevBlock.IsEqual(&prevHash) {
		// delete 100 headers if this happens!  Dumb reorg.
		log.Printf("reorg? header msg doesn't fit. points to %s, expect %s",
			m.Headers[0].PrevBlock.String(), prevHash.String())
		if endPos < 8080 {
			// jeez I give up, back to genesis
			s.headerFile.Truncate(80)
		} else {
			err = s.headerFile.Truncate(endPos - 8000)
			if err != nil {
				return false, fmt.Errorf("couldn't truncate header file")
			}
		}
		return true, fmt.Errorf("Truncated header file to try again")
	}

	for _, resphdr := range m.Headers {
		// write to end of file
		err = resphdr.Serialize(s.headerFile)
		if err != nil {
			return false, err
		}
		// advance chain tip
		tip++
		// check last header
		worked := CheckHeader(s.headerFile, tip, s.TS.Param)
		if !worked {
			if endPos < 8080 {
				// jeez I give up, back to genesis
				s.headerFile.Truncate(80)
			} else {
				err = s.headerFile.Truncate(endPos - 8000)
				if err != nil {
					return false, fmt.Errorf("couldn't truncate header file")
				}
			}
			// probably should disconnect from spv node at this point,
			// since they're giving us invalid headers.
			return true, fmt.Errorf(
				"Header %d - %s doesn't fit, dropping 100 headers.",
				tip, resphdr.BlockSha().String())
		}
	}
	log.Printf("Headers to height %d OK.", tip)
	return true, nil
}
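// CheckHeader reads the header at the given height from the header file and
// verifies it against its predecessor: chain linkage, difficulty bits
// (including retarget and testnet min-difficulty rules), and proof of work.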
func CheckHeader(r io.ReadSeeker, height int32, p *chaincfg.Params) bool {
	var err error
	var cur, prev, epochStart wire.BlockHeader
	// don't try to verify the genesis block.  That way madness lies.
	if height == 0 {
		return true
	}

	// initial load of headers
	// load epochstart, previous and current.
	// get the header from the epoch start, up to 2016 blocks ago
	_, err = r.Seek(int64(80*(height-(height%epochLength))), os.SEEK_SET)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	err = epochStart.Deserialize(r)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	//	log.Printf("start epoch at height %d ", height-(height%epochLength))

	// seek to n-1 header
	_, err = r.Seek(int64(80*(height-1)), os.SEEK_SET)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	// read in n-1
	err = prev.Deserialize(r)
	if err != nil {
		log.Printf(err.Error())
		return false
	}

	// seek to curHeight header and read in
	_, err = r.Seek(int64(80*(height)), os.SEEK_SET)
	if err != nil {
		log.Printf(err.Error())
		return false
	}
	err = cur.Deserialize(r)
	if err != nil {
		log.Printf(err.Error())
		return false
	}

	// get hash of n-1 header
	prevHash := prev.BlockSha()
	// check if headers link together.  That whole 'blockchain' thing.
	if !prevHash.IsEqual(&cur.PrevBlock) {
		log.Printf("Headers %d and %d don't link.\n", height-1, height)
		log.Printf("%s - %s", prev.BlockSha().String(), cur.BlockSha().String())
		return false
	}

	rightBits := epochStart.Bits // normal, no adjustment; Dn = Dn-1
	// see if we're on a difficulty adjustment block
	if height%epochLength == 0 {
		// if so, check if difficulty adjustment is valid.
		// That whole "controlled supply" thing.
		// calculate diff n based on n-2016 ... n-1
		rightBits = calcDiffAdjust(epochStart, prev, p)
		// done with adjustment, save new epochStart header
		epochStart = cur
		log.Printf("Update epoch at height %d", height)
	} else { // not a new epoch
		// if on testnet, check for difficulty nerfing
		if p.ResetMinDifficulty && cur.Timestamp.After(
			prev.Timestamp.Add(targetSpacing*2)) {
			//	fmt.Printf("nerf %d ", curHeight)
			rightBits = p.PowLimitBits // difficulty 1
		}
	}

	// check that the header's difficulty bits match what we expect
	if cur.Bits != rightBits {
		log.Printf("Block %d %s incorrect difficulty.  Read %x, expect %x\n",
			height, cur.BlockSha().String(), cur.Bits, rightBits)
		return false
	}

	// check if there's a valid proof of work.  That whole "Bitcoin" thing.
	if !checkProofOfWork(cur, p) {
		log.Printf("Block %d Bad proof of work.\n", height)
		return false
	}

	return true // it must have worked if there's no errors and got to the end.
}