// threadedScan is an ongoing function which will query the full set of hosts
// every few hours to see who is online and available for uploading.
func (hdb *HostDB) threadedScan() {
	for {
		// Determine who to scan. At most 'MaxActiveHosts' will be scanned,
		// starting with the active hosts followed by a random selection of the
		// inactive hosts.
		func() {
			hdb.mu.Lock()
			defer hdb.mu.Unlock()

			// Scan all active hosts.
			for _, host := range hdb.activeHosts {
				hdb.scanHostEntry(host.hostEntry)
			}

			// Assemble all of the inactive hosts into a single array.
			var entries []*hostEntry
			for _, entry := range hdb.allHosts {
				entry2, exists := hdb.activeHosts[entry.NetAddress]
				if !exists {
					entries = append(entries, entry)
				} else if entry2.hostEntry != entry {
					build.Critical("allHosts + activeHosts mismatch!")
				}
			}

			// Generate a random ordering of up to InactiveHostCheckupQuantity
			// hosts.
			n := InactiveHostCheckupQuantity
			if n > len(entries) {
				n = len(entries)
			}
			hostOrder, err := crypto.Perm(n)
			if err != nil {
				hdb.log.Println("ERR: could not generate random permutation:", err)
			}

			// Scan each host.
			for _, randIndex := range hostOrder {
				hdb.scanHostEntry(entries[randIndex])
			}
		}()

		// Sleep for a random amount of time before doing another round of
		// scanning. The minimums and maximums keep the scan time reasonable,
		// while the randomness prevents the scanning from always happening at
		// the same time of day or week.
		maxBig := big.NewInt(int64(MaxScanSleep))
		minBig := big.NewInt(int64(MinScanSleep))
		randSleep, err := rand.Int(rand.Reader, maxBig.Sub(maxBig, minBig))
		if err != nil {
			build.Critical(err)
			// If there's an error, sleep for the default amount of time.
			defaultBig := big.NewInt(int64(DefaultScanSleep))
			randSleep = defaultBig.Sub(defaultBig, minBig)
		}
		// randSleep is uniform in [0, MaxScanSleep-MinScanSleep), so adding
		// MinScanSleep back yields a total sleep in [MinScanSleep, MaxScanSleep).
		hdb.sleeper.Sleep(time.Duration(randSleep.Int64()) + MinScanSleep)
	}
}
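// The sleep computation above can be easier to see in isolation. The sketch
// below is a self-contained illustration with made-up minimum and maximum
// values (the real constants live in the hostdb package); it shows that the
// resulting duration always falls in [min, max).
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Illustrative values only.
	const (
		minScanSleep = 1 * time.Hour
		maxScanSleep = 3 * time.Hour
	)

	maxBig := big.NewInt(int64(maxScanSleep))
	minBig := big.NewInt(int64(minScanSleep))

	// randSleep is uniform in [0, max-min).
	randSleep, err := rand.Int(rand.Reader, maxBig.Sub(maxBig, minBig))
	if err != nil {
		panic(err)
	}

	// Adding the minimum back shifts the range to [min, max).
	sleep := time.Duration(randSleep.Int64()) + minScanSleep
	fmt.Println(sleep >= minScanSleep && sleep < maxScanSleep) // true
}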
// RandomHosts will pull up to 'n' random hosts from the hostdb. There will be
// no repeats, but the length of the slice returned may be less than 'n', and
// may even be 0. Hosts that appear earlier in the returned slice have higher
// priority. Hosts specified in 'ignore' will not be considered; pass 'nil' if
// no blacklist is desired.
func (hdb *HostDB) RandomHosts(n int, ignore []modules.NetAddress) (hosts []modules.HostDBEntry) {
	hdb.mu.Lock()
	defer hdb.mu.Unlock()
	if hdb.isEmpty() {
		return
	}

	// These will be restored after selection is finished.
	var removedEntries []*hostEntry

	// Remove hosts that we want to ignore.
	for _, addr := range ignore {
		node, exists := hdb.activeHosts[addr]
		if !exists {
			continue
		}
		node.removeNode()
		delete(hdb.activeHosts, addr)
		removedEntries = append(removedEntries, node.hostEntry)
	}

	// Pick a host, remove it from the tree, and repeat until we have n hosts
	// or the tree is empty.
	for len(hosts) < n && !hdb.isEmpty() {
		randWeight, err := rand.Int(rand.Reader, hdb.hostTree.weight.Big())
		if err != nil {
			build.Critical("rand.Int is returning an error:", err)
			break
		}
		node, err := hdb.hostTree.nodeAtWeight(types.NewCurrency(randWeight))
		if err != nil {
			build.Critical("nodeAtWeight is returning an error:", err)
			break
		}

		// Only return the host if it is accepting contracts.
		if node.hostEntry.HostDBEntry.AcceptingContracts {
			hosts = append(hosts, node.hostEntry.HostDBEntry)
		}
		removedEntries = append(removedEntries, node.hostEntry)
		node.removeNode()
		delete(hdb.activeHosts, node.hostEntry.NetAddress)
	}

	// Add back all of the entries that got removed.
	for i := range removedEntries {
		hdb.insertNode(removedEntries[i])
	}
	return hosts
}
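// The loop above is sampling without replacement: draw an element, set it
// aside so it cannot be drawn again, and restore everything when done. The
// sketch below shows that structure with plain slices instead of the weighted
// tree, and math/rand instead of crypto/rand, purely for illustration.
package main

import (
	"fmt"
	"math/rand"
)

// sampleWithoutReplacement draws up to n elements from pool, never repeating
// an element, and leaves the caller's slice unchanged. It mirrors the
// remove/collect/reinsert structure of RandomHosts, minus the weighting and
// the AcceptingContracts filter.
func sampleWithoutReplacement(pool []string, n int) []string {
	working := append([]string(nil), pool...) // copy so pool is untouched
	var picked []string
	for len(picked) < n && len(working) > 0 {
		i := rand.Intn(len(working))
		picked = append(picked, working[i])
		working = append(working[:i], working[i+1:]...) // "remove the node"
	}
	return picked
}

func main() {
	hosts := []string{"host1", "host2", "host3", "host4"}
	fmt.Println(sampleWithoutReplacement(hosts, 2))
	fmt.Println(hosts) // unchanged
}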
// Write takes the input data and writes it to the file.
func (cf *closeableFile) Write(b []byte) (int, error) {
	// Sanity check - close should not have been called yet.
	if cf.closed {
		build.Critical("cannot write to the file after it has been closed")
	}
	return cf.File.Write(b)
}
// nodeAtWeight grabs an element in the tree that appears at the given weight.
// Though the tree has an arbitrary sorting, a sufficiently random weight will
// pull a random element. The tree is searched through in a post-ordered way.
func (hn *hostNode) nodeAtWeight(weight types.Currency) (*hostNode, error) {
	// Sanity check - weight must be less than the total weight of the tree.
	if weight.Cmp(hn.weight) > 0 {
		return nil, errOverweight
	}

	// Check if the left or right child should be returned.
	if hn.left != nil {
		if weight.Cmp(hn.left.weight) < 0 {
			return hn.left.nodeAtWeight(weight)
		}
		weight = weight.Sub(hn.left.weight) // Search from 0th index of right side.
	}
	if hn.right != nil && weight.Cmp(hn.right.weight) < 0 {
		return hn.right.nodeAtWeight(weight)
	}

	// Sanity check
	if build.DEBUG && !hn.taken {
		build.Critical("nodeAtWeight should not be returning a nil entry")
	}

	// Return the root entry.
	return hn, nil
}
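// The descent is easier to see with plain integers. Below is a minimal,
// self-contained sketch (not the hostdb's actual tree type) of the same
// logic: subtract the left subtree's weight when passing it, so each node is
// selected with probability proportional to its own weight.
package main

import "fmt"

// node is a simplified weighted tree node; weight is the node's own weight
// plus the weights of both subtrees.
type node struct {
	name        string
	ownWeight   int
	weight      int
	left, right *node
}

// nodeAtWeight descends the tree the same way as hostNode.nodeAtWeight:
// left subtree first, then the right subtree, then the node itself.
func (n *node) nodeAtWeight(w int) *node {
	if n.left != nil {
		if w < n.left.weight {
			return n.left.nodeAtWeight(w)
		}
		w -= n.left.weight // search from the 0th index of the right side
	}
	if n.right != nil && w < n.right.weight {
		return n.right.nodeAtWeight(w)
	}
	return n
}

func main() {
	// A root with own weight 1 and two leaves of weight 3 and 6; total 10.
	left := &node{name: "A", ownWeight: 3, weight: 3}
	right := &node{name: "B", ownWeight: 6, weight: 6}
	root := &node{name: "root", ownWeight: 1, weight: 10, left: left, right: right}

	// Weights 0-2 land on A, 3-8 on B, and 9 on the root itself.
	for _, w := range []int{0, 2, 3, 8, 9} {
		fmt.Println(w, "->", root.nodeAtWeight(w).name)
	}
}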
// redundancy returns the redundancy of the least redundant chunk. A file
// becomes available when this redundancy is >= 1. Assumes that every piece is
// unique within a file contract. NaN is returned if the file has size 0.
func (f *file) redundancy() float64 {
	if f.size == 0 {
		return math.NaN()
	}
	piecesPerChunk := make([]int, f.numChunks())
	// If the file has non-0 size then the number of chunks should also be
	// non-0. Therefore the f.size == 0 conditional block above must appear
	// before this check.
	if len(piecesPerChunk) == 0 {
		build.Critical("cannot get redundancy of a file with 0 chunks")
		return math.NaN()
	}
	for _, fc := range f.contracts {
		for _, p := range fc.Pieces {
			piecesPerChunk[p.Chunk]++
		}
	}
	minPieces := piecesPerChunk[0]
	for _, numPieces := range piecesPerChunk {
		if numPieces < minPieces {
			minPieces = numPieces
		}
	}
	return float64(minPieces) / float64(f.erasureCode.MinPieces())
}
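// A worked example of the calculation with illustrative numbers: a file whose
// erasure code needs 10 pieces per chunk, and whose three chunks currently
// have 12, 10, and 15 pieces stored. The least redundant chunk drives the
// result.
package main

import "fmt"

func main() {
	// Illustrative values only.
	minPiecesNeeded := 10               // erasureCode.MinPieces()
	piecesPerChunk := []int{12, 10, 15} // pieces actually stored, per chunk

	minPieces := piecesPerChunk[0]
	for _, n := range piecesPerChunk {
		if n < minPieces {
			minPieces = n
		}
	}
	// 10 / 10 = 1.0: the file is just barely recoverable from the network.
	fmt.Println(float64(minPieces) / float64(minPiecesNeeded))
}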
// isSane checks that required assumptions about the storage obligation are
// correct.
func (so storageObligation) isSane() error {
	// There should be an origin transaction set.
	if len(so.OriginTransactionSet) == 0 {
		build.Critical("origin transaction set is empty")
		return errInsaneOriginSetSize
	}

	// The final transaction of the origin transaction set should have one
	// file contract.
	final := len(so.OriginTransactionSet) - 1
	fcCount := len(so.OriginTransactionSet[final].FileContracts)
	if fcCount != 1 {
		build.Critical("wrong number of file contracts associated with storage obligation:", fcCount)
		return errInsaneOriginSetFileContract
	}

	// The file contract in the final transaction of the origin transaction
	// set should have two valid proof outputs and two missed proof outputs.
	lenVPOs := len(so.OriginTransactionSet[final].FileContracts[0].ValidProofOutputs)
	lenMPOs := len(so.OriginTransactionSet[final].FileContracts[0].MissedProofOutputs)
	if lenVPOs != 2 || lenMPOs != 2 {
		build.Critical("file contract has wrong number of VPOs and MPOs, expecting 2 each:", lenVPOs, lenMPOs)
		return errInsaneFileContractOutputCounts
	}

	// If there is a revision transaction set, there should be one file
	// contract revision in the final transaction.
	if len(so.RevisionTransactionSet) > 0 {
		// Note: the revision transaction set is indexed here, not the origin
		// transaction set.
		final = len(so.RevisionTransactionSet) - 1
		fcrCount := len(so.RevisionTransactionSet[final].FileContractRevisions)
		if fcrCount != 1 {
			build.Critical("wrong number of file contract revisions in final transaction of revision transaction set:", fcrCount)
			return errInsaneRevisionSetRevisionCount
		}

		// The file contract revision in the final transaction of the revision
		// transaction set should have two valid proof outputs and two missed
		// proof outputs.
		lenVPOs = len(so.RevisionTransactionSet[final].FileContractRevisions[0].NewValidProofOutputs)
		lenMPOs = len(so.RevisionTransactionSet[final].FileContractRevisions[0].NewMissedProofOutputs)
		if lenVPOs != 2 || lenMPOs != 2 {
			build.Critical("file contract has wrong number of VPOs and MPOs, expecting 2 each:", lenVPOs, lenMPOs)
			return errInsaneFileContractRevisionOutputCounts
		}
	}
	return nil
}
// threadedScan is an ongoing function which will query the full set of hosts
// every few hours to see who is online and available for uploading.
func (hdb *HostDB) threadedScan() {
	defer hdb.threadGroup.Done()
	for {
		// Determine who to scan. At most 'maxActiveHosts' will be scanned,
		// starting with the active hosts followed by a random selection of the
		// inactive hosts.
		func() {
			hdb.mu.Lock()
			defer hdb.mu.Unlock()

			// Scan all active hosts.
			for _, host := range hdb.activeHosts {
				hdb.scanHostEntry(host.hostEntry)
			}

			// Assemble all of the inactive hosts into a single array.
			var entries []*hostEntry
			for _, entry := range hdb.allHosts {
				_, exists := hdb.activeHosts[entry.NetAddress]
				if !exists {
					entries = append(entries, entry)
				}
			}

			// Generate a random ordering of up to inactiveHostCheckupQuantity
			// hosts.
			hostOrder, err := crypto.Perm(len(entries))
			if err != nil {
				hdb.log.Println("ERR: could not generate random permutation:", err)
			}

			// Scan each host.
			for i := 0; i < len(hostOrder) && i < inactiveHostCheckupQuantity; i++ {
				hdb.scanHostEntry(entries[hostOrder[i]])
			}
		}()

		// Sleep for a random amount of time before doing another round of
		// scanning. The minimums and maximums keep the scan time reasonable,
		// while the randomness prevents the scanning from always happening at
		// the same time of day or week.
		maxBig := big.NewInt(int64(maxScanSleep))
		minBig := big.NewInt(int64(minScanSleep))
		randSleep, err := rand.Int(rand.Reader, maxBig.Sub(maxBig, minBig))
		if err != nil {
			build.Critical(err)
			// If there's an error, sleep for the default amount of time.
			defaultBig := big.NewInt(int64(defaultScanSleep))
			randSleep = defaultBig.Sub(defaultBig, minBig)
		}

		select {
		// Awaken and exit if the hostdb is closing.
		case <-hdb.closeChan:
			return
		case <-time.After(time.Duration(randSleep.Int64()) + minScanSleep):
		}
	}
}
// NewCurrency creates a Currency value from a big.Int. Undefined behavior
// occurs if a negative input is used.
func NewCurrency(b *big.Int) (c Currency) {
	if b.Sign() < 0 {
		build.Critical(ErrNegativeCurrency)
	} else {
		c.i = *b
	}
	return
}
// RegisterConnectCall registers a name and RPCFunc to be called on a peer
// upon connecting.
func (g *Gateway) RegisterConnectCall(name string, fn modules.RPCFunc) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.initRPCs[name]; ok {
		build.Critical("ConnectCall already registered: " + name)
	}
	g.initRPCs[name] = fn
}
// UnregisterRPC unregisters an RPC and removes the corresponding RPCFunc from
// g.handlers. Future calls to the RPC by peers will fail.
func (g *Gateway) UnregisterRPC(name string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.handlers[handlerName(name)]; !ok {
		build.Critical("RPC not registered: " + name)
	}
	delete(g.handlers, handlerName(name))
}
// RegisterRPC registers an RPCFunc as a handler for a given identifier. To
// call an RPC, use gateway.RPC, supplying the same identifier given to
// RegisterRPC. Identifiers should always use PascalCase. The first 8
// characters of an identifier should be unique, as the identifier used
// internally is truncated to 8 bytes.
func (g *Gateway) RegisterRPC(name string, fn modules.RPCFunc) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.handlers[handlerName(name)]; ok {
		build.Critical("RPC already registered: " + name)
	}
	g.handlers[handlerName(name)] = fn
}
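// The truncation warning in the doc comment can be demonstrated with a small
// self-contained sketch. The rpcID type and handlerName function below are
// assumptions made for illustration, not necessarily the gateway's exact
// implementation; they only show why identifiers must differ within their
// first 8 characters.
package main

import "fmt"

// rpcID is assumed here to be a fixed 8-byte array.
type rpcID [8]byte

// handlerName truncates an RPC name to its 8-byte identifier. Names shorter
// than 8 bytes are zero-padded; longer names are cut off.
func handlerName(name string) (id rpcID) {
	copy(id[:], name)
	return
}

func main() {
	a := handlerName("ShareNodesV1")
	b := handlerName("ShareNodesV2")
	fmt.Println(string(a[:]), string(b[:])) // both "ShareNod" - they collide
	fmt.Println(a == b)                     // true
}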
// UnregisterConnectCall unregisters an on-connect call and removes the
// corresponding RPCFunc from g.initRPCs. Future connections to peers will not
// trigger the RPC to be called on them.
func (g *Gateway) UnregisterConnectCall(name string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.initRPCs[name]; !ok {
		build.Critical("ConnectCall not registered: " + name)
	}
	delete(g.initRPCs, name)
}
// init runs a series of sanity checks to verify that the constants have sane
// values.
func init() {
	// The revision submission buffer should be at least as large as the
	// resubmission timeout, because there should be time to perform
	// resubmission if the first attempt to submit the revision fails.
	if revisionSubmissionBuffer < resubmissionTimeout {
		build.Critical("revision submission buffer needs to be larger than or equal to the resubmission timeout")
	}
}
// Sub returns a new Currency value c = x - y. Behavior is undefined when
// x < y.
func (x Currency) Sub(y Currency) (c Currency) {
	if x.Cmp(y) < 0 {
		c = x
		build.Critical(ErrNegativeCurrency)
	} else {
		c.i.Sub(&x.i, &y.i)
	}
	return
}
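// A simplified, self-contained sketch of the clamp-and-report pattern above.
// The currency type here is illustrative only; the real type is
// types.Currency, and the real code reports the underflow via build.Critical.
package main

import (
	"fmt"
	"math/big"
)

// currency is a simplified stand-in for types.Currency: a non-negative
// big.Int.
type currency struct{ i big.Int }

// sub mirrors the behavior of Currency.Sub: if x < y, the result is clamped
// to x instead of going negative, and the caller would normally be warned.
func (x currency) sub(y currency) (c currency) {
	if x.i.Cmp(&y.i) < 0 {
		c = x // clamp; the real code also calls build.Critical(ErrNegativeCurrency)
	} else {
		c.i.Sub(&x.i, &y.i)
	}
	return
}

func main() {
	a := currency{*big.NewInt(5)}
	b := currency{*big.NewInt(8)}
	fmt.Println(b.sub(a).i.String()) // 3
	fmt.Println(a.sub(b).i.String()) // 5: clamped, never negative
}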
// MulRat returns a new Currency value c = x * y, where y is a big.Rat.
func (x Currency) MulRat(y *big.Rat) (c Currency) {
	if y.Sign() < 0 {
		build.Critical(ErrNegativeCurrency)
	} else {
		c.i.Mul(&x.i, y.Num())
		c.i.Div(&c.i, y.Denom())
	}
	return
}
// ExternalSettings returns the host's external settings. These values cannot
// be set by the user (the host is configured through InternalSettings), and
// are the values that get displayed to other hosts on the network.
func (h *Host) ExternalSettings() modules.HostExternalSettings {
	h.mu.RLock()
	defer h.mu.RUnlock()
	err := h.tg.Add()
	if err != nil {
		build.Critical("Call to ExternalSettings after close")
	}
	defer h.tg.Done()
	return h.externalSettings()
}
// FinancialMetrics returns information about the financial commitments,
// rewards, and activities of the host.
func (h *Host) FinancialMetrics() modules.HostFinancialMetrics {
	h.mu.RLock()
	defer h.mu.RUnlock()
	err := h.tg.Add()
	if err != nil {
		build.Critical("Call to FinancialMetrics after close")
	}
	defer h.tg.Done()
	return h.financialMetrics
}
// CPUMining indicates whether the cpu miner is running.
func (m *Miner) CPUMining() bool {
	if err := m.tg.Add(); err != nil {
		build.Critical(err)
	}
	defer m.tg.Done()
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.miningOn
}
// CPUHashrate returns an estimated cpu hashrate.
func (m *Miner) CPUHashrate() int {
	if err := m.tg.Add(); err != nil {
		build.Critical(err)
	}
	defer m.tg.Done()
	m.mu.Lock()
	defer m.mu.Unlock()
	return int(m.hashRate)
}
// StopCPUMining will stop the cpu miner. If the cpu miner is already stopped,
// nothing will happen.
func (m *Miner) StopCPUMining() {
	if err := m.tg.Add(); err != nil {
		build.Critical(err)
	}
	defer m.tg.Done()
	m.mu.Lock()
	defer m.mu.Unlock()
	m.hashRate = 0
	m.miningOn = false
}
// StartCPUMining will start a single threaded cpu miner. If the miner is
// already running, nothing will happen.
func (m *Miner) StartCPUMining() {
	if err := m.tg.Add(); err != nil {
		build.Critical(err)
	}
	defer m.tg.Done()
	m.mu.Lock()
	defer m.mu.Unlock()
	m.miningOn = true
	go m.threadedMine()
}
// remove disconnects from a host and adds it to the blacklist.
func (p *hostPool) remove(addr modules.NetAddress) {
	for i, h := range p.hosts {
		if h.Address() == addr {
			h.Close()
			p.hosts = append(p.hosts[:i], p.hosts[i+1:]...)
			p.blacklist = append(p.blacklist, addr)
			return
		}
	}
	build.Critical("could not remove host from pool: no record of host", addr)
}
// COMPATv0.4.0 - until the first 10e3 blocks have been archived, MulFloat is
// needed while verifying the first set of blocks.
//
// MulFloat returns a new Currency value c = x * y, where y is a float64.
// Behavior is undefined when y is negative.
func (x Currency) MulFloat(y float64) (c Currency) {
	if y < 0 {
		build.Critical(ErrNegativeCurrency)
	} else {
		cRat := new(big.Rat).Mul(
			new(big.Rat).SetInt(&x.i),
			new(big.Rat).SetFloat64(y),
		)
		c.i.Div(cRat.Num(), cRat.Denom())
	}
	return
}
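// A small self-contained illustration of the big.Rat path above: the float is
// converted to an exact rational, multiplied against the integer value, and
// the result is floored by dividing the numerator by the denominator. The
// values here are made up for demonstration.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(1000) // integer amount, standing in for the Currency value
	y := 0.3              // float multiplier

	// Convert the float to an exact rational and multiply.
	cRat := new(big.Rat).Mul(
		new(big.Rat).SetInt(x),
		new(big.Rat).SetFloat64(y),
	)

	// Floor the result back to an integer: numerator / denominator.
	c := new(big.Int).Div(cRat.Num(), cRat.Denom())
	// Prints 299, not 300: float64 cannot represent 0.3 exactly, and the
	// division floors the slightly-too-small product.
	fmt.Println(c)
}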
// Close closes the file and sets the closed flag.
func (cf *closeableFile) Close() error {
	// Sanity check - close should not have been called yet.
	if cf.closed {
		build.Critical("cannot close the file; already closed")
	}
	// Ensure that all data has actually hit the disk.
	if err := cf.Sync(); err != nil {
		return err
	}
	cf.closed = true
	return cf.File.Close()
}
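// A minimal self-contained sketch of the wrapper pattern used by
// closeableFile, assuming it simply embeds *os.File; the double-close
// reporting is reduced to a plain error here instead of build.Critical.
package main

import (
	"errors"
	"fmt"
	"os"
)

// closeableFile wraps an *os.File and remembers whether Close has been
// called, so that double closes and writes-after-close can be detected.
// This is a simplified stand-in for the real type.
type closeableFile struct {
	*os.File
	closed bool
}

func (cf *closeableFile) Close() error {
	if cf.closed {
		return errors.New("file already closed") // the real code calls build.Critical
	}
	if err := cf.Sync(); err != nil { // flush to disk before closing
		return err
	}
	cf.closed = true
	return cf.File.Close()
}

func main() {
	f, err := os.CreateTemp("", "example")
	if err != nil {
		fmt.Println(err)
		return
	}
	cf := &closeableFile{File: f}
	cf.Write([]byte("hello")) // delegated to the embedded *os.File
	fmt.Println(cf.Close())   // <nil>
	fmt.Println(cf.Close())   // file already closed
}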
// ProcessConsensusChange will update the miner's most recent block.
func (m *Miner) ProcessConsensusChange(cc modules.ConsensusChange) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Adjust the height of the miner. The miner height is initialized to
	// zero, but the genesis block is actually height zero. For the genesis
	// block only, the height will be left at zero.
	//
	// Checking the height here eliminates the need to initialize the miner to
	// an underflowed types.BlockHeight, which was deemed the worse of the two
	// evils.
	if m.persist.Height != 0 || cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID() != types.GenesisBlock.ID() {
		m.persist.Height -= types.BlockHeight(len(cc.RevertedBlocks))
		m.persist.Height += types.BlockHeight(len(cc.AppliedBlocks))
	}

	// Update the unsolved block.
	var exists1, exists2 bool
	m.persist.UnsolvedBlock.ParentID = cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID()
	m.persist.Target, exists1 = m.cs.ChildTarget(m.persist.UnsolvedBlock.ParentID)
	m.persist.UnsolvedBlock.Timestamp, exists2 = m.cs.MinimumValidChildTimestamp(m.persist.UnsolvedBlock.ParentID)
	if !exists1 {
		build.Critical("miner was unable to find parent id of an unsolved block in the consensus set")
	}
	if !exists2 {
		build.Critical("miner was unable to find child timestamp of an unsolved block in the consensus set")
	}

	// There is a new parent block, so the source block should be updated to
	// keep the stale rate as low as possible.
	m.newSourceBlock()
	m.persist.RecentChange = cc.ID

	// Save the new consensus information.
	err := m.save()
	if err != nil {
		m.log.Println("ERROR: could not save during ProcessConsensusChange:", err)
	}
}
// LatestBlockFacts returns a set of statistics about the blockchain as they
// appeared at the latest block height in the explorer's consensus set.
func (e *Explorer) LatestBlockFacts() modules.BlockFacts {
	var bf blockFacts
	err := e.db.View(func(tx *bolt.Tx) error {
		var height types.BlockHeight
		err := dbGetInternal(internalBlockHeight, &height)(tx)
		if err != nil {
			return err
		}
		return e.dbGetBlockFacts(height, &bf)(tx)
	})
	if err != nil {
		build.Critical(err)
	}
	return bf.BlockFacts
}
// contractEndHeight returns the height at which the Contractor's contracts
// end. If there are no contracts, it returns zero.
func (c *Contractor) contractEndHeight() types.BlockHeight {
	var endHeight types.BlockHeight
	for _, contract := range c.contracts {
		endHeight = contract.EndHeight()
		break
	}
	// Sanity check: all contracts should have the same EndHeight.
	if build.DEBUG {
		for _, contract := range c.contracts {
			if contract.EndHeight() != endHeight {
				build.Critical("all contracts should have EndHeight", endHeight, "-- got", contract.EndHeight())
			}
		}
	}
	return endHeight
}
// SubmitHeader accepts a block header.
func (m *Miner) SubmitHeader(bh types.BlockHeader) error {
	// Because a call to managedSubmitBlock is required at the end of this
	// function, the first part needs to be wrapped in an anonymous function
	// for lock safety.
	var b types.Block
	err := func() error {
		m.mu.Lock()
		defer m.mu.Unlock()

		// Lookup the block that corresponds to the provided header.
		nonce := bh.Nonce
		bh.Nonce = [8]byte{}
		bPointer, bExists := m.blockMem[bh]
		arbData, arbExists := m.arbDataMem[bh]
		if !bExists || !arbExists {
			return errLateHeader
		}

		// Block is going to be passed to external memory, but the memory
		// pointed to by the transactions slice is still being modified -
		// needs to be copied. Same with the memory being pointed to by the
		// arb data slice.
		b = *bPointer
		txns := make([]types.Transaction, len(b.Transactions))
		copy(txns, b.Transactions)
		b.Transactions = txns
		b.Transactions[0].ArbitraryData = [][]byte{arbData[:]}
		b.Nonce = nonce

		// Sanity check - block should have same id as header.
		bh.Nonce = nonce
		if types.BlockID(crypto.HashObject(bh)) != b.ID() {
			build.Critical("block reconstruction failed")
		}
		return nil
	}()
	if err != nil {
		m.log.Println("ERROR during call to SubmitHeader, pre SubmitBlock:", err)
		return err
	}
	err = m.managedSubmitBlock(b)
	if err != nil {
		m.log.Println("ERROR returned by managedSubmitBlock:", err)
		return err
	}
	return nil
}
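// The copy in the middle of SubmitHeader matters because assigning
// b = *bPointer only copies the slice header, not the backing array; mutating
// b.Transactions[0] would otherwise also mutate the miner's in-memory block.
// A minimal self-contained illustration of that pitfall (illustrative types
// only):
package main

import "fmt"

type block struct {
	transactions []string
}

func main() {
	original := &block{transactions: []string{"coinbase", "payment"}}

	// Shallow copy: the struct is copied, but both values still point at the
	// same backing array.
	shallow := *original
	shallow.transactions[0] = "modified"
	fmt.Println(original.transactions[0]) // "modified" - the original changed too

	// Deep-copying the slice first, as SubmitHeader does, keeps the original intact.
	original.transactions[0] = "coinbase"
	safe := *original
	txns := make([]string, len(safe.transactions))
	copy(txns, safe.transactions)
	safe.transactions = txns
	safe.transactions[0] = "modified"
	fmt.Println(original.transactions[0]) // "coinbase"
}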
// BlocksMined returns the number of good blocks and stale blocks that have
// been mined by the miner.
func (m *Miner) BlocksMined() (goodBlocks, staleBlocks int) {
	if err := m.tg.Add(); err != nil {
		build.Critical(err)
	}
	defer m.tg.Done()
	m.mu.Lock()
	defer m.mu.Unlock()

	for _, blockID := range m.persist.BlocksFound {
		if m.cs.InCurrentPath(blockID) {
			goodBlocks++
		} else {
			staleBlocks++
		}
	}
	return
}
// verifySettings reads a signed HostSettings object from conn, validates the
// signature, and checks for discrepancies between the known settings and the
// received settings. If there is a discrepancy, the hostDB is notified. The
// received settings are returned.
func verifySettings(conn net.Conn, host modules.HostDBEntry) (modules.HostDBEntry, error) {
	// Convert the host key (types.SiaPublicKey) to a crypto.PublicKey.
	if host.PublicKey.Algorithm != types.SignatureEd25519 || len(host.PublicKey.Key) != crypto.PublicKeySize {
		build.Critical("hostdb did not filter out host with wrong signature algorithm:", host.PublicKey.Algorithm)
		return modules.HostDBEntry{}, errors.New("host used unsupported signature algorithm")
	}
	var pk crypto.PublicKey
	copy(pk[:], host.PublicKey.Key)

	// Read the signed host settings.
	var recvSettings modules.HostExternalSettings
	if err := crypto.ReadSignedObject(conn, &recvSettings, modules.NegotiateMaxHostExternalSettingsLen, pk); err != nil {
		return modules.HostDBEntry{}, errors.New("couldn't read host's settings: " + err.Error())
	}
	// TODO: check recvSettings against host.HostExternalSettings. If there is
	// a discrepancy, write the error to conn.
	if recvSettings.NetAddress != host.NetAddress {
		// For now, just overwrite the NetAddress, since we know that
		// host.NetAddress works (it was the one we dialed to get conn).
		recvSettings.NetAddress = host.NetAddress
	}
	host.HostExternalSettings = recvSettings
	return host, nil
}