示例#1
0
文件: update.go 项目: kustomzone/Sia
// revertHistory reverts any transaction history that was destroyed by reverted
// blocks in the consensus change.
func (w *Wallet) revertHistory(cc modules.ConsensusChange) {
	for _, block := range cc.RevertedBlocks {
		// Wallet transactions were appended in block order, so any relevant
		// transactions from a reverted block sit at the tail of
		// w.processedTransactions. Walk the block's transactions in reverse
		// and pop each one that matches the tail.
		for i := len(block.Transactions) - 1; i >= 0; i-- {
			txid := block.Transactions[i].ID()
			n := len(w.processedTransactions)
			if n == 0 || w.processedTransactions[n-1].TransactionID != txid {
				continue
			}
			w.processedTransactions = w.processedTransactions[:n-1]
			delete(w.processedTransactionMap, txid)
		}

		// If any miner payout paid to one of the wallet's keys, a synthetic
		// miner-payout transaction (keyed by the block id) was recorded for
		// this block; pop it as well.
		for _, mp := range block.MinerPayouts {
			if _, exists := w.keys[mp.UnlockHash]; exists {
				w.processedTransactions = w.processedTransactions[:len(w.processedTransactions)-1]
				delete(w.processedTransactionMap, types.TransactionID(block.ID()))
				break
			}
		}
		w.consensusSetHeight--
	}
}
示例#2
0
// buildTransactionSet returns the blocks and transactions that are associated
// with a set of transaction ids.
//
// An id that names a block's miner-payout pseudo-transaction (the block id
// typecast to a transaction id) resolves to the block itself and is appended
// to blocks; every other id resolves to the matching transaction inside its
// containing block and is appended to txns.
func (srv *Server) buildTransactionSet(txids []types.TransactionID) (txns []ExplorerTransaction, blocks []ExplorerBlock) {
	for _, txid := range txids {
		// Get the block containing the transaction - in the case of miner
		// payouts, the block might be the transaction.
		block, height, exists := srv.explorer.Transaction(txid)
		if !exists && build.DEBUG {
			panic("explorer pointing to nonexistent txn")
		}

		// Check if the block is the transaction.
		if types.TransactionID(block.ID()) == txid {
			blocks = append(blocks, srv.buildExplorerBlock(height, block))
		} else {
			// Find the transaction within the block with the correct id.
			for _, t := range block.Transactions {
				if t.ID() == txid {
					txns = append(txns, srv.buildExplorerTransaction(height, block.ID(), t))
					break
				}
			}
		}
	}
	return txns, blocks
}
示例#3
0
文件: update.go 项目: mantyr/Sia
// ProcessConsensusChange follows the most recent changes to the consensus set,
// including parsing new blocks and updating the utxo sets.
//
// For each reverted block the corresponding index entries and cumulative
// counters are removed; for each applied block they are added. The various
// id->transaction-set maps are lazily initialized on first insert.
func (e *Explorer) ProcessConsensusChange(cc modules.ConsensusChange) {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Update cumulative stats for reverted blocks.
	for _, block := range cc.RevertedBlocks {
		// Delete the block from the list of active blocks.
		bid := block.ID()
		tbid := types.TransactionID(bid)
		e.blockchainHeight--
		delete(e.blockHashes, bid)
		delete(e.transactionHashes, tbid) // Miner payouts are a transaction.

		// Catalog the removed miner payouts.
		for j, payout := range block.MinerPayouts {
			scoid := block.MinerPayoutID(uint64(j))
			delete(e.siacoinOutputIDs[scoid], tbid)
			delete(e.unlockHashes[payout.UnlockHash], tbid)
			e.minerPayoutCount--
		}

		// Update cumulative stats for reverted transactions.
		for _, txn := range block.Transactions {
			txid := txn.ID()
			e.transactionCount--
			delete(e.transactionHashes, txid)

			for _, sci := range txn.SiacoinInputs {
				delete(e.siacoinOutputIDs[sci.ParentID], txid)
				delete(e.unlockHashes[sci.UnlockConditions.UnlockHash()], txid)
				e.siacoinInputCount--
			}
			for k, sco := range txn.SiacoinOutputs {
				delete(e.siacoinOutputIDs[txn.SiacoinOutputID(uint64(k))], txid)
				delete(e.unlockHashes[sco.UnlockHash], txid)
				e.siacoinOutputCount--
			}
			for k, fc := range txn.FileContracts {
				fcid := txn.FileContractID(uint64(k))
				delete(e.fileContractIDs[fcid], txid)
				delete(e.unlockHashes[fc.UnlockHash], txid)
				for l, sco := range fc.ValidProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				for l, sco := range fc.MissedProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				e.fileContractCount--
				e.totalContractCost = e.totalContractCost.Sub(fc.Payout)
				e.totalContractSize = e.totalContractSize.Sub(types.NewCurrency64(fc.FileSize))
			}
			for _, fcr := range txn.FileContractRevisions {
				delete(e.fileContractIDs[fcr.ParentID], txid)
				delete(e.unlockHashes[fcr.UnlockConditions.UnlockHash()], txid)
				delete(e.unlockHashes[fcr.NewUnlockHash], txid)
				for l, sco := range fcr.NewValidProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				for l, sco := range fcr.NewMissedProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				e.fileContractRevisionCount--
				e.totalContractSize = e.totalContractSize.Sub(types.NewCurrency64(fcr.NewFileSize))
				e.totalRevisionVolume = e.totalRevisionVolume.Sub(types.NewCurrency64(fcr.NewFileSize))
			}
			for _, sp := range txn.StorageProofs {
				delete(e.fileContractIDs[sp.ParentID], txid)
				e.storageProofCount--
			}
			for _, sfi := range txn.SiafundInputs {
				delete(e.siafundOutputIDs[sfi.ParentID], txid)
				delete(e.unlockHashes[sfi.UnlockConditions.UnlockHash()], txid)
				delete(e.unlockHashes[sfi.ClaimUnlockHash], txid)
				e.siafundInputCount--
			}
			for k, sfo := range txn.SiafundOutputs {
				sfoid := txn.SiafundOutputID(uint64(k))
				delete(e.siafundOutputIDs[sfoid], txid)
				delete(e.unlockHashes[sfo.UnlockHash], txid)
				e.siafundOutputCount--
			}
			for range txn.MinerFees {
				e.minerFeeCount--
			}
			for range txn.ArbitraryData {
				e.arbitraryDataCount--
			}
			for range txn.TransactionSignatures {
				e.transactionSignatureCount--
			}
		}
	}

	// Update cumulative stats for applied blocks.
	for _, block := range cc.AppliedBlocks {
		// Add the block to the list of active blocks.
		bid := block.ID()
		tbid := types.TransactionID(bid)
		e.blockchainHeight++
		e.blockHashes[bid] = e.blockchainHeight
		e.transactionHashes[tbid] = e.blockchainHeight // Miner payouts are a transaction.

		// Catalog the new miner payouts.
		for j, payout := range block.MinerPayouts {
			scoid := block.MinerPayoutID(uint64(j))
			_, exists := e.siacoinOutputIDs[scoid]
			if !exists {
				e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
			}
			e.siacoinOutputIDs[scoid][tbid] = struct{}{}
			_, exists = e.unlockHashes[payout.UnlockHash]
			if !exists {
				e.unlockHashes[payout.UnlockHash] = make(map[types.TransactionID]struct{})
			}
			e.unlockHashes[payout.UnlockHash][tbid] = struct{}{}
			e.minerPayoutCount++
		}

		// Update cumulative stats for applied transactions.
		for _, txn := range block.Transactions {
			// Add the transaction to the list of active transactions.
			txid := txn.ID()
			e.transactionCount++
			e.transactionHashes[txid] = e.blockchainHeight

			for _, sci := range txn.SiacoinInputs {
				_, exists := e.siacoinOutputIDs[sci.ParentID]
				if build.DEBUG && !exists {
					panic("siacoin input without siacoin output")
				} else if !exists {
					e.siacoinOutputIDs[sci.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.siacoinOutputIDs[sci.ParentID][txid] = struct{}{}
				_, exists = e.unlockHashes[sci.UnlockConditions.UnlockHash()]
				if build.DEBUG && !exists {
					panic("unlock conditions without a parent unlock hash")
				} else if !exists {
					e.unlockHashes[sci.UnlockConditions.UnlockHash()] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sci.UnlockConditions.UnlockHash()][txid] = struct{}{}
				e.siacoinInputCount++
			}
			for j, sco := range txn.SiacoinOutputs {
				scoid := txn.SiacoinOutputID(uint64(j))
				_, exists := e.siacoinOutputIDs[scoid]
				if !exists {
					e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
				}
				e.siacoinOutputIDs[scoid][txid] = struct{}{}
				_, exists = e.unlockHashes[sco.UnlockHash]
				if !exists {
					e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
				}
				// Use the cached txid rather than recomputing txn.ID(),
				// consistent with every other insertion in this loop.
				e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				e.siacoinOutputCount++
			}
			for k, fc := range txn.FileContracts {
				fcid := txn.FileContractID(uint64(k))
				_, exists := e.fileContractIDs[fcid]
				if !exists {
					e.fileContractIDs[fcid] = make(map[types.TransactionID]struct{})
				}
				e.fileContractIDs[fcid][txid] = struct{}{}
				_, exists = e.unlockHashes[fc.UnlockHash]
				if !exists {
					e.unlockHashes[fc.UnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[fc.UnlockHash][txid] = struct{}{}
				for l, sco := range fc.ValidProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				for l, sco := range fc.MissedProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				e.fileContractCount++
				e.totalContractCost = e.totalContractCost.Add(fc.Payout)
				e.totalContractSize = e.totalContractSize.Add(types.NewCurrency64(fc.FileSize))
			}
			for _, fcr := range txn.FileContractRevisions {
				_, exists := e.fileContractIDs[fcr.ParentID]
				if build.DEBUG && !exists {
					panic("revision without entry in file contract list")
				} else if !exists {
					e.fileContractIDs[fcr.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.fileContractIDs[fcr.ParentID][txid] = struct{}{}
				_, exists = e.unlockHashes[fcr.UnlockConditions.UnlockHash()]
				if build.DEBUG && !exists {
					panic("unlock conditions without unlock hash")
				} else if !exists {
					e.unlockHashes[fcr.UnlockConditions.UnlockHash()] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[fcr.UnlockConditions.UnlockHash()][txid] = struct{}{}
				_, exists = e.unlockHashes[fcr.NewUnlockHash]
				if !exists {
					e.unlockHashes[fcr.NewUnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[fcr.NewUnlockHash][txid] = struct{}{}
				for l, sco := range fcr.NewValidProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				for l, sco := range fcr.NewMissedProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				e.fileContractRevisionCount++
				e.totalContractSize = e.totalContractSize.Add(types.NewCurrency64(fcr.NewFileSize))
				e.totalRevisionVolume = e.totalRevisionVolume.Add(types.NewCurrency64(fcr.NewFileSize))
			}
			for _, sp := range txn.StorageProofs {
				_, exists := e.fileContractIDs[sp.ParentID]
				if build.DEBUG && !exists {
					panic("storage proof without file contract parent")
				} else if !exists {
					e.fileContractIDs[sp.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.fileContractIDs[sp.ParentID][txid] = struct{}{}
				e.storageProofCount++
			}
			for _, sfi := range txn.SiafundInputs {
				_, exists := e.siafundOutputIDs[sfi.ParentID]
				if build.DEBUG && !exists {
					panic("siafund input without corresponding output")
				} else if !exists {
					e.siafundOutputIDs[sfi.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.siafundOutputIDs[sfi.ParentID][txid] = struct{}{}
				_, exists = e.unlockHashes[sfi.UnlockConditions.UnlockHash()]
				if build.DEBUG && !exists {
					panic("unlock conditions without unlock hash")
				} else if !exists {
					e.unlockHashes[sfi.UnlockConditions.UnlockHash()] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sfi.UnlockConditions.UnlockHash()][txid] = struct{}{}
				_, exists = e.unlockHashes[sfi.ClaimUnlockHash]
				if !exists {
					e.unlockHashes[sfi.ClaimUnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sfi.ClaimUnlockHash][txid] = struct{}{}
				e.siafundInputCount++
			}
			for k, sfo := range txn.SiafundOutputs {
				sfoid := txn.SiafundOutputID(uint64(k))
				_, exists := e.siafundOutputIDs[sfoid]
				if !exists {
					e.siafundOutputIDs[sfoid] = make(map[types.TransactionID]struct{})
				}
				e.siafundOutputIDs[sfoid][txid] = struct{}{}
				_, exists = e.unlockHashes[sfo.UnlockHash]
				if !exists {
					e.unlockHashes[sfo.UnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sfo.UnlockHash][txid] = struct{}{}
				e.siafundOutputCount++
			}
			for range txn.MinerFees {
				e.minerFeeCount++
			}
			for range txn.ArbitraryData {
				e.arbitraryDataCount++
			}
			for range txn.TransactionSignatures {
				e.transactionSignatureCount++
			}
		}
	}

	// Compute the changes in the active set.
	for _, diff := range cc.FileContractDiffs {
		if diff.Direction == modules.DiffApply {
			e.activeContractCount++
			e.activeContractCost = e.activeContractCost.Add(diff.FileContract.Payout)
			e.activeContractSize = e.activeContractSize.Add(types.NewCurrency64(diff.FileContract.FileSize))
		} else {
			e.activeContractCount--
			e.activeContractCost = e.activeContractCost.Sub(diff.FileContract.Payout)
			e.activeContractSize = e.activeContractSize.Sub(types.NewCurrency64(diff.FileContract.FileSize))
		}
	}

	// Set the id of the current block.
	e.currentBlock = cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID()
}
示例#4
0
// explorerHashHandler handles GET requests to /explorer/hash/:hash.
func (srv *Server) explorerHashHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	// The hash is scanned as an address, because an address can be typecast to
	// all other necessary types, and will correctly decode hashes whether or
	// not they have a checksum.
	hash, err := scanAddress(ps.ByName("hash"))
	if err != nil {
		writeError(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Try the hash as a block id.
	if block, height, exists := srv.explorer.Block(types.BlockID(hash)); exists {
		writeJSON(w, ExplorerHashGET{
			HashType: "blockid",
			Block:    srv.buildExplorerBlock(height, block),
		})
		return
	}

	// Try the hash as a transaction id.
	if block, height, exists := srv.explorer.Transaction(types.TransactionID(hash)); exists {
		var txn types.Transaction
		for _, t := range block.Transactions {
			if t.ID() == types.TransactionID(hash) {
				txn = t
			}
		}
		writeJSON(w, ExplorerHashGET{
			HashType:    "transactionid",
			Transaction: srv.buildExplorerTransaction(height, block.ID(), txn),
		})
		return
	}

	// Try each id type that resolves to a set of transactions, in order. The
	// unlock hash is deliberately checked last because unlock hashes do not
	// have collision-free guarantees: someone can intentionally create an
	// unlock hash that collides with another object id. They will not be able
	// to use the unlock hash, but checking it last means the collision cannot
	// shadow the legitimate object in the explorer's hash lookup.
	lookups := []struct {
		hashType string
		find     func() []types.TransactionID
	}{
		{"siacoinoutputid", func() []types.TransactionID { return srv.explorer.SiacoinOutputID(types.SiacoinOutputID(hash)) }},
		{"filecontractid", func() []types.TransactionID { return srv.explorer.FileContractID(types.FileContractID(hash)) }},
		{"siafundoutputid", func() []types.TransactionID { return srv.explorer.SiafundOutputID(types.SiafundOutputID(hash)) }},
		{"unlockhash", func() []types.TransactionID { return srv.explorer.UnlockHash(types.UnlockHash(hash)) }},
	}
	for _, l := range lookups {
		txids := l.find()
		if len(txids) == 0 {
			continue
		}
		txns, blocks := srv.buildTransactionSet(txids)
		writeJSON(w, ExplorerHashGET{
			HashType:     l.hashType,
			Blocks:       blocks,
			Transactions: txns,
		})
		return
	}

	// Hash not found, return an error.
	writeError(w, "unrecognized hash used as input to /explorer/hash", http.StatusBadRequest)
}
示例#5
0
文件: addblock.go 项目: zoutaiqi/Sia
// addBlockDB parses a block and adds it to the database. It is a no-op
// (returning nil) if the block is already present. The block is indexed by
// its id, by the height currently stored in e.blockchainHeight, and each of
// its miner payouts and transactions is indexed as well.
func (e *Explorer) addBlockDB(b types.Block) error {
	// Special case for the genesis block, which does not have a
	// valid parent, and for testing, as tests will not always use
	// blocks in consensus
	var blocktarget types.Target
	if b.ID() == e.genesisBlockID {
		blocktarget = types.RootDepth
		e.blockchainHeight = 0
	} else {
		var exists bool
		blocktarget, exists = e.cs.ChildTarget(b.ParentID)
		// The target is only sanity-checked in DEBUG builds; in "testing"
		// releases it is unconditionally overwritten with RootDepth so that
		// test blocks need not be in consensus.
		if build.DEBUG {
			if build.Release == "testing" {
				blocktarget = types.RootDepth
			} else if !exists {
				panic("Applied block not in consensus")
			}

		}
	}

	// Check if the block exists; a read-only view is enough for the lookup.
	var exists bool
	dbErr := e.db.View(func(tx *bolt.Tx) error {
		id := b.ID()
		block := tx.Bucket([]byte("Blocks")).Get(id[:])
		exists = block != nil
		return nil
	})
	if dbErr != nil {
		return dbErr
	}
	if exists {
		return nil
	}

	// Open a writable transaction; Rollback is a no-op after a successful
	// commit, so deferring it is safe on every path.
	tx, err := newBoltTx(e.db)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Construct the struct that will be inside the heights map
	blockStruct := blockData{
		Block:  b,
		Height: e.blockchainHeight,
	}

	// NOTE(review): the tx.addNewHash/putObject/addAddress helpers below have
	// their return values (if any) discarded — confirm they cannot fail, or
	// that failures surface via tx.commit().
	tx.addNewHash("Blocks", hashBlock, crypto.Hash(b.ID()), blockStruct)

	// Summary record served for height-based lookups.
	bSum := modules.ExplorerBlockData{
		ID:        b.ID(),
		Timestamp: b.Timestamp,
		Target:    blocktarget,
		Size:      uint64(len(encoding.Marshal(b))),
	}

	tx.putObject("Heights", e.blockchainHeight, bSum)
	tx.putObject("Hashes", crypto.Hash(b.ID()), hashBlock)

	// Insert the miner payouts as new outputs; payouts are attributed to the
	// block id typecast to a transaction id.
	for i, payout := range b.MinerPayouts {
		tx.addAddress(payout.UnlockHash, types.TransactionID(b.ID()))
		tx.addNewOutput(b.MinerPayoutID(uint64(i)), types.TransactionID(b.ID()))
	}

	// Insert each transaction, recording which block and index it came from.
	for i, txn := range b.Transactions {
		tx.addNewHash("Transactions", hashTransaction, crypto.Hash(txn.ID()), txInfo{b.ID(), i})
		tx.addTransaction(txn)
	}

	return tx.commit()
}
示例#6
0
// ProcessConsensusChange parses a consensus change to update the set of
// confirmed outputs known to the wallet.
func (w *Wallet) ProcessConsensusChange(cc modules.ConsensusChange) {
	// There are two different situations under which a subscribee calls
	// ProcessConsensusChange. The first is when w.subscribed is set to false
	// AND the mutex is already locked. The other situation is that subscribed
	// is set to true and is not going to be changed. Therefore there is no
	// race condition here. If w.subscribed is set to false, trying to grab the
	// lock would cause a deadlock.
	if w.subscribed {
		lockID := w.mu.Lock()
		defer w.mu.Unlock(lockID)
	}

	// Iterate through the output diffs (siacoin and siafund) and apply all of
	// them. Only apply the outputs that relate to unlock hashes we understand.
	for _, diff := range cc.SiacoinOutputDiffs {
		// Verify that the diff is relevant to the wallet.
		_, exists := w.keys[diff.SiacoinOutput.UnlockHash]
		if !exists {
			continue
		}

		// DiffApply adds the output; DiffRevert removes it. The DEBUG panics
		// guard against double-apply / double-revert.
		_, exists = w.siacoinOutputs[diff.ID]
		if diff.Direction == modules.DiffApply {
			if exists && build.DEBUG {
				panic("adding an existing output to wallet")
			}
			w.siacoinOutputs[diff.ID] = diff.SiacoinOutput
		} else {
			if !exists && build.DEBUG {
				panic("deleting nonexisting output from wallet")
			}
			delete(w.siacoinOutputs, diff.ID)
		}
	}
	for _, diff := range cc.SiafundOutputDiffs {
		// Verify that the diff is relevant to the wallet.
		_, exists := w.keys[diff.SiafundOutput.UnlockHash]
		if !exists {
			continue
		}

		_, exists = w.siafundOutputs[diff.ID]
		if diff.Direction == modules.DiffApply {
			if exists && build.DEBUG {
				panic("adding an existing output to wallet")
			}
			w.siafundOutputs[diff.ID] = diff.SiafundOutput
		} else {
			if !exists && build.DEBUG {
				panic("deleting nonexisting output from wallet")
			}
			delete(w.siafundOutputs, diff.ID)
		}
	}
	// Track the siafund pool so claim values can be computed below.
	for _, diff := range cc.SiafundPoolDiffs {
		if diff.Direction == modules.DiffApply {
			w.siafundPool = diff.Adjusted
		} else {
			w.siafundPool = diff.Previous
		}
	}

	// Iterate through the transactions and find every transaction somehow
	// related to the wallet. Wallet transactions must be removed in the same
	// order they were added.
	for _, block := range cc.RevertedBlocks {
		// Remove any transactions that have been reverted.
		for i := len(block.Transactions) - 1; i >= 0; i-- {
			// If the transaction is relevant to the wallet, it will be the
			// most recent transaction appended to w.processedTransactions.
			// Relevance can be determined just by looking at the last element
			// of w.processedTransactions.
			txn := block.Transactions[i]
			txid := txn.ID()
			if len(w.processedTransactions) > 0 && txid == w.processedTransactions[len(w.processedTransactions)-1].TransactionID {
				w.processedTransactions = w.processedTransactions[:len(w.processedTransactions)-1]
				delete(w.processedTransactionMap, txid)
			}
		}

		// Remove the miner payout transaction if applicable. The synthetic
		// miner-payout transaction is keyed by the block id (see the applied
		// branch below).
		for _, mp := range block.MinerPayouts {
			_, exists := w.keys[mp.UnlockHash]
			if exists {
				w.processedTransactions = w.processedTransactions[:len(w.processedTransactions)-1]
				delete(w.processedTransactionMap, types.TransactionID(block.ID()))
				break
			}
		}
		w.consensusSetHeight--
	}

	// Apply all of the new blocks.
	for _, block := range cc.AppliedBlocks {
		w.consensusSetHeight++
		// Apply the miner payout transaction if applicable. Miner payouts are
		// modeled as a pseudo-transaction whose id is the block id.
		minerPT := modules.ProcessedTransaction{
			Transaction:           types.Transaction{},
			TransactionID:         types.TransactionID(block.ID()),
			ConfirmationHeight:    w.consensusSetHeight,
			ConfirmationTimestamp: block.Timestamp,
		}
		relevant := false
		for i, mp := range block.MinerPayouts {
			_, exists := w.keys[mp.UnlockHash]
			if exists {
				relevant = true
			}
			minerPT.Outputs = append(minerPT.Outputs, modules.ProcessedOutput{
				FundType:       types.SpecifierMinerPayout,
				MaturityHeight: w.consensusSetHeight + types.MaturityDelay,
				WalletAddress:  exists,
				RelatedAddress: mp.UnlockHash,
				Value:          mp.Value,
			})
			// Remember the payout value so a later spend of this output can
			// report it as the input value.
			w.historicOutputs[types.OutputID(block.MinerPayoutID(uint64(i)))] = mp.Value
		}
		if relevant {
			w.processedTransactions = append(w.processedTransactions, minerPT)
			w.processedTransactionMap[minerPT.TransactionID] = &w.processedTransactions[len(w.processedTransactions)-1]
		}
		for _, txn := range block.Transactions {
			// A transaction is recorded only if at least one input or output
			// touches a wallet-owned unlock hash.
			relevant := false
			pt := modules.ProcessedTransaction{
				Transaction:           txn,
				TransactionID:         txn.ID(),
				ConfirmationHeight:    w.consensusSetHeight,
				ConfirmationTimestamp: block.Timestamp,
			}
			for _, sci := range txn.SiacoinInputs {
				_, exists := w.keys[sci.UnlockConditions.UnlockHash()]
				if exists {
					relevant = true
				}
				pt.Inputs = append(pt.Inputs, modules.ProcessedInput{
					FundType:       types.SpecifierSiacoinInput,
					WalletAddress:  exists,
					RelatedAddress: sci.UnlockConditions.UnlockHash(),
					Value:          w.historicOutputs[types.OutputID(sci.ParentID)],
				})
			}
			for i, sco := range txn.SiacoinOutputs {
				_, exists := w.keys[sco.UnlockHash]
				if exists {
					relevant = true
				}
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType:       types.SpecifierSiacoinOutput,
					MaturityHeight: w.consensusSetHeight,
					WalletAddress:  exists,
					RelatedAddress: sco.UnlockHash,
					Value:          sco.Value,
				})
				w.historicOutputs[types.OutputID(txn.SiacoinOutputID(i))] = sco.Value
			}
			for _, sfi := range txn.SiafundInputs {
				_, exists := w.keys[sfi.UnlockConditions.UnlockHash()]
				if exists {
					relevant = true
				}
				sfiValue := w.historicOutputs[types.OutputID(sfi.ParentID)]
				pt.Inputs = append(pt.Inputs, modules.ProcessedInput{
					FundType:       types.SpecifierSiafundInput,
					WalletAddress:  exists,
					RelatedAddress: sfi.UnlockConditions.UnlockHash(),
					Value:          sfiValue,
				})
				// Claim value is (current pool - pool at output creation) *
				// number of siafunds spent.
				// NOTE(review): no division by the total siafund count here —
				// confirm this matches the claim-payout formula used by the
				// consensus code.
				claimValue := w.siafundPool.Sub(w.historicClaimStarts[sfi.ParentID]).Mul(sfiValue)
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType:       types.SpecifierClaimOutput,
					MaturityHeight: w.consensusSetHeight + types.MaturityDelay,
					WalletAddress:  exists,
					RelatedAddress: sfi.ClaimUnlockHash,
					Value:          claimValue,
				})
			}
			for i, sfo := range txn.SiafundOutputs {
				_, exists := w.keys[sfo.UnlockHash]
				if exists {
					relevant = true
				}
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType:       types.SpecifierSiafundOutput,
					MaturityHeight: w.consensusSetHeight,
					WalletAddress:  exists,
					RelatedAddress: sfo.UnlockHash,
					Value:          sfo.Value,
				})
				w.historicOutputs[types.OutputID(txn.SiafundOutputID(i))] = sfo.Value
				w.historicClaimStarts[txn.SiafundOutputID(i)] = sfo.ClaimStart
			}
			// Miner fees are recorded as outputs but do not make the
			// transaction relevant on their own.
			for _, fee := range txn.MinerFees {
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType: types.SpecifierMinerFee,
					Value:    fee,
				})
			}
			if relevant {
				w.processedTransactions = append(w.processedTransactions, pt)
				w.processedTransactionMap[pt.TransactionID] = &w.processedTransactions[len(w.processedTransactions)-1]
			}
		}
	}
}
示例#7
0
// explorerHashHandler handles GET requests to /explorer/hash/:hash.
func (api *API) explorerHashHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	// Scan the hash as a hash. If that fails, try scanning the hash as an
	// address.
	hash, err := scanHash(ps.ByName("hash"))
	if err != nil {
		addr, addrErr := scanAddress(ps.ByName("hash"))
		if addrErr != nil {
			WriteError(w, Error{addrErr.Error()}, http.StatusBadRequest)
			return
		}
		hash = crypto.Hash(addr)
	}

	// TODO: lookups on the zero hash are too expensive to allow. Need a
	// better way to handle this case.
	if hash == (crypto.Hash{}) {
		WriteError(w, Error{"can't lookup the empty unlock hash"}, http.StatusBadRequest)
		return
	}

	// Try the hash as a block id.
	if block, height, exists := api.explorer.Block(types.BlockID(hash)); exists {
		WriteJSON(w, ExplorerHashGET{
			HashType: "blockid",
			Block:    api.buildExplorerBlock(height, block),
		})
		return
	}

	// Try the hash as a transaction id.
	if block, height, exists := api.explorer.Transaction(types.TransactionID(hash)); exists {
		var txn types.Transaction
		for _, t := range block.Transactions {
			if t.ID() == types.TransactionID(hash) {
				txn = t
			}
		}
		WriteJSON(w, ExplorerHashGET{
			HashType:    "transactionid",
			Transaction: api.buildExplorerTransaction(height, block.ID(), txn),
		})
		return
	}

	// Try each id type that resolves to a set of transactions, in order. The
	// unlock hash is deliberately checked last because unlock hashes do not
	// have collision-free guarantees: someone can intentionally create an
	// unlock hash that collides with another object id. They will not be able
	// to use the unlock hash, but checking it last means the collision cannot
	// shadow the legitimate object in the explorer's hash lookup.
	lookups := []struct {
		hashType string
		find     func() []types.TransactionID
	}{
		{"siacoinoutputid", func() []types.TransactionID { return api.explorer.SiacoinOutputID(types.SiacoinOutputID(hash)) }},
		{"filecontractid", func() []types.TransactionID { return api.explorer.FileContractID(types.FileContractID(hash)) }},
		{"siafundoutputid", func() []types.TransactionID { return api.explorer.SiafundOutputID(types.SiafundOutputID(hash)) }},
		{"unlockhash", func() []types.TransactionID { return api.explorer.UnlockHash(types.UnlockHash(hash)) }},
	}
	for _, l := range lookups {
		txids := l.find()
		if len(txids) == 0 {
			continue
		}
		txns, blocks := api.buildTransactionSet(txids)
		WriteJSON(w, ExplorerHashGET{
			HashType:     l.hashType,
			Blocks:       blocks,
			Transactions: txns,
		})
		return
	}

	// Hash not found, return an error.
	WriteError(w, Error{"unrecognized hash used as input to /explorer/hash"}, http.StatusBadRequest)
}
示例#8
0
文件: update.go 项目: CSSZiegler/Sia
// ProcessConsensusChange follows the most recent changes to the consensus set,
// including parsing new blocks and updating the utxo sets.
func (e *Explorer) ProcessConsensusChange(cc modules.ConsensusChange) {
	// A ConsensusChange must always carry at least one applied block; an empty
	// change indicates a programming error upstream.
	if len(cc.AppliedBlocks) == 0 {
		build.Critical("Explorer.ProcessConsensusChange called with a ConsensusChange that has no AppliedBlocks")
	}

	// All index mutations happen inside a single bolt write transaction so the
	// database is updated atomically per consensus change.
	err := e.db.Update(func(tx *bolt.Tx) (err error) {
		// use exception-style error handling to enable more concise update code
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("%v", r)
			}
		}()

		// get starting block height
		var blockheight types.BlockHeight
		err = dbGetInternal(internalBlockHeight, &blockheight)(tx)
		if err != nil {
			return err
		}

		// Update cumulative stats for reverted blocks. Each reverted block is
		// unwound by deleting every index entry that was added when the block
		// was applied.
		for _, block := range cc.RevertedBlocks {
			bid := block.ID()
			// A block ID doubles as the transaction ID of its miner payouts.
			tbid := types.TransactionID(bid)

			blockheight--
			dbRemoveBlockID(tx, bid)
			dbRemoveTransactionID(tx, tbid) // Miner payouts are a transaction

			// Look up the target the block was mined against; fall back to the
			// root target when consensus no longer knows the parent.
			// NOTE(review): presumably only the genesis parent is ever missing
			// — confirm against the consensus set's pruning behavior.
			target, exists := e.cs.ChildTarget(block.ParentID)
			if !exists {
				target = types.RootTarget
			}
			dbRemoveBlockTarget(tx, bid, target)

			// Remove miner payouts
			for j, payout := range block.MinerPayouts {
				scoid := block.MinerPayoutID(uint64(j))
				dbRemoveSiacoinOutputID(tx, scoid, tbid)
				dbRemoveUnlockHash(tx, payout.UnlockHash, tbid)
			}

			// Remove transactions: for each transaction, delete every id and
			// unlock-hash cross-reference that dbAdd* created on apply.
			for _, txn := range block.Transactions {
				txid := txn.ID()
				dbRemoveTransactionID(tx, txid)

				for _, sci := range txn.SiacoinInputs {
					dbRemoveSiacoinOutputID(tx, sci.ParentID, txid)
					dbRemoveUnlockHash(tx, sci.UnlockConditions.UnlockHash(), txid)
				}
				for k, sco := range txn.SiacoinOutputs {
					scoid := txn.SiacoinOutputID(uint64(k))
					dbRemoveSiacoinOutputID(tx, scoid, txid)
					dbRemoveUnlockHash(tx, sco.UnlockHash, txid)
					dbRemoveSiacoinOutput(tx, scoid)
				}
				for k, fc := range txn.FileContracts {
					fcid := txn.FileContractID(uint64(k))
					dbRemoveFileContractID(tx, fcid, txid)
					dbRemoveUnlockHash(tx, fc.UnlockHash, txid)
					// Both payout branches of the contract were indexed on
					// apply, so both must be unwound.
					for l, sco := range fc.ValidProofOutputs {
						scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l))
						dbRemoveSiacoinOutputID(tx, scoid, txid)
						dbRemoveUnlockHash(tx, sco.UnlockHash, txid)
					}
					for l, sco := range fc.MissedProofOutputs {
						scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l))
						dbRemoveSiacoinOutputID(tx, scoid, txid)
						dbRemoveUnlockHash(tx, sco.UnlockHash, txid)
					}
					dbRemoveFileContract(tx, fcid)
				}
				for _, fcr := range txn.FileContractRevisions {
					dbRemoveFileContractID(tx, fcr.ParentID, txid)
					dbRemoveUnlockHash(tx, fcr.UnlockConditions.UnlockHash(), txid)
					dbRemoveUnlockHash(tx, fcr.NewUnlockHash, txid)
					for l, sco := range fcr.NewValidProofOutputs {
						scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l))
						dbRemoveSiacoinOutputID(tx, scoid, txid)
						dbRemoveUnlockHash(tx, sco.UnlockHash, txid)
					}
					for l, sco := range fcr.NewMissedProofOutputs {
						scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l))
						dbRemoveSiacoinOutputID(tx, scoid, txid)
						dbRemoveUnlockHash(tx, sco.UnlockHash, txid)
					}
					// Remove the file contract revision from the revision chain.
					dbRemoveFileContractRevision(tx, fcr.ParentID)
				}
				for _, sp := range txn.StorageProofs {
					dbRemoveStorageProof(tx, sp.ParentID)
				}
				for _, sfi := range txn.SiafundInputs {
					dbRemoveSiafundOutputID(tx, sfi.ParentID, txid)
					dbRemoveUnlockHash(tx, sfi.UnlockConditions.UnlockHash(), txid)
					dbRemoveUnlockHash(tx, sfi.ClaimUnlockHash, txid)
				}
				for k, sfo := range txn.SiafundOutputs {
					sfoid := txn.SiafundOutputID(uint64(k))
					dbRemoveSiafundOutputID(tx, sfoid, txid)
					dbRemoveUnlockHash(tx, sfo.UnlockHash, txid)
				}
			}

			// remove the associated block facts
			dbRemoveBlockFacts(tx, bid)
		}

		// Update cumulative stats for applied blocks. This mirrors the revert
		// loop above: every dbRemove* there corresponds to a dbAdd* here.
		for _, block := range cc.AppliedBlocks {
			bid := block.ID()
			tbid := types.TransactionID(bid)

			// special handling for genesis block
			if bid == types.GenesisID {
				dbAddGenesisBlock(tx)
				continue
			}

			blockheight++
			dbAddBlockID(tx, bid, blockheight)
			dbAddTransactionID(tx, tbid, blockheight) // Miner payouts are a transaction

			target, exists := e.cs.ChildTarget(block.ParentID)
			if !exists {
				target = types.RootTarget
			}
			dbAddBlockTarget(tx, bid, target)

			// Catalog the new miner payouts.
			for j, payout := range block.MinerPayouts {
				scoid := block.MinerPayoutID(uint64(j))
				dbAddSiacoinOutputID(tx, scoid, tbid)
				dbAddUnlockHash(tx, payout.UnlockHash, tbid)
			}

			// Update cumulative stats for applied transactions.
			for _, txn := range block.Transactions {
				// Add the transaction to the list of active transactions.
				txid := txn.ID()
				dbAddTransactionID(tx, txid, blockheight)

				for _, sci := range txn.SiacoinInputs {
					dbAddSiacoinOutputID(tx, sci.ParentID, txid)
					dbAddUnlockHash(tx, sci.UnlockConditions.UnlockHash(), txid)
				}
				for j, sco := range txn.SiacoinOutputs {
					scoid := txn.SiacoinOutputID(uint64(j))
					dbAddSiacoinOutputID(tx, scoid, txid)
					dbAddUnlockHash(tx, sco.UnlockHash, txid)
					dbAddSiacoinOutput(tx, scoid, sco)
				}
				for k, fc := range txn.FileContracts {
					fcid := txn.FileContractID(uint64(k))
					dbAddFileContractID(tx, fcid, txid)
					dbAddUnlockHash(tx, fc.UnlockHash, txid)
					dbAddFileContract(tx, fcid, fc)
					// Index every potential payout of the contract so the
					// outputs are discoverable before a proof resolves it.
					for l, sco := range fc.ValidProofOutputs {
						scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l))
						dbAddSiacoinOutputID(tx, scoid, txid)
						dbAddUnlockHash(tx, sco.UnlockHash, txid)
					}
					for l, sco := range fc.MissedProofOutputs {
						scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l))
						dbAddSiacoinOutputID(tx, scoid, txid)
						dbAddUnlockHash(tx, sco.UnlockHash, txid)
					}
				}
				for _, fcr := range txn.FileContractRevisions {
					dbAddFileContractID(tx, fcr.ParentID, txid)
					dbAddUnlockHash(tx, fcr.UnlockConditions.UnlockHash(), txid)
					dbAddUnlockHash(tx, fcr.NewUnlockHash, txid)
					for l, sco := range fcr.NewValidProofOutputs {
						scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l))
						dbAddSiacoinOutputID(tx, scoid, txid)
						dbAddUnlockHash(tx, sco.UnlockHash, txid)
					}
					for l, sco := range fcr.NewMissedProofOutputs {
						scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l))
						dbAddSiacoinOutputID(tx, scoid, txid)
						dbAddUnlockHash(tx, sco.UnlockHash, txid)
					}
					dbAddFileContractRevision(tx, fcr.ParentID, fcr)
				}
				for _, sp := range txn.StorageProofs {
					dbAddFileContractID(tx, sp.ParentID, txid)
					dbAddStorageProof(tx, sp.ParentID, sp)
				}
				for _, sfi := range txn.SiafundInputs {
					dbAddSiafundOutputID(tx, sfi.ParentID, txid)
					dbAddUnlockHash(tx, sfi.UnlockConditions.UnlockHash(), txid)
					dbAddUnlockHash(tx, sfi.ClaimUnlockHash, txid)
				}
				for k, sfo := range txn.SiafundOutputs {
					sfoid := txn.SiafundOutputID(uint64(k))
					dbAddSiafundOutputID(tx, sfoid, txid)
					dbAddUnlockHash(tx, sfo.UnlockHash, txid)
					dbAddSiafundOutput(tx, sfoid, sfo)
				}
			}

			// calculate and add new block facts, if possible
			// (facts require the parent's facts to already exist in the DB).
			if tx.Bucket(bucketBlockFacts).Get(encoding.Marshal(block.ParentID)) != nil {
				facts := dbCalculateBlockFacts(tx, e.cs, block)
				dbAddBlockFacts(tx, facts)
			}
		}

		// Compute the changes in the active set. Note, because this is calculated
		// at the end instead of in a loop, the historic facts may contain
		// inaccuracies about the active set. This should not be a problem except
		// for large reorgs.
		// TODO: improve this
		currentBlock, exists := e.cs.BlockAtHeight(blockheight)
		if !exists {
			build.Critical("consensus is missing block", blockheight)
		}
		currentID := currentBlock.ID()
		var facts blockFacts
		// If facts for the current block are missing, the active-set update is
		// silently skipped (err from dbGetAndDecode is intentionally dropped).
		err = dbGetAndDecode(bucketBlockFacts, currentID, &facts)(tx)
		if err == nil {
			for _, diff := range cc.FileContractDiffs {
				if diff.Direction == modules.DiffApply {
					facts.ActiveContractCount++
					facts.ActiveContractCost = facts.ActiveContractCost.Add(diff.FileContract.Payout)
					facts.ActiveContractSize = facts.ActiveContractSize.Add(types.NewCurrency64(diff.FileContract.FileSize))
				} else {
					facts.ActiveContractCount--
					facts.ActiveContractCost = facts.ActiveContractCost.Sub(diff.FileContract.Payout)
					facts.ActiveContractSize = facts.ActiveContractSize.Sub(types.NewCurrency64(diff.FileContract.FileSize))
				}
			}
			err = tx.Bucket(bucketBlockFacts).Put(encoding.Marshal(currentID), encoding.Marshal(facts))
			if err != nil {
				return err
			}
		}

		// set final blockheight
		err = dbSetInternal(internalBlockHeight, blockheight)(tx)
		if err != nil {
			return err
		}

		// set change ID so the subscription can resume from here after restart
		err = dbSetInternal(internalRecentChange, cc.ID)(tx)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		build.Critical("explorer update failed:", err)
	}
}
Example #9 (votes: 0)
File: update.go — Project: cfromknecht/Sia
// ProcessConsensusChange follows the most recent changes to the consensus set,
// including parsing new blocks and updating the utxo sets. Reverted blocks are
// unwound first, then applied blocks are indexed; finally the active contract
// set is adjusted from the file contract diffs. The explorer lock is held for
// the full duration of the update.
func (e *Explorer) ProcessConsensusChange(cc modules.ConsensusChange) {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Update cumulative stats for reverted blocks.
	for _, block := range cc.RevertedBlocks {
		bid := block.ID()
		// A block ID doubles as the transaction ID of its miner payouts.
		tbid := types.TransactionID(bid)

		// Update all of the explorer statistics.
		e.currentBlock = block.ID()
		e.blockchainHeight--
		e.target = e.blockTargets[block.ID()]
		e.timestamp = block.Timestamp
		if e.blockchainHeight > types.MaturityDelay {
			e.maturityTimestamp = e.historicFacts[e.blockchainHeight-types.MaturityDelay].timestamp
		}
		// Roll the difficulty window back one block and refresh the hashrate
		// estimate over the trailing hashrateEstimationBlocks window.
		e.blocksDifficulty = e.blocksDifficulty.SubtractDifficulties(e.target)
		if e.blockchainHeight > hashrateEstimationBlocks {
			e.blocksDifficulty = e.blocksDifficulty.AddDifficulties(e.historicFacts[e.blockchainHeight-hashrateEstimationBlocks].target)
			secondsPassed := e.timestamp - e.historicFacts[e.blockchainHeight-hashrateEstimationBlocks].timestamp
			e.estimatedHashrate = e.blocksDifficulty.Difficulty().Div(types.NewCurrency64(uint64(secondsPassed)))
		}
		e.totalCoins = types.CalculateNumSiacoins(e.blockchainHeight)

		// Delete the block from the list of active blocks.
		delete(e.blockHashes, bid)
		delete(e.transactionHashes, tbid) // Miner payouts are a transaction.

		// Catalog the removed miner payouts.
		for j, payout := range block.MinerPayouts {
			scoid := block.MinerPayoutID(uint64(j))
			delete(e.siacoinOutputIDs[scoid], tbid)
			delete(e.unlockHashes[payout.UnlockHash], tbid)
			e.minerPayoutCount--
		}

		// Update cumulative stats for reverted transactions. Each loop below
		// undoes the corresponding indexing done in the applied-blocks loop.
		for _, txn := range block.Transactions {
			txid := txn.ID()
			e.transactionCount--
			delete(e.transactionHashes, txid)

			for _, sci := range txn.SiacoinInputs {
				delete(e.siacoinOutputIDs[sci.ParentID], txid)
				delete(e.unlockHashes[sci.UnlockConditions.UnlockHash()], txid)
				e.siacoinInputCount--
			}
			for k, sco := range txn.SiacoinOutputs {
				delete(e.siacoinOutputIDs[txn.SiacoinOutputID(uint64(k))], txid)
				delete(e.unlockHashes[sco.UnlockHash], txid)
				e.siacoinOutputCount--
			}
			for k, fc := range txn.FileContracts {
				fcid := txn.FileContractID(uint64(k))
				delete(e.fileContractIDs[fcid], txid)
				delete(e.unlockHashes[fc.UnlockHash], txid)
				for l, sco := range fc.ValidProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				for l, sco := range fc.MissedProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				e.fileContractCount--
				e.totalContractCost = e.totalContractCost.Sub(fc.Payout)
				e.totalContractSize = e.totalContractSize.Sub(types.NewCurrency64(fc.FileSize))
			}
			for _, fcr := range txn.FileContractRevisions {
				delete(e.fileContractIDs[fcr.ParentID], txid)
				delete(e.unlockHashes[fcr.UnlockConditions.UnlockHash()], txid)
				delete(e.unlockHashes[fcr.NewUnlockHash], txid)
				// Remove the file contract revision from the revision chain.
				e.fileContractHistories[fcr.ParentID].revisions = e.fileContractHistories[fcr.ParentID].revisions[:len(e.fileContractHistories[fcr.ParentID].revisions)-1]
				for l, sco := range fcr.NewValidProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				for l, sco := range fcr.NewMissedProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l))
					delete(e.siacoinOutputIDs[scoid], txid)
					delete(e.unlockHashes[sco.UnlockHash], txid)
				}
				e.fileContractRevisionCount--
				e.totalContractSize = e.totalContractSize.Sub(types.NewCurrency64(fcr.NewFileSize))
				e.totalRevisionVolume = e.totalRevisionVolume.Sub(types.NewCurrency64(fcr.NewFileSize))
			}
			for _, sp := range txn.StorageProofs {
				delete(e.fileContractIDs[sp.ParentID], txid)
				e.storageProofCount--
			}
			for _, sfi := range txn.SiafundInputs {
				delete(e.siafundOutputIDs[sfi.ParentID], txid)
				delete(e.unlockHashes[sfi.UnlockConditions.UnlockHash()], txid)
				delete(e.unlockHashes[sfi.ClaimUnlockHash], txid)
				e.siafundInputCount--
			}
			for k, sfo := range txn.SiafundOutputs {
				sfoid := txn.SiafundOutputID(uint64(k))
				delete(e.siafundOutputIDs[sfoid], txid)
				delete(e.unlockHashes[sfo.UnlockHash], txid)
				e.siafundOutputCount--
			}
			// Only the counts are tracked for fees, arbitrary data, and
			// signatures, so no index cleanup is needed for them.
			for range txn.MinerFees {
				e.minerFeeCount--
			}
			for range txn.ArbitraryData {
				e.arbitraryDataCount--
			}
			for range txn.TransactionSignatures {
				e.transactionSignatureCount--
			}
		}
	}
	// Delete all of the block facts for the reverted blocks.
	e.historicFacts = e.historicFacts[:len(e.historicFacts)-len(cc.RevertedBlocks)]

	// Update cumulative stats for applied blocks.
	for _, block := range cc.AppliedBlocks {
		// Add the block to the list of active blocks.
		bid := block.ID()
		tbid := types.TransactionID(bid)
		e.currentBlock = block.ID()
		e.blockchainHeight++
		var exists bool
		e.target, exists = e.cs.ChildTarget(block.ParentID)
		if !exists {
			e.target = types.RootTarget
		}
		e.timestamp = block.Timestamp
		if e.blockchainHeight > types.MaturityDelay {
			e.maturityTimestamp = e.historicFacts[e.blockchainHeight-types.MaturityDelay].timestamp
		}
		e.blocksDifficulty = e.blocksDifficulty.AddDifficulties(e.target)
		if e.blockchainHeight > hashrateEstimationBlocks {
			e.blocksDifficulty = e.blocksDifficulty.SubtractDifficulties(e.historicFacts[e.blockchainHeight-hashrateEstimationBlocks].target)
			secondsPassed := e.timestamp - e.historicFacts[e.blockchainHeight-hashrateEstimationBlocks].timestamp
			e.estimatedHashrate = e.blocksDifficulty.Difficulty().Div(types.NewCurrency64(uint64(secondsPassed)))
		}
		e.totalCoins = types.CalculateNumSiacoins(e.blockchainHeight)

		e.blockHashes[bid] = e.blockchainHeight
		e.transactionHashes[tbid] = e.blockchainHeight // Miner payouts are a transaction.
		e.blockTargets[bid] = e.target

		// Catalog the new miner payouts.
		for j, payout := range block.MinerPayouts {
			scoid := block.MinerPayoutID(uint64(j))
			_, exists := e.siacoinOutputIDs[scoid]
			if !exists {
				e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
			}
			e.siacoinOutputIDs[scoid][tbid] = struct{}{}
			_, exists = e.unlockHashes[payout.UnlockHash]
			if !exists {
				e.unlockHashes[payout.UnlockHash] = make(map[types.TransactionID]struct{})
			}
			e.unlockHashes[payout.UnlockHash][tbid] = struct{}{}
			e.minerPayoutCount++
		}

		// Update cumulative stats for applied transactions.
		for _, txn := range block.Transactions {
			// Add the transaction to the list of active transactions.
			txid := txn.ID()
			e.transactionCount++
			e.transactionHashes[txid] = e.blockchainHeight

			for _, sci := range txn.SiacoinInputs {
				// An input must reference an already-indexed output; in DEBUG
				// builds a missing parent is a fatal invariant violation.
				_, exists := e.siacoinOutputIDs[sci.ParentID]
				if build.DEBUG && !exists {
					panic("siacoin input without siacoin output")
				} else if !exists {
					e.siacoinOutputIDs[sci.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.siacoinOutputIDs[sci.ParentID][txid] = struct{}{}
				_, exists = e.unlockHashes[sci.UnlockConditions.UnlockHash()]
				if build.DEBUG && !exists {
					panic("unlock conditions without a parent unlock hash")
				} else if !exists {
					e.unlockHashes[sci.UnlockConditions.UnlockHash()] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sci.UnlockConditions.UnlockHash()][txid] = struct{}{}
				e.siacoinInputCount++
			}
			for j, sco := range txn.SiacoinOutputs {
				scoid := txn.SiacoinOutputID(uint64(j))
				_, exists := e.siacoinOutputIDs[scoid]
				if !exists {
					e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
				}
				e.siacoinOutputIDs[scoid][txid] = struct{}{}
				_, exists = e.unlockHashes[sco.UnlockHash]
				if !exists {
					e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
				}
				// Use the cached txid instead of recomputing txn.ID() per output.
				e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				e.siacoinOutputs[scoid] = sco
				e.siacoinOutputCount++
			}
			for k, fc := range txn.FileContracts {
				fcid := txn.FileContractID(uint64(k))
				_, exists := e.fileContractIDs[fcid]
				if !exists {
					e.fileContractIDs[fcid] = make(map[types.TransactionID]struct{})
				}
				e.fileContractIDs[fcid][txid] = struct{}{}
				_, exists = e.unlockHashes[fc.UnlockHash]
				if !exists {
					e.unlockHashes[fc.UnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[fc.UnlockHash][txid] = struct{}{}
				e.fileContractHistories[fcid] = &fileContractHistory{contract: fc}
				// Index every potential payout (valid and missed branches).
				for l, sco := range fc.ValidProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				for l, sco := range fc.MissedProofOutputs {
					scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				e.fileContractCount++
				e.totalContractCost = e.totalContractCost.Add(fc.Payout)
				e.totalContractSize = e.totalContractSize.Add(types.NewCurrency64(fc.FileSize))
			}
			for _, fcr := range txn.FileContractRevisions {
				_, exists := e.fileContractIDs[fcr.ParentID]
				if build.DEBUG && !exists {
					panic("revision without entry in file contract list")
				} else if !exists {
					e.fileContractIDs[fcr.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.fileContractIDs[fcr.ParentID][txid] = struct{}{}
				_, exists = e.unlockHashes[fcr.UnlockConditions.UnlockHash()]
				if build.DEBUG && !exists {
					panic("unlock conditions without unlock hash")
				} else if !exists {
					e.unlockHashes[fcr.UnlockConditions.UnlockHash()] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[fcr.UnlockConditions.UnlockHash()][txid] = struct{}{}
				_, exists = e.unlockHashes[fcr.NewUnlockHash]
				if !exists {
					e.unlockHashes[fcr.NewUnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[fcr.NewUnlockHash][txid] = struct{}{}
				for l, sco := range fcr.NewValidProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				for l, sco := range fcr.NewMissedProofOutputs {
					scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l))
					_, exists = e.siacoinOutputIDs[scoid]
					if !exists {
						e.siacoinOutputIDs[scoid] = make(map[types.TransactionID]struct{})
					}
					e.siacoinOutputIDs[scoid][txid] = struct{}{}
					_, exists = e.unlockHashes[sco.UnlockHash]
					if !exists {
						e.unlockHashes[sco.UnlockHash] = make(map[types.TransactionID]struct{})
					}
					e.unlockHashes[sco.UnlockHash][txid] = struct{}{}
				}
				e.fileContractRevisionCount++
				e.totalContractSize = e.totalContractSize.Add(types.NewCurrency64(fcr.NewFileSize))
				e.totalRevisionVolume = e.totalRevisionVolume.Add(types.NewCurrency64(fcr.NewFileSize))
				e.fileContractHistories[fcr.ParentID].revisions = append(e.fileContractHistories[fcr.ParentID].revisions, fcr)
			}
			for _, sp := range txn.StorageProofs {
				_, exists := e.fileContractIDs[sp.ParentID]
				if build.DEBUG && !exists {
					panic("storage proof without file contract parent")
				} else if !exists {
					e.fileContractIDs[sp.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.fileContractIDs[sp.ParentID][txid] = struct{}{}
				e.fileContractHistories[sp.ParentID].storageProof = sp
				e.storageProofCount++
			}
			for _, sfi := range txn.SiafundInputs {
				_, exists := e.siafundOutputIDs[sfi.ParentID]
				if build.DEBUG && !exists {
					panic("siafund input without corresponding output")
				} else if !exists {
					e.siafundOutputIDs[sfi.ParentID] = make(map[types.TransactionID]struct{})
				}
				e.siafundOutputIDs[sfi.ParentID][txid] = struct{}{}
				_, exists = e.unlockHashes[sfi.UnlockConditions.UnlockHash()]
				if build.DEBUG && !exists {
					panic("unlock conditions without unlock hash")
				} else if !exists {
					e.unlockHashes[sfi.UnlockConditions.UnlockHash()] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sfi.UnlockConditions.UnlockHash()][txid] = struct{}{}
				_, exists = e.unlockHashes[sfi.ClaimUnlockHash]
				if !exists {
					e.unlockHashes[sfi.ClaimUnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sfi.ClaimUnlockHash][txid] = struct{}{}
				e.siafundInputCount++
			}
			for k, sfo := range txn.SiafundOutputs {
				sfoid := txn.SiafundOutputID(uint64(k))
				_, exists := e.siafundOutputIDs[sfoid]
				if !exists {
					e.siafundOutputIDs[sfoid] = make(map[types.TransactionID]struct{})
				}
				e.siafundOutputIDs[sfoid][txid] = struct{}{}
				_, exists = e.unlockHashes[sfo.UnlockHash]
				if !exists {
					e.unlockHashes[sfo.UnlockHash] = make(map[types.TransactionID]struct{})
				}
				e.unlockHashes[sfo.UnlockHash][txid] = struct{}{}
				e.siafundOutputs[sfoid] = sfo
				e.siafundOutputCount++
			}
			for range txn.MinerFees {
				e.minerFeeCount++
			}
			for range txn.ArbitraryData {
				e.arbitraryDataCount++
			}
			for range txn.TransactionSignatures {
				e.transactionSignatureCount++
			}
		}

		// Set the current block and copy over the historic facts.
		e.historicFacts = append(e.historicFacts, e.blockFacts)
	}

	// Compute the changes in the active set. Note, because this is calculated
	// at the end instead of in a loop, the historic facts may contain
	// inaccuracies about the active set. This should not be a problem except
	// for large reorgs.
	for _, diff := range cc.FileContractDiffs {
		if diff.Direction == modules.DiffApply {
			e.activeContractCount++
			e.activeContractCost = e.activeContractCost.Add(diff.FileContract.Payout)
			e.activeContractSize = e.activeContractSize.Add(types.NewCurrency64(diff.FileContract.FileSize))
		} else {
			e.activeContractCount--
			e.activeContractCost = e.activeContractCost.Sub(diff.FileContract.Payout)
			e.activeContractSize = e.activeContractSize.Sub(types.NewCurrency64(diff.FileContract.FileSize))
		}
	}
}
Example #10 (votes: 0)
File: update.go — Project: kustomzone/Sia
// applyHistory applies any transaction history that was introduced by the
// applied blocks. For each block it synthesizes a pseudo-transaction for the
// miner payouts and then processes every real transaction, appending entries
// to w.processedTransactions (and indexing them in w.processedTransactionMap)
// only when at least one address involved belongs to this wallet.
func (w *Wallet) applyHistory(cc modules.ConsensusChange) {
	for _, block := range cc.AppliedBlocks {
		w.consensusSetHeight++
		// Apply the miner payout transaction if applicable. The block ID is
		// reused as the transaction ID for this synthetic transaction.
		minerPT := modules.ProcessedTransaction{
			Transaction:           types.Transaction{},
			TransactionID:         types.TransactionID(block.ID()),
			ConfirmationHeight:    w.consensusSetHeight,
			ConfirmationTimestamp: block.Timestamp,
		}
		relevant := false
		for i, mp := range block.MinerPayouts {
			// Payouts to wallet-owned addresses make this block relevant.
			_, exists := w.keys[mp.UnlockHash]
			if exists {
				relevant = true
			}
			minerPT.Outputs = append(minerPT.Outputs, modules.ProcessedOutput{
				FundType:       types.SpecifierMinerPayout,
				MaturityHeight: w.consensusSetHeight + types.MaturityDelay,
				WalletAddress:  exists,
				RelatedAddress: mp.UnlockHash,
				Value:          mp.Value,
			})
			// Record the payout value so later inputs spending it can report it.
			w.historicOutputs[types.OutputID(block.MinerPayoutID(uint64(i)))] = mp.Value
		}
		if relevant {
			// NOTE(review): the map stores a pointer into the slice; a later
			// append can reallocate the backing array, leaving earlier map
			// entries pointing at stale copies — confirm whether any caller
			// mutates transactions through the map.
			w.processedTransactions = append(w.processedTransactions, minerPT)
			w.processedTransactionMap[minerPT.TransactionID] = &w.processedTransactions[len(w.processedTransactions)-1]
		}
		for _, txn := range block.Transactions {
			// relevant is tracked per transaction; shadows the outer variable.
			relevant := false
			pt := modules.ProcessedTransaction{
				Transaction:           txn,
				TransactionID:         txn.ID(),
				ConfirmationHeight:    w.consensusSetHeight,
				ConfirmationTimestamp: block.Timestamp,
			}
			for _, sci := range txn.SiacoinInputs {
				_, exists := w.keys[sci.UnlockConditions.UnlockHash()]
				if exists {
					relevant = true
				}
				pt.Inputs = append(pt.Inputs, modules.ProcessedInput{
					FundType:       types.SpecifierSiacoinInput,
					WalletAddress:  exists,
					RelatedAddress: sci.UnlockConditions.UnlockHash(),
					// Input values are looked up from outputs recorded earlier;
					// a missing entry silently yields the zero value.
					Value: w.historicOutputs[types.OutputID(sci.ParentID)],
				})
			}
			for i, sco := range txn.SiacoinOutputs {
				_, exists := w.keys[sco.UnlockHash]
				if exists {
					relevant = true
				}
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType:       types.SpecifierSiacoinOutput,
					MaturityHeight: w.consensusSetHeight,
					WalletAddress:  exists,
					RelatedAddress: sco.UnlockHash,
					Value:          sco.Value,
				})
				w.historicOutputs[types.OutputID(txn.SiacoinOutputID(i))] = sco.Value
			}
			for _, sfi := range txn.SiafundInputs {
				_, exists := w.keys[sfi.UnlockConditions.UnlockHash()]
				if exists {
					relevant = true
				}
				sfiValue := w.historicOutputs[types.OutputID(sfi.ParentID)]
				pt.Inputs = append(pt.Inputs, modules.ProcessedInput{
					FundType:       types.SpecifierSiafundInput,
					WalletAddress:  exists,
					RelatedAddress: sfi.UnlockConditions.UnlockHash(),
					Value:          sfiValue,
				})
				// Spending a siafund also generates a claim output: the pool
				// growth since the fund's claim start, scaled by fund count.
				claimValue := w.siafundPool.Sub(w.historicClaimStarts[sfi.ParentID]).Mul(sfiValue)
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType:       types.SpecifierClaimOutput,
					MaturityHeight: w.consensusSetHeight + types.MaturityDelay,
					WalletAddress:  exists,
					RelatedAddress: sfi.ClaimUnlockHash,
					Value:          claimValue,
				})
			}
			for i, sfo := range txn.SiafundOutputs {
				_, exists := w.keys[sfo.UnlockHash]
				if exists {
					relevant = true
				}
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType:       types.SpecifierSiafundOutput,
					MaturityHeight: w.consensusSetHeight,
					WalletAddress:  exists,
					RelatedAddress: sfo.UnlockHash,
					Value:          sfo.Value,
				})
				w.historicOutputs[types.OutputID(txn.SiafundOutputID(i))] = sfo.Value
				w.historicClaimStarts[txn.SiafundOutputID(i)] = sfo.ClaimStart
			}
			// Miner fees have no associated address; only the value is recorded.
			for _, fee := range txn.MinerFees {
				pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{
					FundType: types.SpecifierMinerFee,
					Value:    fee,
				})
			}
			if relevant {
				w.processedTransactions = append(w.processedTransactions, pt)
				w.processedTransactionMap[pt.TransactionID] = &w.processedTransactions[len(w.processedTransactions)-1]
			}
		}
	}
}