Code Example #1
File: editor.go Project: robvanmieghem/Sia
// cachedMerkleRoot calculates the root of a set of existing Merkle roots.
func cachedMerkleRoot(roots []crypto.Hash) crypto.Hash {
	tree := crypto.NewCachedTree(sectorHeight) // NOTE: height is not strictly necessary here
	for _, h := range roots {
		tree.Push(h)
	}
	return tree.Root()
}
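Each hash pushed into the cached tree is itself the Merkle root of one sector's data, which is what makes the combined root meaningful. A minimal usage sketch (the fileMerkleRoot helper is hypothetical; it assumes Sia's crypto.MerkleRoot function and that every sector is a full modules.SectorSize bytes):

// fileMerkleRoot is a hypothetical illustration: hash each sector's data,
// then combine the per-sector roots with cachedMerkleRoot from above. For
// full sectors the result should equal the Merkle root of the whole file.
func fileMerkleRoot(sectors [][]byte) crypto.Hash {
	roots := make([]crypto.Hash, 0, len(sectors))
	for _, sector := range sectors {
		roots = append(roots, crypto.MerkleRoot(sector)) // root over 64-byte segments
	}
	return cachedMerkleRoot(roots)
}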
Code Example #2
// verifyRevision checks that the revision pays the host correctly, and that
// the revision does not attempt any malicious or unexpected changes.
func verifyRevision(so *storageObligation, revision types.FileContractRevision, blockHeight types.BlockHeight, newRevenue, newCollateral types.Currency) error {
	// Check that the revision is well-formed.
	if len(revision.NewValidProofOutputs) != 2 || len(revision.NewMissedProofOutputs) != 3 {
		return errInsaneFileContractRevisionOutputCounts
	}

	// Check that the time to finalize and submit the file contract revision
	// has not already passed.
	if so.expiration()-revisionSubmissionBuffer <= blockHeight {
		return errLateRevision
	}

	oldFCR := so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0]

	// Check that all non-volatile fields are the same.
	if oldFCR.ParentID != revision.ParentID {
		return errReviseBadParent
	}
	if oldFCR.UnlockConditions.UnlockHash() != revision.UnlockConditions.UnlockHash() {
		return errReviseBadUnlockConditions
	}
	if oldFCR.NewRevisionNumber >= revision.NewRevisionNumber {
		return errReviseBadRevisionNumber
	}
	if revision.NewFileSize != uint64(len(so.SectorRoots))*modules.SectorSize {
		return errReviseBadNewFileSize
	}
	if oldFCR.NewWindowStart != revision.NewWindowStart {
		return errReviseBadNewWindowStart
	}
	if oldFCR.NewWindowEnd != revision.NewWindowEnd {
		return errReviseBadNewWindowEnd
	}
	if oldFCR.NewUnlockHash != revision.NewUnlockHash {
		return errReviseBadUnlockHash
	}

	// The new revenue comes out of the renter's valid outputs.
	if revision.NewValidProofOutputs[0].Value.Add(newRevenue).Cmp(oldFCR.NewValidProofOutputs[0].Value) > 0 {
		return errReviseBadRenterValidOutput
	}
	// The new revenue goes into the host's valid outputs.
	if oldFCR.NewValidProofOutputs[1].Value.Add(newRevenue).Cmp(revision.NewValidProofOutputs[1].Value) < 0 {
		return errReviseBadHostValidOutput
	}
	// The new revenue comes out of the renter's missed outputs.
	if revision.NewMissedProofOutputs[0].Value.Add(newRevenue).Cmp(oldFCR.NewMissedProofOutputs[0].Value) > 0 {
		return errReviseBadRenterMissedOutput
	}
	// The new collateral comes out of the host's missed outputs.
	if revision.NewMissedProofOutputs[1].Value.Add(newCollateral).Cmp(oldFCR.NewMissedProofOutputs[1].Value) < 0 {
		return errReviseBadCollateralDeduction
	}

	// The Merkle root is checked last because it is the most expensive check.
	log2SectorSize := uint64(0)
	for 1<<log2SectorSize < (modules.SectorSize / crypto.SegmentSize) {
		log2SectorSize++
	}
	ct := crypto.NewCachedTree(log2SectorSize)
	for _, root := range so.SectorRoots {
		ct.Push(root)
	}
	expectedMerkleRoot := ct.Root()
	if revision.NewFileMerkleRoot != expectedMerkleRoot {
		return errReviseBadFileMerkleRoot
	}

	return nil
}
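The height passed to crypto.NewCachedTree is recomputed with a small loop, and the same loop appears again in the storage-proof code in Example #3. A standalone sketch of that computation, assuming Sia's production constants (SectorSize = 1 << 22 bytes, SegmentSize = 64 bytes):

// cachedTreeHeight is a sketch of the loop used by verifyRevision: the
// smallest tree height that covers every segment in one sector. With the
// assumed constants a sector holds 2^16 segments, so it returns 16.
func cachedTreeHeight(sectorSize, segmentSize uint64) uint64 {
	height := uint64(0)
	for 1<<height < sectorSize/segmentSize {
		height++
	}
	return height
}

With those values, cachedTreeHeight(1<<22, 64) returns 16, which presumably matches the sectorHeight constant used in Example #1.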
Code Example #3
// threadedHandleActionItem will look at a storage obligation and determine
// which action is necessary for the storage obligation to succeed.
func (h *Host) threadedHandleActionItem(soid types.FileContractID, wg *sync.WaitGroup) {
	// The calling thread is responsible for calling Add on the wait group.
	defer wg.Done()

	// Lock the storage obligation in question.
	h.managedLockStorageObligation(soid)
	defer func() {
		h.managedUnlockStorageObligation(soid)
	}()

	// Convert the storage obligation id into a storage obligation.
	var err error
	var so storageObligation
	h.mu.RLock()
	blockHeight := h.blockHeight
	err = h.db.View(func(tx *bolt.Tx) error {
		so, err = getStorageObligation(tx, soid)
		return err
	})
	h.mu.RUnlock()
	if err != nil {
		h.log.Println("Could not get storage obligation:", err)
		return
	}

	// Check whether the storage obligation has already been completed.
	if so.ObligationStatus != obligationUnresolved {
		// Storage obligation has already been completed, skip action item.
		return
	}

	// Check whether the file contract has been seen. If not, resubmit and
	// queue another action item. Check for death. (signature should have a
	// kill height)
	if !so.OriginConfirmed {
		// Submit the transaction set again, try to get the transaction
		// confirmed.
		err := h.tpool.AcceptTransactionSet(so.OriginTransactionSet)
		if err != nil {
			h.log.Debugln("Could not get origin transaction set accepted", err)

			// Check if the transaction is invalid with the current consensus set.
			// If so, the transaction is highly unlikely to ever be confirmed, and
			// the storage obligation should be removed. This check should come
			// after logging the error so that the function can quit.
			//
			// TODO: If the host or tpool is behind consensus, might be difficult
			// to have certainty about the issue. If some but not all of the
			// parents are confirmed, might be some difficulty.
			_, t := err.(modules.ConsensusConflict)
			if t {
				h.log.Println("Consensus conflict on the origin transaction set, id", so.id())
				h.mu.Lock()
				err = h.removeStorageObligation(so, obligationRejected)
				h.mu.Unlock()
				if err != nil {
					h.log.Println("Error removing storage obligation:", err)
				}
				return
			}
		}

		// Queue another action item to check the status of the transaction.
		h.mu.Lock()
		err = h.queueActionItem(h.blockHeight+resubmissionTimeout, so.id())
		h.mu.Unlock()
		if err != nil {
			h.log.Println("Error queuing action item:", err)
		}
	}

	// Check if the file contract revision is ready for submission. Check for death.
	if !so.RevisionConfirmed && len(so.RevisionTransactionSet) > 0 && blockHeight > so.expiration()-revisionSubmissionBuffer {
		// Sanity check - there should be a file contract revision.
		rtsLen := len(so.RevisionTransactionSet)
		if rtsLen < 1 || len(so.RevisionTransactionSet[rtsLen-1].FileContractRevisions) != 1 {
			h.log.Critical("transaction revision marked as unconfirmed, yet there is no transaction revision")
			return
		}

		// Check if the revision has failed to submit correctly.
		if blockHeight > so.expiration() {
			// TODO: Check this error.
			//
			// TODO: this is not quite right, because a previous revision may
			// be confirmed, and the origin transaction may be confirmed, which
			// would confuse the revenue stuff a bit. Might happen frequently
			// due to the dynamic fee pool.
			h.log.Println("Full time has elapsed, but the revision transaction could not be submitted to consensus, id", so.id())
			h.mu.Lock()
			h.removeStorageObligation(so, obligationRejected)
			h.mu.Unlock()
			return
		}

		// Queue another action item to check the status of the transaction.
		h.mu.Lock()
		err := h.queueActionItem(blockHeight+resubmissionTimeout, so.id())
		h.mu.Unlock()
		if err != nil {
			h.log.Println("Error queuing action item:", err)
		}

		// Add a miner fee to the transaction and submit it to the blockchain.
		revisionTxnIndex := len(so.RevisionTransactionSet) - 1
		revisionParents := so.RevisionTransactionSet[:revisionTxnIndex]
		revisionTxn := so.RevisionTransactionSet[revisionTxnIndex]
		builder := h.wallet.RegisterTransaction(revisionTxn, revisionParents)
		_, feeRecommendation := h.tpool.FeeEstimation()
		if so.value().Div64(2).Cmp(feeRecommendation) < 0 {
			// There's no sense submitting the revision if the fee is more than
			// half of the anticipated revenue - fee market went up
			// unexpectedly, and the money that the renter paid to cover the
			// fees is no longer enough.
			return
		}
		txnSize := uint64(len(encoding.MarshalAll(so.RevisionTransactionSet)) + 300)
		requiredFee := feeRecommendation.Mul64(txnSize)
		err = builder.FundSiacoins(requiredFee)
		if err != nil {
			h.log.Println("Error funding transaction fees", err)
		}
		// AddMinerFee returns the index of the added fee rather than an
		// error, so there is no separate error to check for this call.
		builder.AddMinerFee(requiredFee)
		feeAddedRevisionTransactionSet, err := builder.Sign(true)
		if err != nil {
			h.log.Println("Error signing transaction", err)
		}
		err = h.tpool.AcceptTransactionSet(feeAddedRevisionTransactionSet)
		if err != nil {
			h.log.Println("Error submitting transaction to transaction pool", err)
		}
		so.TransactionFeesAdded = so.TransactionFeesAdded.Add(requiredFee)
		// return
	}

	// Check whether a storage proof is ready to be provided, and whether it
	// has been accepted. Check for death.
	if !so.ProofConfirmed && blockHeight >= so.expiration()+resubmissionTimeout {
		h.log.Debugln("Host is attempting a storage proof for", so.id())

		// If the window has closed, the host has failed and the obligation can
		// be removed. An obligation with no sector roots has nothing to prove
		// and is removed as well.
		if so.proofDeadline() < blockHeight || len(so.SectorRoots) == 0 {
			h.log.Debugln("storage proof not confirmed by deadline, id", so.id())
			h.mu.Lock()
			err := h.removeStorageObligation(so, obligationFailed)
			h.mu.Unlock()
			if err != nil {
				h.log.Println("Error removing storage obligation:", err)
			}
			return
		}

		// Get the index of the segment, and the index of the sector containing
		// the segment.
		segmentIndex, err := h.cs.StorageProofSegment(so.id())
		if err != nil {
			h.log.Debugln("Host got an error when fetching a storage proof segment:", err)
			return
		}
		sectorIndex := segmentIndex / (modules.SectorSize / crypto.SegmentSize)
		// Pull the corresponding sector into memory.
		sectorRoot := so.SectorRoots[sectorIndex]
		sectorBytes, err := h.ReadSector(sectorRoot)
		if err != nil {
			h.log.Debugln(err)
			return
		}

		// Build the storage proof for just the sector.
		sectorSegment := segmentIndex % (modules.SectorSize / crypto.SegmentSize)
		base, cachedHashSet := crypto.MerkleProof(sectorBytes, sectorSegment)

		// Using the sector, build a cached root.
		log2SectorSize := uint64(0)
		for 1<<log2SectorSize < (modules.SectorSize / crypto.SegmentSize) {
			log2SectorSize++
		}
		ct := crypto.NewCachedTree(log2SectorSize)
		ct.SetIndex(segmentIndex)
		for _, root := range so.SectorRoots {
			ct.Push(root)
		}
		hashSet := ct.Prove(base, cachedHashSet)
		sp := types.StorageProof{
			ParentID: so.id(),
			HashSet:  hashSet,
		}
		copy(sp.Segment[:], base)

		// Create and build the transaction with the storage proof.
		builder := h.wallet.StartTransaction()
		_, feeRecommendation := h.tpool.FeeEstimation()
		if so.value().Cmp(feeRecommendation) < 0 {
			// There's no sense submitting the storage proof if the fee is more
			// than the anticipated revenue.
			h.log.Debugln("Host not submitting storage proof due to a value that does not sufficiently exceed the fee cost")
			return
		}
		txnSize := uint64(len(encoding.Marshal(sp)) + 300)
		requiredFee := feeRecommendation.Mul64(txnSize)
		err = builder.FundSiacoins(requiredFee)
		if err != nil {
			h.log.Println("Host error when funding a storage proof transaction fee:", err)
			return
		}
		builder.AddMinerFee(requiredFee)
		builder.AddStorageProof(sp)
		storageProofSet, err := builder.Sign(true)
		if err != nil {
			h.log.Println("Host error when signing the storage proof transaction:", err)
			return
		}
		err = h.tpool.AcceptTransactionSet(storageProofSet)
		if err != nil {
			h.log.Println("Host unable to submit storage proof transaction to transaction pool:", err)
			return
		}
		so.TransactionFeesAdded = so.TransactionFeesAdded.Add(requiredFee)

		// Queue another action item to check whether the storage proof got
		// confirmed.
		h.mu.Lock()
		err = h.queueActionItem(so.proofDeadline(), so.id())
		h.mu.Unlock()
		if err != nil {
			h.log.Println("Error queuing action item:", err)
		}
	}

	// Save the storage obligation to account for any fee changes.
	err = h.db.Update(func(tx *bolt.Tx) error {
		soBytes, err := json.Marshal(so)
		if err != nil {
			return err
		}
		return tx.Bucket(bucketStorageObligations).Put(soid[:], soBytes)
	})
	if err != nil {
		h.log.Println("Error updating the storage obligations", err)
	}

	// Check if all items have succeeded with the required confirmations. Report
	// success, delete the obligation.
	if so.ProofConfirmed && blockHeight >= so.proofDeadline() {
		h.log.Println("file contract complete, id", so.id())
		h.mu.Lock()
		h.removeStorageObligation(so, obligationSucceeded)
		h.mu.Unlock()
	}
}
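Before calling crypto.MerkleProof and CachedTree.Prove, the storage-proof code above maps the consensus-selected segment index onto a sector and an in-sector offset. A standalone sketch of that arithmetic, with the sector and segment sizes assumed to be Sia's production values:

// locateSegment is a sketch of the index arithmetic in the storage-proof
// path: given the segment index chosen by consensus, find the sector that
// holds it and the segment's offset within that sector. The constants mirror
// the assumed production values of modules.SectorSize and crypto.SegmentSize.
func locateSegment(segmentIndex uint64) (sectorIndex, sectorSegment uint64) {
	const sectorSize = uint64(1 << 22) // 4 MiB per sector
	const segmentSize = uint64(64)     // leaf size of Sia's Merkle trees
	segmentsPerSector := sectorSize / segmentSize // 65536
	return segmentIndex / segmentsPerSector, segmentIndex % segmentsPerSector
}

For example, with those constants segmentIndex 200000 falls in sector 3 at offset 3392 within that sector.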