// Upload simulates a successful data upload. func (uc *uploadDownloadContractor) Upload(data []byte) (crypto.Hash, error) { uc.mu.Lock() defer uc.mu.Unlock() root := crypto.MerkleRoot(data) uc.sectors[root] = data return root, nil }
// createSector makes a random, unique sector that can be inserted into the // storage manager. func createSector() (sectorRoot crypto.Hash, sectorData []byte, err error) { sectorData, err = crypto.RandBytes(int(modules.SectorSize)) if err != nil { return crypto.Hash{}, nil, err } sectorRoot = crypto.MerkleRoot(sectorData) return sectorRoot, sectorData, nil }
// randSector creates a random sector, returning the sector along with the // Merkle root of the sector. func randSector() (crypto.Hash, []byte, error) { sectorData, err := crypto.RandBytes(int(modules.SectorSize)) if err != nil { return crypto.Hash{}, nil, err } sectorRoot := crypto.MerkleRoot(sectorData) return sectorRoot, sectorData, nil }
// HARDFORK 21,000
//
// TestPreForkValidStorageProofs checks that storage proofs which are invalid
// before the hardfork (but valid afterwards) are still rejected before the
// hardfork.
func TestPreForkValidStorageProofs(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestPreForkValidStorageProofs")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// Try a proof set where there is padding on the last segment in the file.
	// 100 bytes is not a multiple of the segment size, so the final segment
	// is only partially filled.
	file := make([]byte, 100)
	_, err = rand.Read(file)
	if err != nil {
		t.Fatal(err)
	}
	root := crypto.MerkleRoot(file)
	fc := types.FileContract{
		FileSize:       100,
		FileMerkleRoot: root,
		Payout:         types.NewCurrency64(1),
		WindowStart:    2,
		WindowEnd:      1200,
	}

	// Find a proofIndex that has the value '1' (the padded final segment).
	// The proof segment is derived from the contract ID, so repeatedly bump
	// the ID and re-add the contract until the chosen segment is segment 1.
	// NOTE(review): this loop only terminates once dbStorageProofSegment
	// selects index 1; it relies on the segment selection varying with fcid.
	var fcid types.FileContractID
	var proofIndex uint64
	for {
		fcid[0]++
		cst.cs.dbAddFileContract(fcid, fc)
		proofIndex, err = cst.cs.dbStorageProofSegment(fcid)
		if err != nil {
			t.Fatal(err)
		}
		if proofIndex == 1 {
			break
		}
	}

	// Build a proof for the padded segment and submit it; before the hardfork
	// height this proof must be rejected as invalid.
	base, proofSet := crypto.MerkleProof(file, proofIndex)
	txn := types.Transaction{
		StorageProofs: []types.StorageProof{{
			ParentID: fcid,
			HashSet:  proofSet,
		}},
	}
	copy(txn.StorageProofs[0].Segment[:], base)
	err = cst.cs.dbValidStorageProofs(txn)
	if err != errInvalidStorageProof {
		t.Log(cst.cs.dbBlockHeight())
		t.Fatal(err)
	}
}
// TestIntegrationInsertDelete tests that the contractor can insert and delete
// a sector during the same revision.
func TestIntegrationInsertDelete(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	h, c, _, err := newTestingTrio("TestIntegrationInsertDelete")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host, funded with 10 (units per the
	// contractor's API) and expiring 100 blocks from now
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	// insert the sector
	_, err = editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	// delete the sector within the same editing session
	err = editor.Delete(crypto.MerkleRoot(data))
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// contract should have no sectors after the insert+delete pair
	contract = c.contracts[contract.ID]
	if len(contract.MerkleRoots) != 0 {
		t.Fatal("contract should have no sectors:", contract.MerkleRoots)
	}
}
// Upload adds a piece to the testHost. It randomly fails according to the // testHost's parameters. func (h *testHost) Upload(data []byte) (crypto.Hash, error) { // simulate I/O delay time.Sleep(h.delay) h.Lock() defer h.Unlock() // randomly fail if n, _ := crypto.RandIntn(h.failRate); n == 0 { return crypto.Hash{}, errors.New("no data") } root := crypto.MerkleRoot(data) h.sectors[root] = data return root, nil }
// Upload negotiates a revision that adds a sector to a file contract. On
// success it returns the updated contract metadata and the Merkle root of the
// uploaded sector. The Editor's storage and upload spending metrics are
// updated as a side effect.
func (he *Editor) Upload(data []byte) (modules.RenterContract, crypto.Hash, error) {
	// allot 10 minutes for this exchange; sufficient to transfer 4 MB over 50 kbps
	extendDeadline(he.conn, modules.NegotiateFileContractRevisionTime)
	defer extendDeadline(he.conn, time.Hour) // reset deadline

	// calculate price
	// TODO: height is never updated, so we'll wind up overpaying on long-running uploads
	blockBytes := types.NewCurrency64(modules.SectorSize * uint64(he.contract.FileContract.WindowEnd-he.height))
	sectorStoragePrice := he.host.StoragePrice.Mul(blockBytes)
	sectorBandwidthPrice := he.host.UploadBandwidthPrice.Mul64(modules.SectorSize)
	sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice)
	if he.contract.RenterFunds().Cmp(sectorPrice) < 0 {
		return modules.RenterContract{}, crypto.Hash{}, errors.New("contract has insufficient funds to support upload")
	}
	// NOTE(review): NewMissedProofOutputs[1] is presumably the host
	// collateral output — confirm against the revision construction code.
	sectorCollateral := he.host.Collateral.Mul(blockBytes)
	if he.contract.LastRevision.NewMissedProofOutputs[1].Value.Cmp(sectorCollateral) < 0 {
		return modules.RenterContract{}, crypto.Hash{}, errors.New("contract has insufficient collateral to support upload")
	}

	// calculate the new Merkle root of the contract's sector set
	sectorRoot := crypto.MerkleRoot(data)
	newRoots := append(he.contract.MerkleRoots, sectorRoot)
	merkleRoot := cachedMerkleRoot(newRoots)

	// create the action and revision; the new sector is appended at the end
	// of the existing sector list
	actions := []modules.RevisionAction{{
		Type:        modules.ActionInsert,
		SectorIndex: uint64(len(he.contract.MerkleRoots)),
		Data:        data,
	}}
	rev := newUploadRevision(he.contract.LastRevision, merkleRoot, sectorPrice, sectorCollateral)

	// run the revision iteration (sends the actions and revision to the host
	// and processes the response)
	if err := he.runRevisionIteration(actions, rev, newRoots); err != nil {
		return modules.RenterContract{}, crypto.Hash{}, err
	}

	// update metrics
	he.StorageSpending = he.StorageSpending.Add(sectorStoragePrice)
	he.UploadSpending = he.UploadSpending.Add(sectorBandwidthPrice)
	return he.contract, sectorRoot, nil
}
// Sector retrieves the sector with the specified Merkle root, and revises // the underlying contract to pay the host proportionally to the data // retrieve. func (hd *Downloader) Sector(root crypto.Hash) (modules.RenterContract, []byte, error) { extendDeadline(hd.conn, modules.NegotiateDownloadTime) defer extendDeadline(hd.conn, time.Hour) // reset deadline when finished // calculate price sectorPrice := hd.host.DownloadBandwidthPrice.Mul64(modules.SectorSize) if hd.contract.RenterFunds().Cmp(sectorPrice) < 0 { return modules.RenterContract{}, nil, errors.New("contract has insufficient funds to support download") } // create the download revision rev := newDownloadRevision(hd.contract.LastRevision, sectorPrice) // if a SaveFn was provided, call it before doing any further I/O, because // that's when we're most likely to experience failure if hd.SaveFn != nil { if err := hd.SaveFn(rev); err != nil { return modules.RenterContract{}, nil, errors.New("failed to save unsigned revision: " + err.Error()) } } // initiate download by confirming host settings if err := startDownload(hd.conn, hd.host); err != nil { return modules.RenterContract{}, nil, err } // send download action err := encoding.WriteObject(hd.conn, []modules.DownloadAction{{ MerkleRoot: root, Offset: 0, Length: modules.SectorSize, }}) if err != nil { return modules.RenterContract{}, nil, err } // send the revision to the host for approval signedTxn, err := negotiateRevision(hd.conn, rev, hd.contract.SecretKey) if err != nil { return modules.RenterContract{}, nil, err } // read sector data, completing one iteration of the download loop var sectors [][]byte if err := encoding.ReadObject(hd.conn, §ors, modules.SectorSize+16); err != nil { return modules.RenterContract{}, nil, err } else if len(sectors) != 1 { return modules.RenterContract{}, nil, errors.New("host did not send enough sectors") } sector := sectors[0] if uint64(len(sector)) != modules.SectorSize { return modules.RenterContract{}, nil, 
errors.New("host did not send enough sector data") } else if crypto.MerkleRoot(sector) != root { return modules.RenterContract{}, nil, errors.New("host sent bad sector data") } // update contract and metrics hd.contract.LastRevision = rev hd.contract.LastRevisionTxn = signedTxn hd.DownloadSpending = hd.DownloadSpending.Add(sectorPrice) return hd.contract, sector, nil }
// TestStorageProofBoundaries creates file contracts and submits storage proofs
// for them, probing segment boundaries (first segment, last segment,
// incomplete segment, etc.).
func TestStorageProofBoundaries(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestStorageProofBoundaries")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// Mine enough blocks to put us beyond the testing hardfork.
	for i := 0; i < 10; i++ {
		_, err = cst.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}

	// Try storage proofs on data between 0 bytes and 128 bytes (0 segments and
	// 1 segment). Perform the operation three times because we can't control
	// which segment gets selected - it is randomly decided by the block.
	segmentRange := []int{0, 1, 2, 3, 4, 5, 15, 25, 30, 32, 62, 63, 64, 65, 66, 70, 81, 89, 90, 126, 127, 128, 129}
	for i := 0; i < 3; i++ {
		// 140 bytes covers every truncation length in segmentRange.
		randData, err := crypto.RandBytes(140)
		if err != nil {
			t.Fatal(err)
		}

		// Create a file contract for all sizes of the data between 0 and 2
		// segments and put them in the transaction pool.
		var fcids []types.FileContractID
		for _, k := range segmentRange {
			// Create the data and the file contract around it.
			truncatedData := randData[:k]
			fc := types.FileContract{
				FileSize:           uint64(k),
				FileMerkleRoot:     crypto.MerkleRoot(truncatedData),
				WindowStart:        cst.cs.dbBlockHeight() + 2,
				WindowEnd:          cst.cs.dbBlockHeight() + 4,
				Payout:             types.NewCurrency64(500), // Too small to be subject to siafund fee.
				ValidProofOutputs:  []types.SiacoinOutput{{Value: types.NewCurrency64(500)}},
				MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(500)}},
			}

			// Create a transaction around the file contract and add it to the
			// transaction pool.
			b := cst.wallet.StartTransaction()
			err = b.FundSiacoins(types.NewCurrency64(500))
			if err != nil {
				t.Fatal(err)
			}
			b.AddFileContract(fc)
			txnSet, err := b.Sign(true)
			if err != nil {
				t.Fatal(err)
			}
			err = cst.tpool.AcceptTransactionSet(txnSet)
			if err != nil {
				t.Fatal(err)
			}

			// Store the file contract id for later when building the storage
			// proof. The contract is the first one in the final transaction of
			// the set.
			fcids = append(fcids, txnSet[len(txnSet)-1].FileContractID(0))
		}

		// Mine blocks to get the file contracts into the blockchain and
		// confirming.
		for j := 0; j < 2; j++ {
			_, err = cst.miner.AddBlock()
			if err != nil {
				t.Fatal(err)
			}
		}

		// Create storage proofs for the file contracts and submit the proofs
		// to the blockchain.
		for j, k := range segmentRange {
			// Build the storage proof.
			truncatedData := randData[:k]
			proofIndex, err := cst.cs.StorageProofSegment(fcids[j])
			if err != nil {
				t.Fatal(err)
			}
			base, hashSet := crypto.MerkleProof(truncatedData, proofIndex)
			sp := types.StorageProof{
				ParentID: fcids[j],
				HashSet:  hashSet,
			}
			copy(sp.Segment[:], base)

			if k > 0 {
				// Try submitting an empty storage proof, to make sure that the
				// hardfork code didn't accidentally allow empty storage proofs
				// in situations other than file sizes with 0 bytes.
				badSP := types.StorageProof{ParentID: fcids[j]}
				badTxn := types.Transaction{
					StorageProofs: []types.StorageProof{badSP},
				}
				// If the real proof's segment happens to equal the zero
				// segment, the "bad" proof is indistinguishable; skip.
				if sp.Segment == badSP.Segment {
					continue
				}
				err = cst.tpool.AcceptTransactionSet([]types.Transaction{badTxn})
				if err == nil {
					t.Fatal("An empty storage proof got into the transaction pool with non-empty data")
				}
			}

			// Submit the storage proof to the blockchain in a transaction.
			txn := types.Transaction{
				StorageProofs: []types.StorageProof{sp},
			}
			err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn})
			if err != nil {
				t.Fatal(err, "-", j, k)
			}
		}

		// Mine blocks to get the storage proofs on the blockchain.
		for j := 0; j < 2; j++ {
			_, err = cst.miner.AddBlock()
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestValidFileContractRevisions probes the validFileContractRevisions method
// of the consensus set.
func TestValidFileContractRevisions(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestValidFileContractRevisions")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// Grab an address + unlock conditions for the transaction.
	unlockConditions, err := cst.wallet.NextAddress()
	if err != nil {
		t.Fatal(err)
	}

	// Create a file contract for which a storage proof can be created.
	var fcid types.FileContractID
	fcid[0] = 12
	simFile := make([]byte, 64*1024)
	// NOTE(review): rand.Read error is ignored here; acceptable for test
	// fixture data but could be checked.
	rand.Read(simFile)
	root := crypto.MerkleRoot(simFile)
	fc := types.FileContract{
		FileSize:       64 * 1024,
		FileMerkleRoot: root,
		WindowStart:    102,
		WindowEnd:      1200,
		Payout:         types.NewCurrency64(1),
		UnlockHash:     unlockConditions.UnlockHash(),
		RevisionNumber: 1,
	}
	cst.cs.dbAddFileContract(fcid, fc)

	// Try a working file contract revision.
	txn := types.Transaction{
		FileContractRevisions: []types.FileContractRevision{
			{
				ParentID:          fcid,
				UnlockConditions:  unlockConditions,
				NewRevisionNumber: 2,
			},
		},
	}
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != nil {
		t.Error(err)
	}

	// Try a transaction with an insufficient revision number (equal to the
	// current number).
	txn = types.Transaction{
		FileContractRevisions: []types.FileContractRevision{
			{
				ParentID:          fcid,
				UnlockConditions:  unlockConditions,
				NewRevisionNumber: 1,
			},
		},
	}
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errLowRevisionNumber {
		t.Error(err)
	}

	// Revision number lower than the current number must also be rejected.
	txn = types.Transaction{
		FileContractRevisions: []types.FileContractRevision{
			{
				ParentID:          fcid,
				UnlockConditions:  unlockConditions,
				NewRevisionNumber: 0,
			},
		},
	}
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errLowRevisionNumber {
		t.Error(err)
	}

	// Submit a file contract revision pointing to an invalid parent.
	txn.FileContractRevisions[0].ParentID[0]--
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errNilItem {
		t.Error(err)
	}
	txn.FileContractRevisions[0].ParentID[0]++

	// Submit a file contract revision for a file contract whose window has
	// already opened.
	fc, err = cst.cs.dbGetFileContract(fcid)
	if err != nil {
		t.Fatal(err)
	}
	fc.WindowStart = 0
	cst.cs.dbRemoveFileContract(fcid)
	cst.cs.dbAddFileContract(fcid, fc)
	txn.FileContractRevisions[0].NewRevisionNumber = 3
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errLateRevision {
		t.Error(err)
	}

	// Submit a file contract revision with incorrect unlock conditions.
	fc.WindowStart = 100
	cst.cs.dbRemoveFileContract(fcid)
	cst.cs.dbAddFileContract(fcid, fc)
	txn.FileContractRevisions[0].UnlockConditions.Timelock++
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errWrongUnlockConditions {
		t.Error(err)
	}
	txn.FileContractRevisions[0].UnlockConditions.Timelock--

	// Submit file contract revisions for file contracts with altered payouts.
	txn.FileContractRevisions[0].NewValidProofOutputs = []types.SiacoinOutput{{
		Value: types.NewCurrency64(1),
	}}
	txn.FileContractRevisions[0].NewMissedProofOutputs = []types.SiacoinOutput{{
		Value: types.NewCurrency64(1),
	}}
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errAlteredRevisionPayouts {
		t.Error(err)
	}
	// Altered valid outputs alone must also be rejected.
	txn.FileContractRevisions[0].NewValidProofOutputs = nil
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errAlteredRevisionPayouts {
		t.Error(err)
	}
	// Altered missed outputs alone must also be rejected.
	txn.FileContractRevisions[0].NewValidProofOutputs = []types.SiacoinOutput{{
		Value: types.NewCurrency64(1),
	}}
	txn.FileContractRevisions[0].NewMissedProofOutputs = nil
	err = cst.cs.dbValidFileContractRevisions(txn)
	if err != errAlteredRevisionPayouts {
		t.Error(err)
	}
}
// testFileContractRevision creates and revises a file contract on the
// blockchain, then submits a storage proof for the revised contract and
// verifies that the contract is removed from the consensus database.
// Failures panic because this is a tester helper, not a Test function.
func (cst *consensusSetTester) testFileContractRevision() {
	// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
	// code stops nondeterministic failures when producing storage proofs that
	// is related to buggy old code.
	for cst.cs.dbBlockHeight() <= 10 {
		_, err := cst.miner.AddBlock()
		if err != nil {
			panic(err)
		}
	}

	// Create a file (as a bytes.Buffer) that will be used for the file
	// contract.
	filesize := uint64(4e3)
	file, err := crypto.RandBytes(int(filesize))
	if err != nil {
		panic(err)
	}
	merkleRoot := crypto.MerkleRoot(file)

	// Create a spendable unlock hash for the file contract.
	sk, pk, err := crypto.GenerateKeyPair()
	if err != nil {
		panic(err)
	}
	uc := types.UnlockConditions{
		PublicKeys: []types.SiaPublicKey{{
			Algorithm: types.SignatureEd25519,
			Key:       pk[:],
		}},
		SignaturesRequired: 1,
	}

	// Create a file contract that will be revised. Note that the initial
	// Merkle root is deliberately empty; the revision fills it in.
	validProofDest := randAddress()
	payout := types.NewCurrency64(400e6)
	fc := types.FileContract{
		FileSize:       filesize,
		FileMerkleRoot: crypto.Hash{},
		WindowStart:    cst.cs.dbBlockHeight() + 2,
		WindowEnd:      cst.cs.dbBlockHeight() + 3,
		Payout:         payout,
		ValidProofOutputs: []types.SiacoinOutput{{
			UnlockHash: validProofDest,
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
		MissedProofOutputs: []types.SiacoinOutput{{
			UnlockHash: types.UnlockHash{},
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
		UnlockHash: uc.UnlockHash(),
	}

	// Submit a transaction with the file contract.
	txnBuilder := cst.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(payout)
	if err != nil {
		panic(err)
	}
	fcIndex := txnBuilder.AddFileContract(fc)
	txnSet, err := txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Submit a revision for the file contract. The contract lives in the
	// final transaction of the signed set.
	ti := len(txnSet) - 1
	fcid := txnSet[ti].FileContractID(fcIndex)
	fcr := types.FileContractRevision{
		ParentID:              fcid,
		UnlockConditions:      uc,
		NewRevisionNumber:     69292,
		NewFileSize:           filesize,
		NewFileMerkleRoot:     merkleRoot,
		NewWindowStart:        cst.cs.dbBlockHeight() + 1,
		NewWindowEnd:          cst.cs.dbBlockHeight() + 2,
		NewValidProofOutputs:  fc.ValidProofOutputs,
		NewMissedProofOutputs: fc.MissedProofOutputs,
		NewUnlockHash:         uc.UnlockHash(),
	}
	ts := types.TransactionSignature{
		ParentID:       crypto.Hash(fcid),
		CoveredFields:  types.CoveredFields{WholeTransaction: true},
		PublicKeyIndex: 0,
	}
	txn := types.Transaction{
		FileContractRevisions: []types.FileContractRevision{fcr},
		TransactionSignatures: []types.TransactionSignature{ts},
	}
	encodedSig, err := crypto.SignHash(txn.SigHash(0), sk)
	if err != nil {
		panic(err)
	}
	txn.TransactionSignatures[0].Signature = encodedSig[:]
	err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn})
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Create and submit a storage proof for the file contract.
	segmentIndex, err := cst.cs.StorageProofSegment(fcid)
	if err != nil {
		panic(err)
	}
	segment, hashSet := crypto.MerkleProof(file, segmentIndex)
	sp := types.StorageProof{
		ParentID: fcid,
		HashSet:  hashSet,
	}
	copy(sp.Segment[:], segment)
	txnBuilder = cst.wallet.StartTransaction()
	txnBuilder.AddStorageProof(sp)
	txnSet, err = txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Check that the file contract has been removed.
	_, err = cst.cs.dbGetFileContract(fcid)
	if err != errNilItem {
		panic("file contract should not exist in the database")
	}
}
// Sector retrieves the sector with the specified Merkle root, and revises // the underlying contract to pay the host proportionally to the data // retrieve. func (hd *Downloader) Sector(root crypto.Hash) (modules.RenterContract, []byte, error) { extendDeadline(hd.conn, modules.NegotiateDownloadTime) defer extendDeadline(hd.conn, time.Hour) // reset deadline when finished // calculate price sectorPrice := hd.host.DownloadBandwidthPrice.Mul64(modules.SectorSize) if hd.contract.RenterFunds().Cmp(sectorPrice) < 0 { return modules.RenterContract{}, nil, errors.New("contract has insufficient funds to support download") } // create the download revision rev := newDownloadRevision(hd.contract.LastRevision, sectorPrice) // initiate download by confirming host settings if err := startDownload(hd.conn, hd.host); err != nil { return modules.RenterContract{}, nil, err } // Before we continue, save the revision. Unexpected termination (e.g. // power failure) during the signature transfer leaves in an ambiguous // state: the host may or may not have received the signature, and thus // may report either revision as being the most recent. To mitigate this, // we save the old revision as a fallback. if hd.SaveFn != nil { if err := hd.SaveFn(rev, hd.contract.MerkleRoots); err != nil { return modules.RenterContract{}, nil, err } } // send download action err := encoding.WriteObject(hd.conn, []modules.DownloadAction{{ MerkleRoot: root, Offset: 0, Length: modules.SectorSize, }}) if err != nil { return modules.RenterContract{}, nil, err } // send the revision to the host for approval signedTxn, err := negotiateRevision(hd.conn, rev, hd.contract.SecretKey) if err == modules.ErrStopResponse { // if host gracefully closed, close our connection as well; this will // cause the next download to fail. However, we must delay closing // until we've finished downloading the sector. 
defer hd.conn.Close() } else if err != nil { return modules.RenterContract{}, nil, err } // read sector data, completing one iteration of the download loop var sectors [][]byte if err := encoding.ReadObject(hd.conn, §ors, modules.SectorSize+16); err != nil { return modules.RenterContract{}, nil, err } else if len(sectors) != 1 { return modules.RenterContract{}, nil, errors.New("host did not send enough sectors") } sector := sectors[0] if uint64(len(sector)) != modules.SectorSize { return modules.RenterContract{}, nil, errors.New("host did not send enough sector data") } else if crypto.MerkleRoot(sector) != root { return modules.RenterContract{}, nil, errors.New("host sent bad sector data") } // update contract and metrics hd.contract.LastRevision = rev hd.contract.LastRevisionTxn = signedTxn hd.DownloadSpending = hd.DownloadSpending.Add(sectorPrice) return hd.contract, sector, nil }
// TestErasureDownload tests parallel downloading of erasure-coded data. It
// mocks the fetcher interface in order to directly test the downloading
// algorithm.
func TestErasureDownload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// generate data
	const dataSize = 777
	data, err := crypto.RandBytes(dataSize)
	if err != nil {
		t.Fatal(err)
	}

	// create Reed-Solomon encoder (2 data pieces, 10 total)
	rsc, err := NewRSCode(2, 10)
	if err != nil {
		t.Fatal(err)
	}

	// create hosts, one mock fetcher per erasure-coded piece
	const pieceSize = 10
	hosts := make([]fetcher, rsc.NumPieces())
	for i := range hosts {
		hosts[i] = &testFetcher{
			sectors:   make(map[crypto.Hash][]byte),
			pieceMap:  make(map[uint64][]pieceData),
			pieceSize: pieceSize,

			delay:    time.Millisecond,
			failRate: 5, // 20% failure rate
		}
	}
	// make one host really slow
	hosts[0].(*testFetcher).delay = 100 * time.Millisecond
	// make one host always fail
	hosts[1].(*testFetcher).failRate = 1

	// upload data to hosts: chunk the input, encode each chunk, and
	// distribute the pieces round-robin across the hosts
	r := bytes.NewReader(data) // makes chunking easier
	chunk := make([]byte, pieceSize*rsc.MinPieces())
	var i uint64
	for i = uint64(0); ; i++ {
		_, err := io.ReadFull(r, chunk)
		if err == io.EOF {
			break
		} else if err != nil && err != io.ErrUnexpectedEOF {
			t.Fatal(err)
		}
		pieces, err := rsc.Encode(chunk)
		if err != nil {
			t.Fatal(err)
		}
		for j, p := range pieces {
			root := crypto.MerkleRoot(p)
			host := hosts[j%len(hosts)].(*testFetcher) // distribute evenly
			host.pieceMap[i] = append(host.pieceMap[i], pieceData{
				Chunk:      uint64(i),
				Piece:      uint64(j),
				MerkleRoot: root,
			})
			host.sectors[root] = p
		}
	}

	// check hosts (not strictly necessary)
	err = checkHosts(hosts, rsc.MinPieces(), i)
	if err != nil {
		t.Fatal(err)
	}

	// download data and verify it round-trips byte-for-byte
	d := newFile("foo", rsc, pieceSize, dataSize).newDownload(hosts, "")
	buf := new(bytes.Buffer)
	err = d.run(buf)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(buf.Bytes(), data) {
		t.Fatal("recovered data does not match original")
	}

	/*
		// These metrics can be used to assess the efficiency of the download
		// algorithm.

		totFetch := 0
		for i, h := range hosts {
			h := h.(*testHost)
			t.Logf("Host %2d: Fetched: %v/%v", i, h.nFetch, h.nAttempt)
			totFetch += h.nAttempt
		}
		t.Log("Optimal fetches:", i*uint64(rsc.MinPieces()))
		t.Log("Total fetches:  ", totFetch)
	*/
}
// TestMultiSectorStorageObligationStack checks that the host correctly
// manages a storage obligation across multiple sectors and multiple file
// contract revisions.
//
// Unlike the SingleSector test, the multi sector test attempts to spread file
// contract revisions over multiple blocks.
func TestMultiSectorStorageObligationStack(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester("TestMultiSectorStorageObligationStack")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// Start by adding a storage obligation to the host. To emulate conditions
	// of a renter creating the first contract, the storage obligation has no
	// data, but does have money.
	so, err := ht.newTesterStorageObligation()
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.lockStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.addStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.unlockStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}

	// Storage obligation should not be marked as having the transaction
	// confirmed on the blockchain.
	if so.OriginConfirmed {
		t.Fatal("storage obligation should not yet be marked as confirmed, confirmation is on the way")
	}

	// Deviation from SingleSector test - mine a block here to confirm the
	// storage obligation before a file contract revision is created.
	_, err = ht.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Load the storage obligation from the database, see if it updated
	// correctly.
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		*so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !so.OriginConfirmed {
		t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined")
	}

	// Add a file contract revision, moving over a small amount of money to pay
	// for the file contract.
	sectorRoot, sectorData, err := randSector()
	if err != nil {
		t.Fatal(err)
	}
	so.SectorRoots = []crypto.Hash{sectorRoot}
	sectorCost := types.SiacoinPrecision.Mul64(550)
	so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(sectorCost)
	ht.host.financialMetrics.PotentialStorageRevenue = ht.host.financialMetrics.PotentialStorageRevenue.Add(sectorCost)
	// Move sectorCost from the renter's payouts (index 0) to the host's
	// payouts (index 1) in both the valid and missed cases.
	validPayouts, missedPayouts := so.payouts()
	validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost)
	validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost)
	missedPayouts[0].Value = missedPayouts[0].Value.Sub(sectorCost)
	missedPayouts[1].Value = missedPayouts[1].Value.Add(sectorCost)
	revisionSet := []types.Transaction{{
		FileContractRevisions: []types.FileContractRevision{{
			ParentID:          so.id(),
			UnlockConditions:  types.UnlockConditions{},
			NewRevisionNumber: 1,

			NewFileSize:           uint64(len(sectorData)),
			NewFileMerkleRoot:     sectorRoot,
			NewWindowStart:        so.expiration(),
			NewWindowEnd:          so.proofDeadline(),
			NewValidProofOutputs:  validPayouts,
			NewMissedProofOutputs: missedPayouts,
			NewUnlockHash:         types.UnlockConditions{}.UnlockHash(),
		}},
	}}
	err = ht.host.lockStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.modifyStorageObligation(so, nil, []crypto.Hash{sectorRoot}, [][]byte{sectorData})
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.unlockStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}

	// Submit the revision set to the transaction pool.
	err = ht.tpool.AcceptTransactionSet(revisionSet)
	if err != nil {
		t.Fatal(err)
	}

	// Create a second file contract revision, which is going to be submitted
	// to the transaction pool after the first revision. Though, in practice
	// this should never happen, we want to check that the transaction pool is
	// correctly handling multiple file contract revisions being submitted in
	// the same block cycle. This test will additionally tell us whether or not
	// the host can correctly handle buildling storage proofs for files with
	// multiple sectors.
	sectorRoot2, sectorData2, err := randSector()
	if err != nil {
		t.Fatal(err)
	}
	so.SectorRoots = []crypto.Hash{sectorRoot, sectorRoot2}
	sectorCost2 := types.SiacoinPrecision.Mul64(650)
	so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(sectorCost2)
	ht.host.financialMetrics.PotentialStorageRevenue = ht.host.financialMetrics.PotentialStorageRevenue.Add(sectorCost2)
	validPayouts, missedPayouts = so.payouts()
	validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost2)
	validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost2)
	missedPayouts[0].Value = missedPayouts[0].Value.Sub(sectorCost2)
	missedPayouts[1].Value = missedPayouts[1].Value.Add(sectorCost2)
	// The revised file is the concatenation of both sectors.
	combinedSectors := append(sectorData, sectorData2...)
	combinedRoot := crypto.MerkleRoot(combinedSectors)
	revisionSet2 := []types.Transaction{{
		FileContractRevisions: []types.FileContractRevision{{
			ParentID:          so.id(),
			UnlockConditions:  types.UnlockConditions{},
			NewRevisionNumber: 2,

			NewFileSize:           uint64(len(sectorData) + len(sectorData2)),
			NewFileMerkleRoot:     combinedRoot,
			NewWindowStart:        so.expiration(),
			NewWindowEnd:          so.proofDeadline(),
			NewValidProofOutputs:  validPayouts,
			NewMissedProofOutputs: missedPayouts,
			NewUnlockHash:         types.UnlockConditions{}.UnlockHash(),
		}},
	}}
	err = ht.host.lockStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.modifyStorageObligation(so, nil, []crypto.Hash{sectorRoot2}, [][]byte{sectorData2})
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.unlockStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}

	// Submit the revision set to the transaction pool.
	err = ht.tpool.AcceptTransactionSet(revisionSet2)
	if err != nil {
		t.Fatal(err)
	}

	// Mine a block to confirm the transactions containing the file contract
	// and the file contract revision.
	_, err = ht.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Load the storage obligation from the database, see if it updated
	// correctly.
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		*so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !so.OriginConfirmed {
		t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined")
	}
	if !so.RevisionConfirmed {
		t.Fatal("revision transaction for storage obligation was not confirmed after a block was mined")
	}

	// Mine until the host submits a storage proof.
	for i := ht.host.blockHeight; i <= so.expiration()+resubmissionTimeout; i++ {
		_, err := ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		*so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !so.OriginConfirmed {
		t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined")
	}
	if !so.RevisionConfirmed {
		t.Fatal("revision transaction for storage obligation was not confirmed after a block was mined")
	}
	if !so.ProofConfirmed {
		t.Fatal("storage obligation is not saying that the storage proof was confirmed on the blockchain")
	}

	// Mine blocks until the storage proof has enough confirmations that the
	// host will delete the file entirely.
	for i := 0; i <= int(defaultWindowSize); i++ {
		_, err := ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	// The obligation should now be gone from the database, and the revenue
	// from both sectors should be reflected in the host's metrics.
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		*so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != errNoStorageObligation {
		t.Fatal(err)
	}
	if ht.host.financialMetrics.StorageRevenue.Cmp(sectorCost.Add(sectorCost2)) != 0 {
		t.Fatal("the host should be reporting revenue after a successful storage proof")
	}
}
// TestStorageProof checks that the host can create and submit a storage proof.
//
// NOTE(review): the second half of this test is commented out (it references a
// `revTxn` variable that is never defined) and the revision transaction built
// below is discarded with `_ =`. As written, the test only exercises contract
// funding/signing and writing the sector data to disk.
func TestStorageProof(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester("TestStorageProof")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// create a file contract
	fc := types.FileContract{
		WindowStart:        types.MaturityDelay + 3,
		WindowEnd:          1000,
		Payout:             types.NewCurrency64(1),
		UnlockHash:         types.UnlockConditions{}.UnlockHash(),
		ValidProofOutputs:  []types.SiacoinOutput{{Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(0)}},
		MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(0)}},
	}
	// Fund and sign a transaction containing the file contract.
	txnBuilder := ht.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(fc.Payout)
	if err != nil {
		t.Fatal(err)
	}
	txnBuilder.AddFileContract(fc)
	signedTxnSet, err := txnBuilder.Sign(true)
	if err != nil {
		t.Fatal(err)
	}
	fcid := signedTxnSet[len(signedTxnSet)-1].FileContractID(0)

	// generate data
	const dataSize = 777
	data := make([]byte, dataSize)
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}
	root := crypto.MerkleRoot(data)
	// Persist the data to disk under the host's persist directory.
	err = ioutil.WriteFile(filepath.Join(ht.host.persistDir, "foo"), data, 0777)
	if err != nil {
		t.Fatal(err)
	}

	// create revision
	rev := types.FileContractRevision{
		ParentID:              fcid,
		UnlockConditions:      types.UnlockConditions{},
		NewFileSize:           dataSize,
		NewWindowStart:        fc.WindowStart,
		NewFileMerkleRoot:     root,
		NewWindowEnd:          fc.WindowEnd,
		NewValidProofOutputs:  fc.ValidProofOutputs,
		NewMissedProofOutputs: fc.MissedProofOutputs,
		NewRevisionNumber:     1,
	}
	// NOTE(review): this transaction is intentionally discarded; presumably it
	// was meant to become the `revTxn` used in the disabled block below.
	_ = types.Transaction{
		FileContractRevisions: []types.FileContractRevision{rev},
	}

	/*
		// create obligation
		obligation := &contractObligation{
			ID: fcid,
			OriginTransaction: types.Transaction{
				FileContracts: []types.FileContract{fc},
			},
			Path: filepath.Join(ht.host.persistDir, "foo"),
		}
		ht.host.obligationsByID[fcid] = obligation
		ht.host.addActionItem(fc.WindowStart+1, obligation)

		// submit both to tpool
		err = ht.tpool.AcceptTransactionSet(append(signedTxnSet, revTxn))
		if err != nil {
			t.Fatal(err)
		}
		_, err = ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}

		// storage proof will be submitted after mining one more block
		_, err = ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	*/
}
// TestIntegrationModify tests that the contractor can modify a previously- // uploaded sector. func TestIntegrationModify(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio("TestIntegrationModify") if err != nil { t.Fatal(err) } // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } // revise the contract editor, err := c.Editor(contract) if err != nil { t.Fatal(err) } data, err := crypto.RandBytes(int(modules.SectorSize)) if err != nil { t.Fatal(err) } // insert the sector _, err = editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // modify the sector oldRoot := crypto.MerkleRoot(data) offset, newData := uint64(10), []byte{1, 2, 3, 4, 5} copy(data[offset:], newData) newRoot := crypto.MerkleRoot(data) contract = c.contracts[contract.ID] editor, err = c.Editor(contract) if err != nil { t.Fatal(err) } err = editor.Modify(oldRoot, newRoot, offset, newData) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } }
// TestValidStorageProofs probes the validStorageProofs method of the consensus // set. func TestValidStorageProofs(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester("TestValidStorageProofs") if err != nil { t.Fatal(err) } defer cst.Close() // COMPATv0.4.0 // // Mine 10 blocks so that the post-hardfork rules are in effect. for i := 0; i < 10; i++ { block, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(block) if err != nil { t.Fatal(err) } } // Create a file contract for which a storage proof can be created. var fcid types.FileContractID fcid[0] = 12 simFile := make([]byte, 64*1024) _, err = rand.Read(simFile) if err != nil { t.Fatal(err) } root := crypto.MerkleRoot(simFile) fc := types.FileContract{ FileSize: 64 * 1024, FileMerkleRoot: root, Payout: types.NewCurrency64(1), WindowStart: 2, WindowEnd: 1200, } cst.cs.dbAddFileContract(fcid, fc) // Create a transaction with a storage proof. proofIndex, err := cst.cs.dbStorageProofSegment(fcid) if err != nil { t.Fatal(err) } base, proofSet := crypto.MerkleProof(simFile, proofIndex) txn := types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != nil { t.Error(err) } // Corrupt the proof set. proofSet[0][0]++ txn = types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != errInvalidStorageProof { t.Error(err) } // Try to validate a proof for a file contract that doesn't exist. txn.StorageProofs[0].ParentID = types.FileContractID{} err = cst.cs.dbValidStorageProofs(txn) if err != errUnrecognizedFileContractID { t.Error(err) } // Try a proof set where there is padding on the last segment in the file. 
file := make([]byte, 100) _, err = rand.Read(file) if err != nil { t.Fatal(err) } root = crypto.MerkleRoot(file) fc = types.FileContract{ FileSize: 100, FileMerkleRoot: root, Payout: types.NewCurrency64(1), WindowStart: 2, WindowEnd: 1200, } // Find a proofIndex that has the value '1'. for { fcid[0]++ cst.cs.dbAddFileContract(fcid, fc) proofIndex, err = cst.cs.dbStorageProofSegment(fcid) if err != nil { t.Fatal(err) } if proofIndex == 1 { break } } base, proofSet = crypto.MerkleProof(file, proofIndex) txn = types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != nil { t.Fatal(err) } }
// managedRevisionIteration handles one iteration of the revision loop. As a
// performance optimization, multiple iterations of revisions are allowed to be
// made over the same connection.
//
// Protocol order per iteration: host sends settings → renter accepts/rejects →
// renter sends modification batch + paying revision → host verifies and
// accepts/rejects → renter sends its signature → host commits and replies with
// its own signature (or a stop response on the final iteration).
func (h *Host) managedRevisionIteration(conn net.Conn, so *storageObligation, finalIter bool) error {
	// Send the settings to the renter. The host will keep going even if it is
	// not accepting contracts, because in this case the contract already
	// exists.
	err := h.managedRPCSettings(conn)
	if err != nil {
		return err
	}
	// Set the negotiation deadline.
	// NOTE(review): the SetDeadline error is ignored here — verify that is
	// intentional for this conn type.
	conn.SetDeadline(time.Now().Add(modules.NegotiateFileContractRevisionTime))

	// The renter will either accept or reject the settings + revision
	// transaction. It may also return a stop response to indicate that it
	// wishes to terminate the revision loop.
	err = modules.ReadNegotiationAcceptance(conn)
	if err != nil {
		return err
	}

	// Read some variables from the host for use later in the function.
	h.mu.RLock()
	settings := h.settings
	secretKey := h.secretKey
	blockHeight := h.blockHeight
	h.mu.RUnlock()

	// The renter is going to send its intended modifications, followed by the
	// file contract revision that pays for them.
	var modifications []modules.RevisionAction
	var revision types.FileContractRevision
	err = encoding.ReadObject(conn, &modifications, settings.MaxReviseBatchSize)
	if err != nil {
		return err
	}
	err = encoding.ReadObject(conn, &revision, modules.NegotiateMaxFileContractRevisionSize)
	if err != nil {
		return err
	}

	// First read all of the modifications. Then make the modifications, but
	// with the ability to reverse them. Then verify the file contract revision
	// correctly accounts for the changes.
	var bandwidthRevenue types.Currency // Upload bandwidth.
	var storageRevenue types.Currency
	var newCollateral types.Currency
	var sectorsRemoved []crypto.Hash
	var sectorsGained []crypto.Hash
	var gainedSectorData [][]byte
	// The closure lets any validation failure below fall through to a single
	// WriteNegotiationRejection call.
	err = func() error {
		for _, modification := range modifications {
			// Check that the index points to an existing sector root. If the type
			// is ActionInsert, we permit inserting at the end.
			if modification.Type == modules.ActionInsert {
				if modification.SectorIndex > uint64(len(so.SectorRoots)) {
					return errBadModificationIndex
				}
			} else if modification.SectorIndex >= uint64(len(so.SectorRoots)) {
				return errBadModificationIndex
			}
			// Check that the data sent for the sector is not too large.
			if uint64(len(modification.Data)) > modules.SectorSize {
				return errLargeSector
			}

			switch modification.Type {
			case modules.ActionDelete:
				// There is no financial information to change, it is enough to
				// remove the sector.
				sectorsRemoved = append(sectorsRemoved, so.SectorRoots[modification.SectorIndex])
				so.SectorRoots = append(so.SectorRoots[0:modification.SectorIndex], so.SectorRoots[modification.SectorIndex+1:]...)
			case modules.ActionInsert:
				// Check that the sector size is correct.
				if uint64(len(modification.Data)) != modules.SectorSize {
					return errBadSectorSize
				}
				// Update finances. Storage revenue and collateral scale with the
				// number of blocks remaining until the proof deadline.
				blocksRemaining := so.proofDeadline() - blockHeight
				blockBytesCurrency := types.NewCurrency64(uint64(blocksRemaining)).Mul64(modules.SectorSize)
				bandwidthRevenue = bandwidthRevenue.Add(settings.MinUploadBandwidthPrice.Mul64(modules.SectorSize))
				storageRevenue = storageRevenue.Add(settings.MinStoragePrice.Mul(blockBytesCurrency))
				newCollateral = newCollateral.Add(settings.Collateral.Mul(blockBytesCurrency))

				// Insert the sector into the root list.
				newRoot := crypto.MerkleRoot(modification.Data)
				sectorsGained = append(sectorsGained, newRoot)
				gainedSectorData = append(gainedSectorData, modification.Data)
				so.SectorRoots = append(so.SectorRoots[:modification.SectorIndex], append([]crypto.Hash{newRoot}, so.SectorRoots[modification.SectorIndex:]...)...)
			case modules.ActionModify:
				// Check that the offset and length are okay. Length is already
				// known to be appropriately small, but the offset needs to be
				// checked for being appropriately small as well otherwise there is
				// a risk of overflow.
				if modification.Offset > modules.SectorSize || modification.Offset+uint64(len(modification.Data)) > modules.SectorSize {
					return errIllegalOffsetAndLength
				}
				// Get the data for the new sector.
				sector, err := h.ReadSector(so.SectorRoots[modification.SectorIndex])
				if err != nil {
					return err
				}
				copy(sector[modification.Offset:], modification.Data)
				// Update finances. Only upload bandwidth is charged; the stored
				// size is unchanged by a modify.
				bandwidthRevenue = bandwidthRevenue.Add(settings.MinUploadBandwidthPrice.Mul64(uint64(len(modification.Data))))
				// Update the sectors removed and gained to indicate that the old
				// sector has been replaced with a new sector.
				newRoot := crypto.MerkleRoot(sector)
				sectorsRemoved = append(sectorsRemoved, so.SectorRoots[modification.SectorIndex])
				sectorsGained = append(sectorsGained, newRoot)
				gainedSectorData = append(gainedSectorData, sector)
				so.SectorRoots[modification.SectorIndex] = newRoot
			default:
				return errUnknownModification
			}
		}
		newRevenue := storageRevenue.Add(bandwidthRevenue)
		return verifyRevision(so, revision, blockHeight, newRevenue, newCollateral)
	}()
	if err != nil {
		return modules.WriteNegotiationRejection(conn, err)
	}
	// Revision is acceptable, write an acceptance string.
	err = modules.WriteNegotiationAcceptance(conn)
	if err != nil {
		return err
	}

	// Renter will send a transaction signature for the file contract revision.
	var renterSig types.TransactionSignature
	err = encoding.ReadObject(conn, &renterSig, modules.NegotiateMaxTransactionSignatureSize)
	if err != nil {
		return err
	}
	// Verify that the signature is valid and get the host's signature.
	txn, err := createRevisionSignature(revision, renterSig, secretKey, blockHeight)
	if err != nil {
		return modules.WriteNegotiationRejection(conn, err)
	}

	// Commit the verified changes: update the obligation's finances and
	// persist the modified sector set.
	so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(storageRevenue)
	so.RiskedCollateral = so.RiskedCollateral.Add(newCollateral)
	so.PotentialUploadRevenue = so.PotentialUploadRevenue.Add(bandwidthRevenue)
	so.RevisionTransactionSet = []types.Transaction{txn}
	err = h.modifyStorageObligation(so, sectorsRemoved, sectorsGained, gainedSectorData)
	if err != nil {
		return modules.WriteNegotiationRejection(conn, err)
	}

	// Host will now send acceptance and its signature to the renter. This
	// iteration is complete. If the finalIter flag is set, StopResponse will
	// be sent instead. This indicates to the renter that the host wishes to
	// terminate the revision loop.
	if finalIter {
		err = modules.WriteNegotiationStop(conn)
	} else {
		err = modules.WriteNegotiationAcceptance(conn)
	}
	if err != nil {
		return err
	}
	// Send the host's signature (index 1 of the transaction's signature set).
	return encoding.WriteObject(conn, txn.TransactionSignatures[1])
}
// testValidStorageProofBlocks adds a block with a file contract, and then
// submits a storage proof for that file contract. It checks the siafund pool
// tax, the removal of the file contract from the database, and the creation of
// the delayed valid-proof output. Failures panic rather than using *testing.T
// because this helper is shared between tests.
func (cst *consensusSetTester) testValidStorageProofBlocks() {
	// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
	// code stops nondeterministic failures when producing storage proofs that
	// is related to buggy old code.
	for cst.cs.dbBlockHeight() <= 10 {
		_, err := cst.miner.AddBlock()
		if err != nil {
			panic(err)
		}
	}

	// Create a file (as a bytes.Buffer) that will be used for the file
	// contract.
	filesize := uint64(4e3)
	file, err := crypto.RandBytes(int(filesize))
	if err != nil {
		panic(err)
	}
	merkleRoot := crypto.MerkleRoot(file)

	// Create a file contract that will be successful. The window opens on the
	// very next block and is only one block long.
	validProofDest := randAddress()
	payout := types.NewCurrency64(400e6)
	fc := types.FileContract{
		FileSize:       filesize,
		FileMerkleRoot: merkleRoot,
		WindowStart:    cst.cs.dbBlockHeight() + 1,
		WindowEnd:      cst.cs.dbBlockHeight() + 2,
		Payout:         payout,
		ValidProofOutputs: []types.SiacoinOutput{{
			UnlockHash: validProofDest,
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
		MissedProofOutputs: []types.SiacoinOutput{{
			UnlockHash: types.UnlockHash{},
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
	}

	// Submit a transaction with the file contract. The siafund pool is
	// snapshotted first so the tax increase can be verified below.
	oldSiafundPool := cst.cs.dbGetSiafundPool()
	txnBuilder := cst.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(payout)
	if err != nil {
		panic(err)
	}
	fcIndex := txnBuilder.AddFileContract(fc)
	txnSet, err := txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Check that the siafund pool was increased by the tax on the payout.
	siafundPool := cst.cs.dbGetSiafundPool()
	if siafundPool.Cmp(oldSiafundPool.Add(types.Tax(cst.cs.dbBlockHeight()-1, payout))) != 0 {
		panic("siafund pool was not increased correctly")
	}

	// Check that the file contract made it into the database.
	ti := len(txnSet) - 1
	fcid := txnSet[ti].FileContractID(fcIndex)
	_, err = cst.cs.dbGetFileContract(fcid)
	if err != nil {
		panic(err)
	}

	// Create and submit a storage proof for the file contract.
	segmentIndex, err := cst.cs.StorageProofSegment(fcid)
	if err != nil {
		panic(err)
	}
	segment, hashSet := crypto.MerkleProof(file, segmentIndex)
	sp := types.StorageProof{
		ParentID: fcid,
		HashSet:  hashSet,
	}
	copy(sp.Segment[:], segment)
	txnBuilder = cst.wallet.StartTransaction()
	txnBuilder.AddStorageProof(sp)
	txnSet, err = txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Check that the file contract has been removed.
	_, err = cst.cs.dbGetFileContract(fcid)
	if err != errNilItem {
		panic("file contract should not exist in the database")
	}

	// Check that the siafund pool has not changed.
	postProofPool := cst.cs.dbGetSiafundPool()
	if postProofPool.Cmp(siafundPool) != 0 {
		panic("siafund pool should not change after submitting a storage proof")
	}

	// Check that a delayed output was created for the valid proof, maturing
	// MaturityDelay blocks after the current height.
	spoid := fcid.StorageProofOutputID(types.ProofValid, 0)
	dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, spoid)
	if err != nil {
		panic(err)
	}
	if dsco.UnlockHash != fc.ValidProofOutputs[0].UnlockHash {
		panic("wrong unlock hash in dsco")
	}
	if dsco.Value.Cmp(fc.ValidProofOutputs[0].Value) != 0 {
		panic("wrong sco value in dsco")
	}
}