Example #1
// createRandFile creates a file on disk and fills it with random bytes.
func createRandFile(path string, size int) error {
	data, err := crypto.RandBytes(size)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, data, 0600)
}
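
A minimal usage sketch for createRandFile, assuming it sits in the same test package; the test name, t.TempDir (Go 1.15+), and the 1024-byte size are illustrative, and the sketch needs the os, path/filepath, and testing imports.
// TestCreateRandFileSize is a hypothetical test: it writes a random file with
// createRandFile and confirms the file on disk has the requested size.
func TestCreateRandFileSize(t *testing.T) {
	path := filepath.Join(t.TempDir(), "rand.dat")
	const size = 1024
	if err := createRandFile(path, size); err != nil {
		t.Fatal(err)
	}
	fi, err := os.Stat(path)
	if err != nil {
		t.Fatal(err)
	}
	if fi.Size() != size {
		t.Fatalf("expected %d bytes, got %d", size, fi.Size())
	}
}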
// TestIntegrationUploadDownload tests that the contractor can upload data to
// a host and download it intact.
func TestIntegrationUploadDownload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	h, c, _, err := newTestingTrio("TestIntegrationUploadDownload")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	root, err := editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// download the data
	contract = c.contracts[contract.ID]
	downloader, err := c.Downloader(contract)
	if err != nil {
		t.Fatal(err)
	}
	retrieved, err := downloader.Sector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, retrieved) {
		t.Fatal("downloaded data does not match original")
	}
	err = downloader.Close()
	if err != nil {
		t.Fatal(err)
	}
}
// randSector creates a random sector, returning the sector along with the
// Merkle root of the sector.
func randSector() (crypto.Hash, []byte, error) {
	sectorData, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		return crypto.Hash{}, nil, err
	}
	sectorRoot := crypto.MerkleRoot(sectorData)
	return sectorRoot, sectorData, nil
}
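
A short sketch of how randSector might be exercised, checking the returned root against a recomputed Merkle root; the test name is illustrative and the sketch assumes the usual crypto, modules, and testing imports.
// TestRandSectorRoot is a hypothetical check that randSector returns a sector
// of the expected size whose root matches a recomputed Merkle root.
func TestRandSectorRoot(t *testing.T) {
	root, data, err := randSector()
	if err != nil {
		t.Fatal(err)
	}
	if uint64(len(data)) != modules.SectorSize {
		t.Fatal("sector has wrong size:", len(data))
	}
	if crypto.MerkleRoot(data) != root {
		t.Fatal("returned root does not match recomputed root")
	}
}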
Example #4
// createSector makes a random, unique sector that can be inserted into the
// storage manager.
func createSector() (sectorRoot crypto.Hash, sectorData []byte, err error) {
	sectorData, err = crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		return crypto.Hash{}, nil, err
	}
	sectorRoot = crypto.MerkleRoot(sectorData)
	return sectorRoot, sectorData, nil
}
// TestIntegrationDelete tests that the contractor can delete a sector from a
// contract previously formed with a host.
func TestIntegrationDelete(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	h, c, _, err := newTestingTrio("TestIntegrationDelete")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	_, err = editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// delete the sector
	contract = c.contracts[contract.ID]
	editor, err = c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Delete(contract.MerkleRoots[0])
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}
}
Example #6
// HeaderForWork returns a block header that is ready for nonce grinding,
// along with the current target for the block.
func (m *Miner) HeaderForWork() (types.BlockHeader, types.Target, error) {
	if !m.wallet.Unlocked() {
		return types.BlockHeader{}, types.Target{}, modules.ErrLockedWallet
	}
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)
	err := m.checkAddress()
	if err != nil {
		return types.BlockHeader{}, types.Target{}, err
	}

	if time.Since(m.lastBlock).Seconds() > secondsBetweenBlocks {
		m.prepareNewBlock()
	}

	// The header that will be returned for nonce grinding.
	// The header is constructed from a block and some arbitrary data. The
	// arbitrary data allows for multiple unique blocks to be generated from
	// a single block in memory. A block pointer is used in order to avoid
	// storing multiple copies of the same block in memory.
	var header types.BlockHeader
	var arbData []byte
	var block *types.Block

	if m.memProgress%(headerForWorkMemory/blockForWorkMemory) == 0 {
		// Grab a new block. Allocate space for the pointer to store it as well
		block = new(types.Block)
		*block, _ = m.blockForWork()
		header = block.Header()
		arbData = block.Transactions[0].ArbitraryData[0]

		m.lastBlock = time.Now()
	} else {
		// Set block to previous block, but create new arbData
		block = m.blockMem[m.headerMem[m.memProgress-1]]
		arbData, _ = crypto.RandBytes(types.SpecifierLen)
		block.Transactions[0].ArbitraryData[0] = arbData
		header = block.Header()
	}

	// Save a mapping from the header to its block as well as from the
	// header to its arbitrary data, replacing the block that was
	// stored 'headerForWorkMemory' requests ago.
	delete(m.blockMem, m.headerMem[m.memProgress])
	delete(m.arbDataMem, m.headerMem[m.memProgress])
	m.blockMem[header] = block
	m.arbDataMem[header] = arbData
	m.headerMem[m.memProgress] = header
	m.memProgress++
	if m.memProgress == headerForWorkMemory {
		m.memProgress = 0
	}

	// Return the header and target.
	return header, m.target, nil
}
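
A sketch of how a caller might grind nonces on the header returned by HeaderForWork. It assumes the header's Nonce field is an 8-byte array and that a header "solves" the block when the hash of its marshalled encoding is below the target; both the hashing scheme and the helper itself are assumptions for illustration, not part of the snippet above (imports: bytes, encoding/binary, plus the crypto, encoding, and types packages).
// grindHeader is a hypothetical nonce-grinding loop. It tries up to
// `attempts` nonces and reports whether one produced an ID below the target.
func grindHeader(header types.BlockHeader, target types.Target, attempts uint64) (types.BlockHeader, bool) {
	for n := uint64(0); n < attempts; n++ {
		// Assumed: Nonce is an 8-byte array that can be overwritten in place.
		binary.LittleEndian.PutUint64(header.Nonce[:], n)
		// Assumed: the block ID is the hash of the marshalled header.
		id := crypto.HashBytes(encoding.Marshal(header))
		if bytes.Compare(id[:], target[:]) < 0 {
			return header, true
		}
	}
	return header, false
}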
Example #7
// newTestingFile initializes a file object with random parameters.
func newTestingFile() *file {
	key, _ := crypto.GenerateTwofishKey()
	data, _ := crypto.RandBytes(8)
	nData, _ := crypto.RandIntn(10)
	nParity, _ := crypto.RandIntn(10)
	rsc, _ := NewRSCode(nData+1, nParity+1)

	return &file{
		name:        "testfile-" + strconv.Itoa(int(data[0])),
		size:        encoding.DecUint64(data[1:5]),
		masterKey:   key,
		erasureCode: rsc,
		pieceSize:   encoding.DecUint64(data[6:8]),
	}
}
Example #8
// blockForWork returns a block that is ready for nonce grinding, including
// correct miner payouts and a random transaction to prevent collisions and
// overlapping work with other blocks being mined in parallel or for different
// forks (during testing).
func (m *Miner) blockForWork() types.Block {
	b := m.unsolvedBlock

	// Update the timestamp.
	if b.Timestamp < types.CurrentTimestamp() {
		b.Timestamp = types.CurrentTimestamp()
	}

	// Update the address + payouts.
	_ = m.checkAddress() // Error ignored: if address generation failed, there is nothing to do about it here (perhaps log it).
	b.MinerPayouts = []types.SiacoinOutput{{Value: b.CalculateSubsidy(m.height + 1), UnlockHash: m.address}}

	// Add an arb-data txn to the block to create a unique merkle root.
	randBytes, _ := crypto.RandBytes(types.SpecifierLen)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	b.Transactions = append([]types.Transaction{randTxn}, b.Transactions...)

	return b
}
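
A brief sketch of the uniqueness property the random arb-data transaction is meant to provide: two consecutive calls should yield blocks with distinct Merkle roots. The MerkleRoot method on types.Block is assumed here, and the helper itself is illustrative.
// blocksAreUnique is a hypothetical check that consecutive blocks-for-work
// differ, thanks to the random arb-data transaction prepended to each one.
func blocksAreUnique(m *Miner) bool {
	b1 := m.blockForWork()
	b2 := m.blockForWork()
	return b1.MerkleRoot() != b2.MerkleRoot()
}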
Example #9
// blockForWork creates a block ready for nonce grinding, along with the
// current target. Computing the Merkle root of a block requires encoding and
// hashing in a specific way, which are implementation details that external
// miners shouldn't have to worry about. All blocks returned are unique, which
// means all miners can safely start at the '0' nonce.
func (m *Miner) blockForWork() (types.Block, types.Target) {
	// Determine the timestamp.
	blockTimestamp := types.CurrentTimestamp()
	if blockTimestamp < m.earliestTimestamp {
		blockTimestamp = m.earliestTimestamp
	}

	// Create the miner payouts.
	subsidy := types.CalculateCoinbase(m.height)
	for _, txn := range m.transactions {
		for _, fee := range txn.MinerFees {
			subsidy = subsidy.Add(fee)
		}
	}
	blockPayouts := []types.SiacoinOutput{types.SiacoinOutput{Value: subsidy, UnlockHash: m.address}}

	// Create the list of transactions, including the randomized transaction.
	// The transactions are assembled by calling append(singleElem,
	// existingSlice...) because doing it the reverse way has side effects:
	// it can create a race condition and ultimately change the block hash for
	// other parts of the program. This is because slices share an underlying
	// backing array and are not immutable values. Using the builtin `copy`
	// function when passing objects like blocks around may avoid this problem.
	randBytes, _ := crypto.RandBytes(types.SpecifierLen)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	blockTransactions := append([]types.Transaction{randTxn}, m.transactions...)

	// Assemble the block
	b := types.Block{
		ParentID:     m.parent,
		Timestamp:    blockTimestamp,
		MinerPayouts: blockPayouts,
		Transactions: blockTransactions,
	}
	return b, m.target
}
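
A small standalone sketch of the slice-aliasing behavior the comment above warns about: appending to a slice with spare capacity writes into the shared backing array, while building a fresh slice with append([]T{x}, s...) copies the elements. Names are illustrative; only fmt is needed.
// sliceAliasingDemo is a hypothetical demonstration of why the transactions
// are prepended into a fresh slice rather than appended to the shared one.
func sliceAliasingDemo() {
	shared := make([]int, 2, 4) // len 2, cap 4: room to grow in place
	shared[0], shared[1] = 1, 2

	grown := append(shared, 3) // reuses shared's backing array
	grown[0] = 99              // mutates the element seen through `shared`
	fmt.Println(shared[0])     // prints 99: the original data changed

	fresh := append([]int{0}, shared...) // new backing array, copies elements
	fresh[1] = 42
	fmt.Println(shared[0]) // still 99: `shared` is unaffected by `fresh`
}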
Example #10
// blockForWork returns a block that is ready for nonce grinding, including
// correct miner payouts and a random transaction to prevent collisions and
// overlapping work with other blocks being mined in parallel or for different
// forks (during testing).
func (m *Miner) blockForWork() types.Block {
	b := m.persist.UnsolvedBlock

	// Update the timestamp.
	if b.Timestamp < types.CurrentTimestamp() {
		b.Timestamp = types.CurrentTimestamp()
	}

	// Update the address + payouts.
	err := m.checkAddress()
	if err != nil {
		m.log.Println(err)
	}
	b.MinerPayouts = []types.SiacoinOutput{{Value: b.CalculateSubsidy(m.persist.Height + 1), UnlockHash: m.persist.Address}}

	// Add an arb-data txn to the block to create a unique merkle root.
	randBytes, _ := crypto.RandBytes(types.SpecifierLen)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	b.Transactions = append([]types.Transaction{randTxn}, b.Transactions...)

	return b
}
Example #11
// TestUploadDownload tests the Upload and Download methods using a mock
// contractor.
func TestUploadDownload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// create renter with mocked contractor
	hc := &uploadDownloadContractor{
		sectors: make(map[crypto.Hash][]byte),
	}
	rt, err := newContractorTester("TestUploadDownload", nil, hc)
	if err != nil {
		t.Fatal(err)
	}

	// create a file
	data, err := crypto.RandBytes(777)
	if err != nil {
		t.Fatal(err)
	}
	source := filepath.Join(build.SiaTestingDir, "renter", "TestUploadDownload", "test.dat")
	err = ioutil.WriteFile(source, data, 0600)
	if err != nil {
		t.Fatal(err)
	}

	// use 1-1 erasure code, because we'll only have one host
	rsc, _ := NewRSCode(1, 1)

	// upload file
	err = rt.renter.Upload(modules.FileUploadParams{
		Source:      source,
		SiaPath:     "foo",
		ErasureCode: rsc,
		// Upload will use sane defaults for other params
	})
	if err != nil {
		t.Fatal(err)
	}
	files := rt.renter.FileList()
	if len(files) != 1 {
		t.Fatal("expected 1 file, got", len(files))
	}

	// wait for the repair loop to fully upload the file
	for i := 0; i < 10 && !files[0].Available; i++ {
		files = rt.renter.FileList()
		time.Sleep(time.Second)
	}
	if !files[0].Available {
		t.Fatal("file did not reach full availability:", files[0].UploadProgress)
	}

	// download the file
	dest := filepath.Join(build.SiaTestingDir, "renter", "TestUploadDownload", "test.dat")
	err = rt.renter.Download("foo", dest)
	if err != nil {
		t.Fatal(err)
	}

	downData, err := ioutil.ReadFile(dest)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(downData, data) {
		t.Fatal("recovered data does not match original")
	}
}
// TestIntegrationModify tests that the contractor can modify a previously-
// uploaded sector.
func TestIntegrationModify(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	h, c, _, err := newTestingTrio("TestIntegrationModify")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	// insert the sector
	_, err = editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// modify the sector
	oldRoot := crypto.MerkleRoot(data)
	offset, newData := uint64(10), []byte{1, 2, 3, 4, 5}
	copy(data[offset:], newData)
	newRoot := crypto.MerkleRoot(data)
	contract = c.contracts[contract.ID]
	editor, err = c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Modify(oldRoot, newRoot, offset, newData)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}
}
Example #13
// uploadFile uploads a file to the host from the tester's renter. The data
// used to make the file is returned. The nickname of the file in the renter is
// the same as the name provided as input.
func (ht *hostTester) uploadFile(path string, renew bool) ([]byte, error) {
	// Check that renting is initialized properly.
	err := ht.initRenting()
	if err != nil {
		return nil, err
	}

	// Create a file to upload to the host.
	source := filepath.Join(ht.persistDir, path+".testfile")
	datasize := uint64(1024)
	data, err := crypto.RandBytes(int(datasize))
	if err != nil {
		return nil, err
	}
	dataMerkleRoot, err := crypto.ReaderMerkleRoot(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	err = ioutil.WriteFile(source, data, 0600)
	if err != nil {
		return nil, err
	}

	// Have the renter upload to the host.
	rsc, err := renter.NewRSCode(1, 1)
	if err != nil {
		return nil, err
	}
	fup := modules.FileUploadParams{
		Source:      source,
		SiaPath:     path,
		Duration:    testUploadDuration,
		Renew:       renew,
		ErasureCode: rsc,
		PieceSize:   0,
	}
	err = ht.renter.Upload(fup)
	if err != nil {
		return nil, err
	}

	// Wait until the upload has finished.
	for i := 0; i < 100; i++ {
		time.Sleep(time.Millisecond * 100)

		// Asynchronous processes in the host access obligations by id,
		// therefore a lock is required to scan the set of obligations.
		if func() bool {
			ht.host.mu.Lock()
			defer ht.host.mu.Unlock()

			for _, ob := range ht.host.obligationsByID {
				if dataMerkleRoot == ob.merkleRoot() {
					return true
				}
			}
			return false
		}() {
			break
		}
	}

	// Block until the renter reports at least 50% upload progress - it takes
	// time for the contract to confirm renter-side.
	complete := false
	for i := 0; i < 50 && !complete; i++ {
		fileInfos := ht.renter.FileList()
		for _, fileInfo := range fileInfos {
			if fileInfo.UploadProgress >= 50 {
				complete = true
			}
		}
		if complete {
			break
		}
		time.Sleep(time.Millisecond * 50)
	}
	if !complete {
		return nil, errors.New("renter never recognized that the upload completed")
	}

	// The remaining checks can be performed under lock.
	ht.host.mu.Lock()
	defer ht.host.mu.Unlock()

	if len(ht.host.obligationsByID) != 1 {
		return nil, errors.New("expecting a single obligation")
	}
	for _, ob := range ht.host.obligationsByID {
		if ob.fileSize() >= datasize {
			return data, nil
		}
	}
	return nil, errors.New("ht.uploadFile: upload failed")
}
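
The two wait loops above follow the same poll-until-true pattern; a generic helper sketch of that pattern is below (name and signature are illustrative, using only the time package).
// waitFor is a hypothetical polling helper: it retries cond at a fixed
// interval until it returns true or the attempts are exhausted.
func waitFor(attempts int, interval time.Duration, cond func() bool) bool {
	for i := 0; i < attempts; i++ {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}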
Example #14
// TestIntegrationAutoRenew tests that contracts are automatically renewed at
// the expected block height.
func TestIntegrationAutoRenew(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	_, c, m, err := newTestingTrio("TestIntegrationAutoRenew")
	if err != nil {
		t.Fatal(err)
	}

	// form a contract with the host
	a := modules.Allowance{
		Funds:       types.SiacoinPrecision.Mul64(100), // 100 SC
		Hosts:       1,
		Period:      50,
		RenewWindow: 10,
	}
	err = c.SetAllowance(a)
	if err != nil {
		t.Fatal(err)
	}
	contract := c.Contracts()[0]

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	// insert the sector
	root, err := editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// set allowance to a lower period; Contractor will auto-renew when
	// current contract expires
	a.Period--
	err = c.SetAllowance(a)
	if err != nil {
		t.Fatal(err)
	}

	// mine until we enter the renew window
	renewHeight := contract.EndHeight() - c.allowance.RenewWindow
	for c.blockHeight < renewHeight {
		_, err := m.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}

	// check renewed contract
	contract = c.Contracts()[0]
	if contract.FileContract.FileMerkleRoot != root {
		t.Fatal(contract.FileContract.FileMerkleRoot)
	} else if contract.FileContract.FileSize != modules.SectorSize {
		t.Fatal(contract.FileContract.FileSize)
	} else if contract.FileContract.RevisionNumber != 0 {
		t.Fatal(contract.FileContract.RevisionNumber)
	} else if contract.FileContract.WindowStart != c.blockHeight+c.allowance.Period {
		t.Fatal(contract.FileContract.WindowStart)
	}
}
Example #15
// TestErasureDownload tests parallel downloading of erasure-coded data. It
// mocks the fetcher interface in order to directly test the downloading
// algorithm.
func TestErasureDownload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// generate data
	const dataSize = 777
	data, err := crypto.RandBytes(dataSize)
	if err != nil {
		t.Fatal(err)
	}

	// create Reed-Solomon encoder
	rsc, err := NewRSCode(2, 10)
	if err != nil {
		t.Fatal(err)
	}

	// create hosts
	const pieceSize = 10
	hosts := make([]fetcher, rsc.NumPieces())
	for i := range hosts {
		hosts[i] = &testFetcher{
			sectors:   make(map[crypto.Hash][]byte),
			pieceMap:  make(map[uint64][]pieceData),
			pieceSize: pieceSize,

			delay:    time.Millisecond,
			failRate: 5, // 20% failure rate
		}
	}
	// make one host really slow
	hosts[0].(*testFetcher).delay = 100 * time.Millisecond
	// make one host always fail
	hosts[1].(*testFetcher).failRate = 1

	// upload data to hosts
	r := bytes.NewReader(data) // makes chunking easier
	chunk := make([]byte, pieceSize*rsc.MinPieces())
	var i uint64
	for i = uint64(0); ; i++ {
		_, err := io.ReadFull(r, chunk)
		if err == io.EOF {
			break
		} else if err != nil && err != io.ErrUnexpectedEOF {
			t.Fatal(err)
		}
		pieces, err := rsc.Encode(chunk)
		if err != nil {
			t.Fatal(err)
		}
		for j, p := range pieces {
			root := crypto.MerkleRoot(p)
			host := hosts[j%len(hosts)].(*testFetcher) // distribute evenly
			host.pieceMap[i] = append(host.pieceMap[i], pieceData{
				Chunk:      uint64(i),
				Piece:      uint64(j),
				MerkleRoot: root,
			})
			host.sectors[root] = p
		}
	}

	// check hosts (not strictly necessary)
	err = checkHosts(hosts, rsc.MinPieces(), i)
	if err != nil {
		t.Fatal(err)
	}

	// download data
	d := newFile("foo", rsc, pieceSize, dataSize).newDownload(hosts, "")
	buf := new(bytes.Buffer)
	err = d.run(buf)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(buf.Bytes(), data) {
		t.Fatal("recovered data does not match original")
	}

	/*
		// These metrics can be used to assess the efficiency of the download
		// algorithm.

		totFetch := 0
		for i, h := range hosts {
			h := h.(*testHost)
			t.Logf("Host %2d:  Fetched: %v/%v", i, h.nFetch, h.nAttempt)
			totFetch += h.nAttempt
		}
		t.Log("Optimal fetches:", i*uint64(rsc.MinPieces()))
		t.Log("Total fetches:  ", totFetch)
	*/
}
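
The upload loop above leans on io.ReadFull's EOF semantics to split a stream into fixed-size chunks; a standalone sketch of that pattern follows (name is illustrative, standard library only).
// readChunks is a hypothetical helper that reads fixed-size chunks from r,
// tolerating a short final chunk, mirroring the loop in the test above.
func readChunks(r io.Reader, chunkSize int) ([][]byte, error) {
	var chunks [][]byte
	for {
		chunk := make([]byte, chunkSize)
		n, err := io.ReadFull(r, chunk)
		if err == io.EOF {
			break // nothing left to read
		} else if err != nil && err != io.ErrUnexpectedEOF {
			return nil, err // a real read error
		}
		chunks = append(chunks, chunk[:n])
		if err == io.ErrUnexpectedEOF {
			break // short final chunk; the stream is exhausted
		}
	}
	return chunks, nil
}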
Example #16
// TestStorageProofBoundaries creates file contracts and submits storage proofs
// for them, probing segment boundaries (first segment, last segment,
// incomplete segment, etc.).
func TestStorageProofBoundaries(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestStorageProofBoundaries")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// Mine enough blocks to put us beyond the testing hardfork.
	for i := 0; i < 10; i++ {
		_, err = cst.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}

	// Try storage proofs on data ranging from 0 bytes up to just past two
	// segments. Perform the operation a few times because we can't control
	// which segment gets selected - it is randomly decided by the block.
	segmentRange := []int{0, 1, 2, 3, 4, 5, 15, 25, 30, 32, 62, 63, 64, 65, 66, 70, 81, 89, 90, 126, 127, 128, 129}
	for i := 0; i < 3; i++ {
		randData, err := crypto.RandBytes(140)
		if err != nil {
			t.Fatal(err)
		}

		// Create a file contract for all sizes of the data between 0 and 2
		// segments and put them in the transaction pool.
		var fcids []types.FileContractID
		for _, k := range segmentRange {
			// Create the data and the file contract around it.
			truncatedData := randData[:k]
			fc := types.FileContract{
				FileSize:           uint64(k),
				FileMerkleRoot:     crypto.MerkleRoot(truncatedData),
				WindowStart:        cst.cs.dbBlockHeight() + 2,
				WindowEnd:          cst.cs.dbBlockHeight() + 4,
				Payout:             types.NewCurrency64(500), // Too small to be subject to siafund fee.
				ValidProofOutputs:  []types.SiacoinOutput{{Value: types.NewCurrency64(500)}},
				MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(500)}},
			}

			// Create a transaction around the file contract and add it to the
			// transaction pool.
			b := cst.wallet.StartTransaction()
			err = b.FundSiacoins(types.NewCurrency64(500))
			if err != nil {
				t.Fatal(err)
			}
			b.AddFileContract(fc)
			txnSet, err := b.Sign(true)
			if err != nil {
				t.Fatal(err)
			}
			err = cst.tpool.AcceptTransactionSet(txnSet)
			if err != nil {
				t.Fatal(err)
			}

			// Store the file contract id for later when building the storage
			// proof.
			fcids = append(fcids, txnSet[len(txnSet)-1].FileContractID(0))
		}

		// Mine blocks to get the file contracts into the blockchain and
		// confirming.
		for j := 0; j < 2; j++ {
			_, err = cst.miner.AddBlock()
			if err != nil {
				t.Fatal(err)
			}
		}

		// Create storage proofs for the file contracts and submit the proofs
		// to the blockchain.
		for j, k := range segmentRange {
			// Build the storage proof.
			truncatedData := randData[:k]
			proofIndex, err := cst.cs.StorageProofSegment(fcids[j])
			if err != nil {
				t.Fatal(err)
			}
			base, hashSet := crypto.MerkleProof(truncatedData, proofIndex)
			sp := types.StorageProof{
				ParentID: fcids[j],
				HashSet:  hashSet,
			}
			copy(sp.Segment[:], base)

			if k > 0 {
				// Try submitting an empty storage proof, to make sure that the
				// hardfork code didn't accidentally allow empty storage proofs
				// in situations other than file sizes with 0 bytes.
				badSP := types.StorageProof{ParentID: fcids[j]}
				badTxn := types.Transaction{
					StorageProofs: []types.StorageProof{badSP},
				}
				if sp.Segment == badSP.Segment {
					continue
				}
				err = cst.tpool.AcceptTransactionSet([]types.Transaction{badTxn})
				if err == nil {
					t.Fatal("An empty storage proof got into the transaction pool with non-empty data")
				}
			}

			// Submit the storage proof to the blockchain in a transaction.
			txn := types.Transaction{
				StorageProofs: []types.StorageProof{sp},
			}
			err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn})
			if err != nil {
				t.Fatal(err, "-", j, k)
			}
		}

		// Mine blocks to get the storage proofs on the blockchain.
		for j := 0; j < 2; j++ {
			_, err = cst.miner.AddBlock()
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
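
A sketch of how a storage proof built with crypto.MerkleProof might be sanity-checked locally before submission. The verification helper crypto.VerifySegment and the crypto.CalculateLeaves function, along with their signatures, are assumptions here and are not confirmed by the test above.
// proofLooksValid is a hypothetical local check of a storage proof.
// Both VerifySegment and CalculateLeaves are assumed APIs.
func proofLooksValid(data []byte, proofIndex uint64) bool {
	root := crypto.MerkleRoot(data)
	base, hashSet := crypto.MerkleProof(data, proofIndex)
	numSegments := crypto.CalculateLeaves(uint64(len(data)))
	return crypto.VerifySegment(base, hashSet, numSegments, proofIndex, root)
}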
// TestIntegrationRenew tests that the contractor can renew a previously-
// formed file contract.
func TestIntegrationRenew(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	h, c, _, err := newTestingTrio("TestIntegrationRenew")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	// insert the sector
	root, err := editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// renew the contract
	oldContract := c.contracts[contract.ID]
	contract, err = c.managedRenew(oldContract, modules.SectorSize*10, c.blockHeight+200)
	if err != nil {
		t.Fatal(err)
	}

	// check renewed contract
	if contract.FileContract.FileMerkleRoot != root {
		t.Fatal(contract.FileContract.FileMerkleRoot)
	} else if contract.FileContract.FileSize != modules.SectorSize {
		t.Fatal(contract.FileContract.FileSize)
	} else if contract.FileContract.RevisionNumber != 0 {
		t.Fatal(contract.FileContract.RevisionNumber)
	} else if contract.FileContract.WindowStart != c.blockHeight+200 {
		t.Fatal(contract.FileContract.WindowStart)
	}
	// check that Merkle roots are intact
	if len(contract.MerkleRoots) != len(oldContract.MerkleRoots) {
		t.Fatal(len(contract.MerkleRoots), len(oldContract.MerkleRoots))
	}

	// download the renewed contract
	downloader, err := c.Downloader(contract)
	if err != nil {
		t.Fatal(err)
	}
	retrieved, err := downloader.Sector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, retrieved) {
		t.Fatal("downloaded data does not match original")
	}
	err = downloader.Close()
	if err != nil {
		t.Fatal(err)
	}

	// renew to a lower height
	oldContract = c.contracts[contract.ID]
	contract, err = c.managedRenew(oldContract, modules.SectorSize*10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}
	if contract.FileContract.WindowStart != c.blockHeight+100 {
		t.Fatal(contract.FileContract.WindowStart)
	}
	// check that Merkle roots are intact
	if len(contract.MerkleRoots) != len(oldContract.MerkleRoots) {
		t.Fatal(len(contract.MerkleRoots), len(oldContract.MerkleRoots))
	}

	// revise the contract
	editor, err = c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err = crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	// insert the sector
	_, err = editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}
}
Example #18
// TestIntegrationHosting tests that the host correctly receives payment for
// hosting files.
func TestIntegrationHosting(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	st, err := createServerTester("TestIntegrationHosting")
	if err != nil {
		t.Fatal(err)
	}

	// announce the host
	err = st.stdGetAPI("/host/announce?address=" + string(st.host.Address()))
	if err != nil {
		t.Fatal(err)
	}
	// we need to announce twice, or the renter will complain about not having enough hosts
	loopAddr := "127.0.0.1:" + st.host.Address().Port()
	err = st.stdGetAPI("/host/announce?address=" + loopAddr)
	if err != nil {
		t.Fatal(err)
	}

	// wait for announcement to register
	st.miner.AddBlock()
	var hosts ActiveHosts
	st.getAPI("/hostdb/hosts/active", &hosts)
	if len(hosts.Hosts) == 0 {
		t.Fatal("host announcement not seen")
	}

	// create a file
	path := filepath.Join(build.SiaTestingDir, "api", "TestIntegrationHosting", "test.dat")
	data, err := crypto.RandBytes(1024)
	if err != nil {
		t.Fatal(err)
	}
	err = ioutil.WriteFile(path, data, 0600)
	if err != nil {
		t.Fatal(err)
	}

	// upload to host
	err = st.stdGetAPI("/renter/files/upload?nickname=test&duration=10&source=" + path)
	if err != nil {
		t.Fatal(err)
	}
	var fi []FileInfo
	for len(fi) != 1 || fi[0].UploadProgress != 100 {
		st.getAPI("/renter/files/list", &fi)
		time.Sleep(3 * time.Second)
	}

	// mine blocks until storage proof is complete
	for i := 0; i < 20+int(types.MaturityDelay); i++ {
		st.miner.AddBlock()
	}

	// check profit
	var hi modules.HostInfo
	st.getAPI("/host/status", &hi)
	expProfit := "382129999999997570526"
	if hi.Profit.String() != expProfit {
		t.Fatalf("host's profit was not affected: expected %v, got %v", expProfit, hi.Profit)
	}
}
Example #19
// testFileContractRevision creates and revises a file contract on the
// blockchain.
func (cst *consensusSetTester) testFileContractRevision() {
	// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
	// avoids nondeterministic failures, related to buggy old code, when
	// producing storage proofs.
	for cst.cs.dbBlockHeight() <= 10 {
		_, err := cst.miner.AddBlock()
		if err != nil {
			panic(err)
		}
	}

	// Create a file (as a byte slice) that will be used for the file
	// contract.
	filesize := uint64(4e3)
	file, err := crypto.RandBytes(int(filesize))
	if err != nil {
		panic(err)
	}
	merkleRoot := crypto.MerkleRoot(file)

	// Create a spendable unlock hash for the file contract.
	sk, pk, err := crypto.GenerateKeyPair()
	if err != nil {
		panic(err)
	}
	uc := types.UnlockConditions{
		PublicKeys: []types.SiaPublicKey{{
			Algorithm: types.SignatureEd25519,
			Key:       pk[:],
		}},
		SignaturesRequired: 1,
	}

	// Create a file contract that will be revised.
	validProofDest := randAddress()
	payout := types.NewCurrency64(400e6)
	fc := types.FileContract{
		FileSize:       filesize,
		FileMerkleRoot: crypto.Hash{},
		WindowStart:    cst.cs.dbBlockHeight() + 2,
		WindowEnd:      cst.cs.dbBlockHeight() + 3,
		Payout:         payout,
		ValidProofOutputs: []types.SiacoinOutput{{
			UnlockHash: validProofDest,
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
		MissedProofOutputs: []types.SiacoinOutput{{
			UnlockHash: types.UnlockHash{},
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
		UnlockHash: uc.UnlockHash(),
	}

	// Submit a transaction with the file contract.
	txnBuilder := cst.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(payout)
	if err != nil {
		panic(err)
	}
	fcIndex := txnBuilder.AddFileContract(fc)
	txnSet, err := txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Submit a revision for the file contract.
	ti := len(txnSet) - 1
	fcid := txnSet[ti].FileContractID(fcIndex)
	fcr := types.FileContractRevision{
		ParentID:          fcid,
		UnlockConditions:  uc,
		NewRevisionNumber: 69292,

		NewFileSize:           filesize,
		NewFileMerkleRoot:     merkleRoot,
		NewWindowStart:        cst.cs.dbBlockHeight() + 1,
		NewWindowEnd:          cst.cs.dbBlockHeight() + 2,
		NewValidProofOutputs:  fc.ValidProofOutputs,
		NewMissedProofOutputs: fc.MissedProofOutputs,
		NewUnlockHash:         uc.UnlockHash(),
	}
	ts := types.TransactionSignature{
		ParentID:       crypto.Hash(fcid),
		CoveredFields:  types.CoveredFields{WholeTransaction: true},
		PublicKeyIndex: 0,
	}
	txn := types.Transaction{
		FileContractRevisions: []types.FileContractRevision{fcr},
		TransactionSignatures: []types.TransactionSignature{ts},
	}
	encodedSig, err := crypto.SignHash(txn.SigHash(0), sk)
	if err != nil {
		panic(err)
	}
	txn.TransactionSignatures[0].Signature = encodedSig[:]
	err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn})
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Create and submit a storage proof for the file contract.
	segmentIndex, err := cst.cs.StorageProofSegment(fcid)
	if err != nil {
		panic(err)
	}
	segment, hashSet := crypto.MerkleProof(file, segmentIndex)
	sp := types.StorageProof{
		ParentID: fcid,
		HashSet:  hashSet,
	}
	copy(sp.Segment[:], segment)
	txnBuilder = cst.wallet.StartTransaction()
	txnBuilder.AddStorageProof(sp)
	txnSet, err = txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Check that the file contract has been removed.
	_, err = cst.cs.dbGetFileContract(fcid)
	if err != errNilItem {
		panic("file contract should not exist in the database")
	}
}
// TestResync tests that the contractor can resync with a host after being
// interrupted during contract revision.
func TestResync(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	// create testing trio
	h, c, _, err := newTestingTrio("TestResync")
	if err != nil {
		t.Fatal(err)
	}

	// get the host's entry from the db
	hostEntry, ok := c.hdb.Host(h.ExternalSettings().NetAddress)
	if !ok {
		t.Fatal("no entry for host in db")
	}

	// form a contract with the host
	contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100)
	if err != nil {
		t.Fatal(err)
	}

	// revise the contract
	editor, err := c.Editor(contract)
	if err != nil {
		t.Fatal(err)
	}
	data, err := crypto.RandBytes(int(modules.SectorSize))
	if err != nil {
		t.Fatal(err)
	}
	root, err := editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	err = editor.Close()
	if err != nil {
		t.Fatal(err)
	}

	// download the data
	contract = c.contracts[contract.ID]
	downloader, err := c.Downloader(contract)
	if err != nil {
		t.Fatal(err)
	}
	retrieved, err := downloader.Sector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, retrieved) {
		t.Fatal("downloaded data does not match original")
	}
	err = downloader.Close()
	if err != nil {
		t.Fatal(err)
	}
	contract = c.contracts[contract.ID]

	// corrupt contract and delete its cachedRevision
	badContract := contract
	badContract.LastRevision.NewRevisionNumber--
	badContract.LastRevisionTxn.TransactionSignatures = nil // delete signatures

	c.mu.Lock()
	delete(c.cachedRevisions, contract.ID)
	c.mu.Unlock()

	// Editor and Downloader should fail with the bad contract
	_, err = c.Editor(badContract)
	if !proto.IsRevisionMismatch(err) {
		t.Fatal("expected revision mismatch, got", err)
	}
	_, err = c.Downloader(badContract)
	if !proto.IsRevisionMismatch(err) {
		t.Fatal("expected revision mismatch, got", err)
	}

	// add cachedRevision
	cachedRev := cachedRevision{contract.LastRevision, contract.MerkleRoots}
	c.mu.Lock()
	c.cachedRevisions[contract.ID] = cachedRev
	c.mu.Unlock()

	// Editor and Downloader should now succeed after loading the cachedRevision
	editor, err = c.Editor(badContract)
	if err != nil {
		t.Fatal(err)
	}
	editor.Close()

	downloader, err = c.Downloader(badContract)
	if err != nil {
		t.Fatal(err)
	}
	downloader.Close()

	// corrupt contract and delete its cachedRevision
	badContract = contract
	badContract.LastRevision.NewRevisionNumber--
	badContract.MerkleRoots = nil // delete Merkle roots

	c.mu.Lock()
	delete(c.cachedRevisions, contract.ID)
	c.mu.Unlock()

	// Editor and Downloader should fail with the bad contract
	_, err = c.Editor(badContract)
	if !proto.IsRevisionMismatch(err) {
		t.Fatal("expected revision mismatch, got", err)
	}
	_, err = c.Downloader(badContract)
	if !proto.IsRevisionMismatch(err) {
		t.Fatal("expected revision mismatch, got", err)
	}

	// add cachedRevision
	c.mu.Lock()
	c.cachedRevisions[contract.ID] = cachedRev
	c.mu.Unlock()

	// should be able to upload after loading the cachedRevision
	editor, err = c.Editor(badContract)
	if err != nil {
		t.Fatal(err)
	}
	_, err = editor.Upload(data)
	if err != nil {
		t.Fatal(err)
	}
	editor.Close()
}
Example #21
// testValidStorageProofBlocks adds a block with a file contract, and then
// submits a storage proof for that file contract.
func (cst *consensusSetTester) testValidStorageProofBlocks() {
	// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
	// avoids nondeterministic failures, related to buggy old code, when
	// producing storage proofs.
	for cst.cs.dbBlockHeight() <= 10 {
		_, err := cst.miner.AddBlock()
		if err != nil {
			panic(err)
		}
	}

	// Create a file (as a byte slice) that will be used for the file
	// contract.
	filesize := uint64(4e3)
	file, err := crypto.RandBytes(int(filesize))
	if err != nil {
		panic(err)
	}
	merkleRoot := crypto.MerkleRoot(file)

	// Create a file contract that will be successful.
	validProofDest := randAddress()
	payout := types.NewCurrency64(400e6)
	fc := types.FileContract{
		FileSize:       filesize,
		FileMerkleRoot: merkleRoot,
		WindowStart:    cst.cs.dbBlockHeight() + 1,
		WindowEnd:      cst.cs.dbBlockHeight() + 2,
		Payout:         payout,
		ValidProofOutputs: []types.SiacoinOutput{{
			UnlockHash: validProofDest,
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
		MissedProofOutputs: []types.SiacoinOutput{{
			UnlockHash: types.UnlockHash{},
			Value:      types.PostTax(cst.cs.dbBlockHeight(), payout),
		}},
	}

	// Submit a transaction with the file contract.
	oldSiafundPool := cst.cs.dbGetSiafundPool()
	txnBuilder := cst.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(payout)
	if err != nil {
		panic(err)
	}
	fcIndex := txnBuilder.AddFileContract(fc)
	txnSet, err := txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Check that the siafund pool was increased by the tax on the payout.
	siafundPool := cst.cs.dbGetSiafundPool()
	if siafundPool.Cmp(oldSiafundPool.Add(types.Tax(cst.cs.dbBlockHeight()-1, payout))) != 0 {
		panic("siafund pool was not increased correctly")
	}

	// Check that the file contract made it into the database.
	ti := len(txnSet) - 1
	fcid := txnSet[ti].FileContractID(fcIndex)
	_, err = cst.cs.dbGetFileContract(fcid)
	if err != nil {
		panic(err)
	}

	// Create and submit a storage proof for the file contract.
	segmentIndex, err := cst.cs.StorageProofSegment(fcid)
	if err != nil {
		panic(err)
	}
	segment, hashSet := crypto.MerkleProof(file, segmentIndex)
	sp := types.StorageProof{
		ParentID: fcid,
		HashSet:  hashSet,
	}
	copy(sp.Segment[:], segment)
	txnBuilder = cst.wallet.StartTransaction()
	txnBuilder.AddStorageProof(sp)
	txnSet, err = txnBuilder.Sign(true)
	if err != nil {
		panic(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		panic(err)
	}
	_, err = cst.miner.AddBlock()
	if err != nil {
		panic(err)
	}

	// Check that the file contract has been removed.
	_, err = cst.cs.dbGetFileContract(fcid)
	if err != errNilItem {
		panic("file contract should not exist in the database")
	}

	// Check that the siafund pool has not changed.
	postProofPool := cst.cs.dbGetSiafundPool()
	if postProofPool.Cmp(siafundPool) != 0 {
		panic("siafund pool should not change after submitting a storage proof")
	}

	// Check that a delayed output was created for the valid proof.
	spoid := fcid.StorageProofOutputID(types.ProofValid, 0)
	dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, spoid)
	if err != nil {
		panic(err)
	}
	if dsco.UnlockHash != fc.ValidProofOutputs[0].UnlockHash {
		panic("wrong unlock hash in dsco")
	}
	if dsco.Value.Cmp(fc.ValidProofOutputs[0].Value) != 0 {
		panic("wrong sco value in dsco")
	}
}