// newHostUploader negotiates an initial file contract with the specified host
// and returns a hostUploader, which satisfies the Uploader interface.
func (r *Renter) newHostUploader(settings modules.HostSettings, filesize uint64, duration types.BlockHeight, masterKey crypto.TwofishKey) (*hostUploader, error) {
	hu := &hostUploader{
		settings:  settings,
		masterKey: masterKey,
		tree:      crypto.NewTree(),
		renter:    r,
	}

	// TODO: maybe do this later?
	err := hu.negotiateContract(filesize, duration)
	if err != nil {
		return nil, err
	}

	// initiate the revision loop
	hu.conn, err = net.DialTimeout("tcp", string(hu.settings.IPAddress), 15*time.Second)
	if err != nil {
		return nil, err
	}
	if err := encoding.WriteObject(hu.conn, modules.RPCRevise); err != nil {
		return nil, err
	}
	if err := encoding.WriteObject(hu.conn, hu.contract.ID); err != nil {
		return nil, err
	}

	return hu, nil
}
// newHostUploader initiates the contract revision process with a host, and
// returns a hostUploader, which satisfies the Uploader interface.
func (hdb *HostDB) newHostUploader(hc hostContract) (*hostUploader, error) {
	hdb.mu.RLock()
	settings, ok := hdb.allHosts[hc.IP] // or activeHosts?
	hdb.mu.RUnlock()
	if !ok {
		return nil, errors.New("no record of that host")
	}
	// TODO: check for excessive price again?

	// initiate revision loop
	conn, err := net.DialTimeout("tcp", string(hc.IP), 15*time.Second)
	if err != nil {
		return nil, err
	}
	if err := encoding.WriteObject(conn, modules.RPCRevise); err != nil {
		return nil, err
	}
	if err := encoding.WriteObject(conn, hc.ID); err != nil {
		return nil, err
	}

	// TODO: some sort of acceptance would be good here, so that we know the
	// uploader will actually work. Maybe send the Merkle root?

	hu := &hostUploader{
		contract: hc,
		price:    settings.Price,
		tree:     crypto.NewTree(),
		conn:     conn,
		hdb:      hdb,
	}

	return hu, nil
}
// UnlockHash calculates the root hash of a Merkle tree of the
// UnlockConditions object. The leaves of this tree are formed by taking the
// hash of the timelock, the hash of the public keys (one leaf each), and the
// hash of the number of signatures. The keys are put in the middle because
// Timelock and SignaturesRequired are both low entropy fields; they can be
// protected by having random public keys next to them.
func (uc UnlockConditions) UnlockHash() UnlockHash {
	tree := crypto.NewTree()
	tree.PushObject(uc.Timelock)
	for i := range uc.PublicKeys {
		tree.PushObject(uc.PublicKeys[i])
	}
	tree.PushObject(uc.SignaturesRequired)
	return UnlockHash(tree.Root())
}
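// Illustrative usage sketch (not part of the excerpt above): computing the
// address of single-signature unlock conditions from outside the types
// package. The SiaPublicKey element type of PublicKeys is an assumption here;
// only the field names appear in the method itself. A zero Timelock and a
// SignaturesRequired of 1 describe the common "one key, spendable
// immediately" case.
func exampleUnlockHash(pk types.SiaPublicKey) types.UnlockHash {
	uc := types.UnlockConditions{
		Timelock:           0,
		PublicKeys:         []types.SiaPublicKey{pk},
		SignaturesRequired: 1,
	}
	// The returned hash is the Merkle root of the leaves described in the
	// UnlockHash doc comment: timelock, each public key, signature count.
	return uc.UnlockHash()
}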
// MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle
// tree are composed of the miner outputs (one leaf per payout) and the
// transactions (one leaf per transaction).
func (b Block) MerkleRoot() crypto.Hash {
	tree := crypto.NewTree()
	for _, payout := range b.MinerPayouts {
		tree.PushObject(payout)
	}
	for _, txn := range b.Transactions {
		tree.PushObject(txn)
	}
	return tree.Root()
}
// consensusChecksum grabs a checksum of the consensus set by pushing all of
// the elements in sorted order into a merkle tree and taking the root. All
// consensus sets with the same current block should have identical consensus
// checksums.
func consensusChecksum(tx *bolt.Tx) crypto.Hash {
	// Create a checksum tree.
	tree := crypto.NewTree()

	// For all of the constant buckets, push every key and every value. Buckets
	// are sorted in byte-order, therefore this operation is deterministic.
	consensusSetBuckets := []*bolt.Bucket{
		tx.Bucket(BlockPath),
		tx.Bucket(SiacoinOutputs),
		tx.Bucket(FileContracts),
		tx.Bucket(SiafundOutputs),
		tx.Bucket(SiafundPool),
	}
	for i := range consensusSetBuckets {
		err := consensusSetBuckets[i].ForEach(func(k, v []byte) error {
			tree.Push(k)
			tree.Push(v)
			return nil
		})
		if err != nil {
			manageErr(tx, err)
		}
	}

	// Iterate through all the buckets looking for buckets prefixed with
	// prefixDSCO or prefixFCEX. Buckets are presented in byte-sorted order by
	// name.
	err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
		// If the bucket is not a delayed siacoin output bucket or a file
		// contract expiration bucket, skip.
		if !bytes.HasPrefix(name, prefixDSCO) && !bytes.HasPrefix(name, prefixFCEX) {
			return nil
		}

		// The bucket is a prefixed bucket - add all elements to the tree.
		return b.ForEach(func(k, v []byte) error {
			tree.Push(k)
			tree.Push(v)
			return nil
		})
	})
	if err != nil {
		manageErr(tx, err)
	}

	return tree.Root()
}
// newHostUploader negotiates an initial file contract with the specified host
// and returns a hostUploader, which satisfies the uploader interface.
func (r *Renter) newHostUploader(settings modules.HostSettings, filesize uint64, duration types.BlockHeight, masterKey crypto.TwofishKey) (*hostUploader, error) {
	// reject hosts that are too expensive
	if settings.Price.Cmp(maxPrice) > 0 {
		return nil, errTooExpensive
	}

	hu := &hostUploader{
		settings:  settings,
		masterKey: masterKey,
		tree:      crypto.NewTree(),
		renter:    r,
	}

	// get an address to use for negotiation
	// TODO: use more than one shared address
	if r.cachedAddress == (types.UnlockHash{}) {
		uc, err := r.wallet.NextAddress()
		if err != nil {
			return nil, err
		}
		r.cachedAddress = uc.UnlockHash()
	}

	// TODO: check for existing contract?
	err := hu.negotiateContract(filesize, duration, r.cachedAddress)
	if err != nil {
		return nil, err
	}

	// if negotiation was successful, clear the cached address
	r.cachedAddress = types.UnlockHash{}

	// initiate the revision loop
	hu.conn, err = net.DialTimeout("tcp", string(hu.settings.IPAddress), 15*time.Second)
	if err != nil {
		return nil, err
	}
	if err := encoding.WriteObject(hu.conn, modules.RPCRevise); err != nil {
		return nil, err
	}
	if err := encoding.WriteObject(hu.conn, hu.contract.ID); err != nil {
		return nil, err
	}

	return hu, nil
}
// managedRPCRevise is an RPC that allows a renter to revise a file contract. It will
// read new revisions in a loop until the renter sends a termination signal.
func (h *Host) managedRPCRevise(conn net.Conn) error {
	// read ID of contract to be revised
	var fcid types.FileContractID
	if err := encoding.ReadObject(conn, &fcid, crypto.HashSize); err != nil {
		return errors.New("couldn't read contract ID: " + err.Error())
	}

	// extend the conn deadline while we wait for the lock and rebuild the
	// Merkle tree.
	err := conn.SetDeadline(time.Now().Add(15 * time.Minute))
	if err != nil {
		return err
	}

	h.mu.RLock()
	obligation, exists := h.obligationsByID[fcid]
	h.mu.RUnlock()
	if !exists {
		return errors.New("no record of that contract")
	}

	// need to protect against two simultaneous revisions to the same
	// contract; this can cause inconsistency and data loss, making storage
	// proofs impossible
	//
	// TODO: DOS vector - the host has locked the obligation even though the
	// renter has not proven themselves to be the owner of the file contract.
	obligation.mu.Lock()
	defer obligation.mu.Unlock()

	// open the file in append mode
	file, err := os.OpenFile(obligation.Path, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660)
	if err != nil {
		return err
	}

	// rebuild current Merkle tree
	tree := crypto.NewTree()
	err = tree.ReadSegments(file)
	if err != nil {
		// The Close error does not need to be checked; the filesystem has
		// already produced an error.
		_ = file.Close()
		return err
	}

	// accept new revisions in a loop. The final good transaction will be
	// submitted to the blockchain.
	revisionErr := func() error {
		for {
			// allow 5 minutes between revisions
			err := conn.SetDeadline(time.Now().Add(5 * time.Minute))
			if err != nil {
				return err
			}

			// read proposed revision
			var revTxn types.Transaction
			if err = encoding.ReadObject(conn, &revTxn, types.BlockSizeLimit); err != nil {
				return errors.New("couldn't read revision: " + err.Error())
			}
			// an empty transaction indicates completion
			if revTxn.ID() == (types.Transaction{}).ID() {
				return nil
			}

			// allow 5 minutes for each revision
			err = conn.SetDeadline(time.Now().Add(5 * time.Minute))
			if err != nil {
				return err
			}

			// check revision against original file contract
			h.mu.RLock()
			err = h.considerRevision(revTxn, obligation)
			h.mu.RUnlock()
			if err != nil {
				// There is nothing that can be done if writing to the
				// connection fails.
				_ = encoding.WriteObject(conn, err.Error())
				return err
			}

			// indicate acceptance
			if err := encoding.WriteObject(conn, modules.AcceptResponse); err != nil {
				return errors.New("couldn't write acceptance: " + err.Error())
			}

			// read piece
			// TODO: simultaneously read into tree and file
			rev := revTxn.FileContractRevisions[0]
			piece := make([]byte, rev.NewFileSize-obligation.fileSize())
			_, err = io.ReadFull(conn, piece)
			if err != nil {
				return errors.New("couldn't read piece data: " + err.Error())
			}

			// verify Merkle root
			err = tree.ReadSegments(bytes.NewReader(piece))
			if err != nil {
				return errors.New("couldn't verify Merkle root: " + err.Error())
			}
			if tree.Root() != rev.NewFileMerkleRoot {
				return errors.New("revision has bad Merkle root")
			}

			// manually sign the transaction
			revTxn.TransactionSignatures = append(revTxn.TransactionSignatures, types.TransactionSignature{
				ParentID:       crypto.Hash(fcid),
				CoveredFields:  types.CoveredFields{FileContractRevisions: []uint64{0}},
				PublicKeyIndex: 1, // host key is always second
			})
			encodedSig, err := crypto.SignHash(revTxn.SigHash(1), h.secretKey)
			if err != nil {
				return err
			}
			revTxn.TransactionSignatures[1].Signature = encodedSig[:]

			// append piece to file
			if _, err := file.Write(piece); err != nil {
				return errors.New("couldn't write new data to file: " + err.Error())
			}

			// save updated obligation to disk
			h.mu.Lock()
			h.reviseObligation(revTxn)
			h.mu.Unlock()

			// send the signed transaction - this must be the last thing that happens.
			if err := encoding.WriteObject(conn, revTxn); err != nil {
				return errors.New("couldn't write signed revision transaction: " + err.Error())
			}
		}
	}()

	err = file.Close()
	if err != nil {
		return err
	}

	err = h.tpool.AcceptTransactionSet([]types.Transaction{obligation.RevisionTransaction})
	if err != nil {
		h.log.Println("WARN: transaction pool rejected revision transaction: " + err.Error())
	}

	return revisionErr
}
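// Illustrative sketch of the renter-side termination step implied by the loop
// above: the host treats an empty transaction as the end-of-revisions signal,
// after which it submits the last accepted revision to the transaction pool.
// This helper is hypothetical and not part of the excerpts; only the conn
// field, encoding.WriteObject, and types.Transaction appear in them.
func (hu *hostUploader) closeRevisionLoop() error {
	// An empty transaction tells the host to exit its revision loop.
	if err := encoding.WriteObject(hu.conn, types.Transaction{}); err != nil {
		return err
	}
	return hu.conn.Close()
}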
// consensusSetHash returns the Merkle root of the current state of consensus.
func (cs *ConsensusSet) consensusSetHash() crypto.Hash {
	// This check is too slow to be done on a full node.
	if build.Release == "standard" {
		return crypto.Hash{}
	}

	// Items of interest:
	// 1. genesis block
	// 2. current height
	// 3. current target
	// 4. current depth
	// 5. current path + diffs
	// (6) earliest allowed timestamp of next block
	// 7. unspent siacoin outputs, sorted by id.
	// 8. open file contracts, sorted by id.
	// 9. unspent siafund outputs, sorted by id.
	// 10. delayed siacoin outputs, sorted by height, then sorted by id.
	// 11. siafund pool

	// Create a Merkle tree containing all items of interest.
	tree := crypto.NewTree()
	tree.PushObject(cs.blockRoot.Block)
	tree.PushObject(cs.height())
	tree.PushObject(cs.currentProcessedBlock().ChildTarget)
	tree.PushObject(cs.currentProcessedBlock().Depth)
	// tree.PushObject(cs.earliestChildTimestamp(cs.currentProcessedBlock()))

	// Add all the blocks in the current path.
	// TODO: add their diffs as well.
	for i := 0; i < int(cs.db.pathHeight()); i++ {
		tree.PushObject(cs.db.getPath(types.BlockHeight(i)))
	}

	// Add all of the siacoin outputs, sorted by id.
	var openSiacoinOutputs crypto.HashSlice
	cs.db.forEachSiacoinOutputs(func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) {
		openSiacoinOutputs = append(openSiacoinOutputs, crypto.Hash(scoid))
	})
	sort.Sort(openSiacoinOutputs)
	for _, id := range openSiacoinOutputs {
		sco := cs.db.getSiacoinOutputs(types.SiacoinOutputID(id))
		tree.PushObject(id)
		tree.PushObject(sco)
	}

	// Add all of the file contracts, sorted by id.
	var openFileContracts crypto.HashSlice
	cs.db.forEachFileContracts(func(fcid types.FileContractID, fc types.FileContract) {
		openFileContracts = append(openFileContracts, crypto.Hash(fcid))
	})
	sort.Sort(openFileContracts)
	for _, id := range openFileContracts {
		// Sanity Check - file contract should exist.
		fc := cs.db.getFileContracts(types.FileContractID(id))
		tree.PushObject(id)
		tree.PushObject(fc)
	}

	// Add all of the siafund outputs, sorted by id.
	var openSiafundOutputs crypto.HashSlice
	cs.db.forEachSiafundOutputs(func(sfoid types.SiafundOutputID, sfo types.SiafundOutput) {
		openSiafundOutputs = append(openSiafundOutputs, crypto.Hash(sfoid))
	})
	sort.Sort(openSiafundOutputs)
	for _, id := range openSiafundOutputs {
		sfo := cs.db.getSiafundOutputs(types.SiafundOutputID(id))
		tree.PushObject(id)
		tree.PushObject(sfo)
	}

	// Get the set of delayed siacoin outputs, sorted by maturity height then
	// sorted by id, and add them.
	for i := cs.height() + 1; i <= cs.height()+types.MaturityDelay; i++ {
		var delayedSiacoinOutputs crypto.HashSlice
		if cs.db.inDelayedSiacoinOutputs(i) {
			cs.db.forEachDelayedSiacoinOutputsHeight(i, func(id types.SiacoinOutputID, output types.SiacoinOutput) {
				delayedSiacoinOutputs = append(delayedSiacoinOutputs, crypto.Hash(id))
			})
		}
		sort.Sort(delayedSiacoinOutputs)
		for _, delayedSiacoinOutputID := range delayedSiacoinOutputs {
			delayedSiacoinOutput := cs.db.getDelayedSiacoinOutputs(i, types.SiacoinOutputID(delayedSiacoinOutputID))
			tree.PushObject(delayedSiacoinOutput)
			tree.PushObject(delayedSiacoinOutputID)
		}
	}

	// Add the siafund pool.
	var siafundPool types.Currency
	err := cs.db.Update(func(tx *bolt.Tx) error {
		siafundPool = getSiafundPool(tx)
		return nil
	})
	if err != nil {
		panic(err)
	}
	tree.PushObject(siafundPool)

	return tree.Root()
}
// consensusSetHash returns the Merkle root of the current state of consensus.
func (cs *State) consensusSetHash() crypto.Hash {
	// Items of interest:
	// 1. genesis block
	// 2. current height
	// 3. current target
	// 4. current depth
	// 5. earliest allowed timestamp of next block
	// 6. current path, ordered by height.
	// 7. unspent siacoin outputs, sorted by id.
	// 8. open file contracts, sorted by id.
	// 9. unspent siafund outputs, sorted by id.
	// 10. delayed siacoin outputs, sorted by height, then sorted by id.
	// TODO: Add the diff set?

	// Create a Merkle tree containing all items of interest.
	tree := crypto.NewTree()
	tree.PushObject(cs.blockRoot.block)
	tree.PushObject(cs.height())
	tree.PushObject(cs.currentBlockNode().childTarget)
	tree.PushObject(cs.currentBlockNode().depth)
	tree.PushObject(cs.currentBlockNode().earliestChildTimestamp())

	// Add all the blocks in the current path.
	for i := 0; i < len(cs.currentPath); i++ {
		tree.PushObject(cs.currentPath[types.BlockHeight(i)])
	}

	// Add all of the siacoin outputs, sorted by id.
	var openSiacoinOutputs crypto.HashSlice
	for siacoinOutputID := range cs.siacoinOutputs {
		openSiacoinOutputs = append(openSiacoinOutputs, crypto.Hash(siacoinOutputID))
	}
	sort.Sort(openSiacoinOutputs)
	for _, id := range openSiacoinOutputs {
		sco, exists := cs.siacoinOutputs[types.SiacoinOutputID(id)]
		if !exists {
			panic("trying to push nonexistent siacoin output")
		}
		tree.PushObject(id)
		tree.PushObject(sco)
	}

	// Add all of the file contracts, sorted by id.
	var openFileContracts crypto.HashSlice
	for fileContractID := range cs.fileContracts {
		openFileContracts = append(openFileContracts, crypto.Hash(fileContractID))
	}
	sort.Sort(openFileContracts)
	for _, id := range openFileContracts {
		// Sanity Check - file contract should exist.
		fc, exists := cs.fileContracts[types.FileContractID(id)]
		if !exists {
			panic("trying to push a nonexistent file contract")
		}
		tree.PushObject(id)
		tree.PushObject(fc)
	}

	// Add all of the siafund outputs, sorted by id.
	var openSiafundOutputs crypto.HashSlice
	for siafundOutputID := range cs.siafundOutputs {
		openSiafundOutputs = append(openSiafundOutputs, crypto.Hash(siafundOutputID))
	}
	sort.Sort(openSiafundOutputs)
	for _, id := range openSiafundOutputs {
		sfo, exists := cs.siafundOutputs[types.SiafundOutputID(id)]
		if !exists {
			panic("trying to push nonexistent siafund output")
		}
		tree.PushObject(id)
		tree.PushObject(sfo)
	}

	// Get the set of delayed siacoin outputs, sorted by maturity height then
	// sorted by id, and add them.
	for i := cs.height() + 1; i <= cs.height()+types.MaturityDelay; i++ {
		var delayedSiacoinOutputs crypto.HashSlice
		for id := range cs.delayedSiacoinOutputs[i] {
			delayedSiacoinOutputs = append(delayedSiacoinOutputs, crypto.Hash(id))
		}
		sort.Sort(delayedSiacoinOutputs)
		for _, delayedSiacoinOutputID := range delayedSiacoinOutputs {
			delayedSiacoinOutput, exists := cs.delayedSiacoinOutputs[i][types.SiacoinOutputID(delayedSiacoinOutputID)]
			if !exists {
				panic("trying to push nonexistent delayed siacoin output")
			}
			tree.PushObject(delayedSiacoinOutput)
			tree.PushObject(delayedSiacoinOutputID)
		}
	}

	return tree.Root()
}
// rpcRevise is an RPC that allows a renter to revise a file contract. It will
// read new revisions in a loop until the renter sends a termination signal.
func (h *Host) rpcRevise(conn net.Conn) error {
	// read ID of contract to be revised
	var fcid types.FileContractID
	if err := encoding.ReadObject(conn, &fcid, crypto.HashSize); err != nil {
		return err
	}

	lockID := h.mu.RLock()
	obligation, exists := h.obligationsByID[fcid]
	h.mu.RUnlock(lockID)
	if !exists {
		return errors.New("no record of that contract")
	}

	// need to protect against two simultaneous revisions to the same
	// contract; this can cause inconsistency and data loss, making storage
	// proofs impossible
	obligation.mu.Lock()
	defer obligation.mu.Unlock()

	// open the file in append mode
	file, err := os.OpenFile(obligation.Path, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660)
	if err != nil {
		return err
	}
	defer func() {
		// if a newly-created file was not updated, remove it
		if stat, _ := file.Stat(); stat.Size() == 0 {
			os.Remove(obligation.Path)
		}
		file.Close()
	}()

	// rebuild current Merkle tree
	tree := crypto.NewTree()
	buf := make([]byte, crypto.SegmentSize)
	for {
		_, err := io.ReadFull(file, buf)
		if err == io.EOF {
			break
		} else if err != nil && err != io.ErrUnexpectedEOF {
			return err
		}
		tree.Push(buf)
	}

	// accept new revisions in a loop. The final good transaction will be
	// submitted to the blockchain.
	var finalTxn types.Transaction
	defer func() {
		h.tpool.AcceptTransactionSet([]types.Transaction{finalTxn})
	}()
	for {
		// read proposed revision
		var revTxn types.Transaction
		if err := encoding.ReadObject(conn, &revTxn, types.BlockSizeLimit); err != nil {
			return err
		}
		// an empty transaction indicates completion
		if revTxn.ID() == (types.Transaction{}).ID() {
			break
		}

		// check revision against original file contract
		lockID = h.mu.RLock()
		err := h.considerRevision(revTxn, obligation)
		h.mu.RUnlock(lockID)
		if err != nil {
			encoding.WriteObject(conn, err.Error())
			continue // don't terminate loop; subsequent revisions may be okay
		}

		// indicate acceptance
		if err := encoding.WriteObject(conn, modules.AcceptResponse); err != nil {
			return err
		}

		// read piece
		// TODO: simultaneously read into tree?
		rev := revTxn.FileContractRevisions[0]
		piece := make([]byte, rev.NewFileSize-obligation.FileContract.FileSize)
		_, err = io.ReadFull(conn, piece)
		if err != nil {
			return err
		}

		// verify Merkle root
		r := bytes.NewReader(piece)
		for {
			_, err := io.ReadFull(r, buf)
			if err == io.EOF {
				break
			} else if err != nil && err != io.ErrUnexpectedEOF {
				return err
			}
			tree.Push(buf)
		}
		if tree.Root() != rev.NewFileMerkleRoot {
			return errors.New("revision has bad Merkle root")
		}

		// manually sign the transaction
		revTxn.TransactionSignatures = append(revTxn.TransactionSignatures, types.TransactionSignature{
			ParentID:       crypto.Hash(fcid),
			CoveredFields:  types.CoveredFields{FileContractRevisions: []uint64{0}},
			PublicKeyIndex: 1, // host key is always second
		})
		encodedSig, err := crypto.SignHash(revTxn.SigHash(1), h.secretKey)
		if err != nil {
			return err
		}
		revTxn.TransactionSignatures[1].Signature = encodedSig[:]

		// send the signed transaction
		if err := encoding.WriteObject(conn, revTxn); err != nil {
			return err
		}

		// append piece to file
		if _, err := file.Write(piece); err != nil {
			return err
		}

		// save updated obligation to disk
		lockID = h.mu.Lock()
		h.spaceRemaining -= int64(len(piece))
		obligation.FileContract.RevisionNumber = rev.NewRevisionNumber
		obligation.FileContract.FileSize = rev.NewFileSize
		obligation.FileContract.FileMerkleRoot = rev.NewFileMerkleRoot
		h.obligationsByID[obligation.ID] = obligation
		heightObligations := h.obligationsByHeight[obligation.FileContract.WindowStart+StorageProofReorgDepth]
		for i := range heightObligations {
			if heightObligations[i].ID == obligation.ID {
				heightObligations[i] = obligation
			}
		}
		h.save()
		h.mu.Unlock(lockID)

		finalTxn = revTxn
	}

	return nil
}
// rpcRevise is an RPC that allows a renter to revise a file contract. It will
// read new revisions in a loop until the renter sends a termination signal.
func (h *Host) rpcRevise(conn net.Conn) error {
	// read ID of contract to be revised
	var fcid types.FileContractID
	if err := encoding.ReadObject(conn, &fcid, crypto.HashSize); err != nil {
		return errors.New("couldn't read contract ID: " + err.Error())
	}

	// remove conn deadline while we wait for lock and rebuild the Merkle tree
	conn.SetDeadline(time.Time{})

	h.mu.RLock()
	obligation, exists := h.obligationsByID[fcid]
	h.mu.RUnlock()
	if !exists {
		return errors.New("no record of that contract")
	}

	// need to protect against two simultaneous revisions to the same
	// contract; this can cause inconsistency and data loss, making storage
	// proofs impossible
	obligation.mu.Lock()
	defer obligation.mu.Unlock()

	// open the file in append mode
	file, err := os.OpenFile(obligation.Path, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660)
	if err != nil {
		return err
	}

	// rebuild current Merkle tree
	tree := crypto.NewTree()
	err = tree.ReadSegments(file)
	if err != nil {
		file.Close()
		return err
	}

	// accept new revisions in a loop. The final good transaction will be
	// submitted to the blockchain.
	revisionErr := func() error {
		for {
			// allow 2 minutes between revisions
			conn.SetDeadline(time.Now().Add(2 * time.Minute))

			// read proposed revision
			var revTxn types.Transaction
			if err := encoding.ReadObject(conn, &revTxn, types.BlockSizeLimit); err != nil {
				return errors.New("couldn't read revision: " + err.Error())
			}
			// an empty transaction indicates completion
			if revTxn.ID() == (types.Transaction{}).ID() {
				return nil
			}

			// allow 5 minutes for each revision
			conn.SetDeadline(time.Now().Add(5 * time.Minute))

			// check revision against original file contract
			h.mu.RLock()
			err := h.considerRevision(revTxn, *obligation)
			h.mu.RUnlock()
			if err != nil {
				encoding.WriteObject(conn, err.Error())
				continue // don't terminate loop; subsequent revisions may be okay
			}

			// indicate acceptance
			if err := encoding.WriteObject(conn, modules.AcceptResponse); err != nil {
				return errors.New("couldn't write acceptance: " + err.Error())
			}

			// read piece
			// TODO: simultaneously read into tree and file
			rev := revTxn.FileContractRevisions[0]
			last := obligation.LastRevisionTxn.FileContractRevisions[0]
			piece := make([]byte, rev.NewFileSize-last.NewFileSize)
			_, err = io.ReadFull(conn, piece)
			if err != nil {
				return errors.New("couldn't read piece data: " + err.Error())
			}

			// verify Merkle root
			err = tree.ReadSegments(bytes.NewReader(piece))
			if err != nil {
				return errors.New("couldn't verify Merkle root: " + err.Error())
			}
			if tree.Root() != rev.NewFileMerkleRoot {
				return errors.New("revision has bad Merkle root")
			}

			// manually sign the transaction
			revTxn.TransactionSignatures = append(revTxn.TransactionSignatures, types.TransactionSignature{
				ParentID:       crypto.Hash(fcid),
				CoveredFields:  types.CoveredFields{FileContractRevisions: []uint64{0}},
				PublicKeyIndex: 1, // host key is always second
			})
			encodedSig, err := crypto.SignHash(revTxn.SigHash(1), h.secretKey)
			if err != nil {
				return err
			}
			revTxn.TransactionSignatures[1].Signature = encodedSig[:]

			// send the signed transaction
			if err := encoding.WriteObject(conn, revTxn); err != nil {
				return errors.New("couldn't write signed revision transaction: " + err.Error())
			}

			// append piece to file
			if _, err := file.Write(piece); err != nil {
				return errors.New("couldn't write new data to file: " + err.Error())
			}

			// save updated obligation to disk
			h.mu.Lock()
			obligation.LastRevisionTxn = revTxn
			h.spaceRemaining -= int64(len(piece))
			h.save()
			h.mu.Unlock()
		}
	}()

	file.Close()

	// if a newly-created file was not updated, remove it
	if obligation.LastRevisionTxn.FileContractRevisions[0].NewRevisionNumber == 0 {
		os.Remove(obligation.Path)
		return revisionErr
	}

	err = h.tpool.AcceptTransactionSet([]types.Transaction{obligation.LastRevisionTxn})
	if err != nil {
		h.log.Println("WARN: transaction pool rejected revision transaction: " + err.Error())
	}

	return revisionErr
}