// NewDeviceID generates a new device ID from the raw bytes of a certificate func NewDeviceID(rawCert []byte) DeviceID { var n DeviceID hf := sha256.New() hf.Write(rawCert) hf.Sum(n[:0]) return n }
// Verify returns nil or an error describing the mismatch between the block // list and actual reader contents func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error { hf := sha256.New() // A 32k buffer is used for copying into the hash function. buf := make([]byte, 32<<10) for i, block := range blocks { lr := &io.LimitedReader{R: r, N: int64(blocksize)} _, err := io.CopyBuffer(hf, lr, buf) if err != nil { return err } hash := hf.Sum(nil) hf.Reset() if !bytes.Equal(hash, block.Hash) { return fmt.Errorf("hash mismatch %x != %x for block %d", hash, block.Hash, i) } } // We should have reached the end now bs := make([]byte, 1) n, err := r.Read(bs) if n != 0 || err != io.EOF { return fmt.Errorf("file continues past end of blocks") } return nil }
// cpuBenchOnce hashes a fixed chunk of data in a loop for roughly the given
// duration and returns the achieved throughput in MiB/s, truncated to two
// decimal places.
func cpuBenchOnce(duration time.Duration) float64 {
	const chunkSize = 100 * 1 << 10
	hasher := sha256.New()
	chunk := make([]byte, chunkSize)
	// The chunk contents are irrelevant to the benchmark; if the read fails
	// we simply hash zeros, so the error is deliberately ignored.
	_, _ = rand.Reader.Read(chunk)

	start := time.Now()
	processed := 0
	for time.Since(start) < duration {
		hasher.Write(chunk)
		processed += chunkSize
	}
	hasher.Sum(nil)

	elapsed := time.Since(start)
	rate := float64(processed) / elapsed.Seconds() / (1 << 20)
	// Truncate to two decimals for stable display.
	return float64(int(rate*100)) / 100
}
func VerifyBuffer(buf []byte, block protocol.BlockInfo) ([]byte, error) { if len(buf) != int(block.Size) { return nil, fmt.Errorf("length mismatch %d != %d", len(buf), block.Size) } hf := sha256.New() _, err := hf.Write(buf) if err != nil { return nil, err } hash := hf.Sum(nil) if !bytes.Equal(hash, block.Hash) { return hash, fmt.Errorf("hash mismatch %x != %x", hash, block.Hash) } return hash, nil }
// Blocks returns the blockwise hash of the reader. The reader is consumed in
// chunks of blocksize bytes; each chunk yields one BlockInfo carrying its
// SHA-256 hash, weak hash, size and offset. A non-negative sizehint limits
// reading to that many bytes and pre-allocates the result slices. counter,
// when non-nil, is updated with the number of bytes hashed.
func Blocks(r io.Reader, blocksize int, sizehint int64, counter Counter) ([]protocol.BlockInfo, error) {
	hf := sha256.New()
	hashLength := hf.Size()
	whf := weakhash.NewHash(blocksize)

	var blocks []protocol.BlockInfo
	var hashes, thisHash []byte

	if sizehint >= 0 {
		// Allocate contiguous blocks for the BlockInfo structures and their
		// hashes once and for all, and stick to the specified size.
		r = io.LimitReader(r, sizehint)
		numBlocks := int(sizehint / int64(blocksize))
		blocks = make([]protocol.BlockInfo, 0, numBlocks)
		hashes = make([]byte, 0, hashLength*numBlocks)
	}

	// A 32k buffer is used for copying into the hash function.
	buf := make([]byte, 32<<10)

	var offset int64
	for {
		// The TeeReader feeds the same bytes to the weak hash while the copy
		// drives the strong (SHA-256) hash, so both see one block per pass.
		lr := io.LimitReader(r, int64(blocksize))
		n, err := io.CopyBuffer(hf, io.TeeReader(lr, whf), buf)
		if err != nil {
			return nil, err
		}

		// Zero bytes copied means the underlying reader is exhausted.
		if n == 0 {
			break
		}

		if counter != nil {
			counter.Update(int64(n))
		}

		// Carve out a hash-sized chunk of "hashes" to store the hash for this
		// block. Sum appends to hashes; thisHash then aliases the freshly
		// appended bytes while hashes is advanced past them.
		hashes = hf.Sum(hashes)
		thisHash, hashes = hashes[:hashLength], hashes[hashLength:]

		b := protocol.BlockInfo{
			Size:     int32(n),
			Offset:   offset,
			Hash:     thisHash,
			WeakHash: whf.Sum32(),
		}

		blocks = append(blocks, b)
		offset += int64(n)

		// Reset both hashers for the next block.
		hf.Reset()
		whf.Reset()
	}

	if len(blocks) == 0 {
		// Empty file: represent it as a single zero-length block with the
		// well-known hash of the empty input.
		blocks = append(blocks, protocol.BlockInfo{
			Offset: 0,
			Size:   0,
			Hash:   SHA256OfNothing,
		})
	}

	return blocks, nil
}