Example #1
func TestPut(t *testing.T) {
	s := NewStore("test", &nullStreamReader{}, nil)

	testData := []byte{0x01, 0x02, 0x03}

	err := s.Put(testData)
	if err != nil {
		t.Errorf("Failed to put %v", err)
	}

	fname := *s.currentFilename
	defer os.Remove(fname)

	s.Close()

	f, err := os.Open(fname)
	if err != nil {
		t.Errorf("Failed to open")
		return
	}

	df := snappy.NewReader(f)
	data, err := ioutil.ReadAll(df)
	if err != nil {
		t.Errorf("Failed to read: %v", err)
	} else if !bytes.Equal(data, testData) {
		t.Errorf("Data mismatch")
	}
}
Example #2
func TestBuildWriteValueRequest(t *testing.T) {
	assert := assert.New(t)
	input1, input2 := "abc", "def"
	chnx := []chunks.Chunk{
		chunks.NewChunk([]byte(input1)),
		chunks.NewChunk([]byte(input2)),
	}

	hints := map[hash.Hash]struct{}{
		hash.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	}
	compressed := buildWriteValueRequest(serializeChunks(chnx, assert), hints)
	gr := snappy.NewReader(compressed)

	count := 0
	for hint := range deserializeHints(gr) {
		count++
		_, present := hints[hint]
		assert.True(present)
	}
	assert.Equal(len(hints), count)

	chunkChan := make(chan *chunks.Chunk, 16)
	go chunks.DeserializeToChan(gr, chunkChan)
	for c := range chunkChan {
		assert.Equal(chnx[0].Hash(), c.Hash())
		chnx = chnx[1:]
	}
	assert.Empty(chnx)
}
Example #3
func newCompStream(conn net.Conn) *compStream {
	c := new(compStream)
	c.conn = conn
	c.w = snappy.NewBufferedWriter(conn)
	c.r = snappy.NewReader(conn)
	return c
}
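The compStream type itself is not shown in this example. A minimal sketch of what the constructor assumes, with field names taken from the code above and illustrative Read/Write/Close methods, might look like:

type compStream struct {
	conn net.Conn
	w    *snappy.Writer
	r    *snappy.Reader
}

// Read decompresses data arriving on the wrapped connection.
func (c *compStream) Read(p []byte) (n int, err error) { return c.r.Read(p) }

// Write compresses onto the wrapped connection. Because NewBufferedWriter
// buffers internally, flush after each write to keep the stream interactive.
func (c *compStream) Write(p []byte) (n int, err error) {
	n, err = c.w.Write(p)
	if err != nil {
		return n, err
	}
	return n, c.w.Flush()
}

func (c *compStream) Close() error { return c.conn.Close() }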
Example #4
func main() {
	http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
		reqBuf, err := ioutil.ReadAll(snappy.NewReader(r.Body))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		var req remote.WriteRequest
		if err := proto.Unmarshal(reqBuf, &req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		for _, ts := range req.Timeseries {
			m := make(model.Metric, len(ts.Labels))
			for _, l := range ts.Labels {
				m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
			}
			fmt.Println(m)

			for _, s := range ts.Samples {
				fmt.Printf("  %f %d\n", s.Value, s.TimestampMs)
			}
		}
	})

	log.Fatal(http.ListenAndServe(":1234", nil))
}
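A matching sender is straightforward to sketch: marshal the WriteRequest with protobuf, wrap it in snappy framing, and POST it to the endpoint. This is an illustrative counterpart under the same assumptions as the receiver (the remote.WriteRequest type and the /receive path), not code from the original project:

func send(req *remote.WriteRequest) error {
	raw, err := proto.Marshal(req)
	if err != nil {
		return err
	}
	// Use the framing format, since the receiver reads with snappy.NewReader.
	var buf bytes.Buffer
	sw := snappy.NewBufferedWriter(&buf)
	if _, err := sw.Write(raw); err != nil {
		return err
	}
	if err := sw.Close(); err != nil {
		return err
	}
	resp, err := http.Post("http://localhost:1234/receive", "application/x-protobuf", &buf)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}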
Example #5
func (suite *LevelDBPutCacheSuite) extractChunks(hashes hashSet) <-chan *chunks.Chunk {
	buf := &bytes.Buffer{}
	err := suite.cache.ExtractChunks(hashes, buf)
	suite.NoError(err)

	chunkChan := make(chan *chunks.Chunk)
	go chunks.DeserializeToChan(snappy.NewReader(buf), chunkChan)
	return chunkChan
}
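Callers can simply range over the returned channel, on the assumption that chunks.DeserializeToChan closes it once the snappy stream is exhausted. A hypothetical use inside a test (someHashes is a placeholder):

for c := range suite.extractChunks(someHashes) {
	suite.NotNil(c)
}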
Example #6
func runRemote() {
	if *destinationFlag == "" {
		log.Println("Must specify destination in receive mode")
		os.Exit(1)
	}
	if *timestampFlag == "" {
		log.Println("Must specify timestamp in receive mode")
		os.Exit(1)
	}
	var snapshotsLoc SnapshotsLoc
	snapshotsLoc.Directory = *destinationFlag
	snapshotsLoc.Limits = Limits{
		Hourly:  *hourlyFlag,
		Daily:   *dailyFlag,
		Weekly:  *weeklyFlag,
		Monthly: *monthlyFlag}

	lock, err := NewDirLock(snapshotsLoc.Directory)
	if err != nil {
		log.Println(err.Error())
		return
	}
	defer lock.Unlock()
	timestamp := Timestamp(*timestampFlag)
	_, err = parseTimestamp(timestamp)
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
	if verbosity > 2 {
		log.Println("runRemote: ReceiveAndCleanUp")
	}
	var runner CmdRunner
	if *noCompressionFlag {
		runner = snapshotsLoc.ReceiveAndCleanUp(os.Stdin, timestamp)
	} else {
		rd := snappy.NewReader(os.Stdin)
		runner = snapshotsLoc.ReceiveAndCleanUp(rd, timestamp)
	}
	err = <-runner.Started
	if verbosity > 2 {
		log.Println("runRemote: ReceiveAndCleanUp Started")
	}
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
	err = <-runner.Done
	if verbosity > 2 {
		log.Println("runRemote: ReceiveAndCleanUp Done")
	}
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
}
Example #7
func do(isDecompress bool, filename, suffix string, isToStdout bool) (percentage, speed float64, err error) {
	var (
		input   io.Reader
		output  io.Writer
		outName string = "-"
	)
	if filename == "-" {
		input = os.Stdin
		output = os.Stdout
	} else {
		fi, err := os.Open(filename)
		if err != nil {
			return 0, 0, err
		}
		input = fi
		defer fi.Close()

		if isToStdout {
			output = os.Stdout
		} else {
			if isDecompress {
				if !strings.HasSuffix(filename, suffix) {
					return 0, 0, fmt.Errorf("file %s does not have suffix %s", filename, suffix)
				}
				outName = filename[:(len(filename) - len(suffix))]
			} else {
				outName = filename + suffix
			}
			fo, err := os.Create(outName)
			if err != nil {
				return 0, 0, err
			}
			output = fo
			defer fo.Close()
		}
	}
	start := time.Now()
	rwc := NewRWCounter(input, output)
	if isDecompress {
		_, err = io.Copy(rwc, snappy.NewReader(rwc))
	} else {
		_, err = io.Copy(snappy.NewWriter(rwc), rwc)
	}
	useTime := time.Since(start).Seconds()
	if isDecompress {
		percentage = 1 - float64(rwc.CountR())/float64(rwc.CountW())
		speed = float64(rwc.CountW()) / 1024.0 / 1024.0 / useTime
	} else {
		percentage = 1 - float64(rwc.CountW())/float64(rwc.CountR())
		speed = float64(rwc.CountR()) / 1024.0 / 1024.0 / useTime
	}
	return
}
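The counters make the return values concrete: for compression, percentage is the fraction of bytes saved (1 - written/read) and speed is throughput in MB/s over the uncompressed side. A hypothetical call site, assuming the surrounding CLI wiring:

// Compress file.txt into file.txt.sz, keeping the original file.
pct, mbps, err := do(false, "file.txt", ".sz", false)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("saved %.1f%% at %.1f MB/s\n", pct*100, mbps)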
Example #8
func resBodyReader(res *http.Response) (reader io.ReadCloser) {
	reader = res.Body
	if strings.Contains(res.Header.Get("Content-Encoding"), "gzip") {
		gr, err := gzip.NewReader(reader)
		d.Chk.NoError(err)
		reader = gr
	} else if strings.Contains(res.Header.Get("Content-Encoding"), "x-snappy-framed") {
		sr := snappy.NewReader(reader)
		reader = ioutil.NopCloser(sr)
	}
	return
}
Example #9
func bodyReader(req *http.Request) (reader io.ReadCloser) {
	reader = req.Body
	if strings.Contains(req.Header.Get("Content-Encoding"), "gzip") {
		gr, err := gzip.NewReader(reader)
		d.PanicIfError(err)
		reader = gr
	} else if strings.Contains(req.Header.Get("Content-Encoding"), "x-snappy-framed") {
		sr := snappy.NewReader(reader)
		reader = ioutil.NopCloser(sr)
	}
	return
}
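The sending side these readers expect is easy to sketch: frame the body with snappy and label it so bodyReader takes the right branch. The URL and payload here are placeholders, not part of the original project:

func postSnappy(url string, payload []byte) (*http.Response, error) {
	var buf bytes.Buffer
	sw := snappy.NewBufferedWriter(&buf)
	if _, err := sw.Write(payload); err != nil {
		return nil, err
	}
	if err := sw.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Encoding", "x-snappy-framed")
	return http.DefaultClient.Do(req)
}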
Example #10
File: rcf.go Project: reusee/rcf
func (f *File) decode(bs []byte, target interface{}) (err error) {
	var r io.Reader
	if f.compressMethod == _COMPRESS_SNAPPY {
		r = snappy.NewReader(bytes.NewReader(bs))
	} else {
		r = bytes.NewReader(bs)
	}
	if f.codec == _CODEC_GOB {
		return gob.NewDecoder(r).Decode(target)
	} else if f.codec == _CODEC_MSGPACK {
		return msgpack.NewDecoder(r).Decode(target)
	}
	return fmt.Errorf("not reachable")
}
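A sketch of the inverse operation, assuming the same compressMethod and codec constants as decode above; the real rcf project may implement this differently:

func (f *File) encode(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	var w io.Writer = &buf
	var sw *snappy.Writer
	if f.compressMethod == _COMPRESS_SNAPPY {
		sw = snappy.NewBufferedWriter(&buf)
		w = sw
	}
	switch f.codec {
	case _CODEC_GOB:
		if err := gob.NewEncoder(w).Encode(v); err != nil {
			return nil, err
		}
	case _CODEC_MSGPACK:
		if err := msgpack.NewEncoder(w).Encode(v); err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown codec")
	}
	// Close flushes any data still buffered in the snappy framing layer.
	if sw != nil {
		if err := sw.Close(); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}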
Example #11
// Get can be called from any goroutine to retrieve the chunk referenced by hash. If the chunk is not present, Get returns the empty Chunk.
func (p *orderedChunkCache) Get(hash hash.Hash) chunks.Chunk {
	// Don't use defer p.mu.RUnlock() here, because I want reading from orderedChunks NOT to be guarded by the lock. LevelDB handles its own goroutine-safety.
	p.mu.RLock()
	dbKey, ok := p.chunkIndex[hash]
	p.mu.RUnlock()

	if !ok {
		return chunks.EmptyChunk
	}
	data, err := p.orderedChunks.Get(dbKey, nil)
	d.Chk.NoError(err)
	reader := snappy.NewReader(bytes.NewReader(data))
	chunkChan := make(chan *chunks.Chunk)
	go chunks.DeserializeToChan(reader, chunkChan)
	return *(<-chunkChan)
}
Example #12
File: conn.go Project: nsqio/go-nsq
func (c *Conn) upgradeSnappy() error {
	conn := net.Conn(c.conn)
	if c.tlsConn != nil {
		conn = c.tlsConn
	}
	c.r = snappy.NewReader(conn)
	c.w = snappy.NewWriter(conn)
	frameType, data, err := ReadUnpackedResponse(c)
	if err != nil {
		return err
	}
	if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) {
		return errors.New("invalid response from Snappy upgrade")
	}
	return nil
}
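Note that the write side uses snappy.NewWriter rather than NewBufferedWriter: the unbuffered writer emits a complete frame on every Write call, so protocol commands reach the peer immediately without an explicit Flush.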
Example #13
func NewSnappyResponseReader(resp *http.Response) io.ReadCloser {
	var reader io.Reader
	reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "snappy" {
		if isr := snappyReaderPool.Get(); isr != nil {
			sr := isr.(*snappy.Reader)
			sr.Reset(reader)
			reader = sr
		} else {
			// Creates a new one if the pool is empty
			reader = snappy.NewReader(reader)
		}
	}
	return &snappyResponseReader{
		resp:   resp,
		Reader: reader,
	}
}
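Only the get side of the pool appears above. A sketch of the Close method this pattern implies, returning the reader for reuse (the snappyResponseReader fields are assumed from the constructor):

func (r *snappyResponseReader) Close() error {
	// Only *snappy.Reader instances go back to the pool; a plain resp.Body
	// reader is not pooled.
	if sr, ok := r.Reader.(*snappy.Reader); ok {
		sr.Reset(nil)
		snappyReaderPool.Put(sr)
		r.Reader = nil
	}
	return r.resp.Body.Close()
}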
Example #14
// Decompress a snappy archive.
func unsnap(src *os.File) (dst *os.File, err error) {
	srcInfo, err := src.Stat()
	if err != nil {
		return
	}
	srcName := srcInfo.Name()

	// Make sure existing files are not overwritten.
	dstName := strings.TrimSuffix(srcName, ".sz")

	getUnusedFilename(&dstName)
	print(concat(srcName, "  >  ", dstName))

	// Create the destination file.
	dst, err = create(dstName, srcInfo.Mode())
	if err != nil {
		return
	}
	// Remember to re-open the uncompressed file after it has been written.
	defer func() {
		if err == nil {
			dst, err = os.Open(dstName)
		}
	}()

	pt := &passthru{
		Reader:    src,
		nExpected: uint64(srcInfo.Size()),
	}
	defer pt.Reset()

	szr := snappy.NewReader(pt)
	defer szr.Reset(nil)

	defer print()
	_, err = io.Copy(dst, szr)
	return
}
Example #15
func NewArchiveReader(ir io.Reader) (or Reader) {
	sr := snappy.NewReader(ir)
	mr := msgp.NewReader(sr)

	return &ArchiveReader{mr}
}
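A hedged sketch of the matching writer, mirroring the layering above (snappy framing outermost, msgp inside). Both layers buffer, so a handle to each is kept for shutdown; the type names here are illustrative:

type archiveWriter struct {
	mw *msgp.Writer
	sw *snappy.Writer
}

func NewArchiveWriter(ow io.Writer) *archiveWriter {
	sw := snappy.NewBufferedWriter(ow)
	return &archiveWriter{mw: msgp.NewWriter(sw), sw: sw}
}

// Close flushes the msgp encoder first, then the snappy framing layer.
func (w *archiveWriter) Close() error {
	if err := w.mw.Flush(); err != nil {
		return err
	}
	return w.sw.Close()
}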
Example #16
func (d *snappyDecompressor) Do(r io.Reader) ([]byte, error) {
	sr := snappy.NewReader(r)
	return ioutil.ReadAll(sr)
}
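This matches the shape of a Decompressor in older gRPC APIs; under the same assumption, the compressing counterpart is symmetric:

type snappyCompressor struct{}

// Do writes the snappy-framed form of p to w.
func (c *snappyCompressor) Do(w io.Writer, p []byte) error {
	sw := snappy.NewBufferedWriter(w)
	if _, err := sw.Write(p); err != nil {
		return err
	}
	// Close flushes the final frame.
	return sw.Close()
}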
Example #17
func FuzzFraming(data []byte) int {
	r := snappy.NewReader(bytes.NewReader(data))
	buf := make([]byte, 0, 1023)
	dec := make([]byte, 0, 1024)
	for i := 0; ; i++ {
		x := i
		if x > cap(buf) {
			x = cap(buf)
		}
		n, err := r.Read(buf[:x])
		if n != 0 {
			dec = append(dec, buf[:n]...)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return 0
		}
	}
	r.Reset(bytes.NewReader(data))
	dec1, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec1) {
		fmt.Printf("dec0: %q\n", dec)
		fmt.Printf("dec1: %q\n", dec1)
		panic("not equal")
	}

	bufw := new(bytes.Buffer)
	w := snappy.NewBufferedWriter(bufw)
	for i := 0; len(dec1) > 0; i++ {
		x := i
		if x > len(dec1) {
			x = len(dec1)
		}
		n, err := w.Write(dec1[:x])
		if n != x {
			panic("short write")
		}
		if err != nil {
			panic(err)
		}
		dec1 = dec1[x:]
		if (i % 2) != 0 {
			w.Flush()
		}
	}
	w.Close()

	dec1 = append([]byte{}, dec...)
	bufw2 := new(bytes.Buffer)
	w2 := snappy.NewWriter(bufw2)
	for i := 2; len(dec1) > 0; i++ {
		x := i
		if x > len(dec1) {
			x = len(dec1)
		}
		n, err := w2.Write(dec1[:x])
		if n != x {
			panic("short write")
		}
		if err != nil {
			panic(err)
		}
		dec1 = dec1[x:]
		if (i % 2) != 0 {
			w2.Flush()
		}
	}
	w2.Close()

	r2 := snappy.NewReader(bufw)
	dec2, err := ioutil.ReadAll(r2)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec2) {
		panic("not equal")
	}

	r3 := snappy.NewReader(bufw2)
	dec3, err := ioutil.ReadAll(r3)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec3) {
		panic("not equal")
	}

	return 1
}
Example #18
func progd_reverse(ar cmdoptS) {

	if ar.parrate != 0 { // we do not care about the actual number
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to find par2 in PATH, metadata reconstruction was ignored: " + err.Error())
		}

		cmd := exec.Command("par2", "r", "mdpp.par2", "-v", "--", "md")
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.in_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, metadata reconstruction was skipped: " + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 finished unsuccessfully, metadata reconstruction was skipped (or failed): " + err.Error())
		}
	}

	// Open the metadata bolt database
	dbi, err := bolt.Open(ar.in_dir+"/md", 0600, nil)

	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	tx, err := dbi.Begin(false)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	defer tx.Rollback()
	db := tx.Bucket([]byte("Ketv1"))
	if db == nil {
		fmt.Println("bucket Ketv1 not found")
		os.Exit(-1)
	}

	ndb := db.Get([]byte("packagesum"))

	var nd int64

	missing_file := make([]string, 0, 25)
	all_file := make([]string, 0, 25)

	binary.Read(bytes.NewBuffer(ndb), binary.LittleEndian, &nd)
	var cfn int64
	for cfn <= nd {
		cnnn := fmt.Sprintf(ar.in_dir+"/df%X", cfn)
		all_file = append(all_file, fmt.Sprintf("df%X", cfn))

		if _, err := os.Stat(cnnn); err != nil {
			if ar.parrate == 0 {
				missing_file = append(missing_file, fmt.Sprintf("df%X", cfn))
			} else {

				//touch the missing file so that par2 will try to recover this
				cfnd, err := os.Create(cnnn)

				if err != nil {
					fmt.Println(err.Error())
					os.Exit(-1)
				}

				cfnd.Close()

				missing_file = append(missing_file, fmt.Sprintf("df%X", cfn))

			}
		}
		cfn++
	}

	if len(missing_file) != 0 {
		if ar.parrate == 0 {
			fmt.Printf("%d files missing\n", len(missing_file))

			for _, cf := range missing_file {
				fmt.Println(cf)
			}

			fmt.Println("Failed to reverse operate as there are files missing.")
			os.Exit(-1)

		} else {
			fmt.Printf("%d files missing, but reconstruction by par2 is underway\n", len(missing_file))

			for _, cf := range missing_file {
				fmt.Println(cf)
			}
		}
	}

	data_reconstruction_unsuccessful := true

	if ar.parrate != 0 { // we do not care about the actual number
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to find par2 in PATH, data reconstruction was ignored: " + err.Error())
		}

		cmdargs := []string{"r", "mdpp.par2", "-v", "--"}

		cmdargs = append(cmdargs, all_file...)

		cmd := exec.Command("par2", cmdargs...)
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.in_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, data reconstruction was skipped: " + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 finished unsuccessfully, data reconstruction was skipped (or failed): " + err.Error())
		} else {
			data_reconstruction_unsuccessful = false
		}
	}

	if ar.parrate != 0 && data_reconstruction_unsuccessful {
		fmt.Println("Operation failed: unable to reconstruct.")
		fmt.Println("If the data were correct, removing parrate might help.")

		for _, cf := range missing_file {
			os.Remove(fmt.Sprintf("%s/%s", ar.in_dir, cf))
		}

		os.Exit(-1)
	}

	//now we do the actual job

	nonce := db.Get([]byte("nonce"))
	if nonce == nil {
		fmt.Println("nonce not found")
		os.Exit(-1)
	}

	//calc key

	keyhasher := sha3.NewShake256()

	keyhasher.Write(nonce)
	keyhasher.Write([]byte(ar.secret_key))

	xchachakey := make([]byte, 32)
	keyhasher.Read(xchachakey)

	poly1305key := make([]byte, 32)
	keyhasher.Read(poly1305key)

	//set up stream

	var LimitedSizeReadFromi LimitedSizeReadFrom

	LimitedSizeReadFromi.InitNow()

	LimitedSizeReadFromi.TargetPatten = ar.in_dir + "/df%X"

	cryptos, err := chacha20.NewXChaCha(xchachakey, nonce)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	HashWriter := sha3.NewShake256()

	Tread := io.TeeReader(LimitedSizeReadFromi, HashWriter)

	DataReader := NewDecryptedReader(Tread, cryptos)

	DeCompressedStream := snappy.NewReader(DataReader)

	TarStream := tar.NewReader(DeCompressedStream)

	for {
		hdr, err := TarStream.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		filenamex := hdr.Name
		if !IsPathAllowed(hdr.Name) {
			filenamex = url.QueryEscape(hdr.Name)
		}

		dirc := filepath.Dir(ar.out_dir + "/" + filenamex)
		os.MkdirAll(dirc, 0700)

		cfhd, err := os.Create(ar.out_dir + "/" + filenamex)

		if err != nil {
			log.Fatalln(err)
		}

		_, err = io.Copy(cfhd, TarStream)

		if err != nil {
			log.Fatalln(err)
		}

		cfhd.Close()

	}

	LimitedSizeReadFromi.Finialize()

	FileHash := make([]byte, 64)
	HashWriter.Read(FileHash)
	fmt.Printf("Hash: %x\n", FileHash)

	var poly1305sum [16]byte
	var poly1305sum_key [32]byte
	poly1305sums := db.Get([]byte("poly1305sum"))

	copy(poly1305sum[:], poly1305sums)
	copy(poly1305sum_key[:], poly1305key)

	iscorrect := poly1305.Verify(&poly1305sum, FileHash, &poly1305sum_key)

	dbi.Close()

	if iscorrect {
		fmt.Println("Correct file data")
		os.Exit(0)
	} else {
		fmt.Println("File data does not match!")
		os.Exit(-2)
	}

}
Example #19
func runLoadFile() {
	if *destinationFlag == "" {
		log.Println("Must specify destination in loadFile mode")
		os.Exit(1)
	}
	var snapshotsLoc SnapshotsLoc
	snapshotsLoc.Directory = *destinationFlag
	snapshotsLoc.Limits = Limits{
		Hourly:  *hourlyFlag,
		Daily:   *dailyFlag,
		Weekly:  *weeklyFlag,
		Monthly: *monthlyFlag}

	lock, err := NewDirLock(snapshotsLoc.Directory)
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
	defer lock.Unlock()
	fileName := *loadFileFlag
	baseName := path.Base(fileName)
	var timestampStr string
	var compressed bool
	if strings.HasSuffix(baseName, ".snap.snpy") {
		timestampStr = strings.TrimSuffix(baseName, ".snap.snpy")
		compressed = true
	} else if strings.HasSuffix(baseName, ".snap") {
		timestampStr = strings.TrimSuffix(baseName, ".snap")
		compressed = false
	} else {
		log.Printf("Unrecognized file type for %s", baseName)
		os.Exit(1)
	}
	f, err := os.Open(fileName)
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
	defer f.Close()

	timestamp := Timestamp(timestampStr)
	var runner CmdRunner
	if compressed {
		cf := snappy.NewReader(f)
		runner = snapshotsLoc.ReceiveSnapshot(cf, timestamp)
	} else {
		runner = snapshotsLoc.ReceiveSnapshot(f, timestamp)
	}
	err = <-runner.Started
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
	err = <-runner.Done
	if err != nil {
		log.Println(err.Error())
		os.Exit(1)
	}
	if *pinnedFlag {
		err = snapshotsLoc.PinTimestamp(timestamp)
		if err != nil {
			log.Println(err.Error())
			os.Exit(1)
		}
	}
}
Example #20
// Iterator implements part of the Interface interface.
func (m *mergeSorter) Iterator() (iter Iterator, err error) {
	if m.finalized {
		return nil, ErrAlreadyFinalized
	}
	m.finalized = true // signal that further operations should fail

	it := &mergeIterator{workDir: m.workDir, marshaler: m.opts.Marshaler}

	if len(m.shards) == 0 {
		// Fast path for a single, in-memory shard
		it.buffer, m.buffer = m.buffer, nil
		sortutil.Sort(m.opts.Lesser, it.buffer)
		return it, nil
	}

	// This is a heap storing the head of each shard.
	merger := &sortutil.ByLesser{
		Lesser: &mergeElementLesser{Lesser: m.opts.Lesser},
	}
	it.merger = merger

	defer func() {
		// Try to cleanup on errors
		if err != nil {
			if cErr := it.Close(); cErr != nil {
				log.Printf("WARNING: error closing Iterator after error: %v", cErr)
			}
		}
	}()

	if len(m.buffer) != 0 {
		// To make the merging algorithm simpler, dump the last shard to disk.
		if err := m.dumpShard(); err != nil {
			m.buffer = nil
			return nil, fmt.Errorf("error dumping final shard: %v", err)
		}
	}
	m.buffer = nil

	// Initialize the merger heap by reading the first element of each shard.
	for _, shard := range m.shards {
		f, err := os.OpenFile(shard, os.O_RDONLY, shardFileMode)
		if err != nil {
			return nil, fmt.Errorf("error opening shard %q: %v", shard, err)
		}

		r := io.Reader(f)
		if m.opts.CompressShards {
			r = snappy.NewReader(r)
		}

		rd := delimited.NewReader(bufio.NewReaderSize(r, m.opts.IOBufferSize))
		first, err := rd.Next()
		if err != nil {
			f.Close()
			return nil, fmt.Errorf("error reading beginning of shard %q: %v", shard, err)
		}
		el, err := m.opts.Marshaler.Unmarshal(first)
		if err != nil {
			f.Close()
			return nil, fmt.Errorf("error unmarshaling beginning of shard %q: %v", shard, err)
		}

		heap.Push(merger, &mergeElement{el: el, rd: rd, f: f})
	}

	return it, nil
}
Example #21
// Read implements part of the Interface interface.
func (m *mergeSorter) Read(f func(i interface{}) error) (err error) {
	if m.finalized {
		return ErrAlreadyFinalized
	}
	m.finalized = true // signal that further operations should fail

	// Ensure that the working directory is always cleaned up.
	defer func() {
		cleanupErr := os.RemoveAll(m.workDir)
		if err == nil {
			err = cleanupErr
		} else {
			log.Println("WARNING: error removing temporary directory:", m.workDir)
		}
	}()

	if len(m.shards) == 0 {
		// Fast path for a single, in-memory shard
		defer func() { m.buffer = nil }()
		sortutil.Sort(m.opts.Lesser, m.buffer)
		for len(m.buffer) > 0 {
			if err := f(m.buffer[0]); err != nil {
				return err
			}
			m.buffer = m.buffer[1:]
		}
		return nil
	}

	if len(m.buffer) != 0 {
		// To make the merging algorithm simpler, dump the last shard to disk.
		if err := m.dumpShard(); err != nil {
			m.buffer = nil
			return fmt.Errorf("error dumping final shard: %v", err)
		}
	}
	m.buffer = nil

	// This is a heap storing the head of each shard.
	merger := &sortutil.ByLesser{
		Lesser: &mergeElementLesser{Lesser: m.opts.Lesser},
	}

	defer func() {
		// Try to cleanup on errors
		for merger.Len() != 0 {
			x := heap.Pop(merger).(*mergeElement)
			_ = x.f.Close() // ignore errors (file is only open for reading)
		}
	}()

	// Initialize the merger heap by reading the first element of each shard.
	for _, shard := range m.shards {
		f, err := os.OpenFile(shard, os.O_RDONLY, shardFileMode)
		if err != nil {
			return fmt.Errorf("error opening shard %q: %v", shard, err)
		}

		r := io.Reader(f)
		if m.opts.CompressShards {
			r = snappy.NewReader(r)
		}

		rd := delimited.NewReader(bufio.NewReaderSize(r, m.opts.IOBufferSize))
		first, err := rd.Next()
		if err != nil {
			f.Close()
			return fmt.Errorf("error reading beginning of shard %q: %v", shard, err)
		}
		el, err := m.opts.Marshaler.Unmarshal(first)
		if err != nil {
			f.Close()
			return fmt.Errorf("error unmarshaling beginning of shard %q: %v", shard, err)
		}

		heap.Push(merger, &mergeElement{el: el, rd: rd, f: f})
	}

	// While the merger heap is non-empty:
	//   el := pop the head of the heap
	//   pass it to the user-specific function
	//   push the next element el.rd to the merger heap
	for merger.Len() != 0 {
		x := heap.Pop(merger).(*mergeElement)

		// Give the value to the user-supplied function
		if err := f(x.el); err != nil {
			return err
		}

		// Read and parse the next value on the same shard
		rec, err := x.rd.Next()
		if err != nil {
			_ = x.f.Close()           // ignore errors (file is only open for reading)
			_ = os.Remove(x.f.Name()) // ignore errors (os.RemoveAll used in defer)
			if err == io.EOF {
				continue
			} else {
				return fmt.Errorf("error reading shard: %v", err)
			}
		}
		next, err := m.opts.Marshaler.Unmarshal(rec)
		if err != nil {
			return fmt.Errorf("error unmarshaling element: %v", err)
		}

		// Reuse mergeElement, push it back onto the merger heap with the next value
		x.el = next
		heap.Push(merger, x)
	}

	return nil
}