Example No. 1
func (l *Log) WritePoints(points []models.Point, fields map[string]*tsdb.MeasurementFields, series []*tsdb.SeriesCreate) error {
	// add everything to the cache, or return an error if we've hit our max memory
	if addedToCache := l.addToCache(points, fields, series, true); !addedToCache {
		return fmt.Errorf("WAL backed up flushing to index, hit max memory")
	}

	// make the write durable if specified
	if !l.SkipDurability {
		// write the points
		pointStrings := make([]string, len(points))
		for i, p := range points {
			pointStrings[i] = p.String()
		}
		data := strings.Join(pointStrings, "\n")
		compressed := snappy.Encode(nil, []byte(data))

		if err := l.writeToLog(pointsEntry, compressed); err != nil {
			return err
		}

		// write the new fields
		if len(fields) > 0 {
			data, err := json.Marshal(fields)
			if err != nil {
				return err
			}
			compressed = snappy.Encode(compressed, data)
			if err := l.writeToLog(fieldsEntry, compressed); err != nil {
				return err
			}
		}

		// write the new series
		if len(series) > 0 {
			data, err := json.Marshal(series)
			if err != nil {
				return err
			}
			compressed = snappy.Encode(compressed, data)
			if err := l.writeToLog(seriesEntry, compressed); err != nil {
				return err
			}
		}
	}

	// usually skipping the cache is only for testing purposes and this was the easiest
	// way to represent the logic (to cache and then immediately flush)
	if l.SkipCache {
		if err := l.flush(idleFlush); err != nil {
			return err
		}
	}

	return nil
}
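A note on the buffer handling above: snappy.Encode(dst, src) returns a sub-slice of dst when dst has enough capacity and allocates a new slice otherwise, which is why the fields and series branches can pass the previous compressed slice back in as the destination without corrupting the points entry that was already written. A minimal, self-contained sketch of that contract (names are illustrative):

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	payload := []byte("hello hello hello hello hello hello")
	// Size the destination for the worst case up front; Encode then
	// returns a sub-slice of buf instead of allocating a new one.
	buf := make([]byte, snappy.MaxEncodedLen(len(payload)))
	out := snappy.Encode(buf, payload)
	fmt.Println(len(out) <= len(buf), cap(out) == cap(buf)) // true true
}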
Example No. 2
func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
		n := len(compressed)
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}
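The trailer appended here is blockTrailerLen (five) bytes: one compression-type byte followed by a four-byte little-endian CRC computed over the block body plus that type byte. A hedged read-side sketch, reusing the example's util and blockTrailerLen (verifyBlock is a hypothetical helper, not goleveldb's actual reader):

// verifyBlock splits data into body and compression type after checking
// the trailer CRC; a snappy-typed body still needs snappy.Decode.
func verifyBlock(data []byte) (body []byte, blockType byte, err error) {
	if len(data) < blockTrailerLen {
		return nil, 0, errors.New("block too short")
	}
	n := len(data) - 4
	if util.NewCRC(data[:n]).Value() != binary.LittleEndian.Uint32(data[n:]) {
		return nil, 0, errors.New("block checksum mismatch")
	}
	return data[:n-1], data[n-1], nil
}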
Example No. 3
func (p *Protocol) Send(msg proto.Message) error {
	if p.w == nil {
		return ErrNoWriter
	}

	data, err := proto.Marshal(msg)
	if err != nil {
		return err
	}

	if p.compress {
		data = snappy.Encode(nil, data)
	}

	sizeBuf := make([]byte, 4)
	binary.LittleEndian.PutUint32(sizeBuf, uint32(len(data)))
	if _, err := p.w.Write(sizeBuf); err != nil {
		return err
	}

	if _, err := p.w.Write(data); err != nil {
		return err
	}

	return nil
}
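A matching receive path reads the four-byte little-endian length prefix, then the payload, and decompresses before unmarshaling. A hedged sketch, assuming a p.r io.Reader field and an ErrNoReader sentinel that mirror the writer side (both hypothetical; the original only shows Send):

func (p *Protocol) Receive(msg proto.Message) error {
	if p.r == nil {
		return ErrNoReader
	}

	sizeBuf := make([]byte, 4)
	if _, err := io.ReadFull(p.r, sizeBuf); err != nil {
		return err
	}

	data := make([]byte, binary.LittleEndian.Uint32(sizeBuf))
	if _, err := io.ReadFull(p.r, data); err != nil {
		return err
	}

	if p.compress {
		var err error
		if data, err = snappy.Decode(nil, data); err != nil {
			return err
		}
	}

	return proto.Unmarshal(data, msg)
}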
Example No. 4
func SendFrame(w io.Writer, data proto.Message) (err error) {
	if data == nil {
		return errors.New("data is nil")
	}

	// marshal the message
	pbData, err := proto.Marshal(data)
	if err != nil {
		return err
	}

	// compress the marshaled bytes and send them as a single frame
	compressedPb := snappy.Encode(nil, pbData)
	return sendFrame(w, compressedPb)
}
Example No. 5
func (e *Engine) writeSeries(series map[string]*tsdb.Series) error {
	data, err := json.Marshal(series)
	if err != nil {
		return err
	}

	fn := filepath.Join(e.path, SeriesFileExtension+"tmp")
	ff, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	_, err = ff.Write(snappy.Encode(nil, data))
	if err != nil {
		return err
	}
	if err := ff.Close(); err != nil {
		return err
	}
	seriesFileName := filepath.Join(e.path, SeriesFileExtension)

	if _, err := os.Stat(seriesFileName); !os.IsNotExist(err) {
		if err := os.Remove(seriesFileName); err != nil && !os.IsNotExist(err) {
			return err
		}
	}

	return os.Rename(fn, seriesFileName)
}
Example No. 6
func Fuzz(data []byte) int {
	n, err := snappy.DecodedLen(data)
	if err != nil || n > 1e6 {
		return 0
	}
	if n < 0 {
		panic("negative decoded len")
	}
	dec, err := snappy.Decode(nil, data)
	if err != nil {
		if dec != nil {
			panic("dec is not nil")
		}
		return 0
	}
	if len(dec) != n {
		println(len(dec), n)
		panic("bad decoded len")
	}
	n = snappy.MaxEncodedLen(len(dec))
	enc := snappy.Encode(nil, dec)
	if len(enc) > n {
		panic("bad encoded len")
	}

	dec1, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec1) {
		panic("not equal")
	}
	return 1
}
Example No. 7
// writeSeriesAndFields writes the compressed fields and series to the meta file. This file persists the data
// in case the server gets shut down before the WAL has a chance to flush everything to the cache. By default this
// file is replayed on startup when bz1 calls LoadMetaDataIndex.
func (l *Log) writeSeriesAndFields(fields map[string]*tsdb.MeasurementFields, series []*tsdb.SeriesCreate) error {
	if len(fields) == 0 && len(series) == 0 {
		return nil
	}

	sf := &seriesAndFields{Fields: fields, Series: series}
	b, err := json.Marshal(sf)
	if err != nil {
		return err
	}
	cb := snappy.Encode(nil, b)

	l.mu.Lock()
	defer l.mu.Unlock()

	if _, err := l.metaFile.Write(u64tob(uint64(len(cb)))); err != nil {
		return err
	}

	if _, err := l.metaFile.Write(cb); err != nil {
		return err
	}

	return l.metaFile.Sync()
}
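Reading the meta file back inverts this framing: an eight-byte length prefix, the snappy block of that length, then the JSON decode. A hypothetical reader sketch, assuming u64tob writes big-endian as the usual influxdb helper does:

func readSeriesAndFields(r io.Reader) (*seriesAndFields, error) {
	lenBuf := make([]byte, 8)
	if _, err := io.ReadFull(r, lenBuf); err != nil {
		return nil, err
	}

	cb := make([]byte, binary.BigEndian.Uint64(lenBuf))
	if _, err := io.ReadFull(r, cb); err != nil {
		return nil, err
	}

	b, err := snappy.Decode(nil, cb)
	if err != nil {
		return nil, err
	}

	sf := &seriesAndFields{}
	if err := json.Unmarshal(b, sf); err != nil {
		return nil, err
	}
	return sf, nil
}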
Example No. 8
func TestSnappyCompressor(t *testing.T) {
	c := SnappyCompressor{}
	if c.Name() != "snappy" {
		t.Fatalf("expected name to be 'snappy', got %v", c.Name())
	}

	str := "My Test String"
	//Test Encoding
	expected := snappy.Encode(nil, []byte(str))
	if res, err := c.Encode([]byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected encoded value with the result encoded value.")
	}

	val, err := c.Encode([]byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error '%v'", str, err)
	}

	//Test Decoding
	if expected, err := snappy.Decode(nil, val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if res, err := c.Decode(val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected decoded value with the result decoded value.")
	}
}
Example No. 9
// Save stores data to a file.
func (d *Data) Save() error {
	// open or create file
	fh, err := os.OpenFile(d.File, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer fh.Close()

	// encode content in a buffer
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(d.Store); err != nil {
		return err
	}
	b := buf.Bytes()

	// compress content
	da := snappy.Encode(nil, b)

	// write content to file
	_, err = fh.Write(da)
	if err != nil {
		return err
	}

	return nil
}
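A hedged sketch of the inverse operation, assuming d.Store round-trips through gob (Load is hypothetical; the original only shows Save):

// Load reads the file written by Save: decompress, then gob-decode.
func (d *Data) Load() error {
	raw, err := os.ReadFile(d.File)
	if err != nil {
		return err
	}

	b, err := snappy.Decode(nil, raw)
	if err != nil {
		return err
	}

	return gob.NewDecoder(bytes.NewReader(b)).Decode(&d.Store)
}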
Example No. 10
func (b *EncryptBackend) Put(hash string, rawData []byte) (err error) {
	// #blobstash/secretbox\n
	// data hash\n
	// data
	var nonce [24]byte
	if err := GenerateNonce(&nonce); err != nil {
		return err
	}
	// First we compress the data with snappy
	data := snappy.Encode(nil, rawData)

	var out bytes.Buffer
	out.WriteString("#blobstash/secretbox\n")
	out.WriteString(fmt.Sprintf("%v\n", hash))
	encData := make([]byte, len(data)+secretbox.Overhead)
	secretbox.Seal(encData[0:0], data, &nonce, b.key)
	out.Write(nonce[:])
	out.Write(encData)
	encHash := fmt.Sprintf("%x", blake2b.Sum256(out.Bytes()))
	b.dest.Put(encHash, out.Bytes())
	b.Lock()
	defer b.Unlock()
	b.index[hash] = encHash
	blobsUploaded.Add(b.dest.String(), 1)
	bytesUploaded.Add(b.dest.String(), int64(len(out.Bytes())))
	return
}
Example No. 11
func (l *WAL) writeToLog(entry WALEntry) (int, error) {
	// encode and compress the entry while we're not locked
	buf := make([]byte, defaultBufLen)

	b, err := entry.Encode(buf)
	if err != nil {
		return -1, err
	}

	// snappy.Encode's dst and src must not overlap, so compress into a
	// fresh destination rather than encoding over b in place.
	compressed := snappy.Encode(nil, b)

	l.mu.Lock()
	defer l.mu.Unlock()

	// Make sure the log has not been closed
	select {
	case <-l.closing:
		return -1, ErrWALClosed
	default:
	}

	// roll the segment file if needed
	if err := l.rollSegment(); err != nil {
		return -1, fmt.Errorf("error rolling WAL segment: %v", err)
	}

	// write and sync
	if err := l.currentSegmentWriter.Write(entry.Type(), compressed); err != nil {
		return -1, fmt.Errorf("error writing WAL entry: %v", err)
	}

	l.lastWriteTime = time.Now()

	return l.currentSegmentID, l.currentSegmentWriter.sync()
}
Example No. 12
func (w *WALSegmentWriter) Write(e WALEntry) error {
	buf := make([]byte, defaultBufLen)

	b, err := e.Encode(buf)
	if err != nil {
		return err
	}

	// dst and src must not overlap for snappy.Encode, so use a fresh destination.
	compressed := snappy.Encode(nil, b)

	w.mu.Lock()
	defer w.mu.Unlock()

	if _, err := w.w.Write([]byte{byte(e.Type())}); err != nil {
		return err
	}

	if _, err = w.w.Write(u32tob(uint32(len(compressed)))); err != nil {
		return err
	}

	if _, err = w.w.Write(compressed); err != nil {
		return err
	}

	// 5 is the 1 byte type + 4 byte uint32 length
	w.size += len(compressed) + 5

	return nil
}
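A record written this way is one type byte, a four-byte length, and the compressed payload. A hypothetical reader sketch, assuming u32tob writes big-endian:

func readRecord(r io.Reader) (entryType byte, b []byte, err error) {
	hdr := make([]byte, 5) // 1 byte type + 4 byte uint32 length
	if _, err := io.ReadFull(r, hdr); err != nil {
		return 0, nil, err
	}

	compressed := make([]byte, binary.BigEndian.Uint32(hdr[1:]))
	if _, err := io.ReadFull(r, compressed); err != nil {
		return 0, nil, err
	}

	b, err = snappy.Decode(nil, compressed)
	return hdr[0], b, err
}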
Example No. 13
func compressor(fw *Writer, toCompress <-chan *writerBlock, toWrite chan<- *writerBlock) {
	switch fw.CompressionCodec {
	case CompressionDeflate:
		bb := new(bytes.Buffer)
		comp, _ := flate.NewWriter(bb, flate.DefaultCompression)
		for block := range toCompress {
			// don't let a successful Close mask a Write error
			if _, block.err = comp.Write(block.encoded.Bytes()); block.err == nil {
				block.err = comp.Close()
			}
			if block.err == nil {
				block.compressed = bb.Bytes()
				toWrite <- block
			}
			bb = new(bytes.Buffer)
			comp.Reset(bb)
		}
	case CompressionNull:
		for block := range toCompress {
			block.compressed = block.encoded.Bytes()
			toWrite <- block
		}
	case CompressionSnappy:
		for block := range toCompress {
			block.compressed = snappy.Encode(block.compressed, block.encoded.Bytes())
			toWrite <- block
		}
	}
	close(toWrite)
}
Example No. 14
func (e *Engine) writeFields(fields map[string]*tsdb.MeasurementFields) error {
	// compress and save everything
	data, err := json.Marshal(fields)
	if err != nil {
		return err
	}

	fn := filepath.Join(e.path, FieldsFileExtension+"tmp")
	ff, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	_, err = ff.Write(snappy.Encode(nil, data))
	if err != nil {
		return err
	}
	if err := ff.Close(); err != nil {
		return err
	}
	fieldsFileName := filepath.Join(e.path, FieldsFileExtension)

	if _, err := os.Stat(fieldsFileName); !os.IsNotExist(err) {
		if err := os.Remove(fieldsFileName); err != nil {
			return err
		}
	}

	return os.Rename(fn, fieldsFileName)
}
Example No. 15
// WriteRecord writes a single record, compressing the body unless the NoCompression flag is set.
func (rw *Writer) WriteRecord(data []byte, flags Flags) error {
	if rw.Err != nil {
		return rw.Err
	}
	flags = flags | rw.Flags
	if flags&NoCompression == 0 {
		data = snappy.Encode(rw.compressBuf, data)
	}
	header := recordHeader{bodyLength: uint32(len(data)), flags: flags}
	var headerBuf [recordHeaderStorageSize]byte
	header.encode(headerBuf[:])
	if size, _ := rw.bytesWriter.Write(headerBuf[:]); size != recordHeaderStorageSize {
		return rw.err(ErrWriteBytes)
	}
	bodyWriter := checksumWriter{writer: rw.bytesWriter, crc: crc32.NewIEEE()}
	if size, _ := bodyWriter.Write(data); size != len(data) {
		return rw.err(ErrWriteBytes)
	}
	var checksumBuf [4]byte
	binary.LittleEndian.PutUint32(checksumBuf[:], bodyWriter.checksum())
	if size, _ := rw.bytesWriter.Write(checksumBuf[:]); size != len(checksumBuf) {
		return rw.err(ErrWriteBytes)
	}
	return nil
}
Example No. 16
func (snappyMessageCompressor) compressData(dst, src []byte) (n int, err error) {
	p := snappy.Encode(dst, src)
	if len(p) > len(dst) {
		err = io.ErrShortBuffer
	}
	n = len(p)
	return
}
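Since snappy.Encode only falls back to allocating when dst is too small, a caller can make the io.ErrShortBuffer branch above unreachable by sizing dst with MaxEncodedLen. A hedged usage sketch (compressAlways is illustrative, not part of the original API):

func compressAlways(c snappyMessageCompressor, src []byte) ([]byte, error) {
	dst := make([]byte, snappy.MaxEncodedLen(len(src)))
	n, err := c.compressData(dst, src)
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}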
Example No. 17
func (e *Engine) writeSeries(tx *bolt.Tx, series map[string]*tsdb.Series) error {
	data, err := json.Marshal(series)
	if err != nil {
		return err
	}

	return tx.Bucket([]byte("meta")).Put([]byte("series"), snappy.Encode(nil, data))
}
Example No. 18
func (l *Log) writeDeleteEntry(d *deleteData) error {
	js, err := json.Marshal(d)
	if err != nil {
		return err
	}
	data := snappy.Encode(nil, js)
	return l.writeToLog(deleteEntry, data)
}
Example No. 19
func (l *internalLevelDBStore) putByKey(key []byte, c Chunk) {
	l.concurrentWriteLimit <- struct{}{}
	data := snappy.Encode(nil, c.Data())
	err := l.db.Put(key, data, nil)
	d.Chk.NoError(err)
	l.putCount++
	l.putBytes += int64(len(data))
	<-l.concurrentWriteLimit
}
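The read path just reverses the compression after the LevelDB lookup. A hedged sketch (getByKey here is hypothetical; it assumes l.db.Get returns the snappy-compressed bytes stored above):

func (l *internalLevelDBStore) getByKey(key []byte) ([]byte, error) {
	compressed, err := l.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return snappy.Decode(nil, compressed)
}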
Example No. 20
func (e *Engine) writeFields(tx *bolt.Tx, fields map[string]*tsdb.MeasurementFields) error {
	// compress and save everything
	data, err := json.Marshal(fields)
	if err != nil {
		return err
	}

	return tx.Bucket([]byte("meta")).Put([]byte("fields"), snappy.Encode(nil, data))
}
Example No. 21
func block_compressor(in DuplexPipe, block_size, conc_level int) DuplexPipe {

	// This is the output of the generator
	out := DuplexPipe{make(chan Block, conc_level), make(chan Block, conc_level)}
	// Pre-fill the upstream channel with blocks sized for the worst-case
	// compressed length
	comp_len := snappy.MaxEncodedLen(block_size)
	for i := 0; i < conc_level; i++ {
		out.Upstream <- Block{make([]byte, comp_len), 0, 0}
	}

	go func() {

		done := make(chan bool, conc_level)

		for block := range in.Downstream {
			comp_buf := <-out.Upstream
			done <- false

			// Pass block and comp_buf as arguments so each goroutine works
			// on its own copies instead of racing on the loop variables.
			go func(block, comp_buf Block) {

				fmt.Println("Compressing block", block.N, "ID", block.BlockID)

				if block.N == block_size {

					// comp_buf.Buf was sized with MaxEncodedLen, so Encode
					// compresses into it and returns a sub-slice whose
					// length is the compressed size.
					comp_chunk := snappy.Encode(comp_buf.Buf, block.Buf)

					comp_buf.N = len(comp_chunk)
					comp_buf.BlockID = block.BlockID

				} else {
					// short (final) block: pass it through uncompressed
					comp_buf.N = block.N
					comp_buf.BlockID = block.BlockID
					copy(comp_buf.Buf[:comp_buf.N], block.Buf)
				}

				in.Upstream <- block
				out.Downstream <- comp_buf

				<-done
			}(block, comp_buf)
		}
		// Wait for them to finish
		for i := 0; i < conc_level; i++ {
			done <- true
		}
		close(out.Downstream)
	}()

	return out
}
Example No. 22
// Write will write a compressed block of the points to the current segment file. If the segment
// file is larger than the max size, it will roll over to a new file before performing the write.
// This method will also add the points to the in memory cache
func (p *Partition) Write(points []tsdb.Point) error {
	block := make([]byte, 0)
	for _, pp := range points {
		block = append(block, marshalWALEntry(pp.Key(), pp.UnixNano(), pp.Data())...)
	}
	b := snappy.Encode(nil, block)

	if backoff, ok := func() (time.Duration, bool) {
		p.mu.Lock()
		defer p.mu.Unlock()
		// pause writes for a bit if we've hit the size threshold
		if p.memorySize > p.sizeThreshold {
			p.backoffCount++
			return time.Millisecond * 20, true
		}

		return 0, false
	}(); ok {
		go p.flushAndCompact(memoryFlush)
		time.Sleep(backoff)
	}
	p.mu.Lock()
	defer p.mu.Unlock()

	// rotate to a new file if we've gone over our limit
	if p.currentSegmentFile == nil || p.currentSegmentSize > p.maxSegmentSize {
		err := p.newSegmentFile()
		if err != nil {
			return err
		}
	}

	if n, err := p.currentSegmentFile.Write(u64tob(uint64(len(b)))); err != nil {
		return err
	} else if n != 8 {
		return fmt.Errorf("expected to write %d bytes but wrote %d", 8, n)
	}

	if n, err := p.currentSegmentFile.Write(b); err != nil {
		return err
	} else if n != len(b) {
		return fmt.Errorf("expected to write %d bytes but wrote %d", len(b), n)
	}

	if err := p.currentSegmentFile.Sync(); err != nil {
		return err
	}

	p.currentSegmentSize += int64(8 + len(b))
	p.lastWriteTime = time.Now()

	for _, pp := range points {
		p.addToCache(pp.Key(), pp.Data(), pp.UnixNano())
	}
	return nil
}
Example No. 23
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
	buf := make([]byte, 1024<<2)

	b, err := entry.Encode(buf)
	if err != nil {
		panic(fmt.Sprintf("error encoding: %v", err))
	}

	// snappy.Encode's dst and src must not overlap, so compress into a
	// freshly allocated destination.
	return entry.Type(), snappy.Encode(nil, b)
}
Example No. 24
func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
	numBytes := 0
	b := new(leveldb.Batch)
	for _, c := range chunks {
		data := snappy.Encode(nil, c.Data())
		numBytes += len(data)
		b.Put(l.toChunkKey(c.Hash()), data)
	}
	l.putBatch(b, numBytes)
	return
}
Example No. 25
func NewSnappyEncodeOp() stream.Operator {
	name := "SnappyEncodeOp"
	generator := func() mapper.Worker {
		fn := func(obj stream.Object, out mapper.Outputer) error {
			compressed := snappy.Encode(nil, obj.([]byte))
			out.Sending(1).Send(compressed)
			return nil
		}
		return mapper.NewWorker(fn, name)
	}
	return mapper.NewClosureOp(generator, nil, name)
}
Example No. 26
// writeMessageSet writes a Message Set into w.
// It returns the number of bytes written and any error.
func writeMessageSet(w io.Writer, messages []*Message, compression Compression) (int, error) {
	if len(messages) == 0 {
		return 0, nil
	}
	// NOTE(caleb): it doesn't appear to be documented, but I observed that the
	// Java client sets the offset of the synthesized message set for a group of
	// compressed messages to be the offset of the last message in the set.
	compressOffset := messages[len(messages)-1].Offset
	switch compression {
	case CompressionGzip:
		var buf bytes.Buffer
		gz := gzip.NewWriter(&buf)
		if _, err := writeMessageSet(gz, messages, CompressionNone); err != nil {
			return 0, err
		}
		if err := gz.Close(); err != nil {
			return 0, err
		}
		messages = []*Message{
			{
				Value:  buf.Bytes(),
				Offset: compressOffset,
			},
		}
	case CompressionSnappy:
		var buf bytes.Buffer
		if _, err := writeMessageSet(&buf, messages, CompressionNone); err != nil {
			return 0, err
		}
		messages = []*Message{
			{
				Value:  snappy.Encode(nil, buf.Bytes()),
				Offset: compressOffset,
			},
		}
	}

	enc := NewEncoder(w)
	totalSize := 0
	for _, message := range messages {
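		// 26 = 8 (offset) + 4 (size) + the 14-byte message header counted below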
		totalSize += 26 + len(message.Key) + len(message.Value)
		enc.Encode(int64(message.Offset))
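		// 14 = 4 (crc) + 1 (magic) + 1 (attributes) + 4 (key len) + 4 (value len), per the v0 message format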
		messageSize := 14 + len(message.Key) + len(message.Value)
		enc.Encode(int32(messageSize))
		enc.Encode(ComputeCrc(message, compression))
		enc.Encode(int8(0)) // magic byte
		enc.Encode(int8(compression))
		enc.Encode(message.Key)
		enc.Encode(message.Value)
	}
	return totalSize, enc.Err()
}
Example No. 27
func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
	d.Chk.True(l.internalLevelDBStore != nil, "Cannot use LevelDBStore after Close().")
	l.versionSetOnce.Do(l.setVersIfUnset)
	numBytes := 0
	b := new(leveldb.Batch)
	for _, c := range chunks {
		data := snappy.Encode(nil, c.Data())
		numBytes += len(data)
		b.Put(l.toChunkKey(c.Hash()), data)
	}
	l.putBatch(b, numBytes)
	return
}
Example No. 28
func (s *hopCipher) encrypt(msg []byte) []byte {
	cmsg := make([]byte, snappy.MaxEncodedLen(len(msg)))
	cmsg = snappy.Encode(cmsg, msg)

	pmsg := PKCS5Padding(cmsg, cipherBlockSize)
	buf := make([]byte, len(pmsg)+cipherBlockSize)

	iv := buf[:cipherBlockSize]
	rand.Read(iv)
	encrypter := _cipher.NewCBCEncrypter(s.block, iv)
	encrypter.CryptBlocks(buf[cipherBlockSize:], pmsg)

	return buf
}
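A hedged sketch of the inverse path (decrypt is hypothetical here; it assumes a pkcs5Unpad helper that strips the padding added by PKCS5Padding above):

func (s *hopCipher) decrypt(buf []byte) ([]byte, error) {
	iv := buf[:cipherBlockSize]
	pmsg := make([]byte, len(buf)-cipherBlockSize)

	decrypter := _cipher.NewCBCDecrypter(s.block, iv)
	decrypter.CryptBlocks(pmsg, buf[cipherBlockSize:])

	cmsg := pkcs5Unpad(pmsg, cipherBlockSize) // hypothetical unpad helper
	return snappy.Decode(nil, cmsg)
}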
Example No. 29
func writeResponse(w io.Writer, id uint64, serr string, response proto.Message) (err error) {
	// clean response if error
	if serr != "" {
		response = nil
	}

	// marshal response
	pbResponse := []byte{}
	if response != nil {
		pbResponse, err = proto.Marshal(response)
		if err != nil {
			return err
		}
	}

	// compress serialized proto data
	compressedPbResponse := snappy.Encode(nil, pbResponse)

	// generate header
	header := &wire.ResponseHeader{
		Id:                          proto.Uint64(id),
		Error:                       proto.String(serr),
		RawResponseLen:              proto.Uint32(uint32(len(pbResponse))),
		SnappyCompressedResponseLen: proto.Uint32(uint32(len(compressedPbResponse))),
		Checksum:                    proto.Uint32(crc32.ChecksumIEEE(compressedPbResponse)),
	}

	// check header size
	pbHeader, err := proto.Marshal(header)
	if err != nil {
		return
	}
	if uint32(len(pbHeader)) > wire.Default_Const_MaxHeaderLen {
		return fmt.Errorf("protorpc.writeResponse: header larger than max_header_len: %d.",
			len(pbHeader),
		)
	}

	// send header (more)
	if err = sendFrame(w, pbHeader); err != nil {
		return
	}

	// send body (end)
	if err = sendFrame(w, compressedPbResponse); err != nil {
		return
	}

	return nil
}
Example No. 30
func compressor(fw *Writer, toCompress <-chan *writerBlock, toWrite chan<- *writerBlock) {
	switch fw.CompressionCodec {
	case CompressionNull:
		for block := range toCompress {
			block.compressed = block.encoded.Bytes()
			toWrite <- block
		}

	case CompressionDeflate:
		bb := new(bytes.Buffer)
		cw, _ := flate.NewWriter(bb, flate.DefaultCompression)

		for block := range toCompress {
			bb = new(bytes.Buffer)
			cw.Reset(bb)

			if _, block.err = cw.Write(block.encoded.Bytes()); block.err != nil {
				continue
			}

			if block.err = cw.Close(); block.err != nil {
				continue
			}

			block.compressed = bb.Bytes()
			toWrite <- block
		}

	case CompressionSnappy:
		var bb *bytes.Buffer

		var dst []byte

		for block := range toCompress {
			checksum := crc32.ChecksumIEEE(block.encoded.Bytes())

			dst = snappy.Encode(nil, block.encoded.Bytes())
			bb = bytes.NewBuffer(dst)
			if block.err = binary.Write(bb, binary.BigEndian, checksum); block.err != nil {
				continue
			}

			block.compressed = bb.Bytes()
			toWrite <- block
		}

	}
	close(toWrite)
}
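In the snappy case above, the four-byte big-endian CRC32 of the uncompressed block is appended after the compressed bytes, which matches the Avro object-container convention for the snappy codec. A hedged sketch of the inverse (decodeSnappyBlock is hypothetical):

func decodeSnappyBlock(compressed []byte) ([]byte, error) {
	if len(compressed) < 4 {
		return nil, errors.New("block too short")
	}
	n := len(compressed) - 4

	dec, err := snappy.Decode(nil, compressed[:n])
	if err != nil {
		return nil, err
	}

	if crc32.ChecksumIEEE(dec) != binary.BigEndian.Uint32(compressed[n:]) {
		return nil, errors.New("snappy block checksum mismatch")
	}
	return dec, nil
}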