func (vs *VolumeServer) getVolumeRawDataHandler(w http.ResponseWriter, r *http.Request) {
	v, e := vs.getVolume("volume", r)
	if v == nil {
		http.Error(w, e.Error(), http.StatusBadRequest)
		return
	}

	if origin, err := strconv.ParseBool(r.FormValue("origin")); err == nil && origin {
		http.ServeFile(w, r, v.FileName()+".dat")
		return
	}

	cr, e := v.GetVolumeCleanReader()
	if e != nil {
		http.Error(w, fmt.Sprintf("Get volume clean reader: %v", e), http.StatusInternalServerError)
		return
	}
	totalSize, e := cr.Size()
	if e != nil {
		http.Error(w, fmt.Sprintf("Get volume size: %v", e), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Content-Disposition", fmt.Sprintf(`filename="%d.dat.lz4"`, v.Id))

	rangeReq := r.Header.Get("Range")
	if rangeReq == "" {
		w.Header().Set("X-Content-Length", strconv.FormatInt(totalSize, 10))
		w.Header().Set("Content-Encoding", "lz4")
		lz4w := lz4.NewWriter(w)
		if _, e = io.Copy(lz4w, cr); e != nil {
			glog.V(4).Infoln("response write error:", e)
		}
		lz4w.Close()
		return
	}
	ranges, e := parseRange(rangeReq, totalSize)
	if e != nil {
		http.Error(w, e.Error(), http.StatusRequestedRangeNotSatisfiable)
		return
	}
	if len(ranges) != 1 {
		http.Error(w, "Only support one range", http.StatusNotImplemented)
		return
	}
	ra := ranges[0]
	if _, e := cr.Seek(ra.start, 0); e != nil {
		http.Error(w, e.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("X-Content-Length", strconv.FormatInt(ra.length, 10))
	w.Header().Set("Content-Range", ra.contentRange(totalSize))
	w.Header().Set("Content-Encoding", "lz4")
	w.WriteHeader(http.StatusPartialContent)
	lz4w := lz4.NewWriter(w)
	if _, e = io.CopyN(lz4w, cr, ra.length); e != nil {
		glog.V(2).Infoln("response write error:", e)
	}
	lz4w.Close()
}
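The handler above streams the volume as an lz4 frame, so a client has to decode the body with lz4.NewReader. A minimal client-side sketch, assuming the pierrec/lz4 package used throughout these examples; the URL, port and volume id are hypothetical:

package main

import (
	"io"
	"log"
	"net/http"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	// Hypothetical URL: adjust host, port, path and volume id to the actual server.
	resp, err := http.Get("http://localhost:8080/admin/volume/raw?volume=3")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var body io.Reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "lz4" {
		// The handler writes an lz4 frame, so wrap the body in an lz4 frame reader.
		body = lz4.NewReader(resp.Body)
	}
	if _, err := io.Copy(os.Stdout, body); err != nil {
		log.Fatal(err)
	}
}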
Example #2
// makeLZ4Writer wraps output in an lz4 frame writer. Any compression level
// above gzip.DefaultCompression (-1) switches the writer to high-compression mode.
func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	lzwriter := lz4.NewWriter(output)
	if compressionLevel > gzip.DefaultCompression {
		lzwriter.Header.HighCompression = true
	}
	return lzwriter, nil
}
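A minimal usage sketch for the helper above; compressToFile and the output file name are illustrative and assume makeLZ4Writer plus the os package are in scope:

// compressToFile is a hypothetical caller: it writes data through makeLZ4Writer
// into the named file, closing the lz4 frame before the file itself.
func compressToFile(name string, data []byte) error {
	f, err := os.Create(name)
	if err != nil {
		return err
	}
	defer f.Close()

	// Any level above gzip.DefaultCompression (-1) enables high compression.
	w, err := makeLZ4Writer(f, 9)
	if err != nil {
		return err
	}
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	// Closing the lz4 writer flushes buffered blocks and writes the frame footer.
	return w.Close()
}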
Example #3
// lz4.Reader fuzz function
func Fuzz(data []byte) int {
	// uncompress some data
	d, err := ioutil.ReadAll(lz4.NewReader(bytes.NewReader(data)))
	if err != nil {
		return 0
	}

	// got valid compressed data
	// compress the uncompressed data
	// and compare with the original input
	buf := bytes.NewBuffer(nil)
	zw := lz4.NewWriter(buf)
	n, err := zw.Write(d)
	if err != nil {
		panic(err)
	}
	if n != len(d) {
		panic("short write")
	}
	err = zw.Close()
	if err != nil {
		panic(err)
	}

	// uncompress the newly compressed data
	ud, err := ioutil.ReadAll(lz4.NewReader(buf))
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(d, ud) {
		panic("not equal")
	}

	return 1
}
Example #4
func TestIsLZ4(t *testing.T) {
	var buf bytes.Buffer
	lw := lz4.NewWriter(&buf)
	n, err := lw.Write(testVal)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if n != 452 {
		t.Errorf("Expected 452 bytes to be written; %d were", n)
	}
	lw.Close()
	r := bytes.NewReader(buf.Bytes())
	ok, err := IsLZ4(r)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !ok {
		t.Error("Expected ok to be true, got false")
	}
	format, err := GetFormat(r)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if format != LZ4 {
		t.Errorf("Expected format to be LZ4 got %s", format)
	}
}
Example #5
func (_ Test) CompressLZ4(data []byte) []byte {
	// out receives the compressed frame; the writer must be closed before the
	// bytes are read so the frame footer is included.
	out := bytes.Buffer{}

	w := lz4.NewWriter(&out)
	w.Write(data)
	w.Close()
	return out.Bytes()
}
Example #6
// TestFrame compresses and decompresses LZ4 streams with various input data and options.
func TestFrame(t *testing.T) {
	for _, tdata := range testDataItems {
		data := tdata.data
		// test various options
		for _, headerItem := range testHeaderItems {
			tag := tdata.label + ": " + headerItem.label
			rw := bytes.NewBuffer(nil)

			// Set all options to non default values and compress
			w := lz4.NewWriter(rw)
			w.Header = headerItem.header

			n, err := w.Write(data)
			if err != nil {
				t.Errorf("%s: Write(): unexpected error: %v", tag, err)
				t.FailNow()
			}
			if n != len(data) {
				t.Errorf("%s: Write(): expected %d bytes written, got %d", tag, len(data), n)
				t.FailNow()
			}
			if err = w.Close(); err != nil {
				t.Errorf("%s: Close(): unexpected error: %v", tag, err)
				t.FailNow()
			}

			// Decompress
			r := lz4.NewReader(rw)
			n, err = r.Read(nil)
			if err != nil {
				t.Errorf("%s: Read(): unexpected error: %v", tag, err)
				t.FailNow()
			}
			if n != 0 {
				t.Errorf("%s: Read(): expected 0 bytes read, got %d", tag, n)
			}

			buf := make([]byte, len(data))
			n, err = r.Read(buf)
			if err != nil && err != io.EOF {
				t.Errorf("%s: Read(): unexpected error: %v", tag, err)
				t.FailNow()
			}
			if n != len(data) {
				t.Errorf("%s: Read(): expected %d bytes read, got %d", tag, len(data), n)
			}
			buf = buf[:n]
			if !bytes.Equal(buf, data) {
				t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, len(buf), len(data))
				t.FailNow()
			}

			compareHeaders(w.Header, r.Header, t)
		}
	}
}
Example #7
File: c.go Project: mohae/peu
// CompressLZ4 compresses using lz4 compression. Bytes read is returned along
// with any non io.EOF error that may have occurred.
func CompressLZ4(r io.Reader, w io.Writer) (int64, error) {
	// create the lz4 writer
	lzw := lz4.NewWriter(w)
	n, err := io.Copy(lzw, r)
	if err != nil {
		return n, err
	}
	// Close flushes buffered blocks and writes the frame footer; without it
	// the output would be an incomplete lz4 frame.
	return n, lzw.Close()
}
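For symmetry, a short sketch of the opposite direction. DecompressLZ4 is not part of the project above; it only illustrates pairing lz4.NewReader with io.Copy:

// DecompressLZ4 is a hypothetical counterpart to CompressLZ4: it reads one
// lz4 frame from r, decompresses it into w, and returns the bytes written.
func DecompressLZ4(r io.Reader, w io.Writer) (int64, error) {
	lzr := lz4.NewReader(r)
	return io.Copy(w, lzr)
}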
Example #8
func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) {
	cw := lz4.NewWriter(c.w)
	//	cw.Header.HighCompression = true
	cw.Header.NoChecksum = true
	b.ResetTimer()

	_, err := io.Copy(cw, c.r)
	if err != nil {
		b.Fatal(err)
	}
	cw.Close()
	c.w.Sync()
}
Example #9
func TestSkippable(t *testing.T) {
	w := lz4.NewWriter(nil)
	r := lz4.NewReader(nil)

	skippable := make([]byte, 1<<20)
	binary.LittleEndian.PutUint32(skippable, lz4.FrameSkipMagic)
	binary.LittleEndian.PutUint32(skippable[4:], uint32(len(skippable)-8))

	buf := make([]byte, len(lorem))

	tag := "skippable first"
	zbuf := bytes.NewBuffer(skippable)
	w.Reset(zbuf)
	w.Write(lorem)
	w.Close()

	r.Reset(zbuf)
	if _, err := r.Read(buf); err != nil {
		t.Errorf("%s: unexpected error: %s", tag, err)
		t.FailNow()
	}

	tag = "skippable last"
	zbuf = bytes.NewBuffer(nil)
	w.Reset(zbuf)
	w.Write(lorem)
	w.Close()
	zbuf.Write(skippable)

	r.Reset(zbuf)
	if _, err := r.Read(buf); err != nil {
		t.Errorf("%s: unexpected error: %s", tag, err)
		t.FailNow()
	}

	tag = "skippable middle"
	zbuf = bytes.NewBuffer(nil)
	w.Reset(zbuf)
	w.Write(lorem)
	zbuf.Write(skippable)
	w.Write(lorem)
	w.Close()

	r.Reset(zbuf)
	if _, err := r.Read(buf); err != nil {
		t.Errorf("%s: unexpected error: %s", tag, err)
		t.FailNow()
	}

}
Example #10
// TestNoWrite compresses without any call to Write() (empty frame).
// It does so checking all possible headers.
func TestNoWrite(t *testing.T) {
	// that is 2*2*2*2*2*2^4 = 512 headers!
	seed := map[string][]interface{}{
		"BlockDependency": {true},
		"BlockChecksum":   {true},
		"NoChecksum":      {true},
		"Size":            {999},
		// "Dict":            {true},
		// Enabling this substantially increase the testing time.
		// As this test is not really required it is disabled.
		// "HighCompression": {true},
	}
	for _, bms := range lz4.BlockMaxSizeItems {
		seed["BlockMaxSize"] = append(seed["BlockMaxSize"], bms)
	}
	testHeaderItems := buildHeaders(seed)

	for _, h := range testHeaderItems {
		rw := bytes.NewBuffer(nil)

		w := lz4.NewWriter(rw)
		w.Header = h.header
		if err := w.Close(); err != nil {
			t.Errorf("Close(): unexpected error: %v", err)
			t.FailNow()
		}

		r := lz4.NewReader(rw)
		n, err := r.Read(nil)
		if err != nil {
			t.Errorf("Read(): unexpected error: %v", err)
			t.FailNow()
		}
		if n != 0 {
			t.Errorf("expected 0 bytes read, got %d", n)
			t.FailNow()
		}

		buf := make([]byte, 16)
		n, err = r.Read(buf)
		if err != nil && err != io.EOF {
			t.Errorf("Read(): unexpected error: %v", err)
			t.FailNow()
		}
		if n != 0 {
			t.Errorf("expected 0 bytes read, got %d", n)
			t.FailNow()
		}
	}
}
Example #11
// TestReset tests that the Reset() method resets the header on the Reader and Writer.
func TestReset(t *testing.T) {
	h := lz4.Header{
		BlockDependency: true,
		BlockChecksum:   true,
		NoChecksum:      true,
		BlockMaxSize:    123,
		Size:            999,
		// Dict:            true,
		// DictID:          555,
	}
	dh := lz4.Header{}

	w := lz4.NewWriter(nil)
	w.Header = h
	w.Reset(nil)
	compareHeaders(w.Header, dh, t)

	r := lz4.NewReader(nil)
	r.Header = h
	r.Reset(nil)
	compareHeaders(r.Header, dh, t)
}
Example #12
// TestCopy will use io.Copy and avoid using Reader.WriteTo() and Writer.ReadFrom().
func TestCopy(t *testing.T) {
	w := lz4.NewWriter(nil)
	r := lz4.NewReader(nil)
	for _, tdata := range testDataItems {
		data := tdata.data

		// test various options
		for _, headerItem := range testHeaderItems {
			tag := "io.Copy: " + tdata.label + ": " + headerItem.label
			dbuf := &testBuffer{bytes.NewBuffer(data)}

			zbuf := bytes.NewBuffer(nil)
			w.Reset(zbuf)
			w.Header = headerItem.header
			if _, err := io.Copy(w, dbuf); err != nil {
				t.Errorf("%s: unexpected error: %s", tag, err)
				t.FailNow()
			}

			if err := w.Close(); err != nil {
				t.Errorf("%s: unexpected error: %s", tag, err)
				t.FailNow()
			}

			buf := &testBuffer{bytes.NewBuffer(nil)}
			r.Reset(zbuf)
			if _, err := io.Copy(buf, r); err != nil {
				t.Errorf("%s: unexpected error: %s", tag, err)
				t.FailNow()
			}

			if !bytes.Equal(buf.Bytes(), data) {
				t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
				t.FailNow()
			}
		}
	}
}
Example #13
func cpflate(dst, src io.ReadWriteCloser) {
	w := lz4.NewWriter(dst)
	// Close the lz4 frame when copying stops so buffered data reaches dst.
	defer w.Close()

	errc := make(chan error)
	rbuf := make(chan []byte)

	// Read from src in a separate goroutine and hand chunks to the main loop.
	go func() {
		for {
			buf := make([]byte, 32*512)
			rlen, err := src.Read(buf)
			if rlen > 0 {
				rbuf <- buf[:rlen]
			}
			if err != nil {
				errc <- err
				return
			}
		}
	}()

	// Flush on a fixed interval so buffered data is pushed to dst even when src is idle.
	ticker := time.NewTicker(16 * time.Millisecond)
	defer ticker.Stop()

loop:
	for {
		select {
		case <-ticker.C:
			w.Flush()
		case buf := <-rbuf:
			if _, err := w.Write(buf); err != nil {
				break loop
			}
			w.Flush()
		case <-errc:
			break loop
		}
	}
}
Example #14
func main() {
	// Process command line arguments
	var (
		blockMaxSizeDefault = 4 << 20
		flagStdout          = flag.Bool("c", false, "output to stdout")
		flagDecompress      = flag.Bool("d", false, "decompress flag")
		flagBlockMaxSize    = flag.Int("B", blockMaxSizeDefault, "block max size [64Kb,256Kb,1Mb,4Mb]")
		flagBlockDependency = flag.Bool("BD", false, "enable block dependency")
		flagBlockChecksum   = flag.Bool("BX", false, "enable block checksum")
		flagStreamChecksum  = flag.Bool("Sx", false, "disable stream checksum")
		flagHighCompression = flag.Bool("9", false, "enable high compression")
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage:\n\t%s [arg] [input]...\n\tNo input means [de]compress stdin to stdout\n\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()

	// Use all CPUs
	runtime.GOMAXPROCS(runtime.NumCPU())

	zr := lz4.NewReader(nil)
	zw := lz4.NewWriter(nil)
	zh := lz4.Header{
		BlockDependency: *flagBlockDependency,
		BlockChecksum:   *flagBlockChecksum,
		BlockMaxSize:    *flagBlockMaxSize,
		NoChecksum:      *flagStreamChecksum,
		HighCompression: *flagHighCompression,
	}

	worker := func(in io.Reader, out io.Writer) {
		if *flagDecompress {
			zr.Reset(in)
			if _, err := io.Copy(out, zr); err != nil {
				log.Fatalf("Error while decompressing input: %v", err)
			}
		} else {
			zw.Reset(out)
			zw.Header = zh
			if _, err := io.Copy(zw, in); err != nil {
				log.Fatalf("Error while compressing input: %v", err)
			}
		}
	}

	// No input means [de]compress stdin to stdout
	if len(flag.Args()) == 0 {
		worker(os.Stdin, os.Stdout)
		os.Exit(0)
	}

	// Compress or decompress all input files
	for _, inputFileName := range flag.Args() {
		outputFileName := path.Clean(inputFileName)

		if !*flagStdout {
			if *flagDecompress {
				outputFileName = strings.TrimSuffix(outputFileName, lz4.Extension)
				if outputFileName == inputFileName {
					log.Fatalf("Invalid output file name: same as input: %s", inputFileName)
				}
			} else {
				outputFileName += lz4.Extension
			}
		}

		inputFile, err := os.Open(inputFileName)
		if err != nil {
			log.Fatalf("Error while opening input: %v", err)
		}

		outputFile := os.Stdout
		if !*flagStdout {
			outputFile, err = os.Create(outputFileName)
			if err != nil {
				log.Fatalf("Error while opening output: %v", err)
			}
		}
		worker(inputFile, outputFile)

		inputFile.Close()
		if !*flagStdout {
			outputFile.Close()
		}
	}
}
Example #15
func (m *Message) encode(pe packetEncoder) error {
	pe.push(&crc32Field{})

	pe.putInt8(m.Version)

	attributes := int8(m.Codec) & compressionCodecMask
	pe.putInt8(attributes)

	if m.Version >= 1 {
		pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
	}

	err := pe.putBytes(m.Key)
	if err != nil {
		return err
	}

	var payload []byte

	if m.compressedCache != nil {
		payload = m.compressedCache
		m.compressedCache = nil
	} else if m.Value != nil {
		switch m.Codec {
		case CompressionNone:
			payload = m.Value
		case CompressionGZIP:
			var buf bytes.Buffer
			writer := gzip.NewWriter(&buf)
			if _, err = writer.Write(m.Value); err != nil {
				return err
			}
			if err = writer.Close(); err != nil {
				return err
			}
			m.compressedCache = buf.Bytes()
			payload = m.compressedCache
		case CompressionSnappy:
			tmp := snappy.Encode(m.Value)
			m.compressedCache = tmp
			payload = m.compressedCache
		case CompressionLZ4:
			var buf bytes.Buffer
			writer := lz4.NewWriter(&buf)
			if _, err = writer.Write(m.Value); err != nil {
				return err
			}
			if err = writer.Close(); err != nil {
				return err
			}
			m.compressedCache = buf.Bytes()
			payload = m.compressedCache

		default:
			return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
		}
		// Record the compressed payload size for metric gathering.
		m.compressedSize = len(payload)
	}

	if err = pe.putBytes(payload); err != nil {
		return err
	}

	return pe.pop()
}
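The decode path is not part of this snippet. A minimal sketch of how the CompressionLZ4 branch is reversed on the consumer side; decompressLZ4 is hypothetical and only shows the lz4 step:

// decompressLZ4 is a hypothetical helper that reverses the CompressionLZ4
// branch above: it reads a single lz4 frame and returns the raw value bytes.
func decompressLZ4(payload []byte) ([]byte, error) {
	return ioutil.ReadAll(lz4.NewReader(bytes.NewReader(payload)))
}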