Exemple #1
0
// snappify converts an uncompressed Sereal document in b to the incremental
// snappy format in place: it sets the header's type nibble to '2' and
// replaces the body with a varint length prefix followed by the snappy
// compressed payload. The returned slice reuses b's storage where possible.
func snappify(b []byte) ([]byte, error) {
	b[4] |= (2 << 4) // set the document type to '2' (incr snappy)

	// Optional header: varint length at b[5:] followed by that many bytes.
	optHeaderLength, optHeaderSize := varintdecode(b[5:])
	optHeaderLength += optHeaderSize

	// Compress everything after the (5-byte) header and optional header.
	compressed, err := snappy.Encode(nil, b[5+optHeaderLength:])
	if err != nil {
		return nil, err
	}

	// BUG FIX: the old code copied the varint and payload over b and then
	// returned b[0:bytesCopied], which dropped the document header and the
	// length prefix (and silently truncated when b was too small). Rebuild
	// the document instead: keep the original header, append the varint
	// length and the compressed payload.
	b = b[:5+optHeaderLength]
	b = varint(b, uint(len(compressed)))
	b = append(b, compressed...)

	return b, nil
}
Exemple #2
0
// _send frames and writes one request on the connection: a 4-byte header
// (version, flags, stream 0, opcode), a big-endian body length, then the
// body, snappy-compressed when compression is requested.
func (cn *connection) _send(opcode byte, body []byte, compression bool) error {
	if cn.c == nil {
		return driver.ErrBadConn
	}

	flags := byte(0x00)
	if compression {
		compressed, err := snappy.Encode(nil, body)
		if err != nil {
			return err
		}
		body = compressed
		flags = flagCompressed
	}

	frame := make([]byte, 8, 8+len(body))
	frame[0] = protoRequest
	frame[1] = flags
	// frame[2] (stream id) stays zero.
	frame[3] = opcode
	binary.BigEndian.PutUint32(frame[4:8], uint32(len(body)))
	frame = append(frame, body...)

	_, err := cn.c.Write(frame)
	return err
}
Exemple #3
0
// Forward snappy-compresses src[:count] into dst, where count is this.size
// (or all of src when size is zero). It returns the number of source bytes
// consumed and the number of encoded bytes produced.
func (this *SnappyCodec) Forward(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return 0, 0, errors.New("Invalid null source buffer")
	}
	if dst == nil {
		return 0, 0, errors.New("Invalid null destination buffer")
	}
	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}

	count := this.size
	if count == 0 {
		count = uint(len(src))
	}

	// dst must be able to hold the worst-case encoding.
	required := snappy.MaxEncodedLen(int(count))
	if len(dst) < required {
		return 0, 0, fmt.Errorf("Output buffer is too small - size: %d, required %d", len(dst), required)
	}

	encoded, err := snappy.Encode(dst, src[:count])
	if err != nil {
		return 0, 0, fmt.Errorf("Encoding error: %v", err)
	}
	return count, uint(len(encoded)), nil
}
// TestSnappyCompressor checks the compressor's name and verifies that its
// Encode/Decode agree with the snappy package on a round trip.
func TestSnappyCompressor(t *testing.T) {
	c := SnappyCompressor{}
	if name := c.Name(); name != "snappy" {
		t.Fatalf("expected name to be 'snappy', got %v", name)
	}

	str := "My Test String"

	// Test Encoding against the reference implementation.
	expected, err := snappy.Encode(nil, []byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	}
	res, err := c.Encode([]byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	}
	if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected encoded value with the result encoded value.")
	}

	val, err := c.Encode([]byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error '%v'", str, err)
	}

	// Test Decoding against the reference implementation.
	expectedDec, err := snappy.Decode(nil, val)
	if err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	}
	resDec, err := c.Decode(val)
	if err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	}
	if !bytes.Equal(expectedDec, resDec) {
		t.Fatal("failed to match the expected decoded value with the result decoded value.")
	}
}
// compressedChunkGreaterN like compressedChunk produces a single, compressed,
// snappy-framed block. The returned block will have decoded length at most n
// and encoded length greater than n.
func compressedChunkGreaterN(t *testing.T, n int) []byte {
	decoded := make([]byte, n)
	var numTries int
	var encoded []byte
	// Random bytes are essentially incompressible, so snappy-encoding n of
	// them should exceed n bytes; retry a few times in case the generator
	// happens to produce compressible output.
	for len(encoded) <= n && numTries < 3 {
		numTries++
		nrd, err := io.ReadFull(rand.Reader, decoded)
		if err != nil {
			t.Errorf("crypto/rand: %v", err)
			return nil
		}
		if nrd != n {
			t.Errorf("crypto/rand: bad read (%d bytes)", nrd)
			return nil
		}
		// Reslice to capacity so Encode can reuse the previous attempt's
		// backing array instead of allocating a fresh one.
		encoded, err = snappy.Encode(encoded[:cap(encoded)], decoded)
		if err != nil {
			t.Errorf("snappy: %v", err)
			return nil
		}
	}
	if len(encoded) <= n {
		// Three rounds of random input never encoded to more than n bytes.
		t.Error(errNotEnoughEntropy)
		return nil
	}

	// Frame the final random payload as a single compressed chunk.
	return compressedChunk(t, decoded)
}
Exemple #6
0
//*********COMPRESSION*************************

// Compress snappy-encodes src, returning the compressed bytes and true on
// success, or (nil, false) when encoding fails.
func Compress(src []byte) ([]byte, bool) {
	compressed, err := snappy.Encode(nil, src)
	if err != nil {
		return nil, false
	}
	return compressed, true
}
Exemple #7
0
// compressor reads encoded blocks from toCompress, applies fw's configured
// compression codec, and forwards each block to toWrite. It closes toWrite
// once toCompress is drained.
func compressor(fw *Writer, toCompress <-chan *writerBlock, toWrite chan<- *writerBlock) {
	switch fw.CompressionCodec {
	case CompressionDeflate:
		bb := new(bytes.Buffer)
		comp, _ := flate.NewWriter(bb, flate.DefaultCompression)
		for block := range toCompress {
			_, block.err = comp.Write(block.encoded.Bytes())
			// BUG FIX: keep the first error — previously the Close error
			// unconditionally overwrote any Write error.
			if closeErr := comp.Close(); block.err == nil {
				block.err = closeErr
			}
			if block.err == nil {
				block.compressed = bb.Bytes()
				toWrite <- block
			}
			// Fresh buffer for the next block; the previous one's bytes are
			// now owned by the forwarded block.
			bb = new(bytes.Buffer)
			comp.Reset(bb)
		}
	case CompressionNull:
		for block := range toCompress {
			block.compressed = block.encoded.Bytes()
			toWrite <- block
		}
	case CompressionSnappy:
		for block := range toCompress {
			block.compressed, block.err = snappy.Encode(block.compressed, block.encoded.Bytes())
			if block.err != nil {
				block.err = fmt.Errorf("cannot compress: %v", block.err)
			}
			toWrite <- block
		}
	}
	close(toWrite)
}
Exemple #8
0
// store snappy-compresses b and returns the hex-encoded SHA-1 digest of the
// compressed bytes. A fatal error is logged if compression fails.
func store(b []byte) string {
	dst := make([]byte, len(b))
	dst, err := snappy.Encode(dst, b)
	if err != nil {
		log.Fatal(err)
	}
	sum := sha1.Sum(dst)
	return fmt.Sprintf("%x", sum)
}
Exemple #9
0
// main compresses the file named by the first command-line argument into
// "<name>.sz". Symbolic links are rejected; directories are detected but,
// as before, only the first read (up to 100 bytes) of the input is handled.
func main() {
	f := file{Name: "", Dir: false}
	f.Name = opt.Args[0]
	if f.Name == "" {
		// Should never get here but just in-case
		fmt.Println("No arguments given!")
		os.Exit(2)
	}

	// Determine if fname is a file or directory
	fi, err := os.Stat(f.Name)
	if err != nil {
		fmt.Printf("%s does not exist!\n", f.Name)
		os.Exit(2)
	}

	mode := fi.Mode()
	if mode&os.ModeSymlink == os.ModeSymlink {
		fmt.Printf("%s is a symbolic link\n", f.Name)
		os.Exit(2)
	}

	if fi.IsDir() {
		f.Dir = true
	}

	oldfile, err := os.Open(f.Name)
	if err != nil {
		fmt.Printf("Error: %s\n", err.Error())
		os.Exit(2)
	}
	defer oldfile.Close()

	// TODO: read the whole file; this handles at most 100 bytes.
	buffer := make([]byte, 100)
	n, err := oldfile.Read(buffer)
	if err != nil {
		fmt.Printf("Error reading file %s: %s", f.Name, err)
		os.Exit(2)
	}

	// BUG FIX: the original called snappy.Encode(buffer, newfile), which
	// compressed a freshly allocated all-zero slice and then wrote the
	// uncompressed zeros to disk. Compress the bytes actually read.
	encoded, err := snappy.Encode(nil, buffer[:n])
	if err != nil {
		fmt.Printf("Error compressing file: %s", f.Name)
		os.Exit(2)
	}

	f.EncName = f.Name + ".sz"
	// TODO: should keep the original file's permissions.
	export, err := os.OpenFile(f.EncName, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error: %s\n", err.Error())
		os.Exit(2)
	}
	export.Write(encoded)
	export.Close()

	os.Exit(0)
}
Exemple #10
0
// GetSnappyBlock fetches a block of blockSize via GetBlock and replaces
// reply.Chunk with its snappy-compressed form.
func (d *DFS) GetSnappyBlock(blockSize int, reply *DataChunk) error {
	err := d.GetBlock(blockSize, reply)
	handleError(err)

	// BUG FIX: encode into a fresh buffer. Passing reply.Chunk as both dst
	// and src lets the encoder overwrite source bytes it has not read yet,
	// corrupting the output.
	compressed, err := snappy.Encode(nil, reply.Chunk)
	handleError(err)
	reply.Chunk = compressed

	return nil
}
Exemple #11
0
// saveBlock flushes the current w.buf[:w.n] window as one block: it dedupes
// by content ref, snappy-compresses into w.cdata, pads the plaintext so the
// sealed box is a multiple of PadSize, encrypts with secretbox, writes the
// result to disk, and finally records the ref and resets the window.
func (w *Writer) saveBlock() error {
	// Calculate hash of uncompressed data for ref.
	ref := calculateRef(w.h, w.buf[:w.n])

	//TODO check if this block exists on disk.
	if blockExistsOnDisk(ref) {
		// Append ref to list.
		w.refs = append(w.refs, ref)
		w.n = 0
		w.blockCount++
		return nil
	}

	// Compress into w.cdata just past the reserved header bytes.
	compressedData, err := snappy.Encode(w.cdata[headerSize:], w.buf[:w.n])
	if err != nil {
		return err
	}
	dataLen := headerSize + len(compressedData)

	// Pad with zeroes so that the encrypted box is multiple of PadSize.
	var paddedLen int
	if dataLen == 0 {
		paddedLen = PadSize - nonceSize - secretbox.Overhead
	} else {
		// Round (data + nonce + overhead) up to a PadSize multiple, then
		// strip the nonce and overhead again to get the plaintext size.
		paddedLen = (((dataLen + nonceSize + secretbox.Overhead) + (PadSize - 1)) / PadSize) * PadSize
		paddedLen -= nonceSize + secretbox.Overhead
	}
	// Zero the padding region; w.cdata may hold stale bytes from earlier blocks.
	for i := dataLen; i < paddedLen; i++ {
		w.cdata[i] = 0
	}
	plainBlock := w.cdata[:paddedLen]

	// Set block kind.
	plainBlock[0] = w.kind
	// Store compressed length.
	binary.BigEndian.PutUint32(plainBlock[1:], uint32(len(compressedData)))

	// Encrypt.
	var nonce [24]byte
	if err := generateNonce(&nonce); err != nil {
		return err
	}
	//TODO avoid allocation
	// On-disk layout: nonce || secretbox(plainBlock).
	fullBox := make([]byte, len(nonce)+len(plainBlock)+secretbox.Overhead)
	copy(fullBox, nonce[:])
	secretbox.Seal(fullBox[len(nonce):len(nonce)], plainBlock, &nonce, &config.Keys.BlockEnc)
	// Save to disk.
	if err := writeBlockToDisk(ref, fullBox); err != nil {
		return err
	}
	// Append ref to list and reset the window for the next block.
	w.refs = append(w.refs, ref)
	w.n = 0
	w.blockCount++
	return nil
}
Exemple #12
0
// writeResponse marshals the response (dropped when serr is non-empty),
// snappy-compresses it, and writes a size-checked protobuf header frame
// followed by the compressed body frame on w.
func writeResponse(w io.Writer, id uint64, serr string, response proto.Message) (err error) {
	// clean response if error
	if serr != "" {
		response = nil
	}

	// marshal response
	pbResponse := []byte{}
	if response != nil {
		pbResponse, err = proto.Marshal(response)
		if err != nil {
			return err
		}
	}

	// compress serialized proto data
	compressedPbResponse, err := snappy.Encode(nil, pbResponse)
	if err != nil {
		return err
	}

	// generate header carrying both raw and compressed lengths plus a
	// checksum of the compressed payload
	header := &wire.ResponseHeader{
		Id:                          id,
		Error:                       serr,
		RawResponseLen:              uint32(len(pbResponse)),
		SnappyCompressedResponseLen: uint32(len(compressedPbResponse)),
		Checksum:                    crc32.ChecksumIEEE(compressedPbResponse),
	}

	// check header size
	pbHeader, err := proto.Marshal(header)
	// BUG FIX: was `if err != err`, which is always false and silently
	// dropped marshal failures.
	if err != nil {
		return
	}
	if uint32(len(pbHeader)) > wire.Default_Const_MaxHeaderLen {
		return fmt.Errorf("protorpc.writeResponse: header larger than max_header_len: %d.",
			len(pbHeader),
		)
	}

	// send header (more)
	if err = sendFrame(w, pbHeader); err != nil {
		return
	}

	// send body (end)
	if err = sendFrame(w, compressedPbResponse); err != nil {
		return
	}

	return nil
}
Exemple #13
0
// NewSnappyEncodeOp returns a stream operator whose workers snappy-compress
// each incoming byte slice and emit the result as a one-element batch.
// Encoding errors are logged; the (possibly nil) result is emitted regardless.
func NewSnappyEncodeOp() stream.Operator {
	generator := func() interface{} {
		return func(in []byte) [][]byte {
			out, err := snappy.Encode(nil, in)
			if err != nil {
				log.Printf("Error in snappy compression %v", err)
			}
			return [][]byte{out}
		}
	}
	return mapper.NewOpFactory(generator, "NewSnappyEncodeOp")
}
Exemple #14
0
// finishBlock finishes the current block and returns its block handle, which is
// its offset and length in the table. The block is the accumulated entries,
// the restart-point offsets, their count, and a 5-byte trailer (compression
// type byte + checksum) written after the block contents.
func (w *Writer) finishBlock() (blockHandle, error) {
	// Write the restart points to the buffer.
	if w.nEntries == 0 {
		// Every block must have at least one restart point.
		w.restarts = w.restarts[:1]
		w.restarts[0] = 0
	}
	tmp4 := w.tmp[:4]
	for _, x := range w.restarts {
		binary.LittleEndian.PutUint32(tmp4, x)
		w.buf.Write(tmp4)
	}
	// Trailing count of restart points.
	binary.LittleEndian.PutUint32(tmp4, uint32(len(w.restarts)))
	w.buf.Write(tmp4)

	// Compress the buffer, discarding the result if the improvement
	// isn't at least 12.5%.
	b := w.buf.Bytes()
	w.tmp[0] = noCompressionBlockType
	if w.compression == db.SnappyCompression {
		compressed, err := snappy.Encode(w.compressedBuf, b)
		if err != nil {
			return blockHandle{}, err
		}
		// Keep the full backing array around for reuse on the next block.
		w.compressedBuf = compressed[:cap(compressed)]
		if len(compressed) < len(b)-len(b)/8 {
			w.tmp[0] = snappyCompressionBlockType
			b = compressed
		}
	}

	// Calculate the checksum over the block contents plus the type byte.
	checksum := crc.New(b).Update(w.tmp[:1]).Value()
	binary.LittleEndian.PutUint32(w.tmp[1:5], checksum)

	// Write the bytes to the file: block contents, then the 5-byte trailer.
	if _, err := w.writer.Write(b); err != nil {
		return blockHandle{}, err
	}
	if _, err := w.writer.Write(w.tmp[:5]); err != nil {
		return blockHandle{}, err
	}
	bh := blockHandle{w.offset, uint64(len(b))}
	w.offset += uint64(len(b)) + blockTrailerLen

	// Reset the per-block state.
	w.buf.Reset()
	w.nEntries = 0
	w.restarts = w.restarts[:0]
	return bh, nil
}
Exemple #15
0
// write attempts to encode p as a block and write it to the underlying writer.
// The returned int may not equal p's length if compression below
// MaxBlockSize-4 could not be achieved.
func (w *writer) write(p []byte) (int, error) {
	if len(p) > MaxBlockSize {
		return 0, errors.New(fmt.Sprintf("block too large %d > %d", len(p), MaxBlockSize))
	}

	// Reslice to full capacity so Encode reuses the backing array instead
	// of allocating a fresh one.
	w.dst = w.dst[:cap(w.dst)]
	var err error
	w.dst, err = snappy.Encode(w.dst, p)
	if err != nil {
		return 0, err
	}

	n := len(p)
	block, compressed := w.dst, true
	if len(w.dst) >= n {
		// Compression did not help; emit the source bytes unchanged.
		block, compressed = p[:n], false
	}

	// The stream identifier must precede the first block.
	if !w.sentStreamID {
		if _, err := w.writer.Write(streamID); err != nil {
			return 0, err
		}
		w.sentStreamID = true
	}

	// Pick the block type to match the chosen payload.
	blockType := blockUncompressed
	if compressed {
		blockType = blockCompressed
	}
	writeHeader(w.hdr, blockType, block, p[:n])

	if _, err = w.writer.Write(w.hdr); err != nil {
		return 0, err
	}
	if _, err = w.writer.Write(block); err != nil {
		return 0, err
	}

	return n, nil
}
Exemple #16
0
// write snappy-encodes p as one framed chunk and writes it to the underlying
// writer: the stream ID (once), an 8-byte chunk header, then the compressed
// payload. It returns len(p) on success. Note that the compressed form is
// always emitted, even when it is larger than the source.
func (w *writer) write(p []byte) (int, error) {
	var err error

	if len(p) > MaxBlockSize {
		return 0, errors.New(fmt.Sprintf("block too large %d > %d", len(p), MaxBlockSize))
	}

	// Encode reuses w.dst's storage when it is large enough.
	w.dst, err = snappy.Encode(w.dst, p)
	if err != nil {
		return 0, err
	}

	// The stream identifier must precede the first chunk.
	if !w.sentStreamID {
		_, err := w.writer.Write(streamID)
		if err != nil {
			return 0, err
		}
		w.sentStreamID = true
	}

	length := uint32(len(w.dst)) + 4 // +4 for checksum

	w.hdr[0] = 0x00 // compressed frame ID

	// 3 byte little endian length
	w.hdr[1] = byte(length)
	w.hdr[2] = byte(length >> 8)
	w.hdr[3] = byte(length >> 16)

	// 4 byte little endian CRC32 checksum, computed over the uncompressed
	// input p
	checksum := maskChecksum(crc32.Checksum(p, crcTable))
	w.hdr[4] = byte(checksum)
	w.hdr[5] = byte(checksum >> 8)
	w.hdr[6] = byte(checksum >> 16)
	w.hdr[7] = byte(checksum >> 24)

	_, err = w.writer.Write(w.hdr)
	if err != nil {
		return 0, err
	}

	_, err = w.writer.Write(w.dst)
	if err != nil {
		return 0, err
	}

	return len(p), nil
}
// encode serializes the message under a CRC32 envelope: format byte,
// attributes (compression codec), the key, then the payload, compressed
// according to m.Codec. A cached compressed value, if present, is consumed
// exactly once.
func (m *Message) encode(pe packetEncoder) error {
	pe.push(&crc32Field{})

	pe.putInt8(messageFormat)

	attributes := int8(m.Codec) & compressionCodecMask
	pe.putInt8(attributes)

	err := pe.putBytes(m.Key)
	if err != nil {
		return err
	}

	var payload []byte

	if m.compressedCache != nil {
		payload = m.compressedCache
		m.compressedCache = nil
	} else {
		switch m.Codec {
		case CompressionNone:
			payload = m.Value
		case CompressionGZIP:
			var buf bytes.Buffer
			writer := gzip.NewWriter(&buf)
			// BUG FIX: the original ignored both the Write and Close
			// errors, which could silently produce a truncated payload.
			if _, err = writer.Write(m.Value); err != nil {
				return err
			}
			if err = writer.Close(); err != nil {
				return err
			}
			m.compressedCache = buf.Bytes()
			payload = m.compressedCache
		case CompressionSnappy:
			tmp, err := snappy.Encode(nil, m.Value)
			if err != nil {
				return err
			}
			m.compressedCache = tmp
			payload = m.compressedCache
		default:
			return EncodingError
		}
	}

	err = pe.putBytes(payload)
	if err != nil {
		return err
	}

	return pe.pop()
}
Exemple #18
0
// Put adds the package documentation to the database.
// The document is gob-encoded, truncated when oversized, snappy-compressed,
// and stored via putScript along with its score, search terms, kind and
// next-crawl time.
func (db *Database) Put(pdoc *doc.Package, nextCrawl time.Time) error {
	c := db.Pool.Get()
	defer c.Close()

	score := documentScore(pdoc)
	terms := documentTerms(pdoc, score)

	var gobBuf bytes.Buffer
	if err := gob.NewEncoder(&gobBuf).Encode(pdoc); err != nil {
		return err
	}

	// Truncate large documents.
	if gobBuf.Len() > 700000 {
		// Work on a shallow copy so the caller's pdoc is not mutated.
		pdocNew := *pdoc
		pdoc = &pdocNew
		pdoc.Truncated = true
		pdoc.Vars = nil
		pdoc.Funcs = nil
		pdoc.Types = nil
		pdoc.Consts = nil
		pdoc.Examples = nil
		gobBuf.Reset()
		if err := gob.NewEncoder(&gobBuf).Encode(pdoc); err != nil {
			return err
		}
	}

	// Compress the gob before storing it.
	gobBytes, err := snappy.Encode(nil, gobBuf.Bytes())
	if err != nil {
		return err
	}

	// Document kind: package ("p"), nameless/directory ("d") or command ("c").
	kind := "p"
	switch {
	case pdoc.Name == "":
		kind = "d"
	case pdoc.IsCmd:
		kind = "c"
	}

	// Zero means no scheduled crawl.
	t := int64(0)
	if !nextCrawl.IsZero() {
		t = nextCrawl.Unix()
	}
	_, err = putScript.Do(c, pdoc.ImportPath, pdoc.Synopsis, score, gobBytes, strings.Join(terms, " "), pdoc.Etag, kind, t)
	return err
}
Exemple #19
0
// writeRequest marshals the request, snappy-compresses it, and writes a
// size-checked protobuf header frame followed by the compressed body frame
// on w.
func writeRequest(w io.Writer, id uint64, method string, request proto.Message) error {
	// marshal request
	pbRequest := []byte{}
	if request != nil {
		var err error
		pbRequest, err = proto.Marshal(request)
		if err != nil {
			return err
		}
	}

	// compress serialized proto data
	compressedPbRequest, err := snappy.Encode(nil, pbRequest)
	if err != nil {
		return err
	}

	// generate header carrying both raw and compressed lengths plus a
	// checksum of the compressed payload
	header := &wire.RequestHeader{
		Id:                         id,
		Method:                     method,
		RawRequestLen:              uint32(len(pbRequest)),
		SnappyCompressedRequestLen: uint32(len(compressedPbRequest)),
		Checksum:                   crc32.ChecksumIEEE(compressedPbRequest),
	}

	// check header size
	pbHeader, err := proto.Marshal(header)
	// BUG FIX: was `if err != err`, which is always false and silently
	// dropped marshal failures.
	if err != nil {
		return err
	}
	if uint32(len(pbHeader)) > wire.Default_Const_MaxHeaderLen {
		return fmt.Errorf("protorpc.writeRequest: header larger than max_header_len: %d.", len(pbHeader))
	}

	// send header (more)
	if err := sendFrame(w, pbHeader); err != nil {
		return err
	}

	// send body (end)
	if err := sendFrame(w, compressedPbRequest); err != nil {
		return err
	}

	return nil
}
Exemple #20
0
// KVWrite serializes value (a proto message or raw bytes), snappy-compresses
// it, and stores it under table/uid via the DBServer.Write RPC. result is
// true only when the server acknowledged the write; failures are logged and
// reported through the named returns.
func KVWrite(db *rpcplus.Client, table, uid string, value interface{}) (result bool, err error) {
	var buf []byte

	switch v := value.(type) {
	case gp.Message:
		buf, err = gp.Marshal(v)
		if err != nil {
			logger.Error("KVWrite Error On Marshal %s : %s (%s)", table, uid, err.Error())
			return
		}
	case []byte:
		buf = v
	default:
		logger.Error("KVWrite args type error %v", value)
		return
	}

	dst, err := snappy.Encode(nil, buf)
	if err != nil {
		logger.Error("KVWrite Error On snappy.Encode %s : %s (%s)", table, uid, err.Error())
		return
	}

	var reply protobuf.DBWriteResult
	err = db.Call("DBServer.Write", protobuf.DBWrite{table, uid, dst}, &reply)
	if err != nil {
		logger.Error("KVWrite Error On Create %s: %s (%s)", table, uid, err.Error())
		return
	}

	if reply.Code != protobuf.Ok {
		logger.Error("KVWrite Error On Create %s: %s code (%d)", table, uid, reply.Code)
		return
	}

	result = true
	return
}
// compressedChunk encodes b returning a single, compressed, snappy-framed
// block. compressedChunk can encode source data larger than allowed in the
// specification.
func compressedChunk(t *testing.T, src []byte) []byte {
	encoded, err := snappy.Encode(nil, src)
	if err != nil {
		t.Errorf("snappy: %v", err)
		return nil
	}

	const maxBlock = (1 << 24) - 5 // account for the 4-byte checksum
	if len(encoded) > maxBlock {
		t.Errorf("block data too large %d", len(src))
		return nil
	}

	// 8-byte chunk header followed by the compressed payload.
	chunk := make([]byte, 8, 8+len(encoded))
	writeHeader(chunk[:8], blockCompressed, encoded, src)
	return append(chunk, encoded...)
}
Exemple #22
0
// Encode serializes ev into buffer: a compression-type tag, the payload
// length, then the encoded inner event (snappy-compressed when
// COMPRESSOR_SNAPPY is selected). Unknown compression types fall back to
// COMPRESSOR_NONE.
func (ev *CompressEventV2) Encode(buffer *bytes.Buffer) {
	if ev.CompressType != COMPRESSOR_NONE && ev.CompressType != COMPRESSOR_SNAPPY {
		ev.CompressType = COMPRESSOR_NONE
	}
	EncodeUInt64Value(buffer, uint64(ev.CompressType))

	var raw bytes.Buffer
	EncodeEvent(&raw, ev.Ev)
	defer raw.Reset()

	switch ev.CompressType {
	case COMPRESSOR_NONE:
		EncodeUInt64Value(buffer, uint64(raw.Len()))
		buffer.Write(raw.Bytes())
	case COMPRESSOR_SNAPPY:
		compressed, _ := snappy.Encode(make([]byte, 0), raw.Bytes())
		EncodeUInt64Value(buffer, uint64(len(compressed)))
		buffer.Write(compressed)
	}
}
Exemple #23
0
// RunTestSnappy round-trips data through snappy encode/decode, logging sizes
// and elapsed time; it aborts the process on any encode/decode error or on a
// roundtrip mismatch.
func RunTestSnappy(data []byte) {
	log.Printf("encoding/RunTestSnappy: Testing comprssion Snappy\n")

	start := time.Now()
	encoded, err := snappy.Encode(nil, data)
	if err != nil {
		log.Fatalf("encoding/RunTestSnappy: encoding error: %v\n", err)
	}
	log.Printf("encoding/RunTestSnappy: Compressed from %d bytes to %d bytes in %d ns\n", len(data), len(encoded), time.Since(start).Nanoseconds())

	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		log.Fatalf("encoding/RunTestSnappy: decoding error: %v\n", err)
	}
	log.Printf("encoding/RunTestSnappy: Uncompressed from %d bytes to %d bytes in %d ns\n", len(encoded), len(decoded), time.Since(start).Nanoseconds())

	if !bytes.Equal(data, decoded) {
		log.Fatalf("encoding/RunTestSnappy: roundtrip mismatch\n")
	}
}
Exemple #24
0
// Encode serializes ev into buffer: a compression-type tag followed directly
// by the encoded inner event, snappy-compressed when COMPRESSOR_SNAPPY is
// selected. No payload length is written. Unknown compression types fall
// back to COMPRESSOR_NONE.
func (ev *CompressEvent) Encode(buffer *bytes.Buffer) {
	if ev.CompressType != COMPRESSOR_NONE && ev.CompressType != COMPRESSOR_SNAPPY {
		ev.CompressType = COMPRESSOR_NONE
	}
	EncodeUInt64Value(buffer, uint64(ev.CompressType))

	var raw bytes.Buffer
	EncodeEvent(&raw, ev.Ev)
	defer raw.Reset()

	switch ev.CompressType {
	case COMPRESSOR_NONE:
		buffer.Write(raw.Bytes())
	case COMPRESSOR_SNAPPY:
		compressed, _ := snappy.Encode(make([]byte, 0), raw.Bytes())
		buffer.Write(compressed)
	}
}
Exemple #25
0
// Put adds the package documentation to the database.
// The document is gob-encoded, truncated when oversized, snappy-compressed,
// and stored via putScript with its rank, search terms and update time.
func (db *Database) Put(pdoc *doc.Package) error {
	c := db.Pool.Get()
	defer c.Close()

	// NOTE(review): projectRoot is computed but never used below — looks
	// like dead code or a missing putScript argument; confirm intent.
	projectRoot := pdoc.ProjectRoot
	if projectRoot == "" {
		projectRoot = "go"
	}

	rank := documentRank(pdoc)
	terms := documentTerms(pdoc, rank)

	var gobBuf bytes.Buffer
	if err := gob.NewEncoder(&gobBuf).Encode(pdoc); err != nil {
		return err
	}

	// Truncate large documents.
	if gobBuf.Len() > 700000 {
		// Work on a shallow copy so the caller's pdoc is not mutated.
		pdocNew := *pdoc
		pdoc = &pdocNew
		pdoc.Truncated = true
		pdoc.Vars = nil
		pdoc.Funcs = nil
		pdoc.Types = nil
		pdoc.Consts = nil
		pdoc.Examples = nil
		gobBuf.Reset()
		if err := gob.NewEncoder(&gobBuf).Encode(pdoc); err != nil {
			return err
		}
	}

	// Compress the gob before storing it.
	gobBytes, err := snappy.Encode(nil, gobBuf.Bytes())
	if err != nil {
		return err
	}

	_, err = putScript.Do(c, pdoc.ImportPath, pdoc.Synopsis, rank, gobBytes, strings.Join(terms, " "), pdoc.Updated.Unix())
	return err
}
Exemple #26
0
// BlockAdd appends bl's raw bytes, snappy-compressed, to the block data file,
// writes an index record for it, and registers the block in the in-memory
// index and cache. Write errors on the data file cause a panic.
func (db *BlockDB) BlockAdd(height uint32, bl *Block) (e error) {
	var pos int64
	var flagz [4]byte

	// Block data is appended at the end of the data file.
	pos, e = db.blockdata.Seek(0, os.SEEK_END)
	if e != nil {
		panic(e.Error())
	}

	flagz[0] |= BLOCK_COMPRSD | BLOCK_SNAPPED // gzip compression is deprecated
	cbts, _ := snappy.Encode(nil, bl.Raw)

	blksize := uint32(len(cbts))

	_, e = db.blockdata.Write(cbts)
	if e != nil {
		panic(e.Error())
	}

	ipos, _ := db.blockindx.Seek(0, os.SEEK_CUR) // at this point the file shall always be at its end

	if bl.Trusted {
		flagz[0] |= BLOCK_TRUSTED
	}
	// Index record: flags, block hash, bytes 4..36 of the raw header,
	// then little-endian height, block time, bits, data-file offset and
	// compressed size.
	db.blockindx.Write(flagz[:])
	db.blockindx.Write(bl.Hash.Hash[0:32])
	db.blockindx.Write(bl.Raw[4:36])
	binary.Write(db.blockindx, binary.LittleEndian, uint32(height))
	binary.Write(db.blockindx, binary.LittleEndian, uint32(bl.BlockTime))
	binary.Write(db.blockindx, binary.LittleEndian, uint32(bl.Bits))
	binary.Write(db.blockindx, binary.LittleEndian, uint64(pos))
	binary.Write(db.blockindx, binary.LittleEndian, blksize)

	// Register the block in the in-memory index and cache under the lock.
	db.mutex.Lock()
	db.blockIndex[bl.Hash.BIdx()] = &oneBl{fpos: uint64(pos),
		blen: blksize, ipos: ipos, trusted: bl.Trusted, compressed: true, snappied: true}
	db.addToCache(bl.Hash, bl.Raw)
	db.mutex.Unlock()
	return
}
Exemple #27
0
// TestCmp encodes each file of the reference corpus with both snappy and
// zappy and logs per-file and total compressed sizes and the zappy/snappy
// ratio. Missing corpus files are downloaded first (requires -download).
func TestCmp(t *testing.T) {
	var ts, tz, to int // running totals: snappy bytes, zappy bytes, original bytes
	for i := 0; i <= 17; i++ {
		filename := filepath.Join("testdata", testFiles[i].filename)
		// (Re)fetch the corpus when a file is missing or empty.
		if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
			if !*download {
				t.Fatal("test data not found; skipping test without the -download flag")
			}
			// Download the official snappy C++ implementation reference test data
			// files for benchmarking.
			if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
				t.Fatalf("failed to create testdata: %s", err)
			}
			for _, tf := range testFiles {
				if err := downloadTestdata(tf.filename); err != nil {
					t.Fatalf("failed to download testdata: %s", err)
				}
			}
		}
		data := readFile2(t, filename)
		orig := len(data)
		to += orig
		senc, err := snappy.Encode(nil, data)
		if err != nil {
			t.Fatal(err)
		}

		ns := len(senc)
		zenc, err := Encode(nil, data)
		if err != nil {
			t.Fatal(err)
		}

		nz := len(zenc)
		t.Logf("%35s: snappy %7d, zappy %7d, %.3f, orig %7d", filename, ns, nz, float64(nz)/float64(ns), orig)
		ts += ns
		tz += nz
	}
	t.Logf("%35s: snappy %7d, zappy %7d, %.3f, orig %7d", "TOTAL", ts, tz, float64(tz)/float64(ts), to)
}
Exemple #28
0
// write appends one block to the table: the payload (snappy-compressed
// unless raw is set or the options choose another type), a one-byte
// compression tag, and a 4-byte masked CRC32C covering payload+tag. When bi
// is non-nil it receives the block's offset and stored payload size.
func (t *Writer) write(buf []byte, bi *bInfo, raw bool) (err error) {
	compression := opt.NoCompression
	if !raw {
		compression = t.o.GetCompressionType()
	}
	switch compression {
	case opt.SnappyCompression:
		// Replaces buf with its compressed form, so bi records the
		// compressed size below.
		buf, err = snappy.Encode(nil, buf)
		if err != nil {
			return
		}
	}

	if bi != nil {
		bi.offset = uint64(t.off)
		bi.size = uint64(len(buf))
	}

	_, err = t.w.Write(buf)
	if err != nil {
		return
	}

	// One-byte tag recording which compression was applied.
	compbit := []byte{byte(compression)}
	_, err = t.w.Write(compbit)
	if err != nil {
		return
	}

	// Checksum covers the stored payload plus the compression tag.
	crc := hash.NewCRC32C()
	crc.Write(buf)
	crc.Write(compbit)
	err = binary.Write(t.w, binary.LittleEndian, hash.MaskCRC32(crc.Sum32()))
	if err != nil {
		return
	}

	// Advance past payload + 1 tag byte + 4 checksum bytes.
	t.off += len(buf) + 5
	return
}
Exemple #29
0
// keyTweet snappy-compresses the raw tweet JSON in *tw and prepends a
// 12-byte header: bytes 0-7 hold the tweet's created_at as little-endian
// UnixNano (the sort key) and bytes 8-11 the little-endian compressed
// length. Encode failures panic; JSON/time parse failures call log.Fatal,
// so a non-nil result is only produced on full success.
func keyTweet(tw *tweetItem) (rout *tweetItem) {
	et, err := snappy.Encode(nil, *tw)
	if err != nil {
		panic(err)
	}
	// Only the Created_At field is needed to derive the key.
	var tweet struct {
		Created_At string
	}
	if err := json.Unmarshal(*tw, &tweet); err == nil {
		if t, err := time.Parse(time.RubyDate, tweet.Created_At); err == nil {
			l := len(et)
			out := make(tweetItem, 12+l)

			// 8-byte little-endian timestamp key.
			key := t.UnixNano()
			out[0] = byte(key)
			out[1] = byte(key >> 8)
			out[2] = byte(key >> 16)
			out[3] = byte(key >> 24)
			out[4] = byte(key >> 32)
			out[5] = byte(key >> 40)
			out[6] = byte(key >> 48)
			out[7] = byte(key >> 56)

			// 4-byte little-endian compressed length.
			out[8] = byte(l)
			out[9] = byte(l >> 8)
			out[10] = byte(l >> 16)
			out[11] = byte(l >> 24)

			// Compressed payload follows the header.
			copy(out[12:12+l], et)

			rout = &out
		} else {
			log.Fatal("Could not parse time:", err)
		}
	} else {
		log.Fatal("Could not unmarshal:", string(*tw))
	}
	return
}
Exemple #30
0
// snappify converts an uncompressed Sereal document in b to the incremental
// snappy format in place: it sets the header's type nibble to '2' and
// replaces the body with a varint length prefix followed by the snappy
// compressed payload. The returned slice reuses b's storage where possible.
func snappify(b []byte) ([]byte, error) {
	b[4] |= (2 << 4) // set the document type to '2' (incr snappy)

	// Optional header: varint length at b[5:] followed by that many bytes.
	optHeaderLength, optHeaderSize := varintdecode(b[5:])
	optHeaderLength += optHeaderSize

	// XXX this could be more efficient!  I'm creating a new buffer to
	//     store the compressed document, which isn't necessary.  You
	//     could probably write directly to the slice after the header
	//     and after the varint holding the length
	compressed, err := snappy.Encode(nil, b[5+optHeaderLength:])
	if err != nil {
		return nil, err
	}

	// resize b to just the original header
	b = b[:5+optHeaderLength]
	// varint appears to append the encoded length to b — see the matching
	// append below; confirm against its definition.
	b = varint(b, uint(len(compressed)))
	b = append(b, compressed...)

	return b, nil
}