func Fuzz(data []byte) int { n, err := snappy.DecodedLen(data) if err != nil || n > 1e6 { return 0 } if n < 0 { panic("negative decoded len") } dec, err := snappy.Decode(nil, data) if err != nil { if dec != nil { panic("dec is not nil") } return 0 } if len(dec) != n { println(len(dec), n) panic("bad decoded len") } n = snappy.MaxEncodedLen(len(dec)) enc, err := snappy.Encode(nil, dec) if err != nil { panic(err) } if len(enc) > n { panic("bad encoded len") } return 1 }
func compressor(fw *Writer, toCompress <-chan *writerBlock, toWrite chan<- *writerBlock) { switch fw.CompressionCodec { case CompressionDeflate: bb := new(bytes.Buffer) comp, _ := flate.NewWriter(bb, flate.DefaultCompression) for block := range toCompress { _, block.err = comp.Write(block.encoded.Bytes()) block.err = comp.Close() if block.err == nil { block.compressed = bb.Bytes() toWrite <- block } bb = new(bytes.Buffer) comp.Reset(bb) } case CompressionNull: for block := range toCompress { block.compressed = block.encoded.Bytes() toWrite <- block } case CompressionSnappy: for block := range toCompress { block.compressed, block.err = snappy.Encode(block.compressed, block.encoded.Bytes()) if block.err != nil { block.err = fmt.Errorf("cannot compress: %v", block.err) } toWrite <- block } } close(toWrite) }
func TestSnappyCompressor(t *testing.T) { c := SnappyCompressor{} if c.Name() != "snappy" { t.Fatalf("expected name to be 'snappy', got %v", c.Name()) } str := "My Test String" //Test Encoding if expected, err := snappy.Encode(nil, []byte(str)); err != nil { t.Fatalf("failed to encode '%v' with error %v", str, err) } else if res, err := c.Encode([]byte(str)); err != nil { t.Fatalf("failed to encode '%v' with error %v", str, err) } else if bytes.Compare(expected, res) != 0 { t.Fatal("failed to match the expected encoded value with the result encoded value.") } val, err := c.Encode([]byte(str)) if err != nil { t.Fatalf("failed to encode '%v' with error '%v'", str, err) } //Test Decoding if expected, err := snappy.Decode(nil, val); err != nil { t.Fatalf("failed to decode '%v' with error %v", val, err) } else if res, err := c.Decode(val); err != nil { t.Fatalf("failed to decode '%v' with error %v", val, err) } else if bytes.Compare(expected, res) != 0 { t.Fatal("failed to match the expected decoded value with the result decoded value.") } }
// Encode encodes the message with Snappy compression func (codec *SnappyPayloadCodec) Encode(data []byte) []byte { encoded, err := snappy.Encode(nil, data) if nil != err { panic("Could not encode message: " + err.Error()) } return encoded }
// finishBlock finishes the current block and returns its block handle, which is
// its offset and length in the table.
func (w *Writer) finishBlock() (blockHandle, error) {
	// Write the restart points to the buffer.
	if w.nEntries == 0 {
		// Every block must have at least one restart point.
		w.restarts = w.restarts[:1]
		w.restarts[0] = 0
	}
	tmp4 := w.tmp[:4]
	for _, x := range w.restarts {
		binary.LittleEndian.PutUint32(tmp4, x)
		w.buf.Write(tmp4)
	}
	// Trailing uint32: the number of restart points.
	binary.LittleEndian.PutUint32(tmp4, uint32(len(w.restarts)))
	w.buf.Write(tmp4)
	// Compress the buffer, discarding the result if the improvement
	// isn't at least 12.5%.
	b := w.buf.Bytes()
	w.tmp[0] = noCompressionBlockType
	if w.compression == db.SnappyCompression {
		compressed, err := snappy.Encode(w.compressedBuf, b)
		if err != nil {
			return blockHandle{}, err
		}
		// Keep the (possibly grown) backing array for reuse on the next block.
		w.compressedBuf = compressed[:cap(compressed)]
		if len(compressed) < len(b)-len(b)/8 {
			w.tmp[0] = snappyCompressionBlockType
			b = compressed
		}
	}
	// Calculate the checksum over the block contents plus the one-byte
	// compression-type tag stored in w.tmp[0].
	checksum := crc.New(b).Update(w.tmp[:1]).Value()
	binary.LittleEndian.PutUint32(w.tmp[1:5], checksum)
	// Write the bytes to the file: block contents first, then the 5-byte
	// trailer (compression type + checksum).
	if _, err := w.writer.Write(b); err != nil {
		return blockHandle{}, err
	}
	if _, err := w.writer.Write(w.tmp[:5]); err != nil {
		return blockHandle{}, err
	}
	bh := blockHandle{w.offset, uint64(len(b))}
	w.offset += uint64(len(b)) + blockTrailerLen
	// Reset the per-block state.
	w.buf.Reset()
	w.nEntries = 0
	w.restarts = w.restarts[:0]
	return bh, nil
}
func (s *hopCipher) encrypt(msg []byte) []byte { cmsg := make([]byte, snappy.MaxEncodedLen(len(msg))) cmsg, _ = snappy.Encode(cmsg, msg) pmsg := PKCS5Padding(cmsg, cipherBlockSize) buf := make([]byte, len(pmsg)+cipherBlockSize) iv := buf[:cipherBlockSize] rand.Read(iv) encrypter := _cipher.NewCBCEncrypter(s.block, iv) encrypter.CryptBlocks(buf[cipherBlockSize:], pmsg) return buf }
func TestBgSave(t *testing.T) { //连接mongodb client, err := cluster.NewCluster(DEFAULT_REDIS_HOST) if err != nil { t.Fatal(err) } //序列化数据 bin, _ := msgpack.Marshal(&TestStruct{3721, "hello", 18, 999, 1.1, 2.2, []byte("world")}) //压缩 if env := os.Getenv(ENV_SNAPPY); env != "" { if enc, err := snappy.Encode(nil, bin); err == nil { bin = enc } else { t.Fatal(err) } } //存入 reply := client.Cmd("set", test_key, bin) if reply.Err != nil { t.Fatal(err) } //调用bg save conn, err := grpc.Dial(address) if err != nil { t.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewBgSaveServiceClient(conn) //传入需要落地的key _, err = c.MarkDirty(context.Background(), &pb.BgSave_Key{Name: test_key}) if err != nil { t.Fatalf("could not query: %v", err) } }
// BlockAdd appends a new block to the block data file and records its
// metadata (flags, hash, height, data-file position, compressed size,
// tx count, 80-byte header) in the block index file and the in-memory index.
func (db *BlockDB) BlockAdd(height uint32, bl *btc.Block) (e error) {
	var pos int64
	var flagz [4]byte
	// Append at the end of the data file; remember the offset for the index.
	pos, e = db.blockdata.Seek(0, os.SEEK_END)
	if e != nil {
		panic(e.Error())
	}
	flagz[0] |= BLOCK_COMPRSD | BLOCK_SNAPPED // gzip compression is deprecated
	// Snappy-compress the raw block; the encode error is deliberately ignored.
	cbts, _ := snappy.Encode(nil, bl.Raw)
	blksize := uint32(len(cbts))
	_, e = db.blockdata.Write(cbts)
	if e != nil {
		panic(e.Error())
	}
	ipos, _ := db.blockindx.Seek(0, os.SEEK_CUR) // at this point the file shall always be at its end
	if bl.Trusted {
		flagz[0] |= BLOCK_TRUSTED
	}
	// Write the fixed-layout index record.
	db.blockindx.Write(flagz[:])
	db.blockindx.Write(bl.Hash.Hash[0:32])
	binary.Write(db.blockindx, binary.LittleEndian, uint32(height))
	binary.Write(db.blockindx, binary.LittleEndian, uint64(pos))
	binary.Write(db.blockindx, binary.LittleEndian, blksize)
	binary.Write(db.blockindx, binary.LittleEndian, uint32(bl.TxCount))
	db.blockindx.Write(bl.Raw[:80])
	// Publish the new entry to the in-memory index and the block cache.
	db.mutex.Lock()
	db.blockIndex[bl.Hash.BIdx()] = &oneBl{fpos: uint64(pos), blen: blksize, ipos: ipos, trusted: bl.Trusted, compressed: true, snappied: true}
	db.addToCache(bl.Hash, bl.Raw)
	db.mutex.Unlock()
	return
}
func TestBgSave(t *testing.T) { //t.Skip() // start connection to redis cluster client, err := cluster.NewCluster(DEFAULT_REDIS_HOST) if err != nil { t.Fatal(err) } // mset data into redis bin, _ := msgpack.Marshal(&TestStruct{3721, "hello", 18, 999, 1.1, 2.2, []byte("world")}) // snappy if env := os.Getenv(ENV_SNAPPY); env != "" { if enc, err := snappy.Encode(nil, bin); err == nil { bin = enc } else { t.Fatal(err) } } reply := client.Cmd("set", test_key, bin) if reply.Err != nil { t.Fatal(reply.Err) } // Set up a connection to the server. conn, err := grpc.Dial(address) if err != nil { t.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewBgSaveServiceClient(conn) // Contact the server and print out its response. _, err = c.MarkDirty(context.Background(), &pb.BgSave_Key{Name: test_key}) if err != nil { t.Fatalf("could not query: %v", err) } }
func getRemoteURL(url string) ([]byte, error) { item, err := cacher.Get(url) if err != nil && err != memcache.ErrCacheMiss { return nil, err } if err == memcache.ErrCacheMiss { res, err := http.Get(url) if err != nil { return nil, err } defer res.Body.Close() body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } content, err := snappy.Encode(nil, body) if err != nil { return nil, err } err = cacher.Set(&memcache.Item{Key: url, Value: content, Expiration: config.MCTTL}) if err != nil { return nil, err } return body, nil } content, err := snappy.Decode(nil, item.Value) if err != nil { return nil, err } return content, nil }
// SnappyEncode encodes binary data func snappyEncode(src []byte) ([]byte, error) { return snappy.Encode(nil, src) }
// Encode compresses data using snappy.
func (s SnappyCompressor) Encode(data []byte) ([]byte, error) {
	compressed, err := snappy.Encode(nil, data)
	return compressed, err
}
// Put adds the package documentation to the database.
func (db *Database) Put(pdoc *doc.Package, nextCrawl time.Time, hide bool) error {
	c := db.Pool.Get()
	defer c.Close()
	// Hidden packages get a zero search score.
	score := 0.0
	if !hide {
		score = documentScore(pdoc)
	}
	terms := documentTerms(pdoc, score)
	// Serialize the package docs: gob-encode, then snappy-compress.
	var gobBuf bytes.Buffer
	if err := gob.NewEncoder(&gobBuf).Encode(pdoc); err != nil {
		return err
	}
	gobBytes, err := snappy.Encode(nil, gobBuf.Bytes())
	if err != nil {
		return err
	}
	// Truncate large documents.
	if len(gobBytes) > 400000 {
		// Work on a copy so the caller's pdoc is left intact.
		pdocNew := *pdoc
		pdoc = &pdocNew
		pdoc.Truncated = true
		pdoc.Vars = nil
		pdoc.Funcs = nil
		pdoc.Types = nil
		pdoc.Consts = nil
		pdoc.Examples = nil
		// Re-encode the stripped-down document.
		gobBuf.Reset()
		if err := gob.NewEncoder(&gobBuf).Encode(pdoc); err != nil {
			return err
		}
		gobBytes, err = snappy.Encode(nil, gobBuf.Bytes())
		if err != nil {
			return err
		}
	}
	// Record kind: "p" package, "d" directory (no name), "c" command.
	kind := "p"
	switch {
	case pdoc.Name == "":
		kind = "d"
	case pdoc.IsCmd:
		kind = "c"
	}
	// Next-crawl timestamp; zero means "not a full save".
	t := int64(0)
	if !nextCrawl.IsZero() {
		t = nextCrawl.Unix()
	}
	_, err = putScript.Do(c, pdoc.ImportPath, pdoc.Synopsis, score, gobBytes, strings.Join(terms, " "), pdoc.Etag, kind, t)
	if err != nil {
		return err
	}
	if nextCrawl.IsZero() {
		// Skip crawling related packages if this is not a full save.
		return nil
	}
	// Collect candidate remote import paths to queue for crawling.
	paths := make(map[string]bool)
	for _, p := range pdoc.Imports {
		if gosrc.IsValidRemotePath(p) {
			paths[p] = true
		}
	}
	for _, p := range pdoc.TestImports {
		if gosrc.IsValidRemotePath(p) {
			paths[p] = true
		}
	}
	for _, p := range pdoc.XTestImports {
		if gosrc.IsValidRemotePath(p) {
			paths[p] = true
		}
	}
	if pdoc.ImportPath != pdoc.ProjectRoot && pdoc.ProjectRoot != "" {
		paths[pdoc.ProjectRoot] = true
	}
	for _, p := range pdoc.Subdirectories {
		paths[pdoc.ImportPath+"/"+p] = true
	}
	args := make([]interface{}, 0, len(paths))
	for p := range paths {
		args = append(args, p)
	}
	_, err = addCrawlScript.Do(c, args...)
	return err
}