Example #1
func TestSnappyCompressor(t *testing.T) {
	c := SnappyCompressor{}
	if c.Name() != "snappy" {
		t.Fatalf("expected name to be 'snappy', got %v", c.Name())
	}

	str := "My Test String"
	//Test Encoding
	if expected, err := snappy.Encode(nil, []byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if res, err := c.Encode([]byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected encoded value with the result encoded value.")
	}

	val, err := c.Encode([]byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error '%v'", str, err)
	}

	//Test Decoding
	if expected, err := snappy.Decode(nil, val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if res, err := c.Decode(val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected decoded value with the result decoded value.")
	}
}
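
The compressor under test is not shown on this page. A minimal sketch that would satisfy the assertions above, assuming the older snappy-go API used throughout these examples (where Encode, like Decode, returns an error), could look like this:

type SnappyCompressor struct{}

func (s SnappyCompressor) Name() string {
	return "snappy"
}

func (s SnappyCompressor) Encode(data []byte) ([]byte, error) {
	// snappy.Encode allocates a fresh destination slice when dst is nil.
	return snappy.Encode(nil, data)
}

func (s SnappyCompressor) Decode(data []byte) ([]byte, error) {
	return snappy.Decode(nil, data)
}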
Example #2
func Fuzz(data []byte) int {
	n, err := snappy.DecodedLen(data)
	if err != nil || n > 1e6 {
		return 0
	}
	if n < 0 {
		panic("negative decoded len")
	}
	dec, err := snappy.Decode(nil, data)
	if err != nil {
		if dec != nil {
			panic("dec is not nil")
		}
		return 0
	}
	if len(dec) != n {
		println(len(dec), n)
		panic("bad decoded len")
	}
	n = snappy.MaxEncodedLen(len(dec))
	enc, err := snappy.Encode(nil, dec)
	if err != nil {
		panic(err)
	}
	if len(enc) > n {
		panic("bad encoded len")
	}
	return 1
}
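
Outside the go-fuzz harness, the same invariants can be exercised against saved inputs by hand. A sketch, assuming imports of io/ioutil and path/filepath and a hypothetical corpus directory passed in by the caller:

// replayCorpus feeds each saved input through Fuzz; any invariant violation
// panics exactly as it would under go-fuzz.
func replayCorpus(dir string) error {
	files, err := filepath.Glob(filepath.Join(dir, "*"))
	if err != nil {
		return err
	}
	for _, name := range files {
		data, err := ioutil.ReadFile(name)
		if err != nil {
			return err
		}
		Fuzz(data)
	}
	return nil
}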
Example #3
func (db *Database) getDoc(c redis.Conn, path string) (*doc.Package, time.Time, error) {
	r, err := redis.Values(getDocScript.Do(c, path))
	if err == redis.ErrNil {
		return nil, time.Time{}, nil
	} else if err != nil {
		return nil, time.Time{}, err
	}

	var p []byte
	var t int64

	if _, err := redis.Scan(r, &p, &t); err != nil {
		return nil, time.Time{}, err
	}

	p, err = snappy.Decode(nil, p)
	if err != nil {
		return nil, time.Time{}, err
	}

	var pdoc doc.Package
	if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pdoc); err != nil {
		return nil, time.Time{}, err
	}

	nextCrawl := pdoc.Updated
	if t != 0 {
		nextCrawl = time.Unix(t, 0).UTC()
	}

	return &pdoc, nextCrawl, err
}
Example #4
func (re *SnappyDecoder) Decode(pack *PipelinePack) (packs []*PipelinePack, err error) {
	output, decodeErr := snappy.Decode(nil, pack.MsgBytes)

	packs = []*PipelinePack{pack}
	if decodeErr == nil {
		// Replace bytes with decoded data
		pack.MsgBytes = output
	}
	// If there is an error decoding snappy, maybe it wasn't compressed. We'll
	// return the original data and try to proceed.
	return
}
Example #5
func (s *hopCipher) decrypt(iv []byte, ctext []byte) []byte {
	defer func() {
		if err := recover(); err != nil {
			logger.Error("%v", err)
		}
	}()
	decrypter := _cipher.NewCBCDecrypter(s.block, iv)
	buf := make([]byte, len(ctext))
	decrypter.CryptBlocks(buf, ctext)
	cmsg := PKCS5UnPadding(buf)

	msg, _ := snappy.Decode(nil, cmsg)
	return msg
}
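
Only the decrypt path appears here. A sketch of the inverse operation (snappy-compress, PKCS#5-pad, then CBC-encrypt), using the same older snappy-go API and a hypothetical pkcs5Padding helper that mirrors PKCS5UnPadding above:

func (s *hopCipher) encrypt(iv []byte, msg []byte) ([]byte, error) {
	cmsg, err := snappy.Encode(nil, msg)
	if err != nil {
		return nil, err
	}
	padded := pkcs5Padding(cmsg, s.block.BlockSize())
	encrypter := _cipher.NewCBCEncrypter(s.block, iv)
	ctext := make([]byte, len(padded))
	encrypter.CryptBlocks(ctext, padded)
	return ctext, nil
}

// pkcs5Padding pads data to a multiple of blockSize, each pad byte holding
// the pad length (hypothetical helper, not part of the original source).
func pkcs5Padding(data []byte, blockSize int) []byte {
	padLen := blockSize - len(data)%blockSize
	return append(data, bytes.Repeat([]byte{byte(padLen)}, padLen)...)
}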
Example #6
func (s *server) dump(dirty map[string]bool) {
	// iterate over dirty and take out each key
	for k := range dirty {
		raw, err := s.redis_client.Cmd("GET", k).Bytes()
		if err != nil {
			log.Critical(err)
			continue
		}

		// decompress
		if s.enable_snappy {
			if dec, err := snappy.Decode(nil, raw); err == nil {
				raw = dec
			} else {
				log.Critical(err)
				continue
			}
		}

		var record map[string]interface{}
		// deserialize
		err = msgpack.Unmarshal(raw, &record)
		if err != nil {
			log.Critical(err)
			continue
		}

		// split the key to get the table name and record id
		strs := strings.Split(k, ":")
		if len(strs) != 2 {
			log.Critical("canot split key", k)
			continue
		}

		tblname, id_str := strs[0], strs[1]

		id, err := strconv.Atoi(id_str)
		if err != nil {
			log.Critical(err)
			continue
		}

		// save data to mongodb
		_, err = s.db.C(tblname).Upsert(bson.M{"Id": id}, record)
		if err != nil {
			log.Critical(err)
			continue
		}
	}
}
Example #7
// Decode decodes the message with Snappy compression
func (codec *SnappyPayloadCodec) Decode(data []byte) []byte {
	if len(data) >= 8 && bytes.Equal(data[:8], snappyMagic) {
		var pos = uint32(16)
		var max = uint32(len(data))
		var decoded []byte
		for pos < max {
			size := binary.BigEndian.Uint32(data[pos : pos+4])
			pos = pos + 4
			chunk, err := snappy.Decode(nil, data[pos:pos+size])
			if nil != err {
				panic("Could not decode Snappy-encoded message: " + err.Error())
			}
			pos = pos + size
			decoded = append(decoded, chunk...)
		}
		return decoded
	}
	decoded, err := snappy.Decode(nil, data)
	if nil != err {
		panic("Could not decode Snappy-encoded message: " + err.Error())
	}
	return decoded
}
Example #8
// SnappyDecode decodes snappy data
func snappyDecode(src []byte) ([]byte, error) {
	if len(src) >= 8 && bytes.Equal(src[:8], snappyMagic) {
		var (
			pos   = uint32(16)
			max   = uint32(len(src))
			dst   = make([]byte, 0, len(src))
			chunk []byte
			err   error
		)
		for pos < max {
			size := binary.BigEndian.Uint32(src[pos : pos+4])
			pos += 4

			chunk, err = snappy.Decode(chunk, src[pos:pos+size])
			if err != nil {
				return nil, err
			}
			pos += size
			dst = append(dst, chunk...)
		}
		return dst, nil
	}
	return snappy.Decode(nil, src)
}
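
For testing the framed branch above, a chunked payload can be built by hand. A sketch, assuming snappyMagic is the 8-byte xerial-style header (0x82 "SNAPPY" 0x00) followed by two 4-byte big-endian version fields, and again using the older snappy-go Encode that returns an error:

// buildChunkedSnappy frames each chunk as [4-byte big-endian length][snappy block]
// after a 16-byte header, matching what snappyDecode expects.
func buildChunkedSnappy(chunks [][]byte) ([]byte, error) {
	buf := append([]byte{}, snappyMagic...)
	buf = append(buf, 0, 0, 0, 1) // default version
	buf = append(buf, 0, 0, 0, 1) // minimum compatible version
	for _, c := range chunks {
		enc, err := snappy.Encode(nil, c)
		if err != nil {
			return nil, err
		}
		var size [4]byte
		binary.BigEndian.PutUint32(size[:], uint32(len(enc)))
		buf = append(buf, size[:]...)
		buf = append(buf, enc...)
	}
	return buf, nil
}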
Example #9
func decompress(fr *Reader, toDecompress <-chan *readerBlock, toDecode chan<- *readerBlock) {
	switch fr.CompressionCodec {
	case CompressionDeflate:
		var rc io.ReadCloser
		var bits []byte
		for block := range toDecompress {
			rc = flate.NewReader(block.r)
			bits, block.err = ioutil.ReadAll(rc)
			if block.err != nil {
				block.err = newReaderError("cannot read from deflate", block.err)
				toDecode <- block
				_ = rc.Close() // already have the read error; ignore the close error
				continue
			}
			block.err = rc.Close()
			if block.err != nil {
				block.err = newReaderError("cannot close deflate", block.err)
				toDecode <- block
				continue
			}
			block.r = bytes.NewReader(bits)
			toDecode <- block
		}
	case CompressionNull:
		for block := range toDecompress {
			toDecode <- block
		}
	case CompressionSnappy:
		var src, dst []byte
		for block := range toDecompress {
			src, block.err = ioutil.ReadAll(block.r)
			if block.err != nil {
				block.err = newReaderError("cannot read", block.err)
				toDecode <- block
				continue
			}
			dst, block.err = snappy.Decode(dst, src)
			if block.err != nil {
				block.err = newReaderError("cannot decompress", block.err)
				toDecode <- block
				continue
			}
			block.r = bytes.NewReader(dst)
			toDecode <- block
		}
	}
	close(toDecode)
}
Example #10
// dump all dirty data into backend database
func (s *server) dump(dirty map[string]bool) {
	for k := range dirty {
		raw, err := s.redis_client.Cmd("GET", k).Bytes()
		if err != nil {
			log.Critical(err)
			continue
		}

		// snappy
		if s.enable_snappy {
			if dec, err := snappy.Decode(nil, raw); err == nil {
				raw = dec
			} else {
				log.Critical(err)
				continue
			}
		}

		// unpack message from msgpack format
		var record map[string]interface{}
		err = msgpack.Unmarshal(raw, &record)
		if err != nil {
			log.Critical(err)
			continue
		}

		// split key into TABLE NAME and RECORD ID
		strs := strings.Split(k, ":")
		if len(strs) != 2 { // log the wrong key
			log.Critical("cannot split key", k)
			continue
		}
		tblname, id_str := strs[0], strs[1]
		// save data to mongodb
		id, err := strconv.Atoi(id_str)
		if err != nil {
			log.Critical(err)
			continue
		}

		_, err = s.db.C(tblname).Upsert(bson.M{"Id": id}, record)
		if err != nil {
			log.Critical(err)
			continue
		}
	}
}
Example #11
func (db *BlockDB) BlockGet(hash *btc.Uint256) (bl []byte, trusted bool, e error) {
	db.mutex.Lock()
	rec, ok := db.blockIndex[hash.BIdx()]
	if !ok {
		db.mutex.Unlock()
		e = errors.New("btc.Block not in the index")
		return
	}

	trusted = rec.trusted
	if crec, hit := db.cache[hash.BIdx()]; hit {
		bl = crec.data
		crec.used = time.Now()
		db.mutex.Unlock()
		return
	}
	db.mutex.Unlock()

	bl = make([]byte, rec.blen)

	// we re-open the data file so as not to disturb the write pointer
	f, e := os.Open(db.dirname + "blockchain.dat")
	if e != nil {
		return
	}

	_, e = f.Seek(int64(rec.fpos), os.SEEK_SET)
	if e == nil {
		_, e = f.Read(bl[:])
	}
	f.Close()

	if rec.compressed {
		if rec.snappied {
			bl, _ = snappy.Decode(nil, bl)
		} else {
			gz, _ := gzip.NewReader(bytes.NewReader(bl))
			bl, _ = ioutil.ReadAll(gz)
			gz.Close()
		}
	}

	db.addToCache(hash, bl)

	return
}
Example #12
// Read the next outer message from the buffer.
func (p *Parser) readOuterMessage() (*outerMessage, error) {
	// Read a command header, which includes both the message type and a
	// flag indicating whether the message is compressed with snappy.
	command := dota.EDemoCommands(p.reader.readVarUint32())

	// Extract the type and compressed flag out of the command
	msgType := int32(command & ^dota.EDemoCommands_DEM_IsCompressed)
	msgCompressed := (command & dota.EDemoCommands_DEM_IsCompressed) == dota.EDemoCommands_DEM_IsCompressed

	// Read the tick that the message corresponds with.
	tick := p.reader.readVarUint32()

	// This appears to actually be an int32, where a -1 means pre-game.
	if tick == 4294967295 {
		tick = 0
	}

	// Read the size and following buffer.
	size := int(p.reader.readVarUint32())
	buf := p.reader.readBytes(size)

	// If the buffer is compressed, decompress it with snappy.
	if msgCompressed {
		var err error
		if buf, err = snappy.Decode(nil, buf); err != nil {
			return nil, err
		}
	}

	// Return the message
	msg := &outerMessage{
		tick:   tick,
		typeId: msgType,
		data:   buf,
	}
	return msg, nil
}
Example #13
func getRemoteURL(url string) ([]byte, error) {
	item, err := cacher.Get(url)
	if err != nil && err != memcache.ErrCacheMiss {
		return nil, err
	}

	if err == memcache.ErrCacheMiss {
		res, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()

		body, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}

		content, err := snappy.Encode(nil, body)
		if err != nil {
			return nil, err
		}

		err = cacher.Set(&memcache.Item{Key: url, Value: content, Expiration: config.MCTTL})
		if err != nil {
			return nil, err
		}
		return body, nil
	}

	content, err := snappy.Decode(nil, item.Value)
	if err != nil {
		return nil, err
	}

	return content, nil
}
Example #14
// readBlock reads and decompresses a block from disk into memory.
func (r *Reader) readBlock(bh blockHandle) (block, error) {
	b := make([]byte, bh.length+blockTrailerLen)
	if _, err := r.file.ReadAt(b, int64(bh.offset)); err != nil {
		return nil, err
	}
	if r.verifyChecksums {
		checksum0 := binary.LittleEndian.Uint32(b[bh.length+1:])
		checksum1 := crc.New(b[:bh.length+1]).Value()
		if checksum0 != checksum1 {
			return nil, errors.New("leveldb/table: invalid table (checksum mismatch)")
		}
	}
	switch b[bh.length] {
	case noCompressionBlockType:
		return b[:bh.length], nil
	case snappyCompressionBlockType:
		b, err := snappy.Decode(nil, b[:bh.length])
		if err != nil {
			return nil, err
		}
		return b, nil
	}
	return nil, fmt.Errorf("leveldb/table: unknown block compression: %d", b[bh.length])
}
Example #15
func (s SnappyCompressor) Decode(data []byte) ([]byte, error) {
	return snappy.Decode(nil, data)
}
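
Used together with a matching Encode, this gives a simple round trip. A minimal sketch under the same assumption that the older snappy-go Encode also returns an error:

func roundTrip(data []byte) ([]byte, error) {
	enc, err := snappy.Encode(nil, data)
	if err != nil {
		return nil, err
	}
	return snappy.Decode(nil, enc)
}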
Example #16
// Do executes function f for each document in the database.
func (db *Database) Do(f func(*PackageInfo) error) error {
	c := db.Pool.Get()
	defer c.Close()
	cursor := 0
	c.Send("SCAN", cursor, "MATCH", "pkg:*")
	c.Flush()
	for {
		// Receive the reply to the previous SCAN.
		values, err := redis.Values(c.Receive())
		if err != nil {
			return err
		}
		var keys [][]byte
		if _, err := redis.Scan(values, &cursor, &keys); err != nil {
			return err
		}
		if cursor == 0 {
			break
		}
		for _, key := range keys {
			c.Send("HMGET", key, "gob", "score", "kind", "path", "terms", "synopis")
		}
		c.Send("SCAN", cursor, "MATCH", "pkg:*")
		c.Flush()
		for range keys {
			values, err := redis.Values(c.Receive())
			if err != nil {
				return err
			}

			var (
				pi       PackageInfo
				p        []byte
				path     string
				terms    string
				synopsis string
			)

			if _, err := redis.Scan(values, &p, &pi.Score, &pi.Kind, &path, &terms, &synopsis); err != nil {
				return err
			}

			if p == nil {
				continue
			}

			pi.Size = len(path) + len(p) + len(terms) + len(synopsis)

			p, err = snappy.Decode(nil, p)
			if err != nil {
				return fmt.Errorf("snappy decoding %s: %v", path, err)
			}

			if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pi.PDoc); err != nil {
				return fmt.Errorf("gob decoding %s: %v", path, err)
			}
			if err := f(&pi); err != nil {
				return fmt.Errorf("func %s: %v", path, err)
			}
		}
	}
	return nil
}
Example #17
// TODO: at some point this function will become obsolete
func BlockDBConvertIndexFile(dir string) {
	f, _ := os.Open(dir + "blockchain.idx")
	if f == nil {
		if fi, _ := os.Stat(dir + "blockchain_backup.idx"); fi != nil && fi.Size() > 0 {
			fmt.Println("If you don't plan to go back to a version prior 0.9.8, delete this file:\n", dir+"blockchain_backup.idx")
		}
		return // nothing to convert
	}
	fmt.Println("Converting btc.Block Database to the new format - please be patient!")
	id, _ := ioutil.ReadAll(f)
	f.Close()

	fmt.Println(len(id)/92, "blocks in the index")

	f, _ = os.Open(dir + "blockchain.dat")
	if f == nil {
		panic("blockchain.dat not found")
	}
	defer f.Close()

	var (
		datlen, sofar, sf2, tmp int64
		fl, le, he              uint32
		po                      uint64
		buf                     [2 * 1024 * 1024]byte // pre-allocate a 2MB buffer
		blk                     []byte
	)

	if fi, _ := f.Stat(); fi != nil {
		datlen = fi.Size()
	} else {
		panic("Stat() failed on blockchain.dat")
	}

	nidx := new(bytes.Buffer)

	for i := 0; i+92 <= len(id); i += 92 {
		fl = binary.LittleEndian.Uint32(id[i : i+4])
		he = binary.LittleEndian.Uint32(id[i+68 : i+72])
		po = binary.LittleEndian.Uint64(id[i+80 : i+88])
		le = binary.LittleEndian.Uint32(id[i+88 : i+92])

		f.Seek(int64(po), os.SEEK_SET)
		if _, er := f.Read(buf[:le]); er != nil {
			panic(er.Error())
		}
		if (fl & BLOCK_COMPRSD) != 0 {
			if (fl & BLOCK_SNAPPED) != 0 {
				blk, _ = snappy.Decode(nil, buf[:le])
			} else {
				gz, _ := gzip.NewReader(bytes.NewReader(buf[:le]))
				blk, _ = ioutil.ReadAll(gz)
				gz.Close()
			}
		} else {
			blk = buf[:le]
		}

		tx_n, _ := btc.VLen(blk[80:])

		binary.Write(nidx, binary.LittleEndian, fl)
		nidx.Write(id[i+4 : i+36])
		binary.Write(nidx, binary.LittleEndian, he)
		binary.Write(nidx, binary.LittleEndian, po)
		binary.Write(nidx, binary.LittleEndian, le)
		binary.Write(nidx, binary.LittleEndian, uint32(tx_n))
		nidx.Write(blk[:80])

		sf2 += int64(len(blk))
		tmp = sofar + int64(le)
		if ((tmp ^ sofar) >> 20) != 0 {
			fmt.Printf("\r%d / %d MB processed so far (%d)  ", tmp>>20, datlen>>20, sf2>>20)
		}
		sofar = tmp
	}
	fmt.Println()

	fmt.Println("Almost there - just save the new index file... don't you dare to stop now!")
	ioutil.WriteFile(dir+"blockchain.new", nidx.Bytes(), 0666)
	os.Rename(dir+"blockchain.idx", dir+"blockchain_backup.idx")
	fmt.Println("The old index backed up at blockchain_backup.dat")
	fmt.Println("Conversion done and will not be neded again, unless you downgrade.")
}