// readAll reads the entire referenced block.
func (p *bInfo) readAll(r io.ReaderAt, checksum bool) (b []byte, err error) {
	// The block payload is followed by a 1-byte compression tag and a
	// 4-byte masked CRC32C, hence the size+5 read.
	raw := make([]byte, p.size+5)
	_, err = readFullAt(r, raw, int64(p.offset))
	if err != nil {
		return
	}
	crcb := raw[len(raw)-4:]
	raw = raw[:len(raw)-4]
	if checksum {
		sum := binary.LittleEndian.Uint32(crcb)
		sum = hash.UnmaskCRC32(sum)
		crc := hash.NewCRC32C()
		crc.Write(raw)
		if crc.Sum32() != sum {
			err = errors.ErrCorrupt("block checksum mismatch")
			return
		}
	}
	compression := raw[len(raw)-1]
	b = raw[:len(raw)-1]
	switch compression {
	case kNoCompression:
	case kSnappyCompression:
		return snappy.Decode(nil, b)
	default:
		err = errors.ErrCorrupt("bad block type")
	}
	return
}
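// For reference: a hedged sketch of the on-disk layout that readAll implies.
// The constant names here are illustrative, not taken from the source; the
// layout itself follows from the size+5 read and the slicing above.
//
//	+------------------+----------------------+------------------------+
//	| payload (size B) | compression tag (1B) | masked CRC32 (4B, LE)  |
//	+------------------+----------------------+------------------------+
//
// The checksum covers payload plus tag, which is why crc.Write(raw) runs
// after only the 4 CRC bytes have been stripped.
const (
	blockTagLen     = 1
	blockCRCLen     = 4
	blockTrailerLen = blockTagLen + blockCRCLen // the "+5" in readAll
)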
func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
	data := r.bpool.Get(int(bh.length + blockTrailerLen))
	if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
		return nil, err
	}
	if checksum || r.checksum {
		if !verifyChecksum(data) {
			return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
		}
	}
	switch data[bh.length] {
	case blockTypeNoCompression:
		data = data[:bh.length]
	case blockTypeSnappyCompression:
		decLen, err := snappy.DecodedLen(data[:bh.length])
		if err != nil {
			return nil, err
		}
		tmp := data
		data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length])
		r.bpool.Put(tmp)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
	}
	return data, nil
}
func readRequestBody(r io.Reader, header *wire.RequestHeader, request proto.Message) error {
	// recv body (end)
	compressedPbRequest, err := recvFrame(r)
	if err != nil {
		return err
	}
	// checksum
	if crc32.ChecksumIEEE(compressedPbRequest) != header.GetChecksum() {
		return fmt.Errorf("protorpc.readRequestBody: unexpected checksum")
	}
	// decode the compressed data
	pbRequest, err := snappy.Decode(nil, compressedPbRequest)
	if err != nil {
		return err
	}
	// check wire header: rawMsgLen
	if uint32(len(pbRequest)) != header.GetRawRequestLen() {
		return fmt.Errorf("protorpc.readRequestBody: unexpected header.RawRequestLen")
	}
	// Unmarshal to proto message
	if request != nil {
		err = proto.Unmarshal(pbRequest, request)
		if err != nil {
			return err
		}
	}
	return nil
}
func (ev *CompressEvent) Decode(buffer *bytes.Buffer) (err error) {
	ev.CompressType, err = DecodeUInt32Value(buffer)
	if err != nil {
		return
	}
	switch ev.CompressType {
	case COMPRESSOR_NONE:
		err, ev.Ev = DecodeEvent(buffer)
		return err
	case COMPRESSOR_SNAPPY:
		var b []byte
		b, err = snappy.Decode(b, buffer.Bytes())
		if err != nil {
			return
		}
		tmpbuf := bytes.NewBuffer(b)
		err, ev.Ev = DecodeEvent(tmpbuf)
		tmpbuf.Reset()
		return err
	// case COMPRESSOR_LZ4:
	// 	lz4r := lz4.NewReader(buffer)
	// 	data, _ := ioutil.ReadAll(lz4r)
	// 	tmpbuf := bytes.NewBuffer(data)
	// 	err, ev.Ev = DecodeEvent(tmpbuf)
	// 	tmpbuf.Reset()
	// 	return err
	default:
		return errors.New("unsupported compress type:" + strconv.Itoa(int(ev.CompressType)))
	}
}
func Decompress(src []byte) ([]byte, bool) {
	dst, err := snappy.Decode(nil, src)
	if err != nil {
		return nil, false
	}
	return dst, true
}
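// A minimal usage sketch for Decompress, assuming the two-value
// snappy.Encode of the older snappy-go API that these snippets use
// (in the current github.com/golang/snappy, Encode returns only []byte).
func ExampleDecompress() {
	compressed, err := snappy.Encode(nil, []byte("hello, snappy"))
	if err != nil {
		panic(err)
	}
	if out, ok := Decompress(compressed); ok {
		fmt.Printf("%s\n", out) // hello, snappy
	}
}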
// Do executes function f for each document in the database.
func (db *Database) Do(f func(*doc.Package, []Package) error) error {
	c := db.Pool.Get()
	defer c.Close()
	keys, err := redis.Values(c.Do("KEYS", "pkg:*"))
	if err != nil {
		return err
	}
	for _, key := range keys {
		p, err := redis.Bytes(c.Do("HGET", key, "gob"))
		if err == redis.ErrNil {
			continue
		}
		if err != nil {
			return err
		}
		p, err = snappy.Decode(nil, p)
		if err != nil {
			return err
		}
		var pdoc doc.Package
		if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pdoc); err != nil {
			return err
		}
		pkgs, err := db.getSubdirs(c, pdoc.ImportPath, &pdoc)
		if err != nil {
			return err
		}
		if err := f(&pdoc, pkgs); err != nil {
			return err
		}
	}
	return nil
}
func (db *Database) getDoc(c redis.Conn, path string) (*doc.Package, time.Time, error) {
	r, err := redis.Values(getDocScript.Do(c, path))
	if err == redis.ErrNil {
		return nil, time.Time{}, nil
	} else if err != nil {
		return nil, time.Time{}, err
	}
	var p []byte
	var t int64
	if _, err := redis.Scan(r, &p, &t); err != nil {
		return nil, time.Time{}, err
	}
	p, err = snappy.Decode(nil, p)
	if err != nil {
		return nil, time.Time{}, err
	}
	var pdoc doc.Package
	if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pdoc); err != nil {
		return nil, time.Time{}, err
	}
	nextCrawl := pdoc.Updated
	if t != 0 {
		nextCrawl = time.Unix(t, 0).UTC()
	}
	return &pdoc, nextCrawl, err
}
func (this *SnappyCodec) Inverse(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return 0, 0, errors.New("Invalid null source buffer")
	}
	if dst == nil {
		return 0, 0, errors.New("Invalid null destination buffer")
	}
	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	count := this.size
	if this.size == 0 {
		count = uint(len(src))
	}
	res, err := snappy.Decode(dst, src[0:count])
	if err != nil {
		return 0, 0, fmt.Errorf("Decoding error: %v", err)
	}
	if len(res) > len(dst) {
		// Decode returns a newly allocated slice if the provided 'dst'
		// slice is too small. There is no way to return this new slice,
		// so treat it as an error.
		return 0, 0, fmt.Errorf("Output buffer is too small - size: %d, required %d", len(dst), len(res))
	}
	return count, uint(len(res)), nil
}
func TestSnappyCompressor(t *testing.T) {
	c := SnappyCompressor{}
	if c.Name() != "snappy" {
		t.Fatalf("expected name to be 'snappy', got %v", c.Name())
	}

	str := "My Test String"

	// Test encoding.
	if expected, err := snappy.Encode(nil, []byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if res, err := c.Encode([]byte(str)); err != nil {
		t.Fatalf("failed to encode '%v' with error %v", str, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected encoded value with the result encoded value")
	}

	val, err := c.Encode([]byte(str))
	if err != nil {
		t.Fatalf("failed to encode '%v' with error '%v'", str, err)
	}

	// Test decoding.
	if expected, err := snappy.Decode(nil, val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if res, err := c.Decode(val); err != nil {
		t.Fatalf("failed to decode '%v' with error %v", val, err)
	} else if !bytes.Equal(expected, res) {
		t.Fatal("failed to match the expected decoded value with the result decoded value")
	}
}
func SnappyUncompress(compressed []byte) []byte {
	dst := make([]byte, 0, len(compressed))
	out, err := snappy.Decode(dst, compressed)
	if err != nil {
		panic(err)
	}
	return out
}
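// A note on SnappyUncompress above: snappy.Decode only writes into dst when
// len(dst) is large enough, so a zero-length slice (whatever its capacity)
// still forces an allocation inside Decode. A hedged variant that pre-sizes
// the buffer via snappy.DecodedLen, which the surrounding snippets also use;
// the function name here is illustrative:
func SnappyUncompressSized(compressed []byte) ([]byte, error) {
	n, err := snappy.DecodedLen(compressed)
	if err != nil {
		return nil, err
	}
	// Decode fills the provided buffer in place and returns a sub-slice of it.
	return snappy.Decode(make([]byte, n), compressed)
}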
func (t tweetItem) Write(out io.Writer) {
	v := t.Value()
	// Use a distinct name for the decoded bytes rather than shadowing the
	// receiver t.
	decoded, err := snappy.Decode(nil, v)
	if err != nil {
		panic(err)
	}
	out.Write(decoded)
	out.Write([]byte("\n"))
}
func (cn *connection) recv() (byte, []byte, error) {
	if cn.c == nil {
		return 0, nil, driver.ErrBadConn
	}
	header := make([]byte, 8)
	if _, err := io.ReadFull(cn.c, header); err != nil {
		cn.close() // better to assume that the connection is broken (may have read some bytes)
		return 0, nil, err
	}
	// Verify that the frame starts with version==1 and the req/resp flag set
	// to "response". This may be overly conservative, in that future versions
	// may be backwards compatible; in that case simply amend the check.
	if header[0] != protoResponse {
		cn.close()
		return 0, nil, fmt.Errorf("unsupported frame version or not a response: 0x%x (header=%v)", header[0], header)
	}
	// Verify that the flags field has only a single flag set. Again, this may
	// be overly conservative if additional flags are backwards compatible.
	if header[1] > 1 {
		cn.close()
		return 0, nil, fmt.Errorf("unsupported frame flags: 0x%x (header=%v)", header[1], header)
	}
	opcode := header[3]
	if opcode > opLAST {
		cn.close()
		return 0, nil, fmt.Errorf("unknown opcode: 0x%x (header=%v)", opcode, header)
	}
	length := binary.BigEndian.Uint32(header[4:8])
	var body []byte
	if length > 0 {
		if length > 256*1024*1024 { // spec says 256 MB is the max
			cn.close()
			return 0, nil, fmt.Errorf("frame too large: %d (header=%v)", length, header)
		}
		body = make([]byte, length)
		if _, err := io.ReadFull(cn.c, body); err != nil {
			cn.close() // better to assume that the connection is broken
			return 0, nil, err
		}
	}
	if header[1]&flagCompressed != 0 && cn.compression == "snappy" {
		var err error
		body, err = snappy.Decode(nil, body)
		if err != nil {
			cn.close()
			return 0, nil, err
		}
	}
	if opcode == opError {
		code := binary.BigEndian.Uint32(body[0:4])
		msglen := binary.BigEndian.Uint16(body[4:6])
		msg := string(body[6 : 6+msglen])
		return opcode, body, Error{Code: int(code), Msg: msg}
	}
	return opcode, body, nil
}
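// A hedged reference for the 8-byte frame header that recv parses above. The
// offsets come directly from the code; the identification of byte 2 as the
// stream id and 0x81 as the CQL v1 response byte are assumptions drawn from
// the CQL binary protocol, not from this source.
const (
	hdrVersion = 0 // version + direction bit; protoResponse (0x81 in CQL v1)
	hdrFlags   = 1 // only flagCompressed is expected here
	hdrStream  = 2 // stream id (not inspected by recv)
	hdrOpcode  = 3 // bounds-checked against opLAST
	hdrLength  = 4 // bytes 4-7: big-endian body length, capped at 256 MB
)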
func (r *Reader) loadBlock() error {
	ref := r.refs[0]
	r.refs = r.refs[1:]
	path := blockPath(ref)
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	n, err := f.Read(r.box)
	if err != nil && err != io.EOF {
		return err
	}
	//if err != io.EOF {
	//	//TODO instead we can read the actual block stored disregarding its size.
	//	return fmt.Errorf("didn't read full block: BlockSize is smaller than stored?")
	//}
	if n < minBoxSize {
		return fmt.Errorf("block on disk is too short: %s", ref)
	}

	// Decrypt.
	var nonce [24]byte
	if err := readNonce(&nonce, r.box); err != nil {
		return err
	}
	encryptedBlock := r.box[len(nonce):n]
	decryptedData, ok := secretbox.Open(r.cdata[:0], encryptedBlock, &nonce, &config.Keys.BlockEnc)
	if !ok {
		return fmt.Errorf("failed to decrypt block %s", ref)
	}

	// Load block kind.
	r.kind = decryptedData[0]

	// Load length of compressed data.
	compressedLen := binary.BigEndian.Uint32(decryptedData[1:])
	decryptedData = decryptedData[headerSize : headerSize+compressedLen]

	// Decompress.
	// TODO avoid allocation.
	decompressedData, err := snappy.Decode(nil, decryptedData)
	if err != nil {
		return err
	}

	// Verify hash.
	contentHash := calculateRef(r.h, decompressedData)
	if !ref.Equal(contentHash) {
		return fmt.Errorf("block ref %s doesn't match content %s", ref, contentHash)
	}

	// Set block.
	r.block = decompressedData
	return nil
}
// SnappyDecode decodes snappy data, handling both raw snappy blocks and the
// xerial-style framed format (magic header followed by length-prefixed
// chunks). Note: the framed path assumes len(src) >= 8.
func SnappyDecode(src []byte) ([]byte, error) {
	if bytes.Equal(src[:8], snappyMagic) {
		// Skip the 8-byte magic and the two 4-byte version fields.
		pos := uint32(16)
		max := uint32(len(src))
		dst := make([]byte, 0)
		for pos < max {
			size := binary.BigEndian.Uint32(src[pos : pos+4])
			pos += 4
			chunk, err := snappy.Decode(nil, src[pos:pos+size])
			if err != nil {
				return nil, err
			}
			pos += size
			dst = append(dst, chunk...)
		}
		return dst, nil
	}
	return snappy.Decode(nil, src)
}
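// SnappyDecode above parses the xerial/Kafka-style framing: an 8-byte magic,
// two 4-byte big-endian version fields (hence pos starting at 16), then a
// sequence of 4-byte big-endian length prefixes, each followed by one raw
// snappy block. A hedged encoder counterpart as a sketch; the magic bytes and
// version values follow the xerial format and are assumptions, not taken
// from this source.
func snappyEncodeFramed(src []byte) ([]byte, error) {
	block, err := snappy.Encode(nil, src) // two-value Encode, as used in these snippets
	if err != nil {
		return nil, err
	}
	magic := []byte{0x82, 'S', 'N', 'A', 'P', 'P', 'Y', 0x00}
	buf := make([]byte, 0, len(magic)+12+len(block))
	buf = append(buf, magic...)
	buf = append(buf, 0, 0, 0, 1) // version 1, big-endian
	buf = append(buf, 0, 0, 0, 1) // minimum compatible version 1
	var size [4]byte
	binary.BigEndian.PutUint32(size[:], uint32(len(block)))
	buf = append(buf, size[:]...)
	return append(buf, block...), nil
}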
func KVQuery(db *rpcplus.Client, table, uid string, value interface{}) (exist bool, err error) {
	//ts("KVQuery", table, uid)
	//defer te("KVQuery", table, uid)
	var reply protobuf.DBQueryResult
	err = db.Call("DBServer.Query", protobuf.DBQuery{table, uid}, &reply)
	if err != nil {
		logger.Error("KVQuery Error On Query %s : %s (%s)", table, uid, err.Error())
		return
	}
	switch reply.Code {
	case protobuf.Ok:
		var dst []byte
		dst, err = snappy.Decode(nil, reply.Value)
		if err != nil {
			logger.Error("KVQuery Unmarshal Error On snappy.Decode %s : %s (%s)", table, uid, err.Error())
			return
		}
		switch value.(type) {
		case gp.Message:
			err = gp.Unmarshal(dst, value.(gp.Message))
			if err != nil {
				logger.Error("KVQuery Unmarshal Error On Query %s : %s (%s)", table, uid, err.Error())
				return
			}
		case *[]byte:
			*(value.(*[]byte)) = dst
		default:
			logger.Error("KVQuery args type error %v", value)
			return
		}
		exist = true
		return
	case protobuf.NoExist:
		return
	}
	logger.Error("KVQuery Unknown DBReturn %d", reply.Code)
	return false, fmt.Errorf("KVQuery Unknown DBReturn %d", reply.Code)
}
func (c *SnappyCompressor) NewReader(r io.Reader) (io.ReadCloser, error) {
	src, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	dst, err := snappy.Decode(nil, src)
	if err != nil {
		return nil, err
	}
	return ioutil.NopCloser(bytes.NewReader(dst)), nil
}
func NewSnappyDecodeOp() stream.Operator {
	generator := func() interface{} {
		fn := func(in []byte) [][]byte {
			decompressed, err := snappy.Decode(nil, in)
			if err != nil {
				// Note: on error the (nil) result is still emitted; the
				// error is only logged.
				log.Printf("Error in snappy decompression %v", err)
			}
			return [][]byte{decompressed}
		}
		return fn
	}
	return mapper.NewOpFactory(generator, "NewSnappyDecodeOp")
}
func (r *reader) nextFrame() error {
	for {
		_, err := io.ReadFull(r.reader, r.hdr)
		if err != nil {
			return err
		}
		buf, err := r.readBlock()
		if err != nil {
			return err
		}
		switch r.hdr[0] {
		case 0x00, 0x01: // compressed or uncompressed bytes
			// The first 4 bytes are the little-endian masked crc32 checksum.
			checksum := unmaskChecksum(uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24)
			b := buf[4:]
			if r.hdr[0] == 0x00 { // compressed bytes
				r.dst, err = snappy.Decode(r.dst, b)
				if err != nil {
					return err
				}
				b = r.dst
			}
			if r.verifyChecksum {
				actualChecksum := crc32.Checksum(b, crcTable)
				if checksum != actualChecksum {
					return fmt.Errorf("invalid checksum %x != %x", checksum, actualChecksum)
				}
			}
			_, err = r.buf.Write(b)
			return err
		case 0xff: // stream identifier
			if !bytes.Equal(buf, []byte{0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}) { // "sNaPpY"
				return errors.New("invalid stream ID")
			}
			// continue...
		default:
			return errors.New("invalid frame identifier")
		}
	}
}
func main() {
	var md5sum string
	var rip = "10.242.87.153"
	var lastImg *image.RGBA
	http.HandleFunc("/connect", func(w http.ResponseWriter, r *http.Request) {
		serialNo := r.URL.Query().Get("serialno")
		fmt.Println(strings.Split(r.RemoteAddr, ":")[0], serialNo)
		rip = strings.Split(r.RemoteAddr, ":")[0]
		io.WriteString(w, "connected")
	})
	http.HandleFunc("/screen.png", func(w http.ResponseWriter, r *http.Request) {
		resp, err := http.Get(fmt.Sprintf("http://%s:21000/patch.snappy?md5sum="+md5sum, rip))
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode == http.StatusNotModified {
			png.Encode(w, lastImg)
			return
		}
		data, _ := ioutil.ReadAll(resp.Body)
		rawPixes, _ := snappy.Decode(nil, data)
		fmt.Println(resp.Header)
		hdr := resp.Header
		md5sum = hdr.Get("X-Md5sum")
		var isPatch = hdr.Get("X-Patch") == "true"
		var width, height int
		fmt.Sscanf(hdr.Get("X-Width")+" "+hdr.Get("X-Height"), "%d %d", &width, &height)
		img := image.NewRGBA(image.Rectangle{image.ZP, image.Point{width, height}})
		img.Pix = rawPixes
		if isPatch {
			lastImg, _ = pngdiff.Patch(lastImg, img)
			start := time.Now()
			bmp.Encode(w, lastImg)
			//png.Encode(w, lastImg)
			fmt.Println("patch:", time.Since(start))
			return
		}
		lastImg = img
		//png.Encode(w, img)
		bmp.Encode(w, lastImg)
	})
	log.Fatal(http.ListenAndServe(":9000", nil))
}
func decompress(fr *Reader, toDecompress <-chan *readerBlock, toDecode chan<- *readerBlock) {
	switch fr.CompressionCodec {
	case CompressionDeflate:
		var rc io.ReadCloser
		var bits []byte
		for block := range toDecompress {
			rc = flate.NewReader(block.r)
			bits, block.err = ioutil.ReadAll(rc)
			if block.err != nil {
				block.err = newReaderError("cannot read from deflate", block.err)
				toDecode <- block
				_ = rc.Close() // already have the read error; ignore the close error
				continue
			}
			block.err = rc.Close()
			if block.err != nil {
				block.err = newReaderError("cannot close deflate", block.err)
				toDecode <- block
				continue
			}
			block.r = bytes.NewReader(bits)
			toDecode <- block
		}
	case CompressionNull:
		for block := range toDecompress {
			toDecode <- block
		}
	case CompressionSnappy:
		// dst is reused across iterations to cut allocations; note that
		// snappy.Decode may fill it in place, so each decoded block must
		// be fully consumed downstream before the next decode overwrites it.
		var src, dst []byte
		for block := range toDecompress {
			src, block.err = ioutil.ReadAll(block.r)
			if block.err != nil {
				block.err = newReaderError("cannot read", block.err)
				toDecode <- block
				continue
			}
			dst, block.err = snappy.Decode(dst, src)
			if block.err != nil {
				block.err = newReaderError("cannot decompress", block.err)
				toDecode <- block
				continue
			}
			block.r = bytes.NewReader(dst)
			toDecode <- block
		}
	}
	close(toDecode)
}
func (db *BlockDB) BlockGet(hash *Uint256) (bl []byte, trusted bool, e error) {
	db.mutex.Lock()
	rec, ok := db.blockIndex[hash.BIdx()]
	if !ok {
		db.mutex.Unlock()
		e = errors.New("Block not in the index")
		return
	}
	trusted = rec.trusted
	if crec, hit := db.cache[hash.BIdx()]; hit {
		bl = crec.data
		crec.used = time.Now()
		db.mutex.Unlock()
		return
	}
	db.mutex.Unlock()

	bl = make([]byte, rec.blen)

	// Re-open the data file so we do not disturb the write pointer.
	f, e := os.Open(db.dirname + "blockchain.dat")
	if e != nil {
		return
	}
	_, e = f.Seek(int64(rec.fpos), os.SEEK_SET)
	if e == nil {
		_, e = f.Read(bl[:])
	}
	f.Close()

	if rec.compressed {
		if rec.snappied {
			bl, _ = snappy.Decode(nil, bl)
		} else {
			gz, _ := gzip.NewReader(bytes.NewReader(bl))
			bl, _ = ioutil.ReadAll(gz)
			gz.Close()
		}
	}

	db.addToCache(hash, bl)
	return
}
// Do executes function f for each document in the database.
func (db *Database) Do(f func(*PackageInfo) error) error {
	c := db.Pool.Get()
	defer c.Close()
	keys, err := redis.Values(c.Do("KEYS", "pkg:*"))
	if err != nil {
		return err
	}
	for _, key := range keys {
		values, err := redis.Values(c.Do("HMGET", key, "gob", "score", "kind", "path"))
		if err != nil {
			return err
		}
		var (
			pi   PackageInfo
			p    []byte
			path string
		)
		if _, err := redis.Scan(values, &p, &pi.Score, &pi.Kind, &path); err != nil {
			return err
		}
		if p == nil {
			continue
		}
		p, err = snappy.Decode(nil, p)
		if err != nil {
			return fmt.Errorf("snappy decoding %s: %v", path, err)
		}
		if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pi.PDoc); err != nil {
			return fmt.Errorf("gob decoding %s: %v", path, err)
		}
		pi.Pkgs, err = db.getSubdirs(c, pi.PDoc.ImportPath, pi.PDoc)
		if err != nil {
			return fmt.Errorf("get subdirs %s: %v", path, err)
		}
		if err := f(&pi); err != nil {
			return fmt.Errorf("func %s: %v", path, err)
		}
	}
	return nil
}
// Do executes function f for each document in the database.
func (db *Database) Do(f func(*PackageInfo) error) error {
	c := db.Pool.Get()
	defer c.Close()
	keys, err := redis.Values(c.Do("KEYS", "pkg:*"))
	if err != nil {
		return err
	}
	for _, key := range keys {
		values, err := redis.Values(c.Do("HMGET", key, "gob", "rank", "kind"))
		if err != nil {
			return err
		}
		var (
			pi PackageInfo
			p  []byte
		)
		if _, err := redis.Scan(values, &p, &pi.Rank, &pi.Kind); err != nil {
			return err
		}
		if p == nil {
			continue
		}
		p, err = snappy.Decode(nil, p)
		if err != nil {
			return err
		}
		if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pi.PDoc); err != nil {
			return err
		}
		pi.Pkgs, err = db.getSubdirs(c, pi.PDoc.ImportPath, pi.PDoc)
		if err != nil {
			return err
		}
		if err := f(&pi); err != nil {
			return err
		}
	}
	return nil
}
func RunTestSnappy(data []byte) {
	log.Printf("encoding/RunTestSnappy: Testing compression Snappy\n")
	now := time.Now()
	e, err := snappy.Encode(nil, data)
	if err != nil {
		log.Fatalf("encoding/RunTestSnappy: encoding error: %v\n", err)
	}
	log.Printf("encoding/RunTestSnappy: Compressed from %d bytes to %d bytes in %d ns\n", len(data), len(e), time.Since(now).Nanoseconds())

	d, err := snappy.Decode(nil, e)
	if err != nil {
		log.Fatalf("encoding/RunTestSnappy: decoding error: %v\n", err)
	}
	log.Printf("encoding/RunTestSnappy: Uncompressed from %d bytes to %d bytes in %d ns\n", len(e), len(d), time.Since(now).Nanoseconds())

	if !bytes.Equal(data, d) {
		log.Fatalf("encoding/RunTestSnappy: roundtrip mismatch\n")
	}
}
// decodeBlock assumes r.hdr[0] to be either blockCompressed or
// blockUncompressed.
func (r *reader) decodeBlock() error {
	// Read compressed block data and determine whether the uncompressed
	// data would be too large.
	buf, err := r.readBlock()
	if err != nil {
		return err
	}
	declen := len(buf[4:])
	if r.hdr[0] == blockCompressed {
		declen, err = snappy.DecodedLen(buf[4:])
		if err != nil {
			return err
		}
	}
	if declen > MaxBlockSize {
		return fmt.Errorf("decoded block data too large %d > %d", declen, MaxBlockSize)
	}
	// Decode the data and verify its integrity using the little-endian
	// masked crc32 preceding the encoded data.
	crc32le, blockdata := buf[:4], buf[4:]
	if r.hdr[0] == blockCompressed {
		r.dst, err = snappy.Decode(r.dst, blockdata)
		if err != nil {
			return err
		}
		blockdata = r.dst
	}
	if r.verifyChecksum {
		checksum := unmaskChecksum(uint32(crc32le[0]) | uint32(crc32le[1])<<8 | uint32(crc32le[2])<<16 | uint32(crc32le[3])<<24)
		actualChecksum := crc32.Checksum(blockdata, crcTable)
		if checksum != actualChecksum {
			return fmt.Errorf("checksum does not match %x != %x", checksum, actualChecksum)
		}
	}
	_, err = r.buf.Write(blockdata)
	return err
}
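// The two stream readers above call an unmaskChecksum helper that is not
// shown. Per the public snappy framing-format spec, checksums are CRC-32C
// (Castagnoli) values stored masked as ((crc >> 15) | (crc << 17)) +
// 0xa282ead8. This is a plausible sketch of the inverse, stated as an
// assumption rather than the source's own code.
var crcTable = crc32.MakeTable(crc32.Castagnoli)

func unmaskChecksum(c uint32) uint32 {
	x := c - 0xa282ead8
	return (x >> 17) | (x << 15)
}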
// readBlock reads and decompresses a block from disk into memory.
func (r *Reader) readBlock(bh blockHandle) (block, error) {
	b := make([]byte, bh.length+blockTrailerLen)
	if _, err := r.file.ReadAt(b, int64(bh.offset)); err != nil {
		return nil, err
	}
	if r.verifyChecksums {
		checksum0 := binary.LittleEndian.Uint32(b[bh.length+1:])
		checksum1 := crc.New(b[:bh.length+1]).Value()
		if checksum0 != checksum1 {
			return nil, errors.New("leveldb/table: invalid table (checksum mismatch)")
		}
	}
	switch b[bh.length] {
	case noCompressionBlockType:
		return b[:bh.length], nil
	case snappyCompressionBlockType:
		b, err := snappy.Decode(nil, b[:bh.length])
		if err != nil {
			return nil, err
		}
		return b, nil
	}
	return nil, fmt.Errorf("leveldb/table: unknown block compression: %d", b[bh.length])
}
func performGetBlock(host string, port int, isSnappy bool) {
	remote := startClient(host, port)
	defer remote.Close()

	var reply DataChunk
	var err error
	blockSize := 512 * 1024 // 512 KB

	// Retrieve the block.
	if isSnappy {
		err = remote.Call("DFS.GetSnappyBlock", blockSize, &reply)
		handleError(err)
		// Decode is given reply.Chunk as both dst and src; it allocates a
		// new slice when the decoded length exceeds the buffer's length.
		reply.Chunk, err = snappy.Decode(reply.Chunk, reply.Chunk)
		handleError(err)
	} else {
		err = remote.Call("DFS.GetBlock", blockSize, &reply)
		handleError(err)
	}

	// Calculate the MD5 hash and ensure it matches.
	h := md5.New()
	h.Write(reply.Chunk)
	if !bytes.Equal(reply.Hash, h.Sum(nil)) {
		handleError(errors.New("Hash did not match"))
	}
}
func (s SnappyCompressor) Decode(data []byte) ([]byte, error) {
	return snappy.Decode(nil, data)
}
// TODO: at some point this function will become obsolete
func BlockDBConvertIndexFile(dir string) {
	f, _ := os.Open(dir + "blockchain.idx")
	if f == nil {
		if fi, _ := os.Stat(dir + "blockchain_backup.idx"); fi != nil && fi.Size() > 0 {
			fmt.Println("If you don't plan to go back to a version prior to 0.9.8, delete this file:\n", dir+"blockchain_backup.idx")
		}
		return // nothing to convert
	}
	fmt.Println("Converting Block Database to the new format - please be patient!")
	id, _ := ioutil.ReadAll(f)
	f.Close()
	fmt.Println(len(id)/92, "blocks in the index")

	f, _ = os.Open(dir + "blockchain.dat")
	if f == nil {
		panic("blockchain.dat not found")
	}
	defer f.Close()

	var (
		datlen, sofar, sf2, tmp int64
		fl, le, he              uint32
		po                      uint64
		buf                     [2 * 1024 * 1024]byte // pre-allocated 2 MB read buffer
		blk                     []byte
	)
	if fi, _ := f.Stat(); fi != nil {
		datlen = fi.Size()
	} else {
		panic("Stat() failed on blockchain.dat")
	}

	nidx := new(bytes.Buffer)
	for i := 0; i+92 <= len(id); i += 92 {
		fl = binary.LittleEndian.Uint32(id[i : i+4])
		he = binary.LittleEndian.Uint32(id[i+68 : i+72])
		po = binary.LittleEndian.Uint64(id[i+80 : i+88])
		le = binary.LittleEndian.Uint32(id[i+88 : i+92])
		f.Seek(int64(po), os.SEEK_SET)
		if _, er := f.Read(buf[:le]); er != nil {
			panic(er.Error())
		}
		if (fl & BLOCK_COMPRSD) != 0 {
			if (fl & BLOCK_SNAPPED) != 0 {
				blk, _ = snappy.Decode(nil, buf[:le])
			} else {
				gz, _ := gzip.NewReader(bytes.NewReader(buf[:le]))
				blk, _ = ioutil.ReadAll(gz)
				gz.Close()
			}
		} else {
			blk = buf[:le]
		}
		tx_n, _ := VLen(blk[80:])
		binary.Write(nidx, binary.LittleEndian, fl)
		nidx.Write(id[i+4 : i+36])
		binary.Write(nidx, binary.LittleEndian, he)
		binary.Write(nidx, binary.LittleEndian, po)
		binary.Write(nidx, binary.LittleEndian, le)
		binary.Write(nidx, binary.LittleEndian, uint32(tx_n))
		nidx.Write(blk[:80])
		sf2 += int64(len(blk))
		tmp = sofar + int64(le)
		if ((tmp ^ sofar) >> 20) != 0 {
			fmt.Printf("\r%d / %d MB processed so far (%d) ", tmp>>20, datlen>>20, sf2>>20)
		}
		sofar = tmp
	}
	fmt.Println()

	fmt.Println("Almost there - just save the new index file... don't you dare to stop now!")
	ioutil.WriteFile(dir+"blockchain.new", nidx.Bytes(), 0666)
	os.Rename(dir+"blockchain.idx", dir+"blockchain_backup.idx")
	fmt.Println("The old index was backed up as blockchain_backup.idx")
	fmt.Println("Conversion done and will not be needed again, unless you downgrade.")
}
	regularCache = iota
	nilCache
)

var snappyGobCodec = memcache.Codec{
	Marshal: func(i interface{}) (compressed []byte, err error) {
		uncompressed, err := memcache.Gob.Marshal(i)
		if err != nil {
			return
		}
		compressed, err = snappy.Encode(nil, uncompressed)
		return
	},
	Unmarshal: func(compressed []byte, i interface{}) (err error) {
		var uncompressed []byte
		if uncompressed, err = snappy.Decode(nil, compressed); err != nil {
			return
		}
		err = memcache.Gob.Unmarshal(uncompressed, i)
		return
	},
}

var MemCodec = snappyGobCodec

func isNil(v reflect.Value) bool {
	k := v.Kind()
	if k == reflect.Chan {
		return v.IsNil()
	}
	if k == reflect.Func {