func TestNoMergeExtensionMerge(t *testing.T) {
	bigm := prototests.AContainer
	m := &prototests.Small{SmallField: proto.Int64(1)}
	data, err := proto.Marshal(bigm)
	if err != nil {
		panic(err)
	}
	mdata, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	key := uint32(101)<<3 | uint32(2)
	datakey := make([]byte, 10)
	n := binary.PutUvarint(datakey, uint64(key))
	datakey = datakey[:n]
	datalen := make([]byte, 10)
	n = binary.PutUvarint(datalen, uint64(len(mdata)))
	datalen = datalen[:n]
	data = append(data, append(datakey, append(datalen, mdata...)...)...)
	err = noMerge(data, bigm.Description(), "prototests", "Container")
	if err == nil || !strings.Contains(err.Error(), "FieldB requires merging") {
		t.Fatalf("FieldB should require merging, but error is %v", err)
	}
	t.Log(err)
}
func (b *Batch) appendRec(kt kType, key, value []byte) {
	n := 1 + binary.MaxVarintLen32 + len(key)
	if kt == ktVal {
		n += binary.MaxVarintLen32 + len(value)
	}
	b.grow(n)
	off := len(b.data)
	data := b.data[:off+n]
	data[off] = byte(kt)
	off++
	off += binary.PutUvarint(data[off:], uint64(len(key)))
	copy(data[off:], key)
	off += len(key)
	if kt == ktVal {
		off += binary.PutUvarint(data[off:], uint64(len(value)))
		copy(data[off:], value)
		off += len(value)
	}
	b.data = data[:off]
}

// grow ensures the batch buffer has room for n more bytes, reserving
// space for the batch header on first use.
func (b *Batch) grow(n int) {
	off := len(b.data)
	if off == 0 {
		// include headers
		off = kBatchHdrLen
		n += off
	}
	if cap(b.data)-off >= n {
		return
	}
	data := make([]byte, 2*cap(b.data)+n)
	copy(data, b.data)
	b.data = data[:off]
}
func writeMetaData(outPath string) {
	outFile, err := os.Create(outPath)
	if err != nil {
		panic(err)
	}
	defer outFile.Close()
	buf := make([]byte, 4)
	posBuf := make([]byte, 8)
	binary.PutUvarint(buf, uint64(uniqueTerms))
	outFile.Write(buf)
	buf = []byte{0, 0, 0, 0}
	binary.PutUvarint(buf, uint64(docId))
	outFile.Write(buf)
	buf = []byte{0, 0, 0, 0}
	for i := 0; i < int(docId); i++ {
		binary.PutUvarint(buf, uint64(iDocInfos[i].length))
		outFile.Write(buf)
		buf = []byte{0, 0, 0, 0}
		binary.PutUvarint(posBuf, uint64(iDocInfos[i].pos))
		outFile.Write(posBuf)
		posBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
	}
}
func writeIndex(outPath string) {
	outFile, err := os.Create(outPath)
	if err != nil {
		panic(err)
	}
	defer outFile.Close()
	writer := bufio.NewWriterSize(io.Writer(outFile), writerBufSize)
	dBuf := make([]byte, 4)
	tfBuf := make([]byte, 4)
	pBuf := make([]byte, 4)
	for _, k := range dictionary.keys {
		v := dictionary.m[k]
		writer.WriteString(k + ",")
		for el := v.Front(); el != nil; el = el.Next() {
			posting := el.Value.(*Posting)
			binary.PutUvarint(dBuf, uint64(posting.doc))
			writer.Write(dBuf)
			dBuf = []byte{0, 0, 0, 0}
			binary.PutUvarint(tfBuf, uint64(posting.tf))
			writer.Write(tfBuf)
			tfBuf = []byte{0, 0, 0, 0}
			for posEl := posting.pos.Front(); posEl != nil; posEl = posEl.Next() {
				pos := posEl.Value.(uint32)
				binary.PutUvarint(pBuf, uint64(pos))
				writer.Write(pBuf)
				pBuf = []byte{0, 0, 0, 0}
			}
		}
		writer.Write([]byte{0, 0, 0, 0})
	}
	writer.Flush()
}
func (b *Batch) appendRec(kt keyType, key, value []byte) {
	n := 1 + binary.MaxVarintLen32 + len(key)
	if kt == keyTypeVal {
		n += binary.MaxVarintLen32 + len(value)
	}
	b.grow(n)
	index := batchIndex{keyType: kt}
	o := len(b.data)
	data := b.data[:o+n]
	data[o] = byte(kt)
	o++
	o += binary.PutUvarint(data[o:], uint64(len(key)))
	index.keyPos = o
	index.keyLen = len(key)
	o += copy(data[o:], key)
	if kt == keyTypeVal {
		o += binary.PutUvarint(data[o:], uint64(len(value)))
		index.valuePos = o
		index.valueLen = len(value)
		o += copy(data[o:], value)
	}
	b.data = data[:o]
	b.index = append(b.index, index)
	b.internalLen += index.keyLen + index.valueLen + 8
}
// ToBuf serializes a frame into a byte array
func (f FrameWindowUpdate) ToBuf() ([]byte, error) {
	buf := make([]byte, 1+4+8)
	buf[0] = WindowUpdateFrame
	binary.PutUvarint(buf[1:5], f.StreamID)
	binary.PutUvarint(buf[5:13], f.ByteOffset)
	return buf, nil
}
// WriteTo writes the header to w and returns the number of bytes
// actually written to w. Header size can be 16 and it will be
// recalculated if the size is bigger.
func (header *Header) WriteTo(w io.Writer) (n int64, err error) {
	buf := bytes.NewBuffer(nil)
	bin := make([]byte, binary.MaxVarintLen64)

	// Write uint64 binary encoding of the snapshot count to buf.
	buf.Write(bin[0:binary.PutUvarint(bin, uint64(len(header.Snapshots)))])

	// Write each snapshot to buf.
	for _, snapshot := range header.Snapshots {
		binary.Write(buf, binary.BigEndian, snapshot.Timestamp)
		buf.Write(bin[0:binary.PutUvarint(bin, snapshot.ByteSize)])
	}

	// Find the variable size of the header size field.
	headerSizeSize := uint64(binary.PutUvarint(bin, header.ByteSize))

	// Recalculate header byte size until it gets right.
	for header.ByteSize < headerSizeSize+uint64(buf.Len()) {
		header.ByteSize = headerSizeSize + uint64(buf.Len())
		headerSizeSize = uint64(binary.PutUvarint(bin, header.ByteSize))
	}

	n1, err := w.Write(bin[0:headerSizeSize])
	n += int64(n1)
	if err != nil {
		return
	}
	n1, err = w.Write(buf.Bytes())
	n += int64(n1)
	for uint64(n) < header.ByteSize {
		n1, err = w.Write([]byte{0})
		n += int64(n1)
		if err != nil {
			return
		}
	}
	return
}
func (mc *memoryCache) inc(c context.Context, key string, delta int64, initialValue uint64) (uint64, error) {
	mc.Lock()
	defer mc.Unlock()
	item, ok := mc.items[key]
	if !ok {
		var z time.Time
		// Size the buffer for the largest possible uvarint rather than the
		// fixed-width size of a uint64, so later re-encodings always fit.
		b := make([]byte, binary.MaxVarintLen64)
		binary.PutUvarint(b, initialValue)
		item = &cacheItem{b, z}
		mc.items[key] = item
	}
	v, n := binary.Uvarint(item.data)
	if n <= 0 {
		return 0, fmt.Errorf("inc: binary.Uvarint error: %d", n)
	}
	switch {
	case delta < 0 && v < uint64(-delta):
		// Clamp at zero rather than wrapping around.
		v = 0
	case delta < 0:
		v -= uint64(-delta)
	case delta > 0:
		v += uint64(delta)
	}
	binary.PutUvarint(item.data, v)
	return v, nil
}
func TestNoMergeExtensionMerge(t *testing.T) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	bigm := test.NewPopulatedMyExtendable(r, true)
	m := test.NewPopulatedNinOptNative(r, true)
	err := proto.SetExtension(bigm, test.E_FieldB, m)
	if err != nil {
		panic(err)
	}
	data, err := proto.Marshal(bigm)
	if err != nil {
		panic(err)
	}
	key := uint32(101)<<3 | uint32(2)
	data2 := make([]byte, 10)
	n := binary.PutUvarint(data2, uint64(key))
	data2 = data2[:n]
	data = append(data, data2...)
	data4, err := proto.Marshal(test.NewPopulatedNinOptNative(r, true))
	if err != nil {
		panic(err)
	}
	data3 := make([]byte, 10)
	n = binary.PutUvarint(data3, uint64(len(data4)))
	data3 = data3[:n]
	data = append(data, data3...)
	data = append(data, data4...)
	err = fieldpath.NoMerge(data, test.ThetestDescription(), "test", "MyExtendable")
	if err == nil || !strings.Contains(err.Error(), "requires merging") {
		t.Fatalf("should require merging")
	}
}
func (x *msgDial) write(w io.Writer) (err error) {
	q := make([]byte, maxMsgDialLen)
	n1 := binary.PutUvarint(q, uint64(x.ID))
	n2 := binary.PutUvarint(q[n1:], uint64(x.SeqNo))
	_, err = w.Write(q[:n1+n2])
	return err
}
// MarshalBinary implements binary marshalling for Addresses.
//
// A marshalled Address only carries its identifier. When unmarshalled on
// the same node, the unmarshalled address will be reconnected to the
// original Mailbox. If unmarshalled on a different node, a reference to
// the remote mailbox will be unmarshalled.
func (a Address) MarshalBinary() ([]byte, error) {
	address := a.getAddress()
	if address == nil {
		return nil, ErrIllegalAddressFormat
	}
	switch mbox := address.(type) {
	case *Mailbox:
		b := make([]byte, 10)
		written := binary.PutUvarint(b, uint64(mbox.id))
		return append([]byte("<"), b[:written]...), nil
	case noMailbox:
		return []byte("X"), nil
	case boundRemoteAddress:
		b := make([]byte, 10)
		written := binary.PutUvarint(b, uint64(mbox.mailboxID))
		return append([]byte("<"), b[:written]...), nil
	case registryMailbox:
		return []byte("\"" + string(mbox)), nil
	default:
		return nil, ErrIllegalAddressFormat
	}
}
// Diff computes a delta from data1 to data2. The
// result is such that Patch(data1, Diff(data1, data2)) == data2.
func Diff(data1, data2 []byte) []byte {
	// Store lengths of inputs.
	patch := make([]byte, 32)
	n1 := binary.PutUvarint(patch, uint64(len(data1)))
	n2 := binary.PutUvarint(patch[n1:], uint64(len(data2)))
	patch = patch[:n1+n2]

	// First hash chunks of data1.
	hashes := hashChunks(data1)

	// Compute rolling hashes of data2 and see whether
	// we recognize parts of data1.
	var p uint32
	lastmatch := -1
	for i := 0; i < len(data2); i++ {
		b := data2[i]
		if i < _W {
			p = (p << 8) ^ uint32(b) ^ _T[uint8(p>>(degree-8))]
			continue
		}
		// Invariant: i >= W and p == hashRabin(data2[i-W:i])
		//if p != hashRabin(data2[i-_W:i]) {
		//	println(p, hashRabin(data2[i-_W:i]))
		//	panic("p != hashRabin(data2[i-_W:i])")
		//}
		refi, ok := hashes.Get(p)
		if ok && bytes.Equal(data1[refi:refi+_W], data2[i-_W:i]) {
			// We have a match! Try to extend it left and right.
			testi := i - _W
			for refi > 0 && testi > lastmatch+1 && data1[refi-1] == data2[testi-1] {
				refi--
				testi--
			}
			refj, testj := refi+i-testi, i
			for refj < len(data1) && testj < len(data2) && data1[refj] == data2[testj] {
				refj++
				testj++
			}
			// Now data1[refi:refj] == data2[testi:testj]
			patch = appendInlineData(patch, data2[lastmatch+1:testi])
			patch = appendRefData(patch, uint32(refi), uint32(refj-refi))
			// Skip bytes and update hash.
			i = testj + _W - 1
			lastmatch = testj - 1
			if i >= len(data2) {
				break
			}
			p = hashRabin(data2[testj : testj+_W])
			continue
		}
		// Cancel out data2[i-W] and take data2[i].
		p ^= _U[data2[i-_W]]
		p = (p << 8) ^ uint32(b) ^ _T[uint8(p>>(degree-8))]
	}
	patch = appendInlineData(patch, data2[lastmatch+1:])
	return patch
}
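// The two uvarints at the front of a patch can be read back with
// binary.Uvarint. The sketch below is illustrative only (patchHeader is a
// hypothetical helper, not part of the original package); it shows how a
// consumer would recover the input lengths that Diff stored before walking
// the inline/ref records that follow.
func patchHeader(patch []byte) (len1, len2 uint64, body []byte, err error) {
	len1, n1 := binary.Uvarint(patch)
	if n1 <= 0 {
		return 0, 0, nil, errors.New("patchHeader: bad uvarint for len(data1)")
	}
	len2, n2 := binary.Uvarint(patch[n1:])
	if n2 <= 0 {
		return 0, 0, nil, errors.New("patchHeader: bad uvarint for len(data2)")
	}
	return len1, len2, patch[n1+n2:], nil
}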
func init() {
	var buffer bytes.Buffer
	b := make([]byte, 16)

	// Get the current user name.
	osU, err := user.Current()
	user := "******"
	if err == nil {
		user = osU.Username
	}
	buffer.WriteString(user)

	// Create the constant to make build a unique ID.
	start := uint64(time.Now().UnixNano())
	binary.PutUvarint(b, start)
	buffer.Write(b)
	pid := uint64(os.Getpid())
	binary.PutUvarint(b, pid)
	buffer.Write(b)

	// Set the node.
	if !uuid.SetNodeID(buffer.Bytes()) {
		os.Exit(-1)
	}

	// Initialize the channel and blank node type.
	nextVal, tBlank = make(chan uuid.UUID, chanSize), Type("/_")
	go func() {
		for {
			nextVal <- uuid.NewRandom()
		}
	}()
}
// Return a signature writer. The call itself does not write anything.
// Use with a Signature header.
func (w *Writer) SignatureWriter() rsync.SignatureWriter {
	if w.t != TypeSignature {
		// This is a program structure issue, so panic.
		panic(ErrInvalidCall)
	}
	buffer := make([]byte, 2048)
	return func(block rsync.BlockHash) error {
		var n int
		var err error
		n = binary.PutUvarint(buffer, block.Index)
		binary.BigEndian.PutUint32(buffer[n:], block.WeakHash)
		n += 4
		n += binary.PutUvarint(buffer[n:], uint64(len(block.StrongHash)))
		_, err = w.body.Write(buffer[:n])
		if err != nil {
			return err
		}
		_, err = w.body.Write(block.StrongHash)
		if err != nil {
			return err
		}
		return nil
	}
}
// writeProto writes a uvarint size and then a protobuf to w.
// If the data takes no space (like rpc.InvalidRequest),
// only a zero size is written.
func writeProto(w io.Writer, pb proto.Message) error {
	// Allocate enough space for the biggest uvarint.
	var size [binary.MaxVarintLen64]byte
	if pb == nil {
		n := binary.PutUvarint(size[:], uint64(0))
		if _, err := w.Write(size[:n]); err != nil {
			return err
		}
		return nil
	}
	// Marshal the protobuf.
	data, err := proto.Marshal(pb)
	if err != nil {
		return err
	}
	// Write the size and data.
	n := binary.PutUvarint(size[:], uint64(len(data)))
	if _, err = w.Write(size[:n]); err != nil {
		return err
	}
	if _, err = w.Write(data); err != nil {
		return err
	}
	return nil
}
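// A matching reader is not shown in the original; the following sketch
// (readProto is a hypothetical name) assumes the peer framed the message
// with writeProto above: a uvarint length prefix followed by the marshalled
// bytes. It takes a *bufio.Reader so binary.ReadUvarint can consume one
// byte at a time without over-reading.
func readProto(r *bufio.Reader, pb proto.Message) error {
	// Read the uvarint size prefix.
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	if size == 0 {
		// A zero size means an empty message was written.
		return nil
	}
	data := make([]byte, size)
	if _, err := io.ReadFull(r, data); err != nil {
		return err
	}
	return proto.Unmarshal(data, pb)
}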
// Put a new key-value pair.
func (ht *HashTable) Put(key, val uint64) {
	var bucket, entry uint64 = ht.hashKey(key), 0
	region := bucket / HASH_TABLE_REGION_SIZE
	mutex := ht.regionRWMutex[region]
	mutex.Lock()
	for {
		entryAddr := bucket*ht.BucketSize + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
		if entryAddr > ht.File.Append-ENTRY_SIZE {
			mutex.Unlock()
			return
		}
		if ht.File.Buf[entryAddr] != ENTRY_VALID {
			ht.File.Buf[entryAddr] = ENTRY_VALID
			binary.PutUvarint(ht.File.Buf[entryAddr+1:entryAddr+11], key)
			binary.PutUvarint(ht.File.Buf[entryAddr+11:entryAddr+21], val)
			mutex.Unlock()
			return
		}
		if entry++; entry == ht.PerBucket {
			mutex.Unlock()
			entry = 0
			if bucket = ht.nextBucket(bucket); bucket == 0 || bucket >= ht.File.Append-BUCKET_HEADER_SIZE {
				ht.grow(ht.hashKey(key))
				ht.Put(key, val)
				return
			}
			region = bucket / HASH_TABLE_REGION_SIZE
			mutex = ht.regionRWMutex[region]
			mutex.Lock()
		}
	}
}
// ToBuf serializes a frame into a byte array
func (f FrameResetStream) ToBuf() ([]byte, error) {
	buf := make([]byte, 1+4+4)
	buf[0] = ResetStreamFrame
	binary.PutUvarint(buf[1:5], f.StreamID)
	binary.PutUvarint(buf[5:9], f.ErrorCode)
	return buf, nil
}
// Grow a new bucket on the chain of buckets.
func (ht *HashTable) grow(bucket uint64) {
	// Lock both bucket creation and the bucket affected.
	ht.tableGrowMutex.Lock()
	// When the file is full, we have to lock down everything before growing the file.
	if !ht.File.CheckSize(ht.BucketSize) {
		originalMutexes := ht.regionRWMutex
		for _, region := range originalMutexes {
			region.Lock()
		}
		ht.File.CheckSizeAndEnsure(ht.BucketSize)
		// Make more mutexes.
		moreMutexes := make([]*sync.RWMutex, HASH_TABLE_GROWTH/HASH_TABLE_REGION_SIZE+1)
		for i := range moreMutexes {
			moreMutexes[i] = new(sync.RWMutex)
		}
		// Merge the mutexes together.
		ht.regionRWMutex = append(ht.regionRWMutex, moreMutexes...)
		for _, region := range originalMutexes {
			region.Unlock()
		}
	}
	lastBucketAddr := ht.lastBucket(bucket) * ht.BucketSize
	binary.PutUvarint(ht.File.Buf[lastBucketAddr:lastBucketAddr+8], ht.numberBuckets())
	// Mark the new bucket.
	newBucket := ht.File.Append
	binary.PutUvarint(ht.File.Buf[newBucket:newBucket+10], BUCKET_HEADER_NEW)
	ht.File.Append += ht.BucketSize
	ht.tableGrowMutex.Unlock()
}
// Format of a simple task:
// 2 bytes  : magic number
// 8 bytes  : id length : l
// 8 bytes  : data length : n
// 32 bytes : target
// 32 bytes : action
// l bytes  : id
// n bytes  : data
func (t *SimpleTask) Serialize() []byte {
	serialized := make([]byte, 2+8+8+32+32+len(t.id)+len(t.data))
	// Magic number
	serialized[0] = 0
	serialized[1] = 1
	// Task ID length
	binary.PutUvarint(serialized[2:], uint64(len(t.id)))
	// Task data length
	binary.PutUvarint(serialized[10:], uint64(len(t.data)))
	// Target
	copy(serialized[18:], []byte(t.target))
	// Action
	copy(serialized[50:], []byte(t.action))
	// ID
	copy(serialized[82:], t.id)
	// Data
	copy(serialized[82+len(t.id):], t.data)
	return serialized
}
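// A decoder is not shown in the original. This sketch (deserializeSimpleTask
// is a hypothetical helper, and the field names/types on SimpleTask are
// assumed from Serialize above) inverts the documented layout: the two
// lengths are uvarints written into fixed 8-byte slots, so they are read
// back with binary.Uvarint at offsets 2 and 10.
func deserializeSimpleTask(b []byte) (*SimpleTask, error) {
	if len(b) < 82 || b[0] != 0 || b[1] != 1 {
		return nil, errors.New("deserialize: bad header")
	}
	idLen, n := binary.Uvarint(b[2:10])
	if n <= 0 {
		return nil, errors.New("deserialize: bad id length")
	}
	dataLen, n := binary.Uvarint(b[10:18])
	if n <= 0 {
		return nil, errors.New("deserialize: bad data length")
	}
	if uint64(len(b)) < 82+idLen+dataLen {
		return nil, errors.New("deserialize: truncated payload")
	}
	return &SimpleTask{
		// Fixed-width string fields are padded with NULs on the way in.
		target: string(bytes.TrimRight(b[18:50], "\x00")),
		action: string(bytes.TrimRight(b[50:82], "\x00")),
		id:     b[82 : 82+idLen],
		data:   b[82+idLen : 82+idLen+dataLen],
	}, nil
}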
/* NewID ... */
func NewID() string {
	tb := make([]byte, 64)
	tc := binary.PutUvarint(tb, uint64(time.Now().UnixNano()))
	rb := make([]byte, 64)
	rc := binary.PutUvarint(rb, uint64(rand.Int63()))
	b := append(tb[:tc], rb[:rc]...)
	return fmt.Sprintf("%x", md5.Sum(b))
}
func newStreamResponse(cmd ctrlCommands, param uint32) []byte {
	buf := make([]byte, binary.MaxVarintLen16+binary.MaxVarintLen32)
	// Control command
	binary.PutUvarint(buf[:binary.MaxVarintLen16], uint64(cmd))
	// Parameter
	binary.PutUvarint(buf[binary.MaxVarintLen16:binary.MaxVarintLen16+binary.MaxVarintLen32], uint64(param))
	return buf
}
// ToBuf serializes a FrameConnectionClose into a byte array
func (f FrameConnectionClose) ToBuf() ([]byte, error) {
	buf := make([]byte, 1+4+2+len(f.Reason))
	buf[0] = ConnectionCloseFrame
	binary.PutUvarint(buf[1:5], f.ErrorCode)
	binary.PutUvarint(buf[5:7], uint64(len(f.Reason)))
	copy(buf[7:], f.Reason)
	return buf, nil
}
func createValidPacket() []byte {
	buf := make([]byte, 9)
	buf[0] = 0x01
	binary.PutUvarint(buf[2:4], 5)
	binary.PutUvarint(buf[4:6], 0x0000)
	binary.PutUvarint(buf[6:8], 1)
	buf[8] = byte(1)
	return buf
}
// ToBuf serializes a frame into a byte array
func (f FrameAck) ToBuf() ([]byte, error) {
	buf := make([]byte, 1+1+6+2+1)
	buf[0] = AckFrame
	buf[1] = f.ReceivedEntropy
	binary.PutUvarint(buf[2:8], f.LargestObserved)
	binary.PutUvarint(buf[8:10], f.LargestObservedDeltaTime)
	// TODO: serialize the remaining fields.
	return buf, errors.New("frame FrameAck not fully implemented")
}
func Encode(w io.Writer, f *BloomFilter) {
	maxsize := 2 * binary.MaxVarintLen64
	dump := make([]byte, maxsize)
	// Pack m and k.
	pos := binary.PutUvarint(dump, uint64(f.m))
	pos += binary.PutUvarint(dump[pos:], uint64(f.k))
	w.Write(dump[0:pos])
	bitset.Encode(w, f.b)
}
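// The original shows only the encoder. The sketch below is a hypothetical
// counterpart (the field names m, k, and b, and their uint types, are
// assumptions taken from Encode above): it reads the two uvarints back with
// binary.ReadUvarint, which needs an io.ByteReader such as *bufio.Reader.
func Decode(r *bufio.Reader, f *BloomFilter) error {
	m, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	k, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	f.m = uint(m)
	f.k = uint(k)
	// The bitset payload that follows would be restored by a matching
	// decoder in the bitset package, which is not shown here.
	return nil
}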
// Serialize the CHD. The serialized form is conducive to mmapped access. See
// the Mmap function for details.
func (c *CHD) Write(w io.Writer) error {
	write := func(nd ...interface{}) error {
		for _, d := range nd {
			if err := binary.Write(w, binary.LittleEndian, d); err != nil {
				return err
			}
		}
		return nil
	}

	var storeKeys uint32
	if c.StoreKeys {
		storeKeys = 1
	}

	data := []interface{}{
		uint32(len(c.r)), c.r,
		uint32(len(c.indices)), c.indices,
		uint32(c.el),
		storeKeys,
	}

	if err := write(data...); err != nil {
		return err
	}

	vb := make([]byte, binary.MaxVarintLen64)
	for i := 0; i < int(c.el); i++ {
		if c.StoreKeys {
			k := c.keys[i]
			n := binary.PutUvarint(vb, uint64(len(k)))
			if _, err := w.Write(vb[:n]); err != nil {
				return err
			}
			if _, err := w.Write(k); err != nil {
				return err
			}
		}
		if c.ValuesAreVarints {
			v := c.valueVarints[i]
			n := binary.PutUvarint(vb, v)
			if _, err := w.Write(vb[:n]); err != nil {
				return err
			}
		} else {
			v := c.values[i]
			n := binary.PutUvarint(vb, uint64(len(v)))
			if _, err := w.Write(vb[:n]); err != nil {
				return err
			}
			if _, err := w.Write(v); err != nil {
				return err
			}
		}
	}
	return nil
}
// ToBuf serializes a FrameGoAway into a byte array
func (f FrameGoAway) ToBuf() ([]byte, error) {
	buf := make([]byte, 1+4+4+2+len(f.Reason))
	buf[0] = GoAwayFrame
	binary.PutUvarint(buf[1:5], f.ErrorCode)
	binary.PutUvarint(buf[5:9], f.LastGoodStreamID)
	// The reason length occupies the two bytes after LastGoodStreamID;
	// writing it at buf[8:11] would clobber the stream ID's last byte.
	binary.PutUvarint(buf[9:11], uint64(len(f.Reason)))
	copy(buf[11:], f.Reason)
	return buf, nil
}
func (bloom Bloom) hashKey(index int, elem uint64) uint32 {
	initialValue := bloom.hashes[index]
	buffer := make([]byte, 24)
	binary.PutUvarint(buffer, initialValue)
	binary.PutUvarint(buffer[8:], elem)
	hash := fnv.New32()
	hash.Write(buffer)
	hashValue := hash.Sum32()
	return hashValue % uint32(bloom.size)
}
// Dumps b in compact & restorable format.
func Encode(w io.Writer, b *BitSet) {
	// TODO: there should be error handling.
	dump := make([]byte, binary.MaxVarintLen64)
	pos := binary.PutUvarint(dump, uint64(b.length))
	w.Write(dump[0:pos])
	for _, v := range b.set {
		// Reuse the scratch buffer; each word is written before the next
		// PutUvarint overwrites it.
		pos = binary.PutUvarint(dump, uint64(v))
		w.Write(dump[0:pos])
	}
}
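// Only the encoder exists in the original. The sketch below is a
// hypothetical counterpart (the BitSet fields length and set, and their
// uint/[]uint64 types, are assumptions taken from Encode above); it reads
// the length uvarint, then consumes words until the stream ends.
func Decode(r *bufio.Reader, b *BitSet) error {
	length, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	b.length = uint(length)
	b.set = b.set[:0]
	for {
		v, err := binary.ReadUvarint(r)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		b.set = append(b.set, v)
	}
}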
// Value serializes a BlobPos as bytes
// (value is encoded as uvarint: n + offset + size).
func (blob BlobPos) Value() []byte {
	bufTmp := make([]byte, 10)
	var buf bytes.Buffer
	w := binary.PutUvarint(bufTmp[:], uint64(blob.n))
	buf.Write(bufTmp[:w])
	w = binary.PutUvarint(bufTmp[:], uint64(blob.offset))
	buf.Write(bufTmp[:w])
	w = binary.PutUvarint(bufTmp[:], uint64(blob.size))
	buf.Write(bufTmp[:w])
	return buf.Bytes()
}
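// Going the other way is symmetric: the sketch below (parseBlobPos is a
// hypothetical helper, not part of the original package) decodes the three
// uvarints that Value concatenated, in the same n, offset, size order.
func parseBlobPos(data []byte) (n, offset, size uint64, err error) {
	var read int
	n, read = binary.Uvarint(data)
	if read <= 0 {
		return 0, 0, 0, errors.New("parseBlobPos: bad n")
	}
	data = data[read:]
	offset, read = binary.Uvarint(data)
	if read <= 0 {
		return 0, 0, 0, errors.New("parseBlobPos: bad offset")
	}
	data = data[read:]
	size, read = binary.Uvarint(data)
	if read <= 0 {
		return 0, 0, 0, errors.New("parseBlobPos: bad size")
	}
	return n, offset, size, nil
}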