Example #1
func (bucketKey *bucketKey) getEncodedBytes() []byte {
	encodedBytes := []byte{}
	encodedBytes = append(encodedBytes, byte(0))
	encodedBytes = append(encodedBytes, proto.EncodeVarint(uint64(bucketKey.level))...)
	encodedBytes = append(encodedBytes, proto.EncodeVarint(uint64(bucketKey.bucketNumber))...)
	return encodedBytes
}
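For orientation, a minimal decoding sketch for the layout produced above (not part of the original source), assuming the same hypothetical bucketKey struct with int fields level and bucketNumber; proto.DecodeVarint returns the decoded value plus the number of bytes it consumed.
func decodeBucketKeyBytes(encoded []byte) bucketKey {
	// skip the leading 0x00 marker byte written by getEncodedBytes
	rest := encoded[1:]
	level, n := proto.DecodeVarint(rest)
	bucketNumber, _ := proto.DecodeVarint(rest[n:])
	return bucketKey{level: int(level), bucketNumber: int(bucketNumber)}
}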
Example #2
func newTestMessage() *pb.MyMessage {
	msg := &pb.MyMessage{
		Count: proto.Int32(42),
		Name:  proto.String("Dave"),
		Quote: proto.String(`"I didn't want to go."`),
		Pet:   []string{"bunny", "kitty", "horsey"},
		Inner: &pb.InnerMessage{
			Host:      proto.String("footrest.syd"),
			Port:      proto.Int32(7001),
			Connected: proto.Bool(true),
		},
		Others: []*pb.OtherMessage{
			{
				Key:   proto.Int64(0xdeadbeef),
				Value: []byte{1, 65, 7, 12},
			},
			{
				Weight: proto.Float32(6.022),
				Inner: &pb.InnerMessage{
					Host: proto.String("lesha.mtv"),
					Port: proto.Int32(8002),
				},
			},
		},
		Bikeshed: pb.MyMessage_BLUE.Enum(),
		Somegroup: &pb.MyMessage_SomeGroup{
			GroupField: proto.Int32(8),
		},
		// One normally wouldn't do this.
		// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
		XXX_unrecognized: []byte{13<<3 | 0, 4},
	}
	ext := &pb.Ext{
		Data: proto.String("Big gobs for big rats"),
	}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
		panic(err)
	}
	greetings := []string{"adg", "easy", "cow"}
	if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
		panic(err)
	}

	// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
	b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
	if err != nil {
		panic(err)
	}
	b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
	proto.SetRawExtension(msg, 201, b)

	// Extensions can be plain fields, too, so let's test that.
	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
	proto.SetRawExtension(msg, 202, b)

	return msg
}
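As a side note on the raw-extension bytes assembled above: the varint tag packs the field number and wire type together, and proto.DecodeVarint can split them back apart. A small sketch, not from the original test:
func splitRawExtension(b []byte) (fieldNum uint64, wireType uint64, payload []byte) {
	tag, n := proto.DecodeVarint(b) // e.g. 201<<3|proto.WireBytes from the code above
	return tag >> 3, tag & 0x7, b[n:]
}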
Example #3
func encodeValue(value []byte, version uint64) []byte {
	versionBytes := proto.EncodeVarint(version)
	deleteMarker := 0
	if value == nil {
		deleteMarker = 1
	}
	deleteMarkerBytes := proto.EncodeVarint(uint64(deleteMarker))
	encodedValue := append(versionBytes, deleteMarkerBytes...)
	if value != nil {
		encodedValue = append(encodedValue, value...)
	}
	return encodedValue
}
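A sketch of the inverse of encodeValue, assuming the same layout (version varint, delete-marker varint, then the raw value); this is illustrative and not taken from the original package.
func decodeValue(encodedValue []byte) (version uint64, value []byte) {
	version, n := proto.DecodeVarint(encodedValue)
	deleteMarker, m := proto.DecodeVarint(encodedValue[n:])
	if deleteMarker == 1 {
		// a nil value was encoded, so nothing follows the marker
		return version, nil
	}
	return version, encodedValue[n+m:]
}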
Example #4
func (trieNode *trieNode) computeCryptoHash() []byte {
	stateTrieLogger.Debug("Enter computeCryptoHash() for trieNode [%s]", trieNode)
	var cryptoHashContent []byte
	if trieNode.containsValue() {
		stateTrieLogger.Debug("Adding value to hash computation for trieNode [%s]", trieNode)
		key := trieNode.trieKey.getEncodedBytes()
		cryptoHashContent = append(cryptoHashContent, proto.EncodeVarint(uint64(len(key)))...)
		cryptoHashContent = append(cryptoHashContent, key...)
		cryptoHashContent = append(cryptoHashContent, trieNode.value...)
	}

	sortedChildrenIndexes := trieNode.getSortedChildrenIndex()
	for _, index := range sortedChildrenIndexes {
		childCryptoHash := trieNode.childrenCryptoHashes[index]
		stateTrieLogger.Debug("Adding hash [%#v] for child number [%d] to hash computation for trieNode [%s]", childCryptoHash, index, trieNode)
		cryptoHashContent = append(cryptoHashContent, childCryptoHash...)
	}

	if cryptoHashContent == nil {
		// node has no associated value and no associated children.
		stateTrieLogger.Debug("Returning nil as hash for trieNode = [%s]. Also, marking this key for deletion.", trieNode)
		trieNode.markedForDeletion = true
		return nil
	}

	if !trieNode.containsValue() && trieNode.getNumChildren() == 1 {
		// node has no associated value and has a single child. Propagate the child hash up
		stateTrieLogger.Debug("Returning hash as of a single child for trieKey = [%s]", trieNode.trieKey)
		return cryptoHashContent
	}

	stateTrieLogger.Debug("Recomputing hash for trieKey = [%s]", trieNode)
	return util.ComputeCryptoHash(cryptoHashContent)
}
Example #5
// Take n tokens from bucket t, key k
func (client *Client) Take(t string, k string, n int32) (response *limitd.Response, takeResponse *limitd.TakeResponse, err error) {
	requestID := uniuri.New()

	request := &limitd.Request{
		Id:     proto.String(requestID),
		Method: limitd.Request_TAKE.Enum(),
		Type:   proto.String(t),
		Key:    proto.String(k),
		Count:  proto.Int32(n),
	}

	responseChan := make(chan *limitd.Response)
	client.PendingRequests[requestID] = responseChan

	// Wire format: proto.EncodeVarint of the message length, followed by the proto.Marshal output
	data, _ := proto.Marshal(request)
	data = append(proto.EncodeVarint(uint64(len(data))), data...)
	client.Conn.Write(data)

	response = <-responseChan
	takeR, err := proto.GetExtension(response, limitd.E_TakeResponse_Response)
	if err != nil {
		return
	}

	if takeResponseCasted, ok := takeR.(*limitd.TakeResponse); ok {
		takeResponse = takeResponseCasted
	}

	return
}
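The client above writes a varint-length-prefixed frame to the connection. A hedged sketch of the reading side (assumed, not from the limitd client source, and assuming the io package is imported): read the varint one byte at a time, then the message body.
func readDelimited(r io.Reader, msg proto.Message) error {
	var lenBuf []byte
	b := make([]byte, 1)
	for {
		if _, err := io.ReadFull(r, b); err != nil {
			return err
		}
		lenBuf = append(lenBuf, b[0])
		if b[0] < 0x80 { // last byte of the varint has the high bit clear
			break
		}
	}
	length, _ := proto.DecodeVarint(lenBuf)
	data := make([]byte, length)
	if _, err := io.ReadFull(r, data); err != nil {
		return err
	}
	return proto.Unmarshal(data, msg)
}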
Example #6
func (b *PbBuffer) WriteDelimitedBuffers(bufs ...*PbBuffer) error {
	totalLength := 0
	lens := make([][]byte, len(bufs))
	for i, v := range bufs {
		n := len(v.Bytes())
		lenb := pb.EncodeVarint(uint64(n))

		totalLength += len(lenb) + n
		lens[i] = lenb
	}

	err := b.WriteInt32(int32(totalLength))
	if err != nil {
		return errors.Trace(err)
	}

	for i, v := range bufs {
		_, err = b.Write(lens[i])
		if err != nil {
			return errors.Trace(err)
		}

		_, err = b.Write(v.Bytes())
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}
Example #7
func (e *ExecutionLogger) RecordStepEvent(step *model.Step, event *model.StepEvent) {
	e.mutex.Lock()
	defer e.mutex.Unlock()

	event.StepId = step.Id
	t := time.Now()
	ts := t.UnixNano()
	ms := ts / 1000000
	if e.lastTimestamp != 0 {
		delta := ms - e.lastTimestamp
		event.TimestampDelta = delta
	} else {
		event.TimestampAbsolute = ms
	}

	data, err := proto.Marshal(event)
	if err != nil {
		glog.Fatalf("error formatting step event: %v", err)
	}

	lengthPrefix := proto.EncodeVarint(uint64(len(data)))
	_, err = e.writer.Write(lengthPrefix)
	if err == nil {
		_, err = e.writer.Write(data)
	}
	if err != nil {
		glog.Fatalf("error writing to execution log: %v", err)
	}

	e.lastTimestamp = ms
}
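Reading such a log back is the mirror image: repeatedly decode a varint length, then unmarshal that many bytes. A sketch under the assumption that the records are available as one byte slice and that model.StepEvent is the message type written above:
func readStepEvents(data []byte) ([]*model.StepEvent, error) {
	var events []*model.StepEvent
	for len(data) > 0 {
		length, n := proto.DecodeVarint(data)
		if n == 0 || int(length) > len(data)-n {
			return nil, fmt.Errorf("truncated or corrupt record")
		}
		event := &model.StepEvent{}
		if err := proto.Unmarshal(data[n:n+int(length)], event); err != nil {
			return nil, err
		}
		events = append(events, event)
		data = data[n+int(length):]
	}
	return events, nil
}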
Example #8
func TestBlockfileMgrFileRolling(t *testing.T) {
	env := newTestEnv(t)
	blocks := testutil.ConstructTestBlocks(t, 100)
	size := 0
	for _, block := range blocks {
		by, _, err := serializeBlock(block)
		testutil.AssertNoError(t, err, "Error while serializing block")
		blockBytesSize := len(by)
		encodedLen := proto.EncodeVarint(uint64(blockBytesSize))
		size += blockBytesSize + len(encodedLen)
	}

	env.conf.maxBlockfileSize = int(0.75 * float64(size))
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	blkfileMgrWrapper.addBlocks(blocks)
	testutil.AssertEquals(t, blkfileMgrWrapper.blockfileMgr.cpInfo.latestFileChunkSuffixNum, 1)
	blkfileMgrWrapper.testGetBlockByHash(blocks)
	blkfileMgrWrapper.close()
	env.Cleanup()

	env = newTestEnv(t)
	defer env.Cleanup()
	env.conf.maxBlockfileSize = int(0.40 * float64(size))
	blkfileMgrWrapper = newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()
	blkfileMgrWrapper.addBlocks(blocks)
	testutil.AssertEquals(t, blkfileMgrWrapper.blockfileMgr.cpInfo.latestFileChunkSuffixNum, 2)
	blkfileMgrWrapper.testGetBlockByHash(blocks)
}
Example #9
func TestBlockFileStreamUnexpectedEOF(t *testing.T) {
	partialBlockBytes := []byte{}
	dummyBlockBytes := testutil.ConstructRandomBytes(t, 100)
	lenBytes := proto.EncodeVarint(uint64(len(dummyBlockBytes)))
	partialBlockBytes = append(partialBlockBytes, lenBytes...)
	partialBlockBytes = append(partialBlockBytes, dummyBlockBytes...)
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:1])
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:2])
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:5])
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:20])
}
Example #10
func expectedCryptoHashForTest(key *trieKey, value []byte, childrenHashes ...[]byte) []byte {
	expectedHash := []byte{}
	if key != nil {
		keyBytes := key.getEncodedBytes()
		expectedHash = append(expectedHash, proto.EncodeVarint(uint64(len(keyBytes)))...)
		expectedHash = append(expectedHash, keyBytes...)
		expectedHash = append(expectedHash, value...)
	}
	for _, b := range childrenHashes {
		expectedHash = append(expectedHash, b...)
	}
	return util.ComputeCryptoHash(expectedHash)
}
Example #11
func testBlockfileMgrCrashDuringWriting(t *testing.T, numBlocksBeforeCheckpoint int,
	numBlocksAfterCheckpoint int, numLastBlockBytes int, numPartialBytesToWrite int) {
	env := newTestEnv(t)
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	bg := testutil.NewBlockGenerator(t)
	blocksBeforeCP := bg.NextTestBlocks(numBlocksBeforeCheckpoint)
	blkfileMgrWrapper.addBlocks(blocksBeforeCP)
	currentCPInfo := blkfileMgrWrapper.blockfileMgr.cpInfo
	cpInfo1 := &checkpointInfo{
		currentCPInfo.latestFileChunkSuffixNum,
		currentCPInfo.latestFileChunksize,
		currentCPInfo.lastBlockNumber}

	blocksAfterCP := bg.NextTestBlocks(numBlocksAfterCheckpoint)
	blkfileMgrWrapper.addBlocks(blocksAfterCP)
	cpInfo2 := blkfileMgrWrapper.blockfileMgr.cpInfo

	// simulate a crash scenario
	lastBlockBytes := []byte{}
	encodedLen := proto.EncodeVarint(uint64(numLastBlockBytes))
	randomBytes := testutil.ConstructRandomBytes(t, numLastBlockBytes)
	lastBlockBytes = append(lastBlockBytes, encodedLen...)
	lastBlockBytes = append(lastBlockBytes, randomBytes...)
	partialBytes := lastBlockBytes[:numPartialBytesToWrite]
	blkfileMgrWrapper.blockfileMgr.currentFileWriter.append(partialBytes, true)
	blkfileMgrWrapper.blockfileMgr.saveCurrentInfo(cpInfo1, true)
	blkfileMgrWrapper.close()

	// simulate a start after a crash
	blkfileMgrWrapper = newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()
	cpInfo3 := blkfileMgrWrapper.blockfileMgr.cpInfo
	testutil.AssertEquals(t, cpInfo3, cpInfo2)

	// add fresh blocks after restart
	blocksAfterRestart := bg.NextTestBlocks(2)
	blkfileMgrWrapper.addBlocks(blocksAfterRestart)
	allBlocks := []*common.Block{}
	allBlocks = append(allBlocks, blocksBeforeCP...)
	allBlocks = append(allBlocks, blocksAfterCP...)
	allBlocks = append(allBlocks, blocksAfterRestart...)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 1, len(allBlocks), allBlocks)
}
Example #12
func (b *PbBuffer) WriteDelimitedBuffers(bufs ...*PbBuffer) error {
	totalLength := 0
	lens := make([][]byte, len(bufs))
	for i, v := range bufs {
		n := len(v.Bytes())
		lenb := pb.EncodeVarint(uint64(n))

		totalLength += len(lenb) + n
		lens[i] = lenb
	}

	b.WriteInt32(int32(totalLength))

	for i, v := range bufs {
		b.Write(lens[i])
		b.Write(v.Bytes())
	}

	return nil
}
Example #13
// EncodeOrderPreservingVarUint64 returns a byte representation of a uint64 number such that
// leading zero bytes are trimmed in order to reduce the length of the array.
// To preserve ordering under a default bytes comparison, the first byte contains the number of remaining bytes.
// The presence of the first byte also allows the returned bytes to be used as part of a larger byte array,
// such as a composite-key representation in the db.
func EncodeOrderPreservingVarUint64(number uint64) []byte {
	bytes := make([]byte, 8)
	binary.BigEndian.PutUint64(bytes, number)
	startingIndex := 0
	size := 0
	for i, b := range bytes {
		if b != 0x00 {
			startingIndex = i
			size = 8 - i
			break
		}
	}
	sizeBytes := proto.EncodeVarint(uint64(size))
	if len(sizeBytes) > 1 {
		panic(fmt.Errorf("[]sizeBytes should not be more than one byte because the max number it needs to hold is 8. size=%d", size))
	}
	encodedBytes := make([]byte, size+1)
	encodedBytes[0] = sizeBytes[0]
	copy(encodedBytes[1:], bytes[startingIndex:])
	return encodedBytes
}
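A sketch of the corresponding decode (the original package has its own decoder; this version is written here only to illustrate the layout): the first byte carries the count of significant bytes, which are then reassembled big-endian.
func decodeOrderPreservingVarUint64(encoded []byte) (number uint64, bytesConsumed int) {
	size, n := proto.DecodeVarint(encoded) // always a single byte here since size <= 8
	for _, b := range encoded[n : n+int(size)] {
		number = number<<8 | uint64(b)
	}
	return number, n + int(size)
}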
Example #14
// sendRPC sends an RPC out to the wire.
// It returns an error if the request could not be serialized or written.
func (c *Client) sendRPC(rpc hrpc.Call) error {
	// Header.
	c.id++
	reqheader := &pb.RequestHeader{
		CallId:       &c.id,
		MethodName:   proto.String(rpc.GetName()),
		RequestParam: proto.Bool(true),
	}

	payload, err := rpc.Serialize()
	if err != nil {
		return fmt.Errorf("Failed to serialize RPC: %s", err)
	}
	payloadLen := proto.EncodeVarint(uint64(len(payload)))

	headerData, err := proto.Marshal(reqheader)
	if err != nil {
		return fmt.Errorf("Failed to marshal Get request: %s", err)
	}

	buf := make([]byte, 5, 4+1+len(headerData)+len(payloadLen)+len(payload))
	binary.BigEndian.PutUint32(buf, uint32(cap(buf)-4))
	buf[4] = byte(len(headerData))
	buf = append(buf, headerData...)
	buf = append(buf, payloadLen...)
	buf = append(buf, payload...)

	c.sentRPCsMutex.Lock()
	c.sentRPCs[c.id] = rpc
	c.sentRPCsMutex.Unlock()

	err = c.write(buf)
	if err != nil {
		return UnrecoverableError{err}
	}

	return nil
}
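For reference, the frame assembled above is: a 4-byte big-endian total length, one byte for the header length, the marshaled RequestHeader, a varint payload length, and the payload. A hypothetical parser, not part of the client:
func parseRPCFrame(buf []byte) (headerBytes, payload []byte) {
	headerLen := int(buf[4])
	headerBytes = buf[5 : 5+headerLen]
	rest := buf[5+headerLen:]
	payloadLen, n := proto.DecodeVarint(rest)
	payload = rest[n : n+int(payloadLen)]
	return headerBytes, payload
}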
Example #15
func Write(conn net.Conn, messageBytes []byte) error {
	messageLen := proto.EncodeVarint(uint64(len(messageBytes)))
	data := append(messageLen, messageBytes...)
	_, err := conn.Write(data)
	return err
}
Example #16
func (c *bucketHashCalculator) appendSize(size int) {
	c.hashingData = append(c.hashingData, proto.EncodeVarint(uint64(size))...)
}
Example #17
func encodeBlockNum(blockNum uint64) []byte {
	return proto.EncodeVarint(blockNum)
}
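The matching decode is a one-liner with proto.DecodeVarint (shown here as an assumed counterpart, ignoring the consumed-byte count):
func decodeBlockNum(blockNumBytes []byte) uint64 {
	blockNum, _ := proto.DecodeVarint(blockNumBytes)
	return blockNum
}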
Example #18
func (key *dataKey) getEncodedBytes() []byte {
	encodedBytes := proto.EncodeVarint(uint64(key.bucketKey.bucketNumber))
	encodedBytes = append(encodedBytes, byte(0))
	encodedBytes = append(encodedBytes, key.compositeKey...)
	return encodedBytes
}
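A hedged sketch of splitting such a key back apart, assuming the same layout as above (varint bucket number, a 0x00 separator byte, then the composite key):
func decodeDataKeyBytes(encoded []byte) (bucketNumber int, compositeKey []byte) {
	num, n := proto.DecodeVarint(encoded)
	// n+1 skips the 0x00 separator written after the bucket number
	return int(num), encoded[n+1:]
}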
Example #19
func encodeNumberForTest(i int) []byte {
	return proto.EncodeVarint(uint64(i))
}
Example #20
func (mgr *blockfileMgr) addBlock(block *common.Block) error {
	blockBytes, info, err := serializeBlock(block)
	if err != nil {
		return fmt.Errorf("Error while serializing block: %s", err)
	}
	blockHash := block.Header.Hash()
	//Get the location / offset where each transaction starts in the block and where the block ends
	txOffsets := info.txOffsets
	currentOffset := mgr.cpInfo.latestFileChunksize
	blockBytesLen := len(blockBytes)
	blockBytesEncodedLen := proto.EncodeVarint(uint64(blockBytesLen))
	totalBytesToAppend := blockBytesLen + len(blockBytesEncodedLen)

	//Determine if we need to start a new file since the size of this block
	//exceeds the amount of space left in the current file
	if currentOffset+totalBytesToAppend > mgr.conf.maxBlockfileSize {
		mgr.moveToNextFile()
		currentOffset = 0
	}
	//append blockBytesEncodedLen to the file
	err = mgr.currentFileWriter.append(blockBytesEncodedLen, false)
	if err == nil {
		//append the actual block bytes to the file
		err = mgr.currentFileWriter.append(blockBytes, true)
	}
	if err != nil {
		truncateErr := mgr.currentFileWriter.truncateFile(mgr.cpInfo.latestFileChunksize)
		if truncateErr != nil {
			panic(fmt.Sprintf("Could not truncate current file to known size after an error during block append: %s", err))
		}
		return fmt.Errorf("Error while appending block to file: %s", err)
	}

	//Update the checkpoint info with the results of adding the new block
	currentCPInfo := mgr.cpInfo
	newCPInfo := &checkpointInfo{
		latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum,
		latestFileChunksize:      currentCPInfo.latestFileChunksize + totalBytesToAppend,
		lastBlockNumber:          block.Header.Number}
	//save the checkpoint information in the database
	if err = mgr.saveCurrentInfo(newCPInfo, false); err != nil {
		truncateErr := mgr.currentFileWriter.truncateFile(currentCPInfo.latestFileChunksize)
		if truncateErr != nil {
			panic(fmt.Sprintf("Error in truncating current file to known size after an error in saving checkpoint info: %s", err))
		}
		return fmt.Errorf("Error while saving current file info to db: %s", err)
	}

	//Index block file location pointer updated with file suffix and offset for the new block
	blockFLP := &fileLocPointer{fileSuffixNum: newCPInfo.latestFileChunkSuffixNum}
	blockFLP.offset = currentOffset
	// shift the txoffset because we prepend length of bytes before block bytes
	for _, txOffset := range txOffsets {
		txOffset.loc.offset += len(blockBytesEncodedLen)
	}
	//save the index in the database
	mgr.index.indexBlock(&blockIdxInfo{
		blockNum: block.Header.Number, blockHash: blockHash,
		flp: blockFLP, txOffsets: txOffsets})

	//update the checkpoint info (for storage) and the blockchain info (for APIs) in the manager
	mgr.updateCheckpoint(newCPInfo)
	mgr.updateBlockchainInfo(blockHash, block)
	return nil
}
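A rough sketch of how a record written by addBlock could be read back given its file offset: decode the varint length first, then read that many block bytes. Illustrative only (the real manager has its own stream reader), with error handling trimmed and os, io, and encoding/binary assumed imported.
func fetchBlockBytes(f *os.File, offset int64) ([]byte, error) {
	lenBuf := make([]byte, binary.MaxVarintLen64)
	if _, err := f.ReadAt(lenBuf, offset); err != nil && err != io.EOF {
		return nil, err
	}
	length, n := proto.DecodeVarint(lenBuf)
	blockBytes := make([]byte, length)
	if _, err := f.ReadAt(blockBytes, offset+int64(n)); err != nil {
		return nil, err
	}
	return blockBytes, nil
}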
Example #21
func toBytes(balance int) []byte {
	return proto.EncodeVarint(uint64(balance))
}
Example #22
// functions for encoding/decoding db keys/values for index data
// encode / decode BlockNumber
func encodeBlockNumber(blockNumber uint64) []byte {
	return proto.EncodeVarint(blockNumber)
}
Example #23
func minimumPossibleDataKeyBytesFor(bucketKey *bucketKey) []byte {
	min := proto.EncodeVarint(uint64(bucketKey.bucketNumber))
	min = append(min, byte(0))
	return min
}