Code example #1
func (cmd *readCommand) parseObject(
	opCount int,
	fieldCount int,
	generation uint32,
	expiration uint32,
) error {
	receiveOffset := 0

	// There can be fields in the response (setname etc).
	// But for now, ignore them. Expose them to the API if needed in the future.
	// Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer)
	if fieldCount > 0 {
		// Just skip over all the fields
		for i := 0; i < fieldCount; i++ {
			// Logger.Debug("%d", receiveOffset)
			fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
			receiveOffset += (4 + fieldSize)
		}
	}

	var rv reflect.Value
	if opCount > 0 {
		rv = reflect.ValueOf(cmd.object)

		if rv.Kind() != reflect.Ptr {
			return errors.New("Invalid type for result object. It should be of type Struct Pointer.")
		}
		rv = rv.Elem()

		if !rv.CanAddr() {
			return errors.New("Invalid type for object. It should be addressable (a pointer)")
		}

		if rv.Kind() != reflect.Struct {
			return errors.New("Invalid type for object. It should be a pointer to a struct.")
		}

		// map tags
		cacheObjectTags(rv)
	}

	for i := 0; i < opCount; i++ {
		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
		particleType := int(cmd.dataBuffer[receiveOffset+5])
		nameSize := int(cmd.dataBuffer[receiveOffset+7])
		name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
		receiveOffset += 4 + 4 + nameSize

		particleBytesSize := int(opSize - (4 + nameSize))
		value, err := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize)
		if err != nil {
			return err
		}
		if err := setObjectField(rv, name, value); err != nil {
			return err
		}

		receiveOffset += particleBytesSize
	}

	return nil
}
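The offsets 5, 7, and 8 used above follow the per-operation layout of the wire format. The standalone sketch below makes that layout explicit; the helper name and the sample particle type are mine, and the layout is only inferred from the arithmetic in these parsers, so treat it as an illustration rather than the client's definition.

package main

import (
	"encoding/binary"
	"fmt"
)

// parseOp mirrors the arithmetic used by parseObject/parseRecord above:
//   bytes 0-3 : opSize, big-endian uint32, counting everything after these 4 bytes
//   byte  5   : particle (bin value) type; bytes 4 and 6 are not read here
//   byte  7   : bin name length
//   bytes 8.. : bin name, followed by opSize-4-nameLen particle bytes
func parseOp(buf []byte, off int) (name string, particle []byte, particleType int, next int) {
	opSize := int(binary.BigEndian.Uint32(buf[off:]))
	particleType = int(buf[off+5])
	nameLen := int(buf[off+7])
	name = string(buf[off+8 : off+8+nameLen])
	particleLen := opSize - 4 - nameLen
	start := off + 8 + nameLen
	return name, buf[start : start+particleLen], particleType, start + particleLen
}

func main() {
	// One operation: opSize = 4+3+5 = 12, hypothetical particle type 3, bin name "bin", 5 value bytes.
	buf := []byte{0, 0, 0, 12, 0, 3, 0, 3, 'b', 'i', 'n', 1, 2, 3, 4, 5}
	name, particle, ptype, next := parseOp(buf, 0)
	fmt.Println(name, particle, ptype, next) // bin [1 2 3 4 5] 3 16
}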
Code example #2
func (cmd *readCommand) parseRecord(
	opCount int,
	fieldCount int,
	generation uint32,
	expiration uint32,
) (*Record, error) {
	var bins BinMap
	receiveOffset := 0

	// There can be fields in the response (setname etc).
	// But for now, ignore them. Expose them to the API if needed in the future.
	// Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer)
	if fieldCount > 0 {
		// Just skip over all the fields
		for i := 0; i < fieldCount; i++ {
			// Logger.Debug("%d", receiveOffset)
			fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
			receiveOffset += (4 + fieldSize)
		}
	}

	if opCount > 0 {
		bins = make(BinMap, opCount)
	}

	for i := 0; i < opCount; i++ {
		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
		particleType := int(cmd.dataBuffer[receiveOffset+5])
		nameSize := int(cmd.dataBuffer[receiveOffset+7])
		name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
		receiveOffset += 4 + 4 + nameSize

		particleBytesSize := int(opSize - (4 + nameSize))
		value, err := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize)
		if err != nil {
			return nil, err
		}
		receiveOffset += particleBytesSize

		// for operate list command results
		if prev, exists := bins[name]; exists {
			if res, ok := prev.([]interface{}); ok {
				// List already exists.  Add to it.
				bins[name] = append(res, value)
			} else {
				// Make a list to store all values.
				bins[name] = []interface{}{prev, value}
			}
		} else {
			bins[name] = value
		}
	}

	return newRecord(cmd.node, cmd.key, bins, generation, expiration), nil
}
Code example #3
// parseRecord parses the bins of a single batch record from the receive buffer
// and returns the resulting Record.
func (cmd *batchCommandGet) parseRecord(key *Key, opCount int, generation int, expiration int) (*Record, error) {
	bins := make(map[string]interface{}, opCount)

	for i := 0; i < opCount; i++ {
		if err := cmd.readBytes(8); err != nil {
			return nil, err
		}
		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
		particleType := int(cmd.dataBuffer[5])
		nameSize := int(cmd.dataBuffer[7])

		if err := cmd.readBytes(nameSize); err != nil {
			return nil, err
		}
		name := string(cmd.dataBuffer[:nameSize])

		particleBytesSize := int(opSize - (4 + nameSize))
		if err := cmd.readBytes(particleBytesSize); err != nil {
			return nil, err
		}
		value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
		if err != nil {
			return nil, err
		}

		bins[name] = value
	}

	return newRecord(cmd.node, key, bins, generation, expiration), nil
}
Code example #4
// Parse all results in the batch.  Add records to shared list.
// If the record was not found, the bins will be nil.
func (cmd *batchCommandGet) parseRecordResults(ifc command, receiveSize int) (bool, error) {
	// Parse each message response and add it to the result array.
	cmd.dataOffset = 0

	for cmd.dataOffset < receiveSize {
		if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
			return false, err
		}
		resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF)

		// The only valid server return codes are "ok" and "not found".
		// If other return codes are received, then abort the batch.
		if resultCode != 0 && resultCode != KEY_NOT_FOUND_ERROR {
			return false, NewAerospikeError(resultCode)
		}

		info3 := int(cmd.dataBuffer[3])

		// If cmd is the end marker of the response, do not proceed further
		if (info3 & _INFO3_LAST) == _INFO3_LAST {
			return false, nil
		}

		generation := int(Buffer.BytesToUint32(cmd.dataBuffer, 6))
		expiration := TTL(int(Buffer.BytesToUint32(cmd.dataBuffer, 10)))
		fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
		opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
		key, err := cmd.parseKey(fieldCount)
		if err != nil {
			return false, err
		}

		offset := cmd.batchNamespace.offsets[cmd.index] //cmd.keyMap[string(key.digest)]
		cmd.index++

		if bytes.Equal(key.digest, cmd.keys[offset].digest) {
			if resultCode == 0 {
				if cmd.records[offset], err = cmd.parseRecord(key, opCount, generation, expiration); err != nil {
					return false, err
				}
			}
		} else {
			Logger.Debug("Unexpected batch key returned: " + string(key.namespace) + "," + Buffer.BytesToHexString(key.digest))
		}
	}
	return true, nil
}
Code example #5
func (cmd *readCommand) parseObject(
	opCount int,
	fieldCount int,
	generation int,
	expiration int,
) error {
	receiveOffset := 0

	// There can be fields in the response (setname etc).
	// But for now, ignore them. Expose them to the API if needed in the future.
	// Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer)
	if fieldCount > 0 {
		// Just skip over all the fields
		for i := 0; i < fieldCount; i++ {
			// Logger.Debug("%d", receiveOffset)
			fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
			receiveOffset += (4 + fieldSize)
		}
	}

	var rv reflect.Value
	if opCount > 0 {
		rv = reflect.ValueOf(cmd.object).Elem()

		// map tags
		cacheObjectTags(rv)
	}

	for i := 0; i < opCount; i++ {
		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
		particleType := int(cmd.dataBuffer[receiveOffset+5])
		nameSize := int(cmd.dataBuffer[receiveOffset+7])
		name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
		receiveOffset += 4 + 4 + nameSize

		particleBytesSize := int(opSize - (4 + nameSize))
		value, err := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize)
		if err != nil {
			return err
		}
		if err := cmd.setObjectField(rv, name, value); err != nil {
			return err
		}

		receiveOffset += particleBytesSize
	}

	return nil
}
Code example #6
func (cmd *readCommand) parseRecord(
	opCount int,
	fieldCount int,
	generation int,
	expiration int,
) (*Record, error) {
	var bins BinMap
	receiveOffset := 0

	// There can be fields in the response (setname etc).
	// But for now, ignore them. Expose them to the API if needed in the future.
	// Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer)
	if fieldCount > 0 {
		// Just skip over all the fields
		for i := 0; i < fieldCount; i++ {
			// Logger.Debug("%d", receiveOffset)
			fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
			receiveOffset += (4 + fieldSize)
		}
	}

	for i := 0; i < opCount; i++ {
		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
		particleType := int(cmd.dataBuffer[receiveOffset+5])
		nameSize := int(cmd.dataBuffer[receiveOffset+7])
		name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
		receiveOffset += 4 + 4 + nameSize

		particleBytesSize := int(opSize - (4 + nameSize))
		value, err := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize)
		if err != nil {
			return nil, err
		}
		receiveOffset += particleBytesSize

		if bins == nil {
			bins = make(BinMap, opCount)
		}
		bins[name] = value
	}

	return newRecord(cmd.node, cmd.key, bins, generation, expiration), nil
}
Code example #7
func (cmd *serverCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) {
	// Server commands (Query/Execute UDF) should only send back a return code.
	// Keep parsing logic to empty socket buffer just in case server does
	// send records back.
	cmd.dataOffset = 0

	for cmd.dataOffset < receiveSize {
		if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
			return false, err
		}
		resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF)

		if resultCode != 0 {
			if resultCode == KEY_NOT_FOUND_ERROR {
				return false, nil
			}
			return false, NewAerospikeError(resultCode)
		}

		info3 := int(cmd.dataBuffer[3])

		// If cmd is the end marker of the response, do not proceed further
		if (info3 & _INFO3_LAST) == _INFO3_LAST {
			return false, nil
		}

		fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
		opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))

		if _, err := cmd.parseKey(fieldCount); err != nil {
			return false, err
		}

		for i := 0; i < opCount; i++ {
			if err := cmd.readBytes(8); err != nil {
				return false, err
			}
			opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
			nameSize := int(cmd.dataBuffer[7])

			if err := cmd.readBytes(nameSize); err != nil {
				return false, err
			}

			particleBytesSize := int((opSize - (4 + nameSize)))
			if err := cmd.readBytes(particleBytesSize); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
Code example #8
func (cmd *readHeaderCommand) parseResult(ifc command, conn *Connection) error {
	// Read header.
	if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
		return err
	}

	resultCode := cmd.dataBuffer[13] & 0xFF

	if resultCode == 0 {
		generation := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
		expiration := TTL(int(Buffer.BytesToUint32(cmd.dataBuffer, 18)))
		cmd.record = newRecord(cmd.node, cmd.key, nil, generation, expiration)
	} else {
		if ResultCode(resultCode) == KEY_NOT_FOUND_ERROR {
			cmd.record = nil
		} else {
			return NewAerospikeError(ResultCode(resultCode))
		}
	}
	if err := cmd.emptySocket(conn); err != nil {
		return err
	}
	return nil
}
Code example #9
func (cmd *baseMultiCommand) parseObject(
	obj reflect.Value,
	opCount int,
	fieldCount int,
	generation uint32,
	expiration uint32,
) error {
	for i := 0; i < opCount; i++ {
		if err := cmd.readBytes(8); err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return err
		}

		opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
		particleType := int(cmd.dataBuffer[5])
		nameSize := int(cmd.dataBuffer[7])

		if err := cmd.readBytes(nameSize); err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return err
		}
		name := string(cmd.dataBuffer[:nameSize])

		particleBytesSize := int((opSize - (4 + nameSize)))
		if err := cmd.readBytes(particleBytesSize); err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return err
		}
		value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
		if err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return err
		}

		iobj := reflect.Indirect(obj)
		for iobj.Kind() == reflect.Ptr {
			iobj = reflect.Indirect(iobj)
		}

		if err := setObjectField(cmd.resObjMappings, iobj, name, value); err != nil {
			return err
		}
	}

	return nil
}
Code example #10
func (upckr *unpacker) UnpackMap() (map[interface{}]interface{}, error) {
	if upckr.length <= 0 {
		return nil, nil
	}

	theType := upckr.buffer[upckr.offset] & 0xff
	upckr.offset++
	var count int

	if (theType & 0xf0) == 0x80 {
		count = int(theType & 0x0f)
	} else if theType == 0xde {
		count = int(Buffer.BytesToUint16(upckr.buffer, upckr.offset))
		upckr.offset += 2
	} else if theType == 0xdf {
		count = int(Buffer.BytesToUint32(upckr.buffer, upckr.offset))
		upckr.offset += 4
	} else {
		return make(map[interface{}]interface{}), nil
	}
	return upckr.unpackMap(count)
}
Code example #11
func (upckr *unpacker) UnpackList() ([]interface{}, error) {
	if upckr.length <= 0 {
		return nil, nil
	}

	theType := upckr.buffer[upckr.offset] & 0xff
	upckr.offset++
	var count int

	if (theType & 0xf0) == 0x90 {
		count = int(theType & 0x0f)
	} else if theType == 0xdc {
		count = int(Buffer.BytesToUint16(upckr.buffer, upckr.offset))
		upckr.offset += 2
	} else if theType == 0xdd {
		count = int(Buffer.BytesToUint32(upckr.buffer, upckr.offset))
		upckr.offset += 4
	} else {
		return nil, nil
	}

	return upckr.unpackList(count)
}
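UnpackMap and UnpackList branch on the MessagePack header byte: 0x80-0x8f (fixmap) and 0x90-0x9f (fixarray) carry the element count in the low nibble, while 0xde/0xdc are followed by a 16-bit count and 0xdf/0xdd by a 32-bit count. A small self-contained sketch of the list case follows; the helper name is mine, not the client's.

package main

import (
	"encoding/binary"
	"fmt"
)

// listHeader returns the element count and header length for a msgpack list,
// mirroring the branches in UnpackList above.
func listHeader(buf []byte) (count, headerLen int, ok bool) {
	switch t := buf[0]; {
	case t&0xf0 == 0x90: // fixarray: count in the low 4 bits
		return int(t & 0x0f), 1, true
	case t == 0xdc: // array 16: big-endian uint16 count follows
		return int(binary.BigEndian.Uint16(buf[1:3])), 3, true
	case t == 0xdd: // array 32: big-endian uint32 count follows
		return int(binary.BigEndian.Uint32(buf[1:5])), 5, true
	}
	return 0, 0, false
}

func main() {
	fmt.Println(listHeader([]byte{0x93, 1, 2, 3}))             // 3 1 true
	fmt.Println(listHeader([]byte{0xdc, 0x01, 0x00}))          // 256 3 true
	fmt.Println(listHeader([]byte{0xdd, 0, 0, 0x01, 0x00, 0})) // 256 5 true
}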
Code example #12
func (cmd *baseMultiCommand) parseKey(fieldCount int) (*Key, error) {
	var digest []byte
	var namespace, setName string
	var userKey Value
	var err error

	for i := 0; i < fieldCount; i++ {
		if err = cmd.readBytes(4); err != nil {
			return nil, err
		}

		fieldlen := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
		if err = cmd.readBytes(fieldlen); err != nil {
			return nil, err
		}

		fieldtype := FieldType(cmd.dataBuffer[0])
		size := fieldlen - 1

		switch fieldtype {
		case DIGEST_RIPE:
			digest = make([]byte, size, size)
			copy(digest, cmd.dataBuffer[1:size+1])
		case NAMESPACE:
			namespace = string(cmd.dataBuffer[1 : size+1])
		case TABLE:
			setName = string(cmd.dataBuffer[1 : size+1])
		case KEY:
			if userKey, err = bytesToKeyValue(int(cmd.dataBuffer[1]), cmd.dataBuffer, 2, size-1); err != nil {
				return nil, err
			}
		}
	}

	return &Key{namespace: namespace, setName: setName, digest: digest, userKey: userKey}, nil
}
Code example #13
func (cmd *scanCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) {
	// Read/parse remaining message bytes one record at a time.
	cmd.dataOffset = 0

	for cmd.dataOffset < receiveSize {
		if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}
		resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF)

		if resultCode != 0 {
			if resultCode == KEY_NOT_FOUND_ERROR {
				// consume the rest of the input buffer from the socket
				if cmd.dataOffset < receiveSize {
					if err := cmd.readBytes(receiveSize - cmd.dataOffset); err != nil {
						return false, err
					}
				}
				return false, nil
			}
			err := NewAerospikeError(resultCode)
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		info3 := int(cmd.dataBuffer[3])

		// If cmd is the end marker of the response, do not proceed further
		if (info3 & _INFO3_LAST) == _INFO3_LAST {
			return false, nil
		}

		generation := int(Buffer.BytesToUint32(cmd.dataBuffer, 6))
		expiration := TTL(int(Buffer.BytesToUint32(cmd.dataBuffer, 10)))
		fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
		opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))

		key, err := cmd.parseKey(fieldCount)
		if err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		// Parse bins.
		var bins BinMap

		for i := 0; i < opCount; i++ {
			if err := cmd.readBytes(8); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}

			opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
			particleType := int(cmd.dataBuffer[5])
			nameSize := int(cmd.dataBuffer[7])

			if err := cmd.readBytes(nameSize); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}
			name := string(cmd.dataBuffer[:nameSize])

			particleBytesSize := int(opSize - (4 + nameSize))
			if err := cmd.readBytes(particleBytesSize); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}

			value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
			if err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}

			if bins == nil {
				bins = BinMap{}
			}
			bins[name] = value
		}

		// If the channel is full and it blocks, we don't want this command to
		// block forever, or panic in case the channel is closed in the meantime.
		select {
		// send back the result on the async channel
		case cmd.recordset.Records <- newRecord(cmd.node, key, bins, generation, expiration):
		case <-cmd.recordset.cancelled:
			return false, NewAerospikeError(SCAN_TERMINATED)
		}
	}

	return true, nil
}
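The select at the end of the loop is the usual Go pattern for handing results to a consumer without blocking forever: if the Records channel is full the send waits, but a close of the cancelled channel lets the command bail out instead. A generic, self-contained sketch of the same idea (not taken from the client):

package main

import "fmt"

// sendOrCancel delivers v unless the cancel channel is closed first.
func sendOrCancel(results chan<- int, cancelled <-chan struct{}, v int) bool {
	select {
	case results <- v:
		return true
	case <-cancelled:
		return false
	}
}

func main() {
	results := make(chan int, 1)
	cancelled := make(chan struct{})

	fmt.Println(sendOrCancel(results, cancelled, 42)) // true: buffer has room

	close(cancelled)
	fmt.Println(sendOrCancel(results, cancelled, 43)) // false: buffer is full, cancellation wins
}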
Code example #14
func (cmd *baseMultiCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) {
	// Read/parse remaining message bytes one record at a time.
	cmd.dataOffset = 0

	for cmd.dataOffset < receiveSize {
		if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}
		resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF)

		if resultCode != 0 {
			if resultCode == KEY_NOT_FOUND_ERROR {
				// consume the rest of the input buffer from the socket
				if cmd.dataOffset < receiveSize {
					if err := cmd.readBytes(receiveSize - cmd.dataOffset); err != nil {
						cmd.recordset.Errors <- newNodeError(cmd.node, err)
						return false, err
					}
				}
				return false, nil
			}
			err := NewAerospikeError(resultCode)
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		info3 := int(cmd.dataBuffer[3])

		// If cmd is the end marker of the response, do not proceed further
		if (info3 & _INFO3_LAST) == _INFO3_LAST {
			return false, nil
		}

		generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
		expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
		fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
		opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))

		key, err := cmd.parseKey(fieldCount)
		if err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		// if there is a recordset, process the record traditionally
		// otherwise, it is supposed to be a record channel
		if cmd.selectCases == nil {
			// Parse bins.
			var bins BinMap

			for i := 0; i < opCount; i++ {
				if err := cmd.readBytes(8); err != nil {
					cmd.recordset.Errors <- newNodeError(cmd.node, err)
					return false, err
				}

				opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
				particleType := int(cmd.dataBuffer[5])
				nameSize := int(cmd.dataBuffer[7])

				if err := cmd.readBytes(nameSize); err != nil {
					cmd.recordset.Errors <- newNodeError(cmd.node, err)
					return false, err
				}
				name := string(cmd.dataBuffer[:nameSize])

				particleBytesSize := int((opSize - (4 + nameSize)))
				if err = cmd.readBytes(particleBytesSize); err != nil {
					cmd.recordset.Errors <- newNodeError(cmd.node, err)
					return false, err
				}
				value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
				if err != nil {
					cmd.recordset.Errors <- newNodeError(cmd.node, err)
					return false, err
				}

				if bins == nil {
					bins = make(BinMap, opCount)
				}
				bins[name] = value
			}

			// If the channel is full and it blocks, we don't want this command to
			// block forever, or panic in case the channel is closed in the meantime.
			select {
			// send back the result on the async channel
			case cmd.recordset.Records <- newRecord(cmd.node, key, bins, generation, expiration):
			case <-cmd.recordset.cancelled:
				return false, NewAerospikeError(cmd.terminationErrorType)
			}
		} else {
			obj := reflect.New(cmd.resObjType)
			if err := cmd.parseObject(obj, opCount, fieldCount, generation, expiration); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}

			// set the object to send
			cmd.selectCases[0].Send = obj

			chosen, _, _ := reflect.Select(cmd.selectCases)
			switch chosen {
			case 0: // object sent
			case 1: // cancel channel is closed
				return false, NewAerospikeError(cmd.terminationErrorType)
			}
		}
	}

	return true, nil
}
Code example #15
func (cmd *readCommand) parseResult(ifc command, conn *Connection) error {
	// Read header.
	_, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE))
	if err != nil {
		Logger.Warn("parse result error: " + err.Error())
		return err
	}

	// Parse the header fields we need from the fixed-offset message header.
	sz := Buffer.BytesToInt64(cmd.dataBuffer, 0)
	headerLength := int(cmd.dataBuffer[8])
	resultCode := ResultCode(cmd.dataBuffer[13] & 0xFF)
	generation := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
	expiration := TTL(int(Buffer.BytesToUint32(cmd.dataBuffer, 18)))
	fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 26)) // almost certainly 0
	opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 28))
	receiveSize := int((sz & 0xFFFFFFFFFFFF) - int64(headerLength))

	// Read remaining message bytes.
	if receiveSize > 0 {
		if err = cmd.sizeBufferSz(receiveSize); err != nil {
			return err
		}
		_, err = conn.Read(cmd.dataBuffer, receiveSize)
		if err != nil {
			Logger.Warn("parse result error: " + err.Error())
			return err
		}

	}

	if resultCode != 0 {
		if resultCode == KEY_NOT_FOUND_ERROR && cmd.object == nil {
			return nil
		}

		if resultCode == UDF_BAD_RESPONSE {
			cmd.record, _ = cmd.parseRecord(opCount, fieldCount, generation, expiration)
			err := cmd.handleUdfError(resultCode)
			Logger.Warn("UDF execution error: " + err.Error())
			return err
		}

		return NewAerospikeError(resultCode)
	}

	if cmd.object == nil {
		if opCount == 0 {
			// data Bin was not returned.
			cmd.record = newRecord(cmd.node, cmd.key, nil, generation, expiration)
			return nil
		}

		cmd.record, err = cmd.parseRecord(opCount, fieldCount, generation, expiration)
		if err != nil {
			return err
		}
	} else {
		if err := cmd.parseObject(opCount, fieldCount, generation, expiration); err != nil {
			return err
		}
	}

	return nil
}
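The mask in receiveSize := int((sz & 0xFFFFFFFFFFFF) - int64(headerLength)) keeps only the low 48 bits of the first 8 header bytes; the high bits carry other header metadata (protocol version and type in the real protocol, which this sketch only assumes). A minimal illustration of the arithmetic, with hypothetical values:

package main

import "fmt"

func main() {
	// Hypothetical first 8 header bytes: two high bytes of metadata, low 48 bits = message size.
	var header int64 = (2 << 56) | (3 << 48) | 1234

	size := header & 0xFFFFFFFFFFFF // keep the low 48 bits only
	fmt.Println(size)               // 1234

	headerLength := int64(22)             // hypothetical; the snippet above reads it from byte 8
	fmt.Println(int(size - headerLength)) // 1212 bytes still to read after the header
}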
Code example #16
func (upckr *unpacker) unpackObject(isMapKey bool) (interface{}, error) {
	theType := upckr.buffer[upckr.offset] & 0xff
	upckr.offset++

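	// MessagePack format bytes (per the msgpack spec): 0xc0 nil, 0xc2/0xc3 bool,
	// 0xca/0xcb float32/float64, 0xcc-0xcf unsigned ints, 0xd0-0xd3 signed ints,
	// 0xda/0xdb raw bytes/strings, 0xdc/0xdd lists, 0xde/0xdf maps. The default
	// branch covers the single-byte fixstr/fixmap/fixarray and fixint encodings.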
	switch theType {
	case 0xc0:
		return nil, nil

	case 0xc3:
		return true, nil

	case 0xc2:
		return false, nil

	case 0xca:
		val := Buffer.BytesToFloat32(upckr.buffer, upckr.offset)
		upckr.offset += 4
		return val, nil

	case 0xcb:
		val := Buffer.BytesToFloat64(upckr.buffer, upckr.offset)
		upckr.offset += 8
		return val, nil

	case 0xcc:
		r := upckr.buffer[upckr.offset] & 0xff
		upckr.offset++

		return int(r), nil

	case 0xcd:
		val := uint16(Buffer.BytesToInt16(upckr.buffer, upckr.offset))
		upckr.offset += 2
		return int(val), nil

	case 0xce:
		val := uint32(Buffer.BytesToInt32(upckr.buffer, upckr.offset))
		upckr.offset += 4

		if Buffer.Arch64Bits {
			return int(val), nil
		}
		return int64(val), nil

	case 0xcf:
		val := Buffer.BytesToInt64(upckr.buffer, upckr.offset)
		upckr.offset += 8
		return uint64(val), nil

	case 0xd0:
		r := int8(upckr.buffer[upckr.offset])
		upckr.offset++
		return int(r), nil

	case 0xd1:
		val := Buffer.BytesToInt16(upckr.buffer, upckr.offset)
		upckr.offset += 2
		return int(val), nil

	case 0xd2:
		val := Buffer.BytesToInt32(upckr.buffer, upckr.offset)
		upckr.offset += 4
		return int(val), nil

	case 0xd3:
		val := Buffer.BytesToInt64(upckr.buffer, upckr.offset)
		upckr.offset += 8
		if Buffer.Arch64Bits {
			return int(val), nil
		}
		return int64(val), nil

	case 0xda:
		count := int(Buffer.BytesToUint16(upckr.buffer, upckr.offset))
		upckr.offset += 2
		return upckr.unpackBlob(count, isMapKey)

	case 0xdb:
		count := int(Buffer.BytesToUint32(upckr.buffer, upckr.offset))
		upckr.offset += 4
		return upckr.unpackBlob(count, isMapKey)

	case 0xdc:
		count := int(Buffer.BytesToUint16(upckr.buffer, upckr.offset))
		upckr.offset += 2
		return upckr.unpackList(count)

	case 0xdd:
		count := int(Buffer.BytesToUint32(upckr.buffer, upckr.offset))
		upckr.offset += 4
		return upckr.unpackList(count)

	case 0xde:
		count := int(Buffer.BytesToUint16(upckr.buffer, upckr.offset))
		upckr.offset += 2
		return upckr.unpackMap(count)

	case 0xdf:
		count := int(Buffer.BytesToUint32(upckr.buffer, upckr.offset))
		upckr.offset += 4
		return upckr.unpackMap(count)

	default:
		if (theType & 0xe0) == 0xa0 {
			return upckr.unpackBlob(int(theType&0x1f), isMapKey)
		}

		if (theType & 0xf0) == 0x80 {
			return upckr.unpackMap(int(theType & 0x0f))
		}

		if (theType & 0xf0) == 0x90 {
			count := int(theType & 0x0f)
			return upckr.unpackList(count)
		}

		if theType < 0x80 {
			return int(theType), nil
		}

		if theType >= 0xe0 {
			return int(int(theType) - 0xe0 - 32), nil
		}
	}

	return nil, NewAerospikeError(SERIALIZE_ERROR)
}
Code example #17
func (cmd *queryAggregateCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) {
	// Read/parse remaining message bytes one record at a time.
	cmd.dataOffset = 0

	for cmd.dataOffset < receiveSize {
		if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}
		resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF)

		if resultCode != 0 {
			if resultCode == KEY_NOT_FOUND_ERROR {
				// consume the rest of the input buffer from the socket
				if cmd.dataOffset < receiveSize {
					if err := cmd.readBytes(receiveSize - cmd.dataOffset); err != nil {
						cmd.recordset.Errors <- newNodeError(cmd.node, err)
						return false, err
					}
				}
				return false, nil
			}
			err := NewAerospikeError(resultCode)
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		info3 := int(cmd.dataBuffer[3])

		// If cmd is the end marker of the response, do not proceed further
		if (info3 & _INFO3_LAST) == _INFO3_LAST {
			return false, nil
		}

		// generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
		// expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
		fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
		opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))

		if opCount != 1 {
			err := fmt.Errorf("Query aggregate command expects exactly one bin. Received: %d", opCount)
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		_, err := cmd.parseKey(fieldCount)
		if err != nil {
			cmd.recordset.Errors <- newNodeError(cmd.node, err)
			return false, err
		}

		// if there is a recordset, process the record traditionally
		// otherwise, it is supposed to be a record channel

		// Parse bins.
		var bins BinMap

		for i := 0; i < opCount; i++ {
			if err := cmd.readBytes(8); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}

			opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
			particleType := int(cmd.dataBuffer[5])
			nameSize := int(cmd.dataBuffer[7])

			if err := cmd.readBytes(nameSize); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}
			name := string(cmd.dataBuffer[:nameSize])

			particleBytesSize := int((opSize - (4 + nameSize)))
			if err = cmd.readBytes(particleBytesSize); err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}
			value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
			if err != nil {
				cmd.recordset.Errors <- newNodeError(cmd.node, err)
				return false, err
			}

			if bins == nil {
				bins = make(BinMap, opCount)
			}
			bins[name] = value
		}

		recs, exists := bins["SUCCESS"]
		if !exists {
			if errStr, exists := bins["FAILURE"]; exists {
				err = NewAerospikeError(QUERY_GENERIC, errStr.(string))
			} else {
				err = NewAerospikeError(QUERY_GENERIC, fmt.Sprintf("QueryAggregate's expected result was not returned. Received: %v", bins))
			}
			cmd.recordset.Errors <- err
			return false, err
		}

		// If the channel is full and it blocks, we don't want this command to
		// block forever, or panic in case the channel is closed in the meantime.
		select {
		// send back the result on the async channel
		case cmd.inputChan <- recs:
		case <-cmd.recordset.cancelled:
			return false, NewAerospikeError(QUERY_TERMINATED)
		}
	}

	return true, nil
}