Ejemplo n.º 1
0
// directoryGet looks up a single entry (file or sub-directory) called
// filename inside the directory identified by dirHash, using the global
// directory bucket. found reports whether the entry exists; entry is only
// non-nil when found is true and status is ok.
func directoryGet(ctx *restContext, dirHash []byte, filename string) (
	entry *directoryEntry,
	found bool,
	status retcode.Status) {

	request := &operations.BucketGetIn{
		BucketOperation: operations.BucketOperation{
			BucketId: ctx.caprica.GlobalDirectoryId(),
		},
		Key: [][]byte{dirHash, []byte(filename)},
	}
	var out interface{}
	out, status = ctx.execute(request, operations.BucketGet)
	// Bail out on failure or when the entry simply does not exist
	// (found stays false in both cases).
	if !status.IsOk() || status.Code == retcode.OkNotFound {
		return
	}
	found = true

	value := out.(operations.BucketGetOut).Value.([][]byte)

	// Value element 0 always carries the cbor-encoded target hash.
	var targetHash []byte
	if err := encoding.Cbor().Decode(value[0], &targetHash); err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorServer, "Unable to decode hash")
		return
	}

	entry = &directoryEntry{targetHash: targetHash}
	if len(value) == 1 {
		// A single value element means the entry is a directory.
		entry.isDirectory = true
	} else {
		// Files carry their mime type as second value element.
		var mime string
		if err := encoding.Cbor().Decode(value[1], &mime); err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer, "Unable to decode mime")
			return
		}
		entry.mimeType = mime
	}

	status = retcode.NewStatusOk()
	return
}
Ejemplo n.º 2
0
// indexDirectory writes one lookup row per file entry of the decoded
// directory into the backing store: key = (directory hash, entry name),
// value = [cbor(target hash)] for sub-directories or
// [cbor(target hash), cbor(mime type)] for files.
// On any failure ret carries the error status; on success ret stays nil —
// presumably the caller treats a nil return as success (TODO confirm).
// NOTE(review): the operation and state parameters are not used here.
func (self *directoryBucketInstance) indexDirectory(operation *operations.Put,
	state *minos.OperationState,
	hash []byte,
	directory *directoryStruct) (ret bucket.BucketReturn) {

	var status retcode.Status

	mimeTypes := directory.mimeEntries
	for _, dirEntry := range directory.fileEntries {
		// Lookup key: directory hash followed by the plain entry name.
		key := types.Key{hash, []byte(dirEntry.name)}

		cborTargetHash, err := encoding.Cbor().Encode(dirEntry.hashPointer)
		if err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer, "Error cbor encoding: %v",
				err)
			ret = &operations.GenericReturn{status}
			return
		}

		var value types.Array
		if dirEntry.mimePointer == -1 {
			// It's a directory
			value = types.Array{cborTargetHash}
		} else {
			// File entry: resolve the mime type via its index into
			// the directory's mime table and store it cbor-encoded.
			mimeEntry := mimeTypes[dirEntry.mimePointer]
			cborMimeType, err := encoding.Cbor().Encode(mimeEntry.typeName)
			if err != nil {
				status = retcode.NewStatusFmt(retcode.ErrorServer, "Error cbor encoding: %v",
					err)
				ret = &operations.GenericReturn{status}
				return
			}
			value = types.Array{cborTargetHash, cborMimeType}
		}

		opPut := &operations.Put{
			Key:   key,
			Value: value,
		}
		putReturn := self.cstore.Op_put(opPut)
		if !putReturn.GetCode().IsOk() {
			// Error put-ing
			ret = &operations.GenericReturn{putReturn.GetStatus()}
			return
		}
	}
	return
}
Ejemplo n.º 3
0
// createScanEntry builds one scan result entry for the given metadata key,
// honoring the scan operation's return configuration (values are only
// fetched when the caller requested them).
func createScanEntry(key string,
	operation *operations.Scan,
	gettersetter metadata.GetterSetter) (
	entry operations.ScanEntry, status retcode.Status) {

	// Only hit the store for the value when it was actually requested.
	var value []byte
	if operation.ReturnConfig.ReturnValues {
		var err error
		if value, err = gettersetter.Get(key); err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer, "Unable to get the metadata "+
				"key value while scanning: %v", err)
			return
		}
	}

	// Let the shared helper shape the entry according to the return config.
	returnconfig.ProcessScanEntry(&operation.ReturnConfig,
		types.Key{[]byte(key)},
		types.Array{value},
		hashes.Hashes{},
		&entry)
	return
}
Ejemplo n.º 4
0
// ReadBucketId extracts a bucket id (base32 without padding) stored under
// key in the given values. When optional is true and no value is present,
// the zero bucket id is returned together with the zero status.
func (self *restContext) ReadBucketId(value *httpserver.Values, key string, optional bool) (
	bucketId bucket.Id,
	bucketIdAsString string,
	status retcode.Status) {

	if optional {
		// Absent optional value: return zero id and zero status.
		if bucketIdAsString = value.OptionalString(key); bucketIdAsString == "" {
			return
		}
	} else {
		if bucketIdAsString, status = value.NonEmptyString(key); !status.IsOk() {
			return
		}
	}

	decoded, err := encoding.Base32Decode(bucketIdAsString)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Invalid bucket id given. The id "+
			"has to be base32 without padding. Err: %v", err)
		return
	}
	bucketId = bucket.Id(decoded)
	status = retcode.NewStatusOk()
	return
}
Ejemplo n.º 5
0
// directoryOutputAsJson is meant to render a directory listing as json.
// It is not implemented yet and currently only reports a placeholder
// status to the client.
func directoryOutputAsJson(ctx *restContext, entry *directoryEntry) {
	//ctx.WriteETag(string(encoding.Base32Encode(entry.targetHash)))
	//ctx.WriteContentType("application/json")
	status := retcode.NewStatusFmt(retcode.Ok, "TODO: Return directory as json")
	ctx.WriteErrorStatus(status)
}
Ejemplo n.º 6
0
// assertNotConst rejects keys carrying the const prefix: such keys are
// immutable after bucket creation. For mutable keys the zero status
// (treated as ok by callers) is returned.
func assertNotConst(key string) (status retcode.Status) {
	if !strings.HasPrefix(key, metadata.ConstPrefix) {
		return
	}
	status = retcode.NewStatusFmt(retcode.ErrorClient, "The given key cannot be modified "+
		"after the creation of the bucket. The key is prefixed with '%v'. Given key is '%v'.",
		metadata.ConstPrefix, key)
	return
}
Ejemplo n.º 7
0
// NonEmptyString reads the value stored under key and returns an
// ErrorInputFormat status when it is empty; otherwise an ok status.
func (self *Values) NonEmptyString(key string) (value string, status retcode.Status) {
	value = self.valueReader(key)
	if value == "" {
		// Bug fix: the format string used to contain two %v verbs but only
		// one argument, producing a "%!v(MISSING)" artifact in the message.
		status = retcode.NewStatusFmt(retcode.ErrorInputFormat,
			"Expecting %v to have a non-empty string - but string is empty.", key)
		return
	}
	status = retcode.NewStatusOk()
	return
}
Ejemplo n.º 8
0
// extractMetadataKey validates that the supplied key consists of exactly
// one element and returns that element as a string. On success the zero
// status (treated as ok by callers) is returned.
func extractMetadataKey(key types.Key) (metadataKey string, status retcode.Status) {
	if len(key) == 1 {
		metadataKey = string(key[0])
		return
	}
	status = retcode.NewStatusFmt(retcode.ErrorClient,
		"Operation for metadata take exactly _ONE_ key, you supplied %v key elements.",
		len(key))
	return
}
Ejemplo n.º 9
0
// directoryGet serves a directory GET request: it decodes the base32 root
// hash from the URL, walks the optional sub-path, and either renders the
// resulting directory as json or redirects file entries to the blob getter.
func (self *restServer) directoryGet(ctx *restContext) {
	var status retcode.Status

	hashAsString, status := ctx.UrlParams.NonEmptyString("hash")
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}
	hash, err := encoding.Base32Decode(hashAsString)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Hash does not seem to be a base32 hash")
		ctx.WriteErrorStatus(status)
		return
	}

	filenames := directorySubPath(ctx.UrlParams, ctx)

	curEntry := &directoryEntry{
		isDirectory: true,
		targetHash:  hash,
	}
	finalEntry, found, status := directoryGetMultiple(ctx, curEntry, filenames)
	// Bug fix: previously a failing status was silently replaced by
	// OkNotFound. Report real lookup errors as-is.
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}
	if !found {
		status = retcode.NewStatusFmt(retcode.OkNotFound, "Entry not found")
		ctx.WriteErrorStatus(status)
		return
	}

	if finalEntry.isDirectory {
		// Output all files from directory as json
		directoryOutputAsJson(ctx, finalEntry)
	} else {
		// Redirect to blobget
		hash := finalEntry.targetHash
		hashAsString := encoding.Base32Encode(hash)
		mime := finalEntry.mimeType
		self.blobGetWithParams(ctx, hash, hashAsString, nil, mime)
	}
}
Ejemplo n.º 10
0
// ReadShaHashAsBinary reads a base32-encoded hash (without padding) stored
// under key and returns it in both binary and string form. The ok status
// from the non-empty-string check is passed through on success.
func (self *restContext) ReadShaHashAsBinary(value *httpserver.Values, key string) (
	hashBytes []byte,
	hashString string,
	status retcode.Status) {

	if hashString, status = value.NonEmptyString(key); !status.IsOk() {
		return
	}
	decoded, err := encoding.Base32Decode(hashString)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Invalid hash given. The hash "+
			"has to be base32 without padding. Err: %v", err)
		return
	}
	hashBytes = decoded
	return
}
Ejemplo n.º 11
0
// Perform dispatches the operation contained in state to the matching
// metadata instance handler. Only Put, Get and Scan are supported; any
// other operation yields an ErrorOperationNotSupported status.
func (self *metadataStruct) Perform(state *minos.OperationState) (
	bucketReturn ares.Return) {

	instance, ret := self.createInstance(state)
	if !ret.IsOk() {
		return &bucket.ReturnGeneric{ret.GetStatus()}
	}

	switch op := state.Operation.(type) {
	case *operations.Put:
		return instance.opPut(op, state)
	case *operations.Get:
		return instance.opGet(op, state)
	case *operations.Scan:
		return instance.opScan(op, state)
	}

	// Does not support other operations
	status := retcode.NewStatusFmt(retcode.ErrorOperationNotSupported,
		"Metadata does not support other operations")
	return &bucket.ReturnGeneric{status}
}
Ejemplo n.º 12
0
// readDirectory resolves the directory blob referenced by the put
// operation: the single key element must equal putIndexKey and the single
// value element must be the cbor-encoded hash of the directory blob. The
// blob is then streamed from the backend (bounded by
// directoryAboutMaxBytesLenOfDirectory) and decoded into a directoryStruct.
// On failure ret carries the error; on success ret stays nil and directory
// plus hash are set — callers presumably treat nil ret as success.
func (self *directoryBucketInstance) readDirectory(operation *operations.Put,
	state *minos.OperationState) (directory *directoryStruct, hash []byte, ret bucket.BucketReturn) {
	var status retcode.Status

	key := operation.Key
	if len(key) != 1 || string(key[0]) != putIndexKey {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Expecting exactly one key "+
			"element called %v",
			putIndexKey)
		ret = &operations.GenericReturn{status}
		return
	}

	value := operation.Value
	if len(value) != 1 {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Expecting exactly one value "+
			"element containig the cbor hash, have %v elements",
			len(value))
		ret = &operations.GenericReturn{status}
		return
	}

	err := encoding.Cbor().Decode(value[0], &hash)
	if err != nil {
		// Bug fix: the format string previously had no verb for err, so the
		// decode error was dropped from the message.
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Error cbor decoding the "+
			"supplied hash in value index 0 (should be cbor encoded 256 bit hash): %v",
			err)
		ret = &operations.GenericReturn{status}
		return
	}

	blobReader := &blobstore.Reader{
		Dispatcher: state.Dispatcher,
		Context:    state.Context,
		BucketId:   self.backendId,
		Hash:       hash,
	}

	encodedDirByteWriter := &bytes.Buffer{}
	entireLength, tooLong, readerStatus := blobReader.Read(encodedDirByteWriter,
		directoryAboutMaxBytesLenOfDirectory)
	// Error or not found
	if !readerStatus.IsOk() || readerStatus.Code == retcode.OkNotFound {
		ret = &operations.GenericReturn{readerStatus}
		return
	}
	if tooLong {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The supplied hash "+
			"is a binary entry that looks to large for a directory entry. It has %v bytes "+
			" - a directory cannot be larger than about %v bytes.",
			entireLength, directoryAboutMaxBytesLenOfDirectory)
		ret = &operations.GenericReturn{status}
		return
	}

	directoryNonPtr, err := decodeToDirectoryStruct(encodedDirByteWriter.Bytes())
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The supplied hash "+
			"is a binary that seems to be an invalid directory: %v",
			err)
		ret = &operations.GenericReturn{status}
		return
	}
	directory = &directoryNonPtr

	return
}
Ejemplo n.º 13
0
// opPut writes (or removes) a single metadata value. The value elements
// determine the mode: 0 = remove, 1 = set, 2 = set with optimistic locking
// (element 1 is the expected current value). The change is validated either
// by the system (system keys) or by the bucket type, then performed under a
// per-bucket write lock.
func (self *metadataInstance) opPut(operation *operations.Put,
	state *minos.OperationState) (bucketReturn ares.Return) {

	key, status := extractMetadataKey(operation.Key)
	if !status.IsOk() {
		return &bucket.ReturnGeneric{status}
	}

	// Const-prefixed keys are immutable after bucket creation.
	status = assertNotConst(key)
	if !status.IsOk() {
		return &bucket.ReturnGeneric{status}
	}

	var valueToSet []byte
	var existingValue []byte
	var optimisticLocking bool

	numberOfValueElements := len(operation.Value)
	switch numberOfValueElements {
	case 0:
		optimisticLocking = false
		valueToSet = nil
	case 1:
		optimisticLocking = false
		valueToSet = operation.Value[0]
	case 2:
		optimisticLocking = true
		valueToSet = operation.Value[0]
		existingValue = operation.Value[1]
	default:
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Expecting 0 value elements (remove), one "+
			"value element (the new value to set) or two value elements (the value to set and the existing "+
			"Value for optimistic locking. You've supplied %v value elements.",
			numberOfValueElements)
		return &bucket.ReturnGeneric{status}
	}

	singleValueGetterSetter := metadata.SingleValueGetterSetter{
		Key:   key,
		Value: valueToSet,
	}

	systemKey := isSystemKey(key)
	var validationError error
	if systemKey {
		// System validate
		validationError = validateSystemChange(singleValueGetterSetter)
	} else {
		// Let the bucket type validate the change
		bucketType := state.BucketType(state.BucketId.TypeId())
		if bucketType == nil {
			// Bug fix: previously fell through and dereferenced the nil
			// bucketType, panicking instead of reporting the error.
			validationError = fmt.Errorf("The bucket type %v is not "+
				"known to the system. Cannot perform metadata change.", state.BucketId.TypeId())
		} else {
			validationError = bucketType.ValidateMetadata(singleValueGetterSetter)
		}
	}

	// Ok to perform the change?
	if validationError != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient,
			"The requested change of key %v was rejected, reason: %v", key, validationError)
		return &bucket.ReturnGeneric{status}
	}

	// Lock while performing change
	metadataId, err := state.BucketId.ConvertToMetadataId()
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorServer,
			"Unable to convert bucket ID to metadata ID: %v", err)
		return &bucket.ReturnGeneric{status}
	}
	state.Locker.BucketIdWriteLock(metadataId)
	defer state.Locker.BucketIdWriteUnlock(metadataId)

	// Check current value if optimistic locking is enabled
	if optimisticLocking {
		currentValue, err := state.MetadataGetterSetter.Get(key)
		if err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Error getting current metadata value for optimistic locking: %v", err)
			return &bucket.ReturnGeneric{status}
		}
		if !bytes.Equal(currentValue, existingValue) {
			status = retcode.NewStatusFmt(retcode.ErrorOptimisticLocking,
				"Optimistic locking failure: The supplied 'existing' value is no longer "+
					"the current value. Refusing to perform the change.")
			return &bucket.ReturnGeneric{status}
		}
	}

	// Everything is OK, now perform the change
	err = state.MetadataGetterSetter.Set(key, valueToSet)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorServer,
			"Could not write metadata: %v", err)
		return &bucket.ReturnGeneric{status}
	}

	return &operations.PutReturn{
		Status: retcode.NewStatusOk(),
	}
}
Ejemplo n.º 14
0
// opScan lists metadata keys within the requested range, honoring the
// operation's Skip and Limit. Skipped entries do not count towards the
// limit; HasMore reports that the limit cut the result short.
func (self *metadataInstance) opScan(operation *operations.Scan,
	state *minos.OperationState) (bucketReturn ares.Return) {

	// Note: No problem if you supply more key elements - will just be ignored
	var start []byte
	var startExclusive bool
	var end []byte
	var endExclusive bool
	if len(operation.From.Key) > 0 {
		start = operation.From.Key.Serialize()
		startExclusive = operation.From.Exclusive
	}
	if operation.To != nil {
		end = operation.To.Key.Serialize()
		endExclusive = operation.To.Exclusive
	}

	allKeys, err := state.MetadataGetterSetter.ListKeys()
	if err != nil {
		status := retcode.NewStatusFmt(retcode.ErrorServer, "Unable to list metadata keys: %v",
			err)
		return &bucket.ReturnGeneric{status}
	}

	var scanEntries []operations.ScanEntry
	numberProcessed := 0
	limited := false
	stillToSkip := operation.Skip
	for _, singleKey := range allKeys {
		rangeOk := isWithingRange(singleKey,
			start, !startExclusive,
			end, !endExclusive)
		if !rangeOk {
			continue
		}
		// Bug fix: the skip/take branches were swapped — entries were taken
		// while the skip counter was still positive (and the counter only
		// decremented afterwards), so a scan with Skip=0 returned nothing.
		if stillToSkip > 0 {
			stillToSkip--
			continue
		}
		if uint32(numberProcessed) >= operation.Limit {
			// Limit reached
			limited = true
			break
		}

		// Ok, take that result
		scanEntry, status := createScanEntry(singleKey,
			operation, state.MetadataGetterSetter)
		if !status.IsOk() {
			return &bucket.ReturnGeneric{status}
		}
		scanEntries = append(scanEntries, scanEntry)
		numberProcessed++
	}

	return &operations.ScanReturn{
		Status:  retcode.NewStatusOk(),
		Entries: scanEntries,
		HasMore: limited,
	}
}
Ejemplo n.º 15
0
// blobPost uploads the request body as a blob using the bucket's
// incubation protocol: begin, append chunk-by-chunk, read the hash sum,
// then finish. On success it writes the blob's Location header and a 201.
// Without an explicit bucketId the global blob store is used.
func (self *restServer) blobPost(ctx *restContext) {
	var status retcode.Status

	var isGlobalBlobStore bool
	bucketId, bucketIdAsString, status := ctx.ReadBucketId(&ctx.UrlParams, "bucketId", true)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}
	if len(bucketId) == 0 {
		// Take the global blob store if no bucket id is supplied
		bucketId = self.caprica.GlobalBlobStoreId()
		isGlobalBlobStore = true
	}

	var out interface{}

	// Begin
	beginUpload := &operations.BucketPutIn{
		BucketOperation: operations.BucketOperation{
			BucketId: bucketId,
		},
		Key:   "incubation/from_rest_api/new",
		Value: [][]byte{},
	}
	out, status = ctx.execute(beginUpload, operations.BucketPut)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}

	// Now put the entire payload.
	// Bug fix: the Close was previously deferred inside the loop, stacking
	// one deferred call per iteration; register it exactly once up front.
	defer ctx.Request.Body.Close()
	for {
		readBuffer := make([]byte, maxReadPerElement)
		numReadFromBody, err := ctx.Request.Body.Read(readBuffer)

		if err != nil && err != io.EOF {
			status = retcode.NewStatusFmt(retcode.ErrorClient, "Could not read body from client: "+
				"%v.", err)
			ctx.WriteErrorStatus(status)
			return
		}

		if numReadFromBody > 0 {
			uploadOneElement := &operations.BucketPutIn{
				BucketOperation: operations.BucketOperation{
					BucketId: bucketId,
				},
				Key:   "incubation/from_rest_api/append",
				Value: [][]byte{readBuffer[:numReadFromBody]},
			}
			out, status = ctx.execute(uploadOneElement, operations.BucketPut)
			if !status.IsOk() {
				ctx.WriteErrorStatus(status)
				return
			}
		}
		if err == io.EOF {
			// End, have read everything
			break
		}
	}

	// Now read the hash code
	readHashCode := &operations.BucketGetIn{
		BucketOperation: operations.BucketOperation{
			BucketId: bucketId,
		},
		Key: "incubation/from_rest_api/sum",
	}
	out, status = ctx.execute(readHashCode, operations.BucketGet)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}
	readHashCodeRet := out.(operations.BucketGetOut)
	hashCodeValues := readHashCodeRet.Value.([][]byte)
	if len(hashCodeValues) < 2 {
		status = retcode.NewStatusFmt(retcode.ErrorServer, "Could not get calculated hash value."+
			" Blob bucket returned less than 2 value elements.")
		ctx.WriteErrorStatus(status)
		return
	}
	var hashAsBinary []byte
	err := encoding.Cbor().Decode(hashCodeValues[0], &hashAsBinary)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorServer, "Cbor decoding of hash code "+
			"failed: %v.", err)
		ctx.WriteErrorStatus(status)
		return
	}
	hashAsBase32 := encoding.Base32Encode(hashAsBinary)
	var location string
	if isGlobalBlobStore {
		location = fmt.Sprintf("/data/%v", hashAsBase32)
	} else {
		location = fmt.Sprintf("/data/%v/%v", bucketIdAsString, hashAsBase32)
	}

	// And now commit that thing
	commit := &operations.BucketPutIn{
		BucketOperation: operations.BucketOperation{
			BucketId: bucketId,
		},
		Key:   "incubation/from_rest_api/finish",
		Value: [][]byte{},
	}
	out, status = ctx.execute(commit, operations.BucketPut)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}

	// Ok, everything ok
	ctx.WriteLocation(location)
	ctx.Writer.WriteHeader(201)
}
Ejemplo n.º 16
0
// Read streams the blob identified by self.Hash from the bucket into
// writer, fetching readerBlockSize bytes per get operation. entireLength
// is the blob's total size as reported by the bucket. When that size
// exceeds maxAllowedLength, errorTooLong is set and reading stops before
// writing anything. On the success paths status is left at its zero
// value — presumably the zero status counts as ok (callers check
// IsOk(); TODO confirm), while error paths set an explicit status.
func (self *Reader) Read(writer io.Writer, maxAllowedLength uint64) (entireLength uint64,
	errorTooLong bool,
	status retcode.Status) {

	// skip advances by one block per iteration; limit is constant.
	var skip int = 0
	var limit int = readerBlockSize
	var entireLengthProcessed bool

	for {
		var err error
		skipEncoded := encoding.UIntToUVarInt(uint64(skip))
		limitEncoded := encoding.UIntToUVarInt(uint64(limit))

		//hash/[HASH]/content/VUI(skip_optional)/VUI(limit_optional)
		key := types.Key{[]byte("hash"), self.Hash, []byte("content"), skipEncoded, limitEncoded}
		opGet := &operations.Get{
			BucketId: self.BucketId,
			Key:      key,
		}
		performRet := self.Dispatcher.Perform(self.Context, opGet)
		if !performRet.GetCode().IsOk() || performRet.GetCode() == retcode.OkNotFound {
			// Error or not found: pass the bucket's status straight through.
			status = performRet.GetStatus()
			return
		}
		getReturn, ok := performRet.(*operations.GetReturn)
		if !ok {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Got invalid get return type %T", getReturn)
			return

		}
		// values = [data, CBOR(entire_length)]
		if len(getReturn.Value) != 2 {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Got invalid get from bucket / expecting 2 elements in value. Have %v",
				len(getReturn.Value))
			return
		}
		data := getReturn.Value[0]

		// Set entire length (only decoded once, on the first iteration).
		if !entireLengthProcessed {
			entireLengthEncoded := getReturn.Value[1]
			err = encoding.Cbor().Decode(entireLengthEncoded, &entireLength)
			entireLengthProcessed = true
			if err != nil {
				status = retcode.NewStatusFmt(retcode.ErrorServer,
					"Error decoding entire length %v", err)
				return
			}
			// Check entire length against the caller's cap before writing.
			if entireLength > maxAllowedLength {
				errorTooLong = true
				return
			}
		}

		_, err = writer.Write(data)
		if err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Unable to write to writer: %v", err)
			return
		}

		skip += readerBlockSize

		// Next one? End if we got less than requested or would exceed entire length
		if uint64(len(data)) < readerBlockSize || uint64(skip) >= entireLength {
			// No, end here
			return
		}
	}
	// Unreachable: the loop above always returns.
	return
}
Ejemplo n.º 17
0
// opScan validates the scan bounds for a directory listing before
// delegating to the backing store. The from-key must start with the
// directory hash (exactly keyLengthForScanning bytes); the to-key is
// mandatory and must either share that first element or be the single-
// element, exclusive successor (hash + one 0 byte). These restrictions
// keep a scan confined to one directory.
func (self *directoryBucketInstance) opScan(operation *operations.Scan,
	state *minos.OperationState) (ret bucket.BucketReturn) {

	var status retcode.Status

	// Check from key
	fromKey := operation.From.Key
	if len(fromKey) < 1 {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The from key has to contain "+
			"at least the %v byte hash as first element. Number of elements: %v",
			keyLengthForScanning, len(fromKey))
		return &operations.GenericReturn{status}
	}
	fromKeyHash := fromKey[0]
	if len(fromKeyHash) != keyLengthForScanning {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "First element in the from "+
			"key has to contain the directory hash. A binary of exactly %v bytes. Given: %v bytes",
			keyLengthForScanning, len(fromKeyHash))
		return &operations.GenericReturn{status}
	}

	// Check to key
	// To bound there?
	toBound := operation.To
	if toBound == nil {
		// Not allowed
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The to-key has to be set "+
			"when scanning directories. This is a security-limitation.")
		return &operations.GenericReturn{status}
	}

	if len(toBound.Key) < 1 {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The first element of the to-key "+
			"has to be set. It's missing")
		return &operations.GenericReturn{status}
	}
	toKeyHash := toBound.Key[0]

	differentFirstElementToFromKey := !bytes.Equal(toKeyHash, fromKeyHash)
	if differentFirstElementToFromKey {
		// Additional limitations.
		// Bug fix: these four branches used bare returns, discarding the
		// status and handing the caller a nil BucketReturn.
		if len(toKeyHash) != len(fromKeyHash)+1 {
			status = retcode.NewStatusFmt(retcode.ErrorClient, "If the from key and the to key "+
				"first element are not identical, the to key first element must be 1 byte longer.")
			return &operations.GenericReturn{status}
		}
		if !bytes.Equal(toKeyHash[:keyLengthForScanning], fromKeyHash) ||
			toKeyHash[keyLengthForScanning] != 0 {
			status = retcode.NewStatusFmt(retcode.ErrorClient, "The first %v bytes of the from key "+
				"and the to key have to be identical and the last byte of the to key has to be 0.",
				keyLengthForScanning)
			return &operations.GenericReturn{status}
		}
		if len(toBound.Key) != 1 {
			status = retcode.NewStatusFmt(retcode.ErrorClient, "If the first element of the to "+
				"key is not identical to the first element of the from key, the to key is "+
				"allowed to have exactly one element")
			return &operations.GenericReturn{status}
		}
		if !toBound.Exclusive {
			status = retcode.NewStatusFmt(retcode.ErrorClient, "If the first element of the to "+
				"key is not identical to the first element of the from key, the to-key "+
				"has to be set to exclusive")
			return &operations.GenericReturn{status}
		}
	}

	// Everything looks OK, allow scanning
	return self.cstore.Perform(state)
}