Example 1
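// directoryGet looks up a single entry in the global directory bucket by directory hash and file name, returning its target hash and, for files, its MIME type.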
func directoryGet(ctx *restContext, dirHash []byte, filename string) (
	entry *directoryEntry,
	found bool,
	status retcode.Status) {

	getEntry := &operations.BucketGetIn{
		BucketOperation: operations.BucketOperation{
			BucketId: ctx.caprica.GlobalDirectoryId(),
		},
		Key: [][]byte{dirHash, []byte(filename)},
	}
	var out interface{}
	out, status = ctx.execute(getEntry, operations.BucketGet)

	if !status.IsOk() {
		return
	}
	if status.Code == retcode.OkNotFound {
		return
	}
	found = true

	getEntryReturn := out.(operations.BucketGetOut)
	value := getEntryReturn.Value.([][]byte)

	var targetHash []byte
	err := encoding.Cbor().Decode(value[0], &targetHash)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorServer, "Unable to decode hash")
		return
	}
	entry = new(directoryEntry)
	entry.targetHash = targetHash
	if len(value) == 1 {
		// Directory
		entry.isDirectory = true
	} else {
		entry.isDirectory = false
		// Mime
		var mime string
		err := encoding.Cbor().Decode(value[1], &mime)
		if err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer, "Unable to decode mime")
			return
		}
		entry.mimeType = mime
	}

	status = retcode.NewStatusOk()
	return
}
Example 2
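// indexDirectory stores one (hash, file name) key per file entry of the decoded directory; the value holds the CBOR-encoded target hash and, for files, the CBOR-encoded MIME type.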
func (self *directoryBucketInstance) indexDirectory(operation *operations.Put,
	state *minos.OperationState,
	hash []byte,
	directory *directoryStruct) (ret bucket.BucketReturn) {

	var status retcode.Status

	mimeTypes := directory.mimeEntries
	for _, dirEntry := range directory.fileEntries {
		key := types.Key{hash, []byte(dirEntry.name)}

		cborTargetHash, err := encoding.Cbor().Encode(dirEntry.hashPointer)
		if err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer, "Error cbor encoding: %v",
				err)
			ret = &operations.GenericReturn{status}
			return
		}

		var value types.Array
		if dirEntry.mimePointer == -1 {
			// It's a directory
			value = types.Array{cborTargetHash}
		} else {
			mimeEntry := mimeTypes[dirEntry.mimePointer]
			cborMimeType, err := encoding.Cbor().Encode(mimeEntry.typeName)
			if err != nil {
				status = retcode.NewStatusFmt(retcode.ErrorServer, "Error cbor encoding: %v",
					err)
				ret = &operations.GenericReturn{status}
				return
			}
			value = types.Array{cborTargetHash, cborMimeType}
		}

		opPut := &operations.Put{
			Key:   key,
			Value: value,
		}
		putReturn := self.cstore.Op_put(opPut)
		if !putReturn.GetCode().IsOk() {
			// Error while putting the entry
			ret = &operations.GenericReturn{putReturn.GetStatus()}
			return
		}
	}
	return
}
Example 3
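// writeInformation persists the incubation state for the given id: the raw hash state plus the CBOR-encoded length.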
func (self *incubator) writeInformation(state *minos.OperationState,
	id string, incState *incubationState) (ret retcode.Status) {

	key := self.createInformationKey(state, id)

	encodedLength, err := encoding.Cbor().Encode(incState.length)
	if err != nil {
		ret = retcode.NewStatusError(retcode.ErrorServer, err)
		return
	}
	value := types.Array{incState.hashState, encodedLength}

	apiOperation := &operations.Put{
		BucketId: self.bucketId,
		Key:      key,
		Value:    value,
	}
	opRet := state.Dispatcher.Perform(state.Context, apiOperation)
	if !opRet.GetCode().IsOk() {
		// Unable to put
		ret = opRet.GetStatus()
		return
	}

	return opRet.GetStatus()
}
Example 4
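// CborArray parses a textual array payload delimited by cborArrayPrefix and cborArraySuffix, converts each element with the forwarding function and returns the CBOR encoding of the resulting array of byte slices.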
func CborArray(payload string, forwarding ForwardingFunction) (data []byte, err error) {
	if !strings.HasPrefix(payload, cborArrayPrefix) {
		err = errors.New("A cbor array has to start with " + cborArrayPrefix)
		return
	}
	if !strings.HasSuffix(payload, cborArraySuffix) {
		err = errors.New("A cbor array has to end with " + cborArraySuffix)
		return
	}

	payload = strings.TrimPrefix(payload, cborArrayPrefix)
	payload = strings.TrimSuffix(payload, cborArraySuffix)
	splitElements := strings.SplitN(payload, cborArraySeparator, cborArrayMaxElements+1)
	if len(splitElements) > cborArrayMaxElements {
		err = errors.New("The cbor array can process at max 32 elements. Input has more")
		return
	}

	array := make([][]byte, len(splitElements))
	for index, element := range splitElements {
		array[index], err = forwarding(element)
		if err != nil {
			return
		}
	}

	// Now cbor encode that
	encodedCbor, err := encoding.Cbor().Encode(array)
	if err != nil {
		return
	}
	data = encodedCbor
	return
}
Example 5
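// PutReceivers returns the bucket IDs that receive forwarded puts, decoded from the CBOR array stored under putReceiversKey; an empty list is returned when nothing is stored.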
func (self *TypedGetterSetter) PutReceivers() (receivers []bucket.Id, err error) {
	binaryValue, err := self.Get(putReceiversKey)
	if err != nil {
		return
	}
	if binaryValue == nil || len(binaryValue) == 0 {
		return []bucket.Id{}, nil
	}

	var bucketIdsAsBinaryInterface interface{}
	err = encoding.Cbor().Decode(binaryValue, &bucketIdsAsBinaryInterface)
	if err != nil {
		return nil, errors.New(fmt.Sprintf("Error cbor decoding: %v\n", err))
	}

	av := bucketIdsAsBinaryInterface.([]interface{})

	bucketIds := make([]bucket.Id, len(av))
	for index, singleBucket := range av {
		typeOf := reflect.TypeOf(singleBucket)
		if !reflect.TypeOf([]byte{}).AssignableTo(typeOf) {
			return nil, errors.New("Expecting slices of bytes in put receivers")
		}
		bucketIds[index] = singleBucket.([]byte)
	}
	return bucketIds, nil
}
Example 6
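// writeInformation persists the public state for the given hash: the raw data plus the CBOR-encoded length.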
func (self *public) writeInformation(state *minos.OperationState,
	hash []byte, pubState *publicState) (ret retcode.Status) {

	key := self.clreatePublicKey(state, hash)

	encodedLength, err := encoding.Cbor().Encode(pubState.length)
	if err != nil {
		ret = retcode.NewStatusError(retcode.ErrorServer, err)
		return
	}
	if pubState.data == nil {
		pubState.data = []byte{}
	}
	value := types.Array{pubState.data, encodedLength}
	apiOperation := &operations.Put{
		BucketId: self.bucketId,
		Key:      key,
		Value:    value,
	}
	opRet := state.Dispatcher.Perform(state.Context, apiOperation)
	if !opRet.GetCode().IsOk() {
		// Unable to put
		ret = opRet.GetStatus()
		return
	}

	return opRet.GetStatus()
}
Example 7
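// information loads the incubation state for the given id: value[0] is the raw hash state, value[1] the CBOR-encoded length.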
func (self *incubator) information(state *minos.OperationState,
	id string) (incState incubationState, ret retcode.Status) {

	key := self.createInformationKey(state, id)

	apiOperation := &operations.Get{
		BucketId: self.bucketId,
		Key:      key,
	}
	opRet := state.Dispatcher.Perform(state.Context, apiOperation)
	if opRet.GetCode() != retcode.Ok {
		// Not found or a different error
		ret = opRet.GetStatus()
		return
	}

	// Ok, found it, deserialize
	retCast := opRet.(*operations.GetReturn)
	value := retCast.Value

	incState = incubationState{}
	incState.hashState = value[0]
	err := encoding.Cbor().Decode(value[1], &incState.length)
	if err != nil {
		ret = retcode.NewStatusError(retcode.ErrorServer, err)
		return
	}

	return
}
Example 8
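// information loads the public state for the given hash: value[0] is the raw data, value[1] the CBOR-encoded length.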
func (self *public) information(state *minos.OperationState,
	hash []byte) (pubState publicState, ret retcode.Status) {

	key := self.clreatePublicKey(state, hash)

	apiOperation := &operations.Get{
		BucketId: self.bucketId,
		Key:      key,
	}
	opRet := state.Dispatcher.Perform(state.Context, apiOperation)
	if opRet.GetCode() != retcode.Ok {
		// Not found or a different error
		ret = opRet.GetStatus()
		return
	}

	// Ok, found it, deserialize
	retCast := opRet.(*operations.GetReturn)
	value := retCast.Value

	pubState = publicState{}
	pubState.data = value[0]
	err := encoding.Cbor().Decode(value[1], &pubState.length)
	if err != nil {
		ret = retcode.NewStatusError(retcode.ErrorServer, err)
		return
	}

	return
}
Example 9
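// PrepareAndEncode builds the top-level array (magic number, directory entries, MIME type table) and returns its CBOR encoding; directory entries use a MIME index of -1.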
func (self *EncodableDirectory) PrepareAndEncode() (
	data []byte, err error) {

	if !self.prepared {
		self.prepare()
	}

	// Entries
	var dirEntries []interface{}
	for _, srcEntry := range self.Entries {
		var mimeValue int16
		if srcEntry.TargetIsDirectory {
			mimeValue = -1
		} else {
			mimeValue = self.mimeTypeToArrayEntry[srcEntry.MimeType]
		}
		targetEntry := []interface{}{mimeValue, srcEntry.TargetHash, srcEntry.Name}
		dirEntries = append(dirEntries, targetEntry)
	}

	topLevelArray := []interface{}{encodableDirectoryMagicNumber,
		dirEntries, self.mimeTypes}

	data, err = encoding.Cbor().Encode(topLevelArray)
	if err != nil {
		return
	}

	return
}
Example 10
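// CborSignedInt parses the payload as a base-10 signed 64-bit integer and returns its CBOR encoding.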
func CborSignedInt(payload string) (data []byte, err error) {
	u, err := strconv.ParseInt(payload, 10, 64)
	if err != nil {
		return
	}

	return encoding.Cbor().Encode(u)
}
Example 11
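// initializeGlobalDirectoryStore creates the global directory bucket (forwarding to the global blob store) on first start and stores its ID in system storage; on later starts the stored ID is reused.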
func (self *capricaStruct) initializeGlobalDirectoryStore() (err error) {
	sysstorage := self.buran.SystemStorage()
	valuePtr, err := sysstorage.Get(globaldirectory_key)
	if err != nil {
		return
	}
	if valuePtr == nil {
		// First get the global blob
		globalBlobId := self.globalBlobStoreId
		if globalBlobId == nil || len(globalBlobId) == 0 {
			err = errors.New("The global directory depends on the global blob store. So need " +
				"to create that first.")
			return
		}

		var globalBlobIdCbor []byte
		globalBlobIdCbor, err = encoding.Cbor().Encode(globalBlobId)
		if err != nil {
			return
		}

		// Need to create the global blob store
		operation := operations.CreateBucket{
			TypeId: bucket.TypeId_Directory,
			Metadata: metadata.MemoryMetadata{
				"const.forwarder.blob": globalBlobIdCbor,
			},
		}
		// TODO: Real context
		context := ares.Context{
			UserId: user.Id([]byte("Test")),
		}
		ret := self.buran.Perform(&context, &operation)
		if !ret.GetCode().IsOk() {
			// Error
			err = errors.New(fmt.Sprintf("Error creating "+
				"the global directory: %v", ret.GetText()))
			return
		}
		retCast := ret.(*operations.CreateBucketReturn)
		self.globalDirectoryId = retCast.Id

		// Store it - so we have the same on next start
		err = self.buran.SystemStorage().Put(globaldirectory_key, retCast.Id)
		if err != nil {
			return
		}
	} else {
		// Already have a global blob store
		self.globalDirectoryId = bucket.Id(*valuePtr)
	}
	return
}
Example 12
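// SetCreator stores the creator's user ID, CBOR-encoded as a byte slice, under creatorKey.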
func (self *TypedGetterSetter) SetCreator(creator user.Id) (err error) {
	userIdAsBytes := []byte(creator)
	userIdEncoded, err := encoding.Cbor().Encode(userIdAsBytes)
	if err != nil {
		return
	}

	err = self.Set(creatorKey, userIdEncoded)
	if err != nil {
		return
	}
	return
}
Example 13
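// SetTypeId stores the bucket type ID, CBOR-encoded as a uint8, under typeIdKey.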
func (self *TypedGetterSetter) SetTypeId(typeId bucket.TypeId) (err error) {
	typeIdAsUint8 := uint8(typeId)
	encodedTypeId, err := encoding.Cbor().Encode(typeIdAsUint8)
	if err != nil {
		return
	}
	err = self.Set(typeIdKey, encodedTypeId)
	if err != nil {
		return
	}
	return
}
Example 14
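// Download reads the blob identified by hash from the bucket block by block and writes it to writer, returning the entire length reported by the store.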
func Download(t btesting.T, bucketId typing.BucketId, hash []byte,
	writer io.Writer) (entireLength uint64) {

	var skip int = 0
	var limit int = readerBlockSize
	var entireLengthProcessed bool

	for {
		var err error
		skipEncoded := encoding.UIntToUVarInt(uint64(skip))
		limitEncoded := encoding.UIntToUVarInt(uint64(limit))
		//hash/[HASH]/content/VUI(skip_optional)/VUI(limit_optional)
		key := typing.Key{[]byte("hash"), hash, []byte("content"), skipEncoded, limitEncoded}
		value := operations.Get(t, bucketId, key, true)
		// value = [data, CBOR(entire_length)]
		if len(value) != 2 {
			t.Errorf("Got invalid get from bucket / expecting 2 elements in value. Have %v",
				len(value))
			return
		}
		data := value[0]

		// Set entire length
		if !entireLengthProcessed {
			entireLengthEncoded := value[1]
			err = encoding.Cbor().Decode(entireLengthEncoded, &entireLength)
			entireLengthProcessed = true
			if err != nil {
				t.Errorf("Error decoding entire length %v", err)
				return
			}
		}

		_, err = writer.Write(data)
		if err != nil {
			t.Errorf("Unable to write to writer: %v", err)
			return
		}

		skip += readerBlockSize

		// Next one? End if we got less than requested or would exceed entire length
		if uint64(len(data)) < readerBlockSize || uint64(skip) >= entireLength {
			// No, end here
			return
		}
	}
	return
}
Example 15
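// SetForwarder stores the CBOR-encoded target bucket ID under the forwarder key derived from (constant, key).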
func (self *TypedGetterSetter) SetForwarder(constant bool,
	key string, bucketId bucket.Id) (err error) {

	bucketIdAsBinary := []byte(bucketId)
	bucketIdEncoded, err := encoding.Cbor().Encode(bucketIdAsBinary)
	if err != nil {
		return
	}

	err = self.Set(generateKeyForForwarder(constant, key), bucketIdEncoded)
	if err != nil {
		return
	}
	return
}
Example 16
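// decodeToDirectoryStruct decodes CBOR data into a directoryStruct; the top-level array must contain the magic number, the file entries and the MIME entries.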
func decodeToDirectoryStruct(data []byte) (
	directory directoryStruct, err error) {

	var encoded interface{}
	err = encoding.Cbor().Decode(data, &encoded)
	if err != nil {
		return
	}

	encodedArray, ok := encoded.([]interface{})
	if !ok {
		err = errors.New("Top level is not a array")
		return
	}

	topLevelLength := len(encodedArray)
	if topLevelLength != 3 {
		err = errors.New("Expecting 2 entries in the top level array")
		return
	}

	magicNumber, err := encoding.NumberToInt64(encodedArray[0])
	if err != nil {
		err = errors.New(fmt.Sprintf("Expecting magic number top level array at index 0. %v",
			err))
		return
	}

	if magicNumber != directoryStructMagicNumber {
		err = errors.New(fmt.Sprintf("Magic number should be %v but is %v",
			directoryStructMagicNumber, magicNumber))
		return
	}

	fileEntries, highestMimePtr, err := decodeToFileEntries(encodedArray[1])
	if err != nil {
		return
	}
	directory.fileEntries = fileEntries

	mimeEntries, err := decodeMimeEntries(encodedArray[2], highestMimePtr)
	if err != nil {
		return
	}
	directory.mimeEntries = mimeEntries

	return
}
Example 17
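// SetPutReceiver configures the given bucket to forward puts to the listed receiver buckets by writing the system.put_receivers metadata entry.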
func SetPutReceiver(t btesting.T, bucketId typing.BucketId, receivers ...typing.BucketId) {

	// Now configure bucket "one" to forward the puts to bucket "two"
	putReceivers := make([][]byte, len(receivers))
	for index, receiver := range receivers {
		putReceivers[index] = []byte(receiver)
	}
	newPutReceiversEncoded, err := encoding.Cbor().Encode(putReceivers)
	if err != nil {
		t.Errorf("%v", err)
		return
	}
	Put(t, bucketId.ToMetadataBucketId(),
		typing.KeyFromStringPanic("system.put_receivers"),
		typing.ValueFromInterfacePanic([][]byte{newPutReceiversEncoded}))

}
Example 18
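// TypeId reads the bucket type ID stored under typeIdKey and CBOR-decodes it from a uint8.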
func (self *TypedGetterSetter) TypeId() (typeId bucket.TypeId, err error) {
	binaryValue, err := self.Get(typeIdKey)
	if err != nil {
		return
	}
	if binaryValue == nil || len(binaryValue) == 0 {
		err = errors.New("No type ID stored in metadata")
		return
	}
	var typeIdAsUint8 uint8
	err = encoding.Cbor().Decode(binaryValue, &typeIdAsUint8)
	if err != nil {
		err = errors.New(fmt.Sprintf("Error cbor decoding: %v\n", err))
		return
	}

	return bucket.TypeId(typeIdAsUint8), nil
}
Example 19
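// SetPutReceivers stores the receiver bucket IDs as a CBOR-encoded array of byte slices under putReceiversKey.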
func (self *TypedGetterSetter) SetPutReceivers(receivers []bucket.Id) (err error) {
	arrayOfBytes := make([][]byte, len(receivers))
	for index, receiver := range receivers {
		arrayOfBytes[index] = []byte(receiver)
	}

	encodedArrayOfBytes, err := encoding.Cbor().Encode(arrayOfBytes)
	if err != nil {
		return
	}

	err = self.Set(putReceiversKey, encodedArrayOfBytes)
	if err != nil {
		return
	}
	return
}
Example 20
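// Creator reads and CBOR-decodes the creator's user ID stored under creatorKey.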
func (self *TypedGetterSetter) Creator() (creator user.Id, err error) {
	binaryValue, err := self.Get(creatorKey)
	if err != nil {
		return
	}
	if binaryValue == nil || len(binaryValue) == 0 {
		err = errors.New("No user ID stored in metadata")
		return
	}

	var userIdAsBytes []byte
	err = encoding.Cbor().Decode(binaryValue, &userIdAsBytes)
	if err != nil {
		err = errors.New(fmt.Sprintf("Error cbor decoding: %v\n", err))
		return
	}

	return user.Id(userIdAsBytes), nil
}
Example 21
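// Forwarder returns the forwarding bucket ID stored for (constant, key) and reports whether such a forwarder exists.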
func (self *TypedGetterSetter) Forwarder(constant bool,
	key string) (bucketId bucket.Id, exists bool, err error) {

	binaryValue, err := self.Get(generateKeyForForwarder(constant, key))
	if err != nil {
		return
	}
	if binaryValue == nil || len(binaryValue) == 0 {
		return
	}
	exists = true

	var bucketIdAsBinary []byte
	err = encoding.Cbor().Decode(binaryValue, &bucketIdAsBinary)
	if err != nil {
		err = errors.New(fmt.Sprintf("Error cbor decoding: %v\n", err))
		return
	}

	bucketId = bucket.Id(bucketIdAsBinary)
	return
}
Example 22
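// op_get returns the stored counter value; a missing entry is answered with a CBOR-encoded 0 instead of "not found".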
func (self *counterInstance) op_get(operation *operations.Get) (
	ret bucket.BucketReturn) {

	get_return := self.cstore.Op_get(operation)
	if get_return.GetCode() == retcode.OkNotFound {
		// This is a special case, if non existent, return 0
		zeroValue := 0
		valueBytes, err := encoding.Cbor().Encode(zeroValue)
		if err != nil {
			return &bucket.ReturnGeneric{retcode.NewStatusError(retcode.ErrorServer,
				errors.New(fmt.Sprintf("Could not encode cbor", err)))}
		}

		value := types.Array{valueBytes}
		get_return := operations.GetReturn{
			Status: retcode.NewStatusOk(),
			Value:  value,
		}
		return &get_return
	} else {
		// Everything else is returned 1:1
		return get_return
	}
}
Example 23
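// Read streams the blob content block by block into writer and reports the entire length; errorTooLong is set when that length exceeds maxAllowedLength.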
func (self *Reader) Read(writer io.Writer, maxAllowedLength uint64) (entireLength uint64,
	errorTooLong bool,
	status retcode.Status) {

	var skip int = 0
	var limit int = readerBlockSize
	var entireLengthProcessed bool

	for {
		var err error
		skipEncoded := encoding.UIntToUVarInt(uint64(skip))
		limitEncoded := encoding.UIntToUVarInt(uint64(limit))

		//hash/[HASH]/content/VUI(skip_optional)/VUI(limit_optional)
		key := types.Key{[]byte("hash"), self.Hash, []byte("content"), skipEncoded, limitEncoded}
		opGet := &operations.Get{
			BucketId: self.BucketId,
			Key:      key,
		}
		performRet := self.Dispatcher.Perform(self.Context, opGet)
		if !performRet.GetCode().IsOk() || performRet.GetCode() == retcode.OkNotFound {
			// Error or not found
			status = performRet.GetStatus()
			return
		}
		getReturn, ok := performRet.(*operations.GetReturn)
		if !ok {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Got invalid get return type %T", getReturn)
			return

		}
		// values = [data, CBOR(entire_length)]
		if len(getReturn.Value) != 2 {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Got invalid get from bucket / expecting 2 elements in value. Have %v",
				len(getReturn.Value))
			return
		}
		data := getReturn.Value[0]

		// Set entire length
		if !entireLengthProcessed {
			entireLengthEncoded := getReturn.Value[1]
			err = encoding.Cbor().Decode(entireLengthEncoded, &entireLength)
			entireLengthProcessed = true
			if err != nil {
				status = retcode.NewStatusFmt(retcode.ErrorServer,
					"Error decoding entire length %v", err)
				return
			}
			// Check entire length
			if entireLength > maxAllowedLength {
				errorTooLong = true
				return
			}
		}

		_, err = writer.Write(data)
		if err != nil {
			status = retcode.NewStatusFmt(retcode.ErrorServer,
				"Unable to write to writer: %v", err)
			return
		}

		skip += readerBlockSize

		// Next one? End if we got less than requested or would exceed entire length
		if uint64(len(data)) < readerBlockSize || uint64(skip) >= entireLength {
			// No, end here
			return
		}
	}
	return
}
Example 24
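// Upload streams the reader's content into the bucket's incubation area in chunks, commits it and returns the CBOR-decoded content hash.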
func Upload(t btesting.T, bucketId typing.BucketId,
	reader io.Reader) (hash []byte) {

	demoId := []byte("demoId")

	for {
		buf := make([]byte, chunkSize)
		numRead, err := reader.Read(buf)
		if err != nil && err != io.EOF {
			t.Errorf("Error reading: %v", err)
			return
		}
		if numRead > 0 {
			buf = buf[:numRead]
			//##PUT: keys = incubation/[ID]/append, values = [data]
			operations.Put(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("append")},
				typing.Value{buf})
			if t.Failed() {
				return
			}
		}
		if numRead != chunkSize {
			// End
			break
		}
	}

	// Get the hash
	// ##GET: keys = incubation/[ID]/sum, values = [CBOR(hash), CBOR(length)]
	value := operations.Get(t, bucketId, typing.Key{[]byte("incubation"),
		demoId, []byte("sum")}, true)
	if t.Failed() {
		return
	}
	if len(value) != 2 {
		t.Errorf("Expecting 2 value entries (cbor(hash) and cbor(length))")
		return
	}
	err := encoding.Cbor().Decode(value[0], &hash)
	if err != nil {
		t.Errorf("Error getting the hash: %v", err)
		return
	}
	var length uint64
	err = encoding.Cbor().Decode(value[1], &length)
	if err != nil {
		t.Errorf("Error getting the length: %v", err)
		return
	}

	// Ok, commit
	//##PUT: keys = incubation/[ID]/finish, values = []

	operations.Put(t, bucketId, typing.Key{[]byte("incubation"), demoId, []byte("finish")},
		typing.Value{})
	if t.Failed() {
		return
	}

	return
}
Example 25
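// readDirectory takes the CBOR-encoded hash from the put value, reads the referenced blob from the backend blob store and decodes it into a directoryStruct.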
func (self *directoryBucketInstance) readDirectory(operation *operations.Put,
	state *minos.OperationState) (directory *directoryStruct, hash []byte, ret bucket.BucketReturn) {
	var status retcode.Status

	key := operation.Key
	if len(key) != 1 || string(key[0]) != putIndexKey {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Expecting exactly one key "+
			"element called %v",
			putIndexKey)
		ret = &operations.GenericReturn{status}
		return
	}

	value := operation.Value
	if len(value) != 1 {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Expecting exactly one value "+
			"element containig the cbor hash, have %v elements",
			len(value))
		ret = &operations.GenericReturn{status}
		return
	}

	err := encoding.Cbor().Decode(value[0], &hash)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "Error cbor decoding the "+
			"supplied hash in value index 0 (should be cbor encoded 256 bit hash)",
			err)
		ret = &operations.GenericReturn{status}
		return
	}

	blobReader := &blobstore.Reader{
		Dispatcher: state.Dispatcher,
		Context:    state.Context,
		BucketId:   self.backendId,
		Hash:       hash,
	}

	encodedDirByteWriter := &bytes.Buffer{}
	entireLength, tooLong, readerStatus := blobReader.Read(encodedDirByteWriter,
		directoryAboutMaxBytesLenOfDirectory)
	// Error or not found
	if !readerStatus.IsOk() || readerStatus.Code == retcode.OkNotFound {
		ret = &operations.GenericReturn{readerStatus}
		return
	}
	if tooLong {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The supplied hash "+
			"is a binary entry that looks to large for a directory entry. It has %v bytes "+
			" - a directory cannot be larger than about %v bytes.",
			entireLength, directoryAboutMaxBytesLenOfDirectory)
		ret = &operations.GenericReturn{status}
		return
	}

	directoryNonPtr, err := decodeToDirectoryStruct(encodedDirByteWriter.Bytes())
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorClient, "The supplied hash "+
			"is a binary that seems to be an invalid directory: %v",
			err)
		ret = &operations.GenericReturn{status}
		return
	}
	directory = &directoryNonPtr

	return
}
Example 26
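// blobPost streams the request body into the blob store's incubation area, commits it and responds with 201 and the location of the new blob.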
func (self *restServer) blobPost(ctx *restContext) {
	var status retcode.Status

	var isGlobalBlobStore bool
	bucketId, bucketIdAsString, status := ctx.ReadBucketId(&ctx.UrlParams, "bucketId", true)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}
	if len(bucketId) == 0 {
		// Take the global blob store if no bucket id is supplied
		bucketId = self.caprica.GlobalBlobStoreId()
		isGlobalBlobStore = true
	}

	var out interface{}

	// Begin
	beginUpload := &operations.BucketPutIn{
		BucketOperation: operations.BucketOperation{
			BucketId: bucketId,
		},
		Key:   "incubation/from_rest_api/new",
		Value: [][]byte{},
	}
	out, status = ctx.execute(beginUpload, operations.BucketPut)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}

	// Now put the entire payload
	defer ctx.Request.Body.Close()
	for {
		readBuffer := make([]byte, maxReadPerElement)
		numReadFromBody, err := ctx.Request.Body.Read(readBuffer)

		if err != nil && err != io.EOF {
			status = retcode.NewStatusFmt(retcode.ErrorClient, "Could not read body from client: "+
				"%v.", err)
			ctx.WriteErrorStatus(status)
			return
		}

		if numReadFromBody > 0 {
			uploadOneElement := &operations.BucketPutIn{
				BucketOperation: operations.BucketOperation{
					BucketId: bucketId,
				},
				Key:   "incubation/from_rest_api/append",
				Value: [][]byte{readBuffer[:numReadFromBody]},
			}
			out, status = ctx.execute(uploadOneElement, operations.BucketPut)
			if !status.IsOk() {
				ctx.WriteErrorStatus(status)
				return
			}
		}
		if err == io.EOF {
			// End, have read everything
			break
		}
	}

	// Now read the hash code
	readHashCode := &operations.BucketGetIn{
		BucketOperation: operations.BucketOperation{
			BucketId: bucketId,
		},
		Key: "incubation/from_rest_api/sum",
	}
	out, status = ctx.execute(readHashCode, operations.BucketGet)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}
	readHashCodeRet := out.(operations.BucketGetOut)
	hashCodeValues := readHashCodeRet.Value.([][]byte)
	if len(hashCodeValues) < 2 {
		status = retcode.NewStatusFmt(retcode.ErrorServer, "Could not get calculated hash value."+
			" Blob bucket returned less than 2 value elements.")
		ctx.WriteErrorStatus(status)
		return
	}
	var hashAsBinary []byte
	err := encoding.Cbor().Decode(hashCodeValues[0], &hashAsBinary)
	if err != nil {
		status = retcode.NewStatusFmt(retcode.ErrorServer, "Cbor decoding of hash code "+
			"failed: %v.", err)
		ctx.WriteErrorStatus(status)
		return
	}
	hashAsBase32 := encoding.Base32Encode(hashAsBinary)
	var location string
	if isGlobalBlobStore {
		location = fmt.Sprintf("/data/%v", hashAsBase32)
	} else {
		location = fmt.Sprintf("/data/%v/%v", bucketIdAsString, hashAsBase32)
	}

	// And now commit that thing
	commit := &operations.BucketPutIn{
		BucketOperation: operations.BucketOperation{
			BucketId: bucketId,
		},
		Key:   "incubation/from_rest_api/finish",
		Value: [][]byte{},
	}
	out, status = ctx.execute(commit, operations.BucketPut)
	if !status.IsOk() {
		ctx.WriteErrorStatus(status)
		return
	}

	// Ok, everything ok
	ctx.WriteLocation(location)
	ctx.Writer.WriteHeader(201)
}
Example 27
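// uploadDirectoryAndGet creates a blob bucket and a connected directory bucket, uploads a demo directory, indexes it and verifies lookups of existing and missing files.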
func uploadDirectoryAndGet(t btesting.T) {
	// First create the blob bucket (that's where the directories are stored, and the data too)
	var blobBucketId typing.BucketId
	operations.CreateBucket(t, typing.TypeId_BlobStore, &blobBucketId)
	if t.Failed() {
		return
	}

	// Now create the directory bucket and connect that to the blob bucket
	var bucketId typing.BucketId
	var blobBucketIdCBor []byte
	blobBucketIdCBor, err := encoding.Cbor().Encode([]byte(blobBucketId))
	if err != nil {
		t.Errorf("error cbor encoding: %v", err)
		return
	}
	operations.CreateBucketWithMetadata(t, typing.TypeId_Directory, map[string][]byte{
		"const.forwarder.blob": blobBucketIdCBor,
	}, &bucketId)
	if t.Failed() {
		return
	}

	// Both buckets now exist and are connected
	dir, err := createDemoDirectory()
	if err != nil {
		t.Errorf("Error creating directory: %v\n", err)
		return
	}

	// Now upload the directory to the blob storage
	dirReader := bytes.NewReader(dir)
	hash := blob.Upload(t, blobBucketId, dirReader)
	if t.Failed() {
		return
	}

	// The directory is now in the blob storage - the directory bucket can now index it
	cborHash, err := encoding.Cbor().Encode(hash)
	if err != nil {
		t.Errorf("error cbor encoding the hash: %v", err)
		return
	}
	operations.Put(t, bucketId, typing.Key{[]byte("index")}, typing.Value{cborHash})
	if t.Failed() {
		return
	}

	// Get some files
	operations.Get(t, bucketId, typing.Key{hash, []byte("file_1.txt")}, true)
	if t.Failed() {
		return
	}
	operations.Get(t, bucketId, typing.Key{hash, []byte("another_file.jpeg")}, true)
	if t.Failed() {
		return
	}

	// Now some files that do not exist
	operations.Get(t, bucketId, typing.Key{hash, []byte("<UNKNOWN_FILE>.exe")}, false)
	if t.Failed() {
		return
	}
	operations.Get(t, bucketId, typing.Key{hash, []byte("no_is_not_in_directory.com")}, false)
	if t.Failed() {
		return
	}
}
Example 28
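// TestSimpleBlobStoreWrite puts "This is a test" into a blob store via the incubation API, checks the returned hash and length, finishes the upload and reads the data back by hash.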
func TestSimpleBlobStoreWrite(t *testing.T) {
	bucketId := createBucket(t, TypeId_BlobStore)
	arguments := make(map[string]interface{})

	// Put something
	// 'VGhpcyBpcyBhIHRlc3Q' = "This is a test"
	putToBlob := &testframework.JsonTest{
		Input: fmt.Sprintf(`{
			"Operation" : "Put",
			"Data" : {
				"BucketId" : "%v",
				"Key" : "incubation/test1/append",
				"Value" : "VGhpcyBpcyBhIHRlc3Q"
			}}`, bucketId),
		Expect: `{
			"Code" : "!(retcode_ok)!"
		}`,
	}
	runner().Run(t, putToBlob)

	// Now get the sha256 hash
	getSha256Hash := &testframework.JsonTest{
		Input: fmt.Sprintf(`{
			"Operation" : "Get",
			"Data" : {
				"BucketId" : "%v",
				"Key" : "incubation/test1/sum"
			}}`, bucketId),
		Expect: `{
			"Code" : "!(retcode_ok_found)!",
			"Data" : {
				"Value" : ["!(string_not_empty:cbor_hash)!",
					 "!(string_not_empty:cbor_length)!"]
			}
		}`,
		Arguments: arguments,
	}
	runner().Run(t, getSha256Hash)
	if t.Failed() {
		return
	}

	// See if the data we got is correct
	hashCborBase64 := arguments["cbor_hash"].(string)
	lengthCBorBase64 := arguments["cbor_length"].(string)
	hashCBor, err := encoding.Base64Decode(hashCborBase64)
	if err != nil {
		t.Fatal(err)
	}
	lengthCBor, err := encoding.Base64Decode(lengthCBorBase64)
	if err != nil {
		t.Fatal(err)
	}
	var hash []byte
	err = encoding.Cbor().Decode(hashCBor, &hash)
	if err != nil {
		t.Fatal(err)
	}
	var length uint64
	err = encoding.Cbor().Decode(lengthCBor, &length)
	if err != nil {
		t.Fatal(err)
	}

	// Length of "This is a test"
	if length != 14 {
		t.Fatalf("Expecting the length to be 14 but it's %v\n", length)
	}
	// Just check the hash length. TODO: Also check content
	if len(hash) != 32 {
		t.Fatalf("Since it's SHA256 (256 bits) the hash has to be 32 bytes,"+
			" but is %v\n", len(hash))
	}

	// Now finish it (will make it accessible to the public)
	putFinish := &testframework.JsonTest{
		Input: fmt.Sprintf(`{
			"Operation" : "Put",
			"Data" : {
				"BucketId" : "%v",
				"Key" : "incubation/test1/finish"
			}}`, bucketId),
		Expect: `{
			"Code" : "!(retcode_ok)!"
		}`,
	}
	runner().Run(t, putFinish)

	// It now should be possible to access the data using the hash

	// First get the length
	hashAsBase64 := encoding.Base64Encode(hash)
	getLength := &testframework.JsonTest{
		Input: fmt.Sprintf(`{
			"Operation" : "Get",
			"Data" : {
				"BucketId" : "%v",
				"Key" : "hash/:base64:%v/length"
			}}`, bucketId, hashAsBase64),
		Expect: `{
			"Code" : "!(retcode_ok_found)!",
			"Data" : {
				"Value" : ["!(string_not_empty:cbor_length)!"]
			}
		}`,
		Arguments: arguments,
	}
	runner().Run(t, getLength)

	// Again, check the length
	lengthCBorBase64 = arguments["cbor_length"].(string)
	lengthCBor, err = encoding.Base64Decode(lengthCBorBase64)
	if err != nil {
		t.Fatal(err)
	}
	err = encoding.Cbor().Decode(lengthCBor, &length)
	if err != nil {
		t.Fatal(err)
	}
	if length != 14 {
		t.Fatalf("The length should still be 14 but it's %v\n", length)
	}

	// Now read from it
	// Skip is set to 0
	// Limit is set to 14 (should read everything we have)
	getData := &testframework.JsonTest{
		Input: fmt.Sprintf(`{
			"Operation" : "Get",
			"Data" : {
				"BucketId" : "%v",
				"Key" : "hash/:base64:%v/content/:uvarint:0/:uvarint:14"
			}}`, bucketId, hashAsBase64),
		Expect: `{
			"Code" : "!(retcode_ok_found)!",
			"Data" : {
				"Value" : [
					"!(string_not_empty:data)!",
					"!(string_not_empty:cbor_length)!"
				]
			}
		}`,
		Arguments: arguments,
	}
	runner().Run(t, getData)

	// Ok, now check if we got the correct data
	dataAsBase64 := arguments["data"].(string)
	dataAsBinary, err := encoding.Base64Decode(dataAsBase64)
	if err != nil {
		t.Fatal(err)
	}
	// The data we originally added
	if string(dataAsBinary) != "This is a test" {
		t.Fatalf("Got wrong data '%v'. Expecting 'This is a test'",
			string(dataAsBinary))
	}
}
Example 29
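// op_get handles GET operations on the blob store: the incubation sum (hash and length), public content reads with optional skip/limit, and public length lookups.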
func (self *blobstoreStruct) op_get(operation *operations.Get,
	state *minos.OperationState) (ret bucket.BucketReturn) {

	commonOp := new(commonOperation)
	retStatus := commonOp.fill(operation.Key, nil)
	if !retStatus.IsOk() {
		return &bucket.ReturnGeneric{retStatus}
	}

	var value types.Array
	var status retcode.Status
	switch commonOp.operation {
	case operation_incubation_sum:
		var incInfo incubationState
		incInfo, status = self.incubator.information(state, commonOp.incubationId)
		if status.Code == retcode.OkNotFound {
			// Not found
			break
		}
		if !status.IsOk() {
			break
		}
		hash := incInfo.restoreHash().Sum(nil)
		hashEncoded, err := encoding.Cbor().Encode(hash)
		if err != nil {
			panic("Error encoding hash")
		}
		length := incInfo.length
		lengthEncoded, err := encoding.Cbor().Encode(length)
		if err != nil {
			panic("Error encoding length")
		}

		value = types.Array{hashEncoded, lengthEncoded}
	case operation_public_content:
		var skipData []byte
		var limitData []byte

		if len(commonOp.restKey) > 0 {
			skipData = commonOp.restKey[0]
		}
		if len(commonOp.restKey) > 1 {
			limitData = commonOp.restKey[1]
		}

		var err error
		var skip uint64
		if skipData != nil {
			skip, err = binary.ReadUvarint(bytes.NewReader(skipData))
			if err != nil {
				panic("Error decoding 'skip' (should be a variable uint)")
			}
		}

		var limit int64
		if limitData != nil {
			var limitUint64 uint64
			limitUint64, err = binary.ReadUvarint(bytes.NewReader(limitData))
			if err != nil {
				panic("Error decoding 'limit' (should be a variable uint)")
			}
			limit = int64(limitUint64)
		} else {
			limit = -1 // No limit
		}

		var data []byte
		var entireLength uint64
		data, entireLength, status = self.public.read(state, commonOp.hash,
			skip, limit)
		if !status.IsOk() {
			break
		}
		if status.Code == retcode.OkNotFound {
			// Not found
			break
		}

		var lengthEncoded []byte
		lengthEncoded, err = encoding.Cbor().Encode(entireLength)
		if err != nil {
			panic("Error encoding length")
		}

		// Ok, found it, return the value
		value = types.Array{data, lengthEncoded}
	case operation_public_length:
		var pubInfo publicState
		if commonOp.hash == nil {
			err := errors.New("Hash is missing for getting length")
			status = retcode.NewStatusError(retcode.ErrorClient, err)
			break
		}
		pubInfo, status = self.public.information(state, commonOp.hash)
		if status.Code == retcode.OkNotFound {
			// Not found
			break
		}
		if !status.IsOk() {
			break
		}

		length := pubInfo.length
		lengthEncoded, err := encoding.Cbor().Encode(length)
		if err != nil {
			panic("Error encoding length")
		}

		value = types.Array{lengthEncoded}
	default:
		err := errors.New(fmt.Sprintf("Unknown operation: operation-code: "+
			"%v (this usually means you called GET instead of PUT)\n",
			commonOp.operation))
		status = retcode.NewStatusError(retcode.ErrorClient, err)
	}

	// Done, return
	if status.IsOk() {
		if status.Code == retcode.OkNotFound {
			if status.Text == "" {
				status.Text = fmt.Sprintf("Given hash is: %v", commonOp.hash)
			}
			return &bucket.ReturnGeneric{status}
		} else {
			return &operations.GetReturn{
				Status: status,
				Value:  value,
			}
		}
	} else {
		return &bucket.ReturnGeneric{status}
	}
}
Example 30
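// op_put decodes the first value element as a CBOR-encoded signed delta, adds it to the stored counter and writes the result back; a result of 0 removes the entry.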
func (self *counterInstance) op_put(operation *operations.Put) (
	ret bucket.BucketReturn) {

	state := self.cstore.State

	// Extract the +/- value
	if operation.Value.Len() == 0 {
		return &bucket.ReturnGeneric{retcode.NewStatusError(retcode.ErrorClient,
			errors.New("Expect at least one value value, have none."))}
	}
	plusMinusBytes := operation.Value.Get(0)
	var plusMinus int64
	err := encoding.Cbor().Decode(plusMinusBytes, &plusMinus)
	if err != nil {
		return &bucket.ReturnGeneric{retcode.NewStatusError(retcode.ErrorClient,
			errors.New(
				fmt.Sprintf("The value in the first playload "+
					"cannot be cbor decoded to int: %v", err)))}
	}

	// First lock that thing
	state.Locker.BucketIdWriteLock(state.BucketId)
	defer state.Locker.BucketIdWriteUnlock(state.BucketId)

	// Get current value
	return_get := self.cstore.Op_get(&operations.Get{
		Key: operation.Key,
	})
	if !return_get.GetCode().IsOk() {
		return &bucket.ReturnGeneric{return_get.GetStatus()}
	}

	// Extract current value
	var currentValue int64
	if return_get.GetCode() == retcode.Ok {
		return_get_cast := return_get.(*operations.GetReturn)
		value := return_get_cast.Value
		if len(value) == 0 {
			// Error
			return &bucket.ReturnGeneric{retcode.NewStatusError(retcode.ErrorServer,
				errors.New("Stored (current) value not found (no value)"))}
		}

		err := encoding.Cbor().Decode(value[0], &currentValue)
		if err != nil {
			return &bucket.ReturnGeneric{retcode.NewStatusError(retcode.ErrorServer,
				errors.New(
					fmt.Sprintf("Error decoding current value: %v", err)))}
		}

	} else if return_get.GetCode() == retcode.OkNotFound {
		// Default is always 0
		currentValue = 0
	} else {
		// Unknown return
		return &bucket.ReturnGeneric{retcode.NewStatusError(retcode.ErrorServer,
			errors.New(
				fmt.Sprintf("Unknown return code by backing store: %v",
					return_get.GetCode())))}
	}

	// Ok, now we have current value, increase / decrease now
	//TODO: What to do on overflow? Return a client error?
	newValue := currentValue + plusMinus

	var newValueValue types.Array
	if newValue != 0 {
		newValueBytes, err := encoding.Cbor().Encode(newValue)
		if err != nil {
			return &bucket.ReturnGeneric{retcode.NewStatusError(
				retcode.ErrorServer,
				errors.New(
					fmt.Sprintf("Could not cbor encode: %v",
						err)))}
		}
		newValueValue = types.Array{newValueBytes}
	} else {
		// If the value is 0, the entry is removed
		newValueValue = nil
	}

	// Put the new value
	put_return := self.cstore.Op_put(&operations.Put{
		Key:   operation.Key,
		Value: newValueValue,
	})

	return put_return
}