// this tests "RetryBatchGet", which does NOT do intelligent splitting and re-assembling
// of requests and responses
func Test1() {
	b := batch_get_item.NewBatchGetItem()
	tn := "test-godynamo-livetest"
	b.RequestItems[tn] = batch_get_item.NewRequestInstance()
	for i := 1; i <= 200; i++ {
		item := item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn].Keys =
			append(b.RequestItems[tn].Keys, item)
	}
	_, jerr := json.Marshal(b)
	if jerr != nil {
		fmt.Printf("%v\n", jerr)
	} else {
		//fmt.Printf("%s\n",string(json))
	}
	bs, _ := batch_get_item.Split(b)
	for _, bsi := range bs {
		body, code, err := bsi.RetryBatchGet(0)
		if err != nil || code != http.StatusOK {
			fmt.Printf("error: %v\n%v\n%v\n", string(body), code, err)
		} else {
			fmt.Printf("worked!: %v\n%v\n%v\n", string(body), code, err)
		}
	}
}
Example #2
// unprocessedKeys2BatchGetItems takes a response from DynamoDB that indicates some Keys
// require resubmitting, and turns these into a BatchGetItem struct instance.
func unprocessedKeys2BatchGetItems(req *BatchGetItem, resp *Response) (*BatchGetItem, error) {
	if req == nil || resp == nil {
		return nil, errors.New("batch_get_item.unprocessedKeys2BatchGetItems: one of req or resp is nil")
	}
	b := NewBatchGetItem()
	b.ReturnConsumedCapacity = req.ReturnConsumedCapacity
	for tn := range resp.UnprocessedKeys {
		if _, tn_in_b := b.RequestItems[tn]; !tn_in_b {
			b.RequestItems[tn] = NewRequestInstance()
			b.RequestItems[tn].AttributesToGet = make(
				attributestoget.AttributesToGet,
				len(resp.UnprocessedKeys[tn].AttributesToGet))
			copy(b.RequestItems[tn].AttributesToGet,
				resp.UnprocessedKeys[tn].AttributesToGet)
			b.RequestItems[tn].ConsistentRead =
				resp.UnprocessedKeys[tn].ConsistentRead
			for _, item_src := range resp.UnprocessedKeys[tn].Keys {
				item_cp := item.NewItem()
				for k, v := range item_src {
					v_cp := attributevalue.NewAttributeValue()
					cp_err := v.Copy(v_cp)
					if cp_err != nil {
						return nil, cp_err
					}
					item_cp[k] = v_cp
				}
				b.RequestItems[tn].Keys = append(b.RequestItems[tn].Keys, item_cp)
			}
		}
	}
	return b, nil
}
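// The sketch below shows one way a caller inside this package could use
// unprocessedKeys2BatchGetItems together with RetryBatchGet to resubmit
// leftover keys. resubmitUnprocessed is a hypothetical helper, not part of
// the library, and the actual DoBatchGet retry loop may be structured
// differently.
func resubmitUnprocessed(req *BatchGetItem, resp *Response) ([]byte, int, error) {
	if len(resp.UnprocessedKeys) == 0 {
		// nothing was left unprocessed, so there is nothing to resubmit
		return nil, 0, nil
	}
	retry_b, retry_err := unprocessedKeys2BatchGetItems(req, resp)
	if retry_err != nil {
		return nil, 0, retry_err
	}
	// issue the follow-up request, as in the RetryBatchGet tests above
	return retry_b.RetryBatchGet(0)
}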
// this tests "RetryBatchWrite", which does NOT do intelligent splitting and re-assembling
// of requests and responses
func Test1() {
	tn := "test-godynamo-livetest"
	b := batch_write_item.NewBatchWriteItem()
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 1; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	bs, _ := batch_write_item.Split(b)
	for _, bsi := range bs {
		body, code, err := bsi.RetryBatchWrite(0)
		if err != nil || code != http.StatusOK {
			fmt.Printf("error: %v\n%v\n%v\n", string(body), code, err)
		} else {
			fmt.Printf("worked!: %v\n%v\n%v\n", string(body), code, err)
		}
	}
}
// this tests "DoBatchWrite", which breaks up requests that are larger than the limit
// and re-assembles responses
func Test2() {
	home := os.Getenv("HOME")
	home_conf_file := home + string(os.PathSeparator) + "." + conf.CONF_NAME
	home_conf, home_conf_err := conf_file.ReadConfFile(home_conf_file)
	if home_conf_err != nil {
		panic("cannot read conf from " + home_conf_file)
	}
	home_conf.ConfLock.RLock()
	initialized := home_conf.Initialized
	home_conf.ConfLock.RUnlock()
	if !initialized {
		panic("conf struct has not been initialized")
	}

	b := batch_write_item.NewBatchWriteItem()
	tn := "test-godynamo-livetest"
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 201; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	body, code, err := b.DoBatchWriteWithConf(home_conf)
	fmt.Printf("%v\n%v\n%v\n", string(body), code, err)
}
Example #5
// NewPutItem will return a pointer to an initialized PutItem struct.
func NewPutItem() *PutItem {
	p := new(PutItem)
	p.Expected = expected.NewExpected()
	p.ExpressionAttributeNames = expressionattributenames.NewExpressionAttributeNames()
	p.ExpressionAttributeValues = attributevalue.NewAttributeValueMap()
	p.Item = item.NewItem()
	return p
}
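// A short usage sketch for NewPutItem, mirroring the key layout used in the
// live tests above. The TableName field name is an assumption here, and the
// call that would actually submit the request is omitted, so this is
// illustrative rather than canonical.
func examplePutItem() {
	p := NewPutItem()
	p.TableName = "test-godynamo-livetest" // assumed field name for the target table
	p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: "AHashKey1"}
	p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: "1"}
	p.Item["SomeValue"] = &attributevalue.AttributeValue{N: "1"}
	// marshaling shows the JSON body that would be sent to DynamoDB
	if j, jerr := json.Marshal(p); jerr == nil {
		fmt.Printf("%s\n", string(j))
	}
}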
// this tests "DoBatchWrite", which breaks up requests that are larger than the limit
// and re-assembles responses
func Test2() {
	b := batch_write_item.NewBatchWriteItem()
	tn := "test-godynamo-livetest"
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 201; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	body, code, err := b.DoBatchWrite()
	fmt.Printf("%v\n%v\n%v\n", string(body), code, err)
}
// this tests "DoBatchGet", which breaks up requests that are larger than the limit
// and re-assembles responses
func Test2() {
	b := batch_get_item.NewBatchGetItem()
	tn := "test-godynamo-livetest"
	b.RequestItems[tn] = batch_get_item.NewRequestInstance()
	for i := 1; i <= 300; i++ {
		item := item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn].Keys =
			append(b.RequestItems[tn].Keys, item)
	}
	_, jerr := json.Marshal(*b)
	if jerr != nil {
		fmt.Printf("%v\n", jerr)
	} else {
		//fmt.Printf("%s\n",string(json))
	}
	body, code, err := b.DoBatchGet()
	fmt.Printf("%v\n%v\n%v\n", string(body), code, err)
}
Example #8
// combineResponses adds actual response data from "this" Response to "all", the eventual stitched Response.
func combineResponses(all, this *Response) error {
	if all == nil || this == nil {
		return errors.New("batch_get_item.combineResponses: all or this is nil")
	}
	for tn := range this.Responses {
		if _, tn_in_all := all.Responses[tn]; !tn_in_all {
			all.Responses[tn] = make([]item.Item, 0)
		}
		for _, item_src := range this.Responses[tn] {
			item_cp := item.NewItem()
			for k, v := range item_src {
				v_cp := attributevalue.NewAttributeValue()
				cp_err := v.Copy(v_cp)
				if cp_err != nil {
					return cp_err
				}
				item_cp[k] = v_cp
			}
			all.Responses[tn] = append(all.Responses[tn], item_cp)
		}
	}
	return nil
}
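// A minimal sketch of how combineResponses could stitch together the bodies
// returned by RetryBatchGet for each batch produced by Split. The explicit
// initialization of the Responses map and the slice type for bs are
// assumptions, and the real DoBatchGet implementation may differ.
func stitchBatches(bs []BatchGetItem) (*Response, error) {
	all := &Response{Responses: make(map[string][]item.Item)} // assumed field type
	for _, bsi := range bs {
		body, _, err := bsi.RetryBatchGet(0)
		if err != nil {
			return nil, err
		}
		this := new(Response)
		if uerr := json.Unmarshal(body, this); uerr != nil {
			return nil, uerr
		}
		// copy this batch's items into the accumulated Response
		if cerr := combineResponses(all, this); cerr != nil {
			return nil, cerr
		}
	}
	return all, nil
}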
Example #9
// NewResponse will return a pointer to an initialized Response struct.
func NewResponse() *Response {
	r := new(Response)
	r.Item = item.NewItem()
	r.ConsumedCapacity = capacity.NewConsumedCapacity()
	return r
}
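// Hypothetical usage of NewResponse: because the struct comes back fully
// initialized, a caller can unmarshal an endpoint response body straight
// into it. responseFromBody is an illustrative helper, not part of the
// library.
func responseFromBody(body []byte) (*Response, error) {
	r := NewResponse()
	if uerr := json.Unmarshal(body, r); uerr != nil {
		return nil, uerr
	}
	return r, nil
}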