// this tests "RetryBatchWrite", which does NOT do intelligent splitting and re-assembling
// of requests and responses
func Test1() {
	tn := "test-godynamo-livetest"
	b := batch_write_item.NewBatchWriteItem()
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 1; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	bs, _ := batch_write_item.Split(b)
	for _, bsi := range bs {
		body, code, err := bsi.RetryBatchWrite(0)
		if err != nil || code != http.StatusOK {
			fmt.Printf("error: %v\n%v\n%v\n", string(body), code, err)
		} else {
			fmt.Printf("worked!: %v\n%v\n%v\n", string(body), code, err)
		}
	}
}
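// RetryBatchWrite above does not re-assemble responses, so any UnprocessedItems
// returned by DynamoDB must be inspected by the caller. A minimal sketch, assuming
// the returned body is the raw DynamoDB BatchWriteItem response JSON;
// anyUnprocessed is a hypothetical helper (not part of godynamo) and needs "encoding/json".
func anyUnprocessed(body []byte) (bool, error) {
	var resp map[string]json.RawMessage
	if err := json.Unmarshal(body, &resp); err != nil {
		return false, err
	}
	// DynamoDB includes "UnprocessedItems" (possibly empty) in its response.
	var unprocessed map[string]json.RawMessage
	if raw, ok := resp["UnprocessedItems"]; ok {
		if err := json.Unmarshal(raw, &unprocessed); err != nil {
			return false, err
		}
	}
	return len(unprocessed) > 0, nil
}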
// this tests "DoBatchWrite", which breaks up requests that are larger than the limit
// and re-assembles responses
func Test2() {
	home := os.Getenv("HOME")
	home_conf_file := home + string(os.PathSeparator) + "." + conf.CONF_NAME
	home_conf, home_conf_err := conf_file.ReadConfFile(home_conf_file)
	if home_conf_err != nil {
		panic("cannot read conf from " + home_conf_file)
	}
	home_conf.ConfLock.RLock()
	initialized := home_conf.Initialized
	home_conf.ConfLock.RUnlock()
	if !initialized {
		panic("conf struct has not been initialized")
	}

	b := batch_write_item.NewBatchWriteItem()
	tn := "test-godynamo-livetest"
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 201; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	body, code, err := b.DoBatchWriteWithConf(home_conf)
	fmt.Printf("%v\n%v\n%v\n", string(body), code, err)
}
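// A small checking sketch for the call above, assuming DoBatchWriteWithConf
// returns ([]byte, int, error) as its use here suggests: fold the three results
// into one error, treating any non-200 status as a failure.
// (checkBatchWriteResult is a hypothetical helper, not part of godynamo.)
func checkBatchWriteResult(body []byte, code int, err error) error {
	if err != nil {
		return err
	}
	if code != http.StatusOK {
		return fmt.Errorf("batch write returned %d: %s", code, body)
	}
	return nil
}
// usage, continuing Test2 above:
//   body, code, err := b.DoBatchWriteWithConf(home_conf)
//   if cerr := checkBatchWriteResult(body, code, err); cerr != nil {
//   	panic(cerr)
//   }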
// this tests "DoBatchWrite", which breaks up requests that are larger than the limit
// and re-assembles responses
func Test2() {
	b := batch_write_item.NewBatchWriteItem()
	tn := "test-godynamo-livetest"
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 201; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	body, code, err := b.DoBatchWrite()
	fmt.Printf("%v\n%v\n%v\n", string(body), code, err)
}
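// The Test1/Test2 examples above rebuild the same PutRequest loop; a factored-out
// sketch follows. addPutRequests is a hypothetical helper (not part of godynamo),
// and it assumes NewBatchWriteItem returns a *batch_write_item.BatchWriteItem,
// as the Split(*b) call in the final Test1 example below suggests.
func addPutRequests(b *batch_write_item.BatchWriteItem, tn string, lo, hi int) {
	for i := lo; i <= hi; i++ {
		var p batch_write_item.PutRequest
		p.Item = item.NewItem()
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = &attributevalue.AttributeValue{S: k}
		p.Item["TheRangeKey"] = &attributevalue.AttributeValue{N: v}
		p.Item["SomeValue"] = &attributevalue.AttributeValue{N: v}
		b.RequestItems[tn] = append(b.RequestItems[tn],
			batch_write_item.RequestInstance{PutRequest: &p})
	}
}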
// this tests "RetryBatchWrite", which does NOT do intelligent splitting and re-assembling
// of requests and responses
func Test1() {
	conf_file.Read()
	if !conf.Vals.Initialized {
		panic("the conf.Vals global conf struct has not been initialized")
	}

	iam_ready_chan := make(chan bool)
	go conf_iam.GoIAM(iam_ready_chan)
	iam_ready := <-iam_ready_chan
	if iam_ready {
		fmt.Printf("using iam\n")
	} else {
		fmt.Printf("not using iam\n")
	}

	tn := "test-godynamo-livetest"
	b := batch_write_item.NewBatchWriteItem()
	b.RequestItems[tn] = make([]batch_write_item.RequestInstance, 0)
	for i := 1; i <= 300; i++ {
		var p batch_write_item.PutRequest
		p.Item = make(ep.Item)
		k := fmt.Sprintf("AHashKey%d", i)
		v := fmt.Sprintf("%d", i)
		p.Item["TheHashKey"] = ep.AttributeValue{S: k}
		p.Item["TheRangeKey"] = ep.AttributeValue{N: v}
		p.Item["SomeValue"] = ep.AttributeValue{N: v}
		b.RequestItems[tn] =
			append(b.RequestItems[tn],
				batch_write_item.RequestInstance{PutRequest: &p})
	}
	bs, _ := batch_write_item.Split(*b)
	for _, bsi := range bs {
		body, code, err := bsi.RetryBatchWrite(0)
		if err != nil || code != http.StatusOK {
			fmt.Printf("error: %v\n%v\n%v\n", body, code, err)
		} else {
			fmt.Printf("worked!: %v\n%v\n%v\n", body, code, err)
		}
	}
}
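// A wrapper sketch over the split-and-retry pattern above, assuming the Split and
// RetryBatchWrite signatures used in this example: send every segment and return
// the first transport error or non-200 response.
// (writeAllSegments is a hypothetical helper, not part of godynamo.)
func writeAllSegments(b batch_write_item.BatchWriteItem) error {
	bs, split_err := batch_write_item.Split(b)
	if split_err != nil {
		return split_err
	}
	for _, bsi := range bs {
		body, code, err := bsi.RetryBatchWrite(0)
		if err != nil {
			return err
		}
		if code != http.StatusOK {
			return fmt.Errorf("batch segment rejected with code %d: %s", code, body)
		}
	}
	return nil
}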
// BatchWriteItemHandler accepts arbitrarily-sized BatchWriteItem requests and relays them to Dynamo.
func BatchWriteItemHandler(w http.ResponseWriter, req *http.Request) {
	if bbpd_runinfo.BBPDAbortIfClosed(w) {
		return
	}
	start := time.Now()
	if req.Method != "POST" {
		e := "batch_write_item_route.BatchWriteItemHandler:method only supports POST"
		log.Print(e)
		http.Error(w, e, http.StatusBadRequest)
		return
	}
	pathElts := strings.Split(req.URL.Path, "/")
	if len(pathElts) != 2 {
		e := "batch_write_item_route.BatchWriteItemHandler:cannot parse path. try /batch-get-item"
		log.Printf(e)
		http.Error(w, e, http.StatusBadRequest)
		return
	}

	bodybytes, read_err := ioutil.ReadAll(req.Body)
	if read_err != nil && read_err != io.EOF {
		e := fmt.Sprintf("batch_write_item_route.BatchWriteItemHandler err reading req body: %s", read_err.Error())
		log.Print(e)
		http.Error(w, e, http.StatusInternalServerError)
		return
	}
	req.Body.Close()

	if len(bodybytes) > bwi.QUERY_LIM_BYTES {
		e := "batch_write_item_route.BatchWriteItemHandler - payload exceeds 1024KB and may be rejected by AWS; splitting it into segmented requests should allow each segment to be accepted"
		log.Print(e)
	}

	b := bwi.NewBatchWriteItem()

	um_err := json.Unmarshal(bodybytes, b)
	if um_err != nil {
		e := fmt.Sprintf("batch_write_item_route.BatchWriteItemHandler unmarshal err on %s to BatchWriteItem %s", string(bodybytes), um_err.Error())
		log.Print(e)
		http.Error(w, e, http.StatusInternalServerError)
		return
	}

	resp_body, code, resp_err := b.DoBatchWrite()

	if resp_err != nil {
		e := fmt.Sprintf("batch_write_item_route.BatchWriteItemHandler:err %s",
			resp_err.Error())
		log.Print(e)
		http.Error(w, e, http.StatusInternalServerError)
		return
	}

	if ep.HttpErr(code) {
		route_response.WriteError(w, code, "batch_write_item_route.BatchWriteItemHandler", resp_body)
		return
	}

	mr_err := route_response.MakeRouteResponse(
		w,
		req,
		resp_body,
		code,
		start,
		bwi.ENDPOINT_NAME)
	if mr_err != nil {
		e := fmt.Sprintf("batch_write_item_route.BatchWriteItemHandler %s", mr_err.Error())
		log.Print(e)
	}
}
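// A minimal client sketch for exercising this handler. It assumes bbpd is listening
// locally on port 12333 and serves this route at /batch-write-item (both assumptions;
// adjust to your deployment). The payload is a raw DynamoDB BatchWriteItem JSON
// document; this needs "bytes" in addition to the imports already used above.
func postBatchWrite(payload []byte) error {
	resp, post_err := http.Post("http://localhost:12333/batch-write-item",
		"application/json", bytes.NewReader(payload))
	if post_err != nil {
		return post_err
	}
	defer resp.Body.Close()
	body, read_err := ioutil.ReadAll(resp.Body)
	if read_err != nil {
		return read_err
	}
	fmt.Printf("%d\n%s\n", resp.StatusCode, body)
	return nil
}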