func testPutQuery(t *testing.T, table *Table, expected string) {
	var key *Key
	if table.Key.HasRange() {
		key = &Key{HashKey: "NewHashKeyVal", RangeKey: "12"}
	} else {
		key = &Key{HashKey: "NewHashKeyVal"}
	}

	data := map[string]interface{}{
		"Attr1": "Attr1Val",
		"Attr2": 12,
	}
	item, err := dynamizer.ToDynamo(data)
	if err != nil {
		t.Fatal(err)
	}

	q := NewDynamoQuery(table)
	if err := q.AddItem(key, item); err != nil {
		t.Fatal(err)
	}

	actual, err := q.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	compareJSONStrings(t, expected, actual)
}
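
For context, a driver for this helper might look like the following sketch. The newHashKeyTable fixture, the table name, and the attribute names in the expected payload are hypothetical stand-ins; only the shape of the PutItem request body follows the DynamoDB API.

func TestPutQueryHashKeyOnly(t *testing.T) {
	// newHashKeyTable is a hypothetical fixture returning a *Table whose
	// primary key has no range component, wired to a test server.
	table := newHashKeyTable(t)

	// Illustrative expected PutItem request body; names are placeholders.
	expected := `{
		"TableName": "TestTable",
		"Item": {
			"TestHashKey": {"S": "NewHashKeyVal"},
			"Attr1": {"S": "Attr1Val"},
			"Attr2": {"N": "12"}
		}
	}`

	testPutQuery(t, table, expected)
}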
Example #2
func (t *Table) BatchPutDocument(keys []*Key, v interface{}) ([]error, error) {
	numKeys := len(keys)

	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Slice {
		return nil, fmt.Errorf("v must be a slice with the same length as keys")
	} else if rv.Len() != numKeys {
		return nil, fmt.Errorf("v must be a slice with the same length as keys")
	}

	// Create a map to track which keys have been processed, since DynamoDB
	// doesn't return items in any particular order.
	//
	// N.B. This map is of type Key - not *Key - so that equality is based on
	// the hash and range key values, not the pointer address.
	processed := make(map[Key]bool)
	errs := make([]error, numKeys)

	numRetries := 0
	target := target("BatchWriteItem")
	for {
		q := NewDynamoBatchPutQuery(t)

		// Add requested keys to the query, skipping over those for which we
		// already have responses.
		for i, key := range keys {
			if _, ok := processed[*key]; ok {
				continue
			}

			item, err := dynamizer.ToDynamo(rv.Index(i).Interface())
			if err != nil {
				return nil, err
			}
			if err := q.AddItem(key, item); err != nil {
				return nil, err
			}
		}

		jsonResponse, err := t.Server.queryServer(target, q)
		if err != nil {
			return nil, err
		}

		var response DynamoBatchPutResponse
		err = json.Unmarshal(jsonResponse, &response)
		if err != nil {
			return nil, err
		}

		// Handle unprocessed items. We flag them with the sentinel error
		// ErrNotProcessed so the caller can decide how to handle the partial
		// result and move on from the successful writes immediately.
		unprocessed := make(map[Key]bool)
		numUnprocessed := 0
		if r, ok := response.UnprocessedItems[t.Name]; ok {
			for _, item := range r {
				key, err := t.getKeyFromItem(item.PutRequest.Item)
				if err != nil {
					return nil, err
				}
				unprocessed[key] = true
				numUnprocessed++
			}
		}

		// Package the responses, maintaining the original ordering specified
		// by the caller. Set ErrNotProcessed for every unprocessed key in case
		// we don't retry.
		for i, key := range keys {
			if _, ok := processed[*key]; ok {
				continue
			}

			if _, ok := unprocessed[*key]; ok {
				errs[i] = ErrNotProcessed
			} else {
				errs[i] = nil
				processed[*key] = true
			}
		}

		// If we are done, or we're not going to retry, return now.
		if numUnprocessed == 0 || !t.Server.RetryPolicy.ShouldRetry(target, nil, errProvisionedThroughputExceeded, numRetries) {
			return errs, nil
		}

		// Sleep according to the retry strategy and then attempt again with the
		// remaining keys.
		time.Sleep(t.Server.RetryPolicy.Delay(target, nil, errProvisionedThroughputExceeded, numRetries))
		numRetries++
	}
}
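
As a usage sketch, a caller can inspect the per-key errors to decide what to do with items the retry policy gave up on. The userDoc type and putUsers wrapper below are hypothetical; they rely only on the BatchPutDocument signature and the ErrNotProcessed sentinel shown above.

// userDoc is a hypothetical document type; dynamizer.ToDynamo is assumed to
// handle plain structs the same way it handles the map in the test above.
type userDoc struct {
	Attr1 string
	Attr2 int
}

// putUsers writes docs[i] under keys[i] and returns the keys that DynamoDB
// left unprocessed after the retry policy gave up, so the caller can retry
// them later.
func putUsers(table *Table, keys []*Key, docs []userDoc) ([]*Key, error) {
	errs, err := table.BatchPutDocument(keys, docs)
	if err != nil {
		// Request-level failure: treat the whole batch as failed.
		return nil, err
	}

	var unprocessed []*Key
	for i, e := range errs {
		if e == ErrNotProcessed {
			unprocessed = append(unprocessed, keys[i])
		}
	}
	return unprocessed, nil
}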