// compareBiogoNode compares a biogo node and a range tree node to determine if both
// contain the same values in the same order.  It recursively calls itself on
// both children if they exist.
func compareBiogoNode(db *client.DB, biogoNode *llrb.Node, key *proto.Key) error {
	// Retrieve the node from the range tree.
	rtNode := &proto.RangeTreeNode{}
	if err := db.GetProto(keys.RangeTreeNodeKey(*key), rtNode); err != nil {
		return err
	}

	bNode := &proto.RangeTreeNode{
		Key:       proto.Key(biogoNode.Elem.(Key)),
		ParentKey: proto.KeyMin,
		Black:     bool(biogoNode.Color),
	}
	if biogoNode.Left != nil {
		leftKey := proto.Key(biogoNode.Left.Elem.(Key))
		bNode.LeftKey = &leftKey
	}
	if biogoNode.Right != nil {
		rightKey := proto.Key(biogoNode.Right.Elem.(Key))
		bNode.RightKey = &rightKey
	}
	if err := nodesEqual(*key, *bNode, *rtNode); err != nil {
		return err
	}
	if rtNode.LeftKey != nil {
		if err := compareBiogoNode(db, biogoNode.Left, rtNode.LeftKey); err != nil {
			return err
		}
	}
	if rtNode.RightKey != nil {
		if err := compareBiogoNode(db, biogoNode.Right, rtNode.RightKey); err != nil {
			return err
		}
	}
	return nil
}
Example #2
// getConfig retrieves the configuration for the specified key. If the
// key is empty, all configurations are returned. Otherwise, the
// leading "/" path delimiter is stripped and the configuration
// matching the remainder is retrieved. Note that this will retrieve
// the default config if "key" is equal to "/", and will list all
// configs if "key" is equal to "". The body result contains a listing
// of keys and retrieval of a config. The output format is determined
// by the request header.
func getConfig(db *client.DB, configPrefix proto.Key, config gogoproto.Message,
	path string, r *http.Request) (body []byte, contentType string, err error) {
	// Scan all configs if the key is empty.
	if len(path) == 0 {
		var rows []client.KeyValue
		if rows, err = db.Scan(configPrefix, configPrefix.PrefixEnd(), maxGetResults); err != nil {
			return
		}
		if len(rows) == maxGetResults {
			log.Warningf("retrieved maximum number of results (%d); some may be missing", maxGetResults)
		}
		var prefixes []string
		for _, row := range rows {
			trimmed := bytes.TrimPrefix(row.Key, configPrefix)
			prefixes = append(prefixes, url.QueryEscape(string(trimmed)))
		}
		// Encode the response.
		body, contentType, err = util.MarshalResponse(r, prefixes, util.AllEncodings)
	} else {
		configKey := keys.MakeKey(configPrefix, proto.Key(path[1:]))
		if err = db.GetProto(configKey, config); err != nil {
			return
		}
		body, contentType, err = util.MarshalResponse(r, config, util.AllEncodings)
	}

	return
}
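// A hedged usage sketch of getConfig's two modes, assuming a zone-config
// endpoint: an empty path lists all config keys, while "/" retrieves the
// default config. The handler wiring here is hypothetical.
func handleGetZone(w http.ResponseWriter, r *http.Request, db *client.DB, path string) {
	zone := &proto.ZoneConfig{}
	body, contentType, err := getConfig(db, keys.ConfigZonePrefix, zone, path, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", contentType)
	if _, err := w.Write(body); err != nil {
		log.Warning(err)
	}
}
Example #3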
// compareBiogoTree walks both a biogo tree and the range tree to determine if both
// contain the same values in the same order.
func compareBiogoTree(db *client.DB, biogoTree *llrb.Tree) error {
	rt := &proto.RangeTree{}
	if err := db.GetProto(keys.RangeTreeRoot, rt); err != nil {
		return err
	}
	return compareBiogoNode(db, biogoTree.Root, &rt.RootKey)
}
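// A hedged usage sketch: a test that mirrors every range-tree insertion
// into an in-memory biogo llrb.Tree can verify the persisted tree against
// it after each operation; the test wiring is hypothetical.
func verifyRangeTree(t *testing.T, db *client.DB, tree *llrb.Tree) {
	if err := compareBiogoTree(db, tree); err != nil {
		t.Fatalf("range tree diverged from reference tree: %s", err)
	}
}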
Example #4
func countRangeReplicas(db *client.DB) (int, error) {
	desc := &roachpb.RangeDescriptor{}
	if err := db.GetProto(keys.RangeDescriptorKey(roachpb.KeyMin), desc); err != nil {
		return 0, err
	}
	return len(desc.Replicas), nil
}
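// A hedged usage sketch: a replication test might poll countRangeReplicas
// until the first range reaches the expected factor. The replicationTimeout
// constant is a placeholder.
func waitForReplication(t *testing.T, db *client.DB, factor int) {
	if err := util.RetryForDuration(replicationTimeout, func() error {
		n, err := countRangeReplicas(db)
		if err != nil {
			return err
		}
		if n != factor {
			return util.Errorf("expected %d replicas, found %d", factor, n)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}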
Example #5
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(db *client.DB) (roachpb.NodeID, error) {
	r, err := db.Inc(keys.NodeIDGenerator, 1)
	if err != nil {
		return 0, util.Errorf("unable to allocate node ID: %s", err)
	}
	return roachpb.NodeID(r.ValueInt()), nil
}
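// A hedged usage sketch: a node joining the cluster reserves its ID once
// during bootstrap; the surrounding startup code is hypothetical.
func bootstrapNodeID(db *client.DB) (roachpb.NodeID, error) {
	nodeID, err := allocateNodeID(db)
	if err != nil {
		return 0, err
	}
	log.Infof("allocated node ID %d", nodeID)
	return nodeID, nil
}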
Example #6
func (p *planner) releaseLeases(db client.DB) {
	if p.leases != nil {
		for _, lease := range p.leases {
			if err := p.leaseMgr.Release(lease); err != nil {
				log.Warning(err)
			}
		}
		p.leases = nil
	}

	// TODO(pmattis): This is a hack. Remove when schema change operations work
	// properly.
	if p.modifiedSchemas != nil {
		for _, d := range p.modifiedSchemas {
			var lease *LeaseState
			err := db.Txn(func(txn *client.Txn) error {
				var err error
				lease, err = p.leaseMgr.Acquire(txn, d.id, d.version)
				return err
			})
			if err != nil {
				log.Warning(err)
				continue
			}
			if err := p.leaseMgr.Release(lease); err != nil {
				log.Warning(err)
			}
		}
		p.modifiedSchemas = nil
	}
}
Example #7
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(nodeID roachpb.NodeID, inc int64, db *client.DB) (roachpb.StoreID, error) {
	r, err := db.Inc(keys.StoreIDGenerator, inc)
	if err != nil {
		return 0, util.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
	}
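	// Inc returns the last ID in the newly allocated block, so step back
	// inc-1 to yield the first ID in the contiguous range.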
	return roachpb.StoreID(r.ValueInt() - inc + 1), nil
}
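// A hedged usage sketch: reserving three store IDs yields a contiguous
// block starting at the returned ID, so the new stores may use IDs
// first, first+1 and first+2. The surrounding bootstrap code is assumed.
func bootstrapStores(nodeID roachpb.NodeID, db *client.DB) error {
	first, err := allocateStoreIDs(nodeID, 3, db)
	if err != nil {
		return err
	}
	log.Infof("allocated store IDs %d through %d", first, first+2)
	return nil
}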
Example #8
// getPermConfig fetches the permissions config for 'prefix'.
func getPermConfig(db *client.DB, prefix string) (*config.PermConfig, error) {
	config := &config.PermConfig{}
	if err := db.GetProto(keys.MakeKey(keys.ConfigPermissionPrefix, proto.Key(prefix)), config); err != nil {
		return nil, err
	}

	return config, nil
}
Example #9
func split(db *client.DB) {
	for i := 1; i < 10; i++ {
		fmt.Printf("split key: %v\n", i)
		err := db.AdminSplit([]byte(fmt.Sprintf("%d", i)))
		if err != nil {
			panic(fmt.Sprintf("split fail. key: %v, err: %v\n", i, err))
		}
	}
}
Example #10
// deleteConfig removes the config specified by key.
func deleteConfig(db *client.DB, configPrefix proto.Key, path string, r *http.Request) error {
	if len(path) == 0 {
		return util.Errorf("no path specified for config Delete")
	}
	if path == "/" {
		return util.Errorf("the default configuration cannot be deleted")
	}
	configKey := keys.MakeKey(configPrefix, proto.Key(path[1:]))
	return db.Del(configKey)
}
Example #11
// loadTree loads the tree root and all of its nodes. It puts all of the nodes
// into a map.
func loadTree(t *testing.T, db *client.DB) (*roachpb.RangeTree, map[string]roachpb.RangeTreeNode) {
	tree := new(roachpb.RangeTree)
	if err := db.GetProto(keys.RangeTreeRoot, tree); err != nil {
		t.Fatal(err)
	}
	nodes := make(map[string]roachpb.RangeTreeNode)
	if tree.RootKey != nil {
		loadNodes(t, db, tree.RootKey, nodes)
	}
	return tree, nodes
}
Example #12
// loadTree loads the tree root and all of its nodes. It puts all of the nodes
// into a map.
func loadTree(t *testing.T, db *client.DB) (storage.RangeTree, map[string]storage.RangeTreeNode) {
	var tree storage.RangeTree
	if err := db.GetProto(keys.RangeTreeRoot, &tree); err != nil {
		t.Fatal(err)
	}
	nodes := make(map[string]storage.RangeTreeNode)
	if tree.RootKey != nil {
		loadNodes(t, db, tree.RootKey, nodes)
	}
	return tree, nodes
}
Example #13
// getRangeKeys returns the end keys of all ranges.
func getRangeKeys(db *client.DB) ([]roachpb.Key, error) {
	rows, err := db.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
	if err != nil {
		return nil, err
	}
	ret := make([]roachpb.Key, len(rows))
	for i := 0; i < len(rows); i++ {
		ret[i] = bytes.TrimPrefix(rows[i].Key, keys.Meta2Prefix)
	}
	return ret, nil
}
Example #14
// treesEqual compares the expectedTree and expectedNodes to the actual range
// tree stored in the db.
func treesEqual(db *client.DB, expected testRangeTree) error {
	// Compare the tree roots.
	actualTree := &proto.RangeTree{}
	if err := db.GetProto(keys.RangeTreeRoot, actualTree); err != nil {
		return err
	}
	if !reflect.DeepEqual(&expected.Tree, actualTree) {
		return util.Errorf("Range tree root is not as expected - expected:%+v - actual:%+v", expected.Tree, actualTree)
	}

	return treeNodesEqual(db, expected, expected.Tree.RootKey)
}
Example #15
func insert(r *rand.Rand, db *client.DB, wg *sync.WaitGroup) {
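	// Note: calling wg.Add inside the worker can race with a concurrent
	// wg.Wait; callers conventionally Add before spawning the goroutine.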
	wg.Add(1)
	for i := 0; i < 1000; i++ {
		key := getKey(r)
		value := getValue(1024 * 10)
		err := db.Put(key, value)
		if err != nil {
			fmt.Printf("put fail. err: %v, key: %v\n", err, key)
		}
	}
	wg.Done()
}
Example #16
// loadNodes fetches a node and recursively all of its children.
func loadNodes(t *testing.T, db *client.DB, key roachpb.RKey, nodes map[string]roachpb.RangeTreeNode) {
	node := new(roachpb.RangeTreeNode)
	if err := db.GetProto(keys.RangeTreeNodeKey(key), node); err != nil {
		t.Fatal(err)
	}
	nodes[node.Key.String()] = *node
	if node.LeftKey != nil {
		loadNodes(t, db, node.LeftKey, nodes)
	}
	if node.RightKey != nil {
		loadNodes(t, db, node.RightKey, nodes)
	}
}
Example #17
// setDefaultRangeMaxBytes sets the range-max-bytes value for the default zone.
func setDefaultRangeMaxBytes(t *testing.T, db *client.DB, maxBytes int64) {
	zone := &proto.ZoneConfig{}
	if err := db.GetProto(keys.ConfigZonePrefix, zone); err != nil {
		t.Fatal(err)
	}
	if zone.RangeMaxBytes == maxBytes {
		return
	}
	zone.RangeMaxBytes = maxBytes
	if err := db.Put(keys.ConfigZonePrefix, zone); err != nil {
		t.Fatal(err)
	}
}
Example #18
func waitForInitialSplits(db *client.DB) error {
	expectedRanges := ExpectedInitialRangeCount()
	return util.RetryForDuration(initialSplitsTimeout, func() error {
		// Scan all keys in the Meta2Prefix; we only need a count.
		rows, err := db.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return err
		}
		if a, e := len(rows), expectedRanges; a != e {
			return util.Errorf("had %d ranges at startup, expected %d", a, e)
		}
		return nil
	})
}
Example #19
// purgeOldLeases refreshes the leases on a table. Unused leases older than
// minVersion will be released.
// If deleted is set, minVersion is ignored; no lease is acquired and all
// existing unused leases are released. The table is further marked for
// deletion, which will cause existing in-use leases to be eagerly released once
// they're not in use any more.
// If t has no active leases, nothing is done.
func (t *tableState) purgeOldLeases(
	db *client.DB, deleted bool, minVersion sqlbase.DescriptorVersion, store LeaseStore,
) error {
	t.mu.Lock()
	empty := len(t.active.data) == 0
	t.mu.Unlock()
	if empty {
		// We don't currently have a lease on this table, so no need to refresh
		// anything.
		return nil
	}

	// Acquire and release a lease on the table at a version >= minVersion.
	var lease *LeaseState
	err := db.Txn(func(txn *client.Txn) error {
		var err error
		if !deleted {
			lease, err = t.acquire(txn, minVersion, store)
			if err == errTableDeleted {
				deleted = true
			}
		}
		if err == nil || deleted {
			t.mu.Lock()
			defer t.mu.Unlock()
			var toRelease []*LeaseState
			if deleted {
				t.deleted = true
				// If the table has been deleted, all leases are stale.
				toRelease = append([]*LeaseState(nil), t.active.data...)
			} else {
				// Otherwise, all but the lease we just took are stale.
				toRelease = append([]*LeaseState(nil), t.active.data[:len(t.active.data)-1]...)
			}
			if err := t.releaseLeasesIfNotActive(toRelease, store); err != nil {
				return err
			}
			return nil
		}
		return err
	})
	if err != nil {
		return err
	}
	if lease == nil {
		return nil
	}
	return t.release(lease, store)
}
Example #20
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id. It will retry indefinitely on retryable
// errors.
func allocateNodeID(db *client.DB) (proto.NodeID, error) {
	var id proto.NodeID
	err := retry.WithBackoff(allocRetryOptions, func() (retry.Status, error) {
		r, err := db.Inc(keys.NodeIDGenerator, 1)
		if err != nil {
			status := retry.Break
			if _, ok := err.(util.Retryable); ok {
				status = retry.Continue
			}
			return status, util.Errorf("unable to allocate node ID: %s", err)
		}
		id = proto.NodeID(r.ValueInt())
		return retry.Break, nil
	})
	return id, err
}
Example #21
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success. The call
// will retry indefinitely on retryable errors.
func allocateStoreIDs(nodeID proto.NodeID, inc int64, db *client.DB) (proto.StoreID, error) {
	var id proto.StoreID
	err := retry.WithBackoff(allocRetryOptions, func() (retry.Status, error) {
		r, err := db.Inc(keys.StoreIDGenerator, inc)
		if err != nil {
			status := retry.Break
			if _, ok := err.(util.Retryable); ok {
				status = retry.Continue
			}
			return status, util.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
		}
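		// As above, Inc returns the last ID in the block; subtract inc-1
		// to recover the first.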
		id = proto.StoreID(r.ValueInt() - inc + 1)
		return retry.Break, nil
	})
	return id, err
}
Example #22
// putConfig writes a config for the specified key prefix (which is
// treated as a key). The config is parsed from the input "body". The
// config is stored proto-encoded. The specified body must validly
// parse into a config struct and must pass a given validation check (if
// validate is not nil).
func putConfig(db *client.DB, configPrefix proto.Key, config gogoproto.Message,
	path string, body []byte, r *http.Request,
	validate func(gogoproto.Message) error) error {
	if len(path) == 0 {
		return util.Errorf("no path specified for Put")
	}
	if err := util.UnmarshalRequest(r, body, config, util.AllEncodings); err != nil {
		return util.Errorf("config has invalid format: %+v: %s", config, err)
	}
	if validate != nil {
		if err := validate(config); err != nil {
			return err
		}
	}
	key := keys.MakeKey(configPrefix, proto.Key(path[1:]))
	return db.Put(key, config)
}
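// A hedged usage sketch: storing a zone config with a validation hook.
// The HTTP plumbing is assumed; the validate func rejects a non-positive
// RangeMaxBytes.
func putZoneConfig(db *client.DB, path string, body []byte, r *http.Request) error {
	validate := func(m gogoproto.Message) error {
		if m.(*proto.ZoneConfig).RangeMaxBytes <= 0 {
			return util.Errorf("RangeMaxBytes must be positive")
		}
		return nil
	}
	return putConfig(db, keys.ConfigZonePrefix, &proto.ZoneConfig{}, path, body, r, validate)
}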
Example #23
func (hv *historyVerifier) runCmds(cmds []*cmd, historyIdx int, db *client.DB, t *testing.T) (string, map[string]int64, error) {
	var strs []string
	env := map[string]int64{}
	err := db.Txn(func(txn *client.Txn) error {
		for _, c := range cmds {
			c.historyIdx = historyIdx
			c.env = env
			c.init(nil)
			fmtStr, err := c.execute(txn, t)
			if err != nil {
				return err
			}
			strs = append(strs, fmt.Sprintf(fmtStr, 0, 0))
		}
		return nil
	})
	return strings.Join(strs, " "), env, err
}
Example #24
// startTestWriter creates a writer which initiates a sequence of
// transactions, each of which writes up to 10 times to random keys with
// random values. If txnChannel is not nil, it receives a non-blocking
// send every time a new transaction starts.
func startTestWriter(db *client.DB, i int64, valBytes int32, wg *sync.WaitGroup, retries *int32,
	txnChannel chan struct{}, done <-chan struct{}, t *testing.T) {
	src := rand.New(rand.NewSource(i))
	defer func() {
		if wg != nil {
			wg.Done()
		}
	}()

	for j := 0; ; j++ {
		select {
		case <-done:
			return
		default:
			first := true
			err := db.Txn(func(txn *client.Txn) error {
				if first && txnChannel != nil {
					select {
					case txnChannel <- struct{}{}:
					default:
					}
				} else if !first && retries != nil {
					atomic.AddInt32(retries, 1)
				}
				first = false
				for j := 0; j <= int(src.Int31n(10)); j++ {
					key := randutil.RandBytes(src, 10)
					val := randutil.RandBytes(src, int(src.Int31n(valBytes)))
					if err := txn.Put(key, val); err != nil {
						log.Infof("experienced an error in routine %d: %s", i, err)
						return err
					}
				}
				return nil
			})
			if err != nil {
				t.Error(err)
			} else {
				time.Sleep(1 * time.Millisecond)
			}
		}
	}
}
Example #25
func (hv *historyVerifier) runTxn(txnIdx int, priority int32,
	isolation roachpb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T) error {
	var retry int
	txnName := fmt.Sprintf("txn%d", txnIdx)
	err := db.Txn(func(txn *client.Txn) error {
		txn.SetDebugName(txnName, 0)
		if isolation == roachpb.SNAPSHOT {
			if err := txn.SetIsolation(roachpb.SNAPSHOT); err != nil {
				return err
			}
		}
		txn.InternalSetPriority(priority)

		env := map[string]int64{}
		// TODO(spencer): restarts must create additional histories. They
		// look like: given the current partial history and a restart on
		// txn txnIdx, re-enumerate a set of all histories containing the
		// remaining commands from extant txns and all commands from this
		// restarted txn.

		// If this is attempt > 1, mark the commands done up front so they
		// no longer wait.
		if retry++; retry == 2 {
			for _, c := range cmds {
				c.done()
			}
		}
		if log.V(2) {
			log.Infof("%s, retry=%d", txnName, retry)
		}
		for i := range cmds {
			cmds[i].env = env
			if err := hv.runCmd(txn, txnIdx, retry, i, cmds, t); err != nil {
				if log.V(1) {
					log.Infof("%s encountered error: %s", cmds[i], err)
				}
				return err
			}
		}
		return nil
	})
	hv.wg.Done()
	return err
}
Example #26
// GetTableDescriptor retrieves a table descriptor directly from the KV layer.
func GetTableDescriptor(kvDB *client.DB, database string, table string) *TableDescriptor {
	dbNameKey := MakeNameMetadataKey(keys.RootNamespaceID, database)
	gr, err := kvDB.Get(dbNameKey)
	if err != nil {
		panic(err)
	}
	if !gr.Exists() {
		panic("database missing")
	}
	dbDescID := ID(gr.ValueInt())

	tableNameKey := MakeNameMetadataKey(dbDescID, table)
	gr, err = kvDB.Get(tableNameKey)
	if err != nil {
		panic(err)
	}
	if !gr.Exists() {
		panic("table missing")
	}

	descKey := MakeDescMetadataKey(ID(gr.ValueInt()))
	desc := &Descriptor{}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		panic("proto missing")
	}
	return desc.GetTable()
}
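// A hedged usage sketch: a schema-change test can read the descriptor
// straight from the KV layer to assert the effect of a DDL statement;
// the database and table names and expected column count are placeholders.
func checkTableColumns(t *testing.T, kvDB *client.DB) {
	desc := GetTableDescriptor(kvDB, "test", "kv")
	if len(desc.Columns) != 2 {
		t.Fatalf("expected 2 columns, got %d", len(desc.Columns))
	}
}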
Example #27
// refreshLease tries to refresh the node's table lease.
func (m *LeaseManager) refreshLease(db *client.DB, id ID, minVersion DescriptorVersion) error {
	// Only attempt to update a lease for a table that is already leased.
	if t := m.findTableState(id, false); t == nil {
		return nil
	}
	// Acquire and release a lease on the table at a version >= minVersion.
	var lease *LeaseState
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		var pErr *roachpb.Error
		// Acquire() can only acquire a lease at a version if it has
		// already been acquired at that version, or that version
		// is the latest version. If the latest version is > minVersion
		// then the node acquires a lease at the latest version but
		// Acquire() itself returns an error. This is okay, because
		// we want to update the node lease.
		lease, pErr = m.Acquire(txn, id, minVersion)
		return pErr
	}); pErr != nil {
		return pErr.GoError()
	}
	return m.Release(lease)
}
Example #28
// treeNodesEqual compares the expectedTree from the provided key to the actual
// nodes retrieved from the db.  It recursively calls itself on both left and
// right children if they exist.
func treeNodesEqual(db *client.DB, expected testRangeTree, key proto.Key) error {
	expectedNode, ok := expected.Nodes[string(key)]
	if !ok {
		return util.Errorf("Expected does not contain a node for %s", key)
	}
	actualNode := &proto.RangeTreeNode{}
	if err := db.GetProto(keys.RangeTreeNodeKey(key), actualNode); err != nil {
		return err
	}
	if err := nodesEqual(key, expectedNode, *actualNode); err != nil {
		return err
	}
	if expectedNode.LeftKey != nil {
		if err := treeNodesEqual(db, expected, *expectedNode.LeftKey); err != nil {
			return err
		}
	}
	if expectedNode.RightKey != nil {
		if err := treeNodesEqual(db, expected, *expectedNode.RightKey); err != nil {
			return err
		}
	}
	return nil
}
Example #29
func (hv *historyVerifier) runHistory(historyIdx int, priorities []int32,
	isolations []roachpb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T) error {
	plannedStr := historyString(cmds)
	if log.V(1) {
		log.Infof("attempting iso=%v pri=%v history=%s", isolations, priorities, plannedStr)
	}

	hv.actual = []string{}
	hv.wg.Add(len(priorities))
	txnMap := map[int][]*cmd{}
	var prev *cmd
	for _, c := range cmds {
		c.historyIdx = historyIdx
		txnMap[c.txnIdx] = append(txnMap[c.txnIdx], c)
		c.init(prev)
		prev = c
	}
	for i, txnCmds := range txnMap {
		go func(i int, txnCmds []*cmd) {
			if err := hv.runTxn(i, priorities[i-1], isolations[i-1], txnCmds, db, t); err != nil {
				t.Errorf("(%s): unexpected failure running %s: %v", cmds, cmds[i], err)
			}
		}(i, txnCmds)
	}
	hv.wg.Wait()

	// Construct string for actual history.
	actualStr := strings.Join(hv.actual, " ")

	// Verify history.
	var verifyStrs []string
	verifyEnv := map[string]int64{}
	for _, c := range hv.verifyCmds {
		c.historyIdx = historyIdx
		c.env = verifyEnv
		c.init(nil)
		pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
			fmtStr, pErr := c.execute(txn, t)
			if pErr != nil {
				return pErr
			}
			cmdStr := fmt.Sprintf(fmtStr, 0, 0)
			verifyStrs = append(verifyStrs, cmdStr)
			return nil
		})
		if pErr != nil {
			t.Errorf("failed on execution of verification cmd %s: %s", c, pErr)
			return pErr.GoError()
		}
	}

	err := hv.verify.checkFn(verifyEnv)
	if err == nil {
		if log.V(1) {
			log.Infof("PASSED: iso=%v, pri=%v, history=%q", isolations, priorities, actualStr)
		}
	}
	if hv.expSuccess && err != nil {
		verifyStr := strings.Join(verifyStrs, " ")
		t.Errorf("%d: iso=%v, pri=%v, history=%q: actual=%q, verify=%q: %s",
			historyIdx, isolations, priorities, plannedStr, actualStr, verifyStr, err)
	}
	return err
}
Example #30
// concurrentIncrements starts two Goroutines in parallel, both of which
// read the integers stored at the other's key and add it onto their own.
// It is checked that the outcome is serializable, i.e. exactly one of the
// two Goroutines (the later write) sees the previous write by the other.
func concurrentIncrements(db *client.DB, t *testing.T) {
	// wgStart waits for all transactions to line up, wgEnd has the main
	// function wait for them to finish.
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(2 + 1)
	wgEnd.Add(2)

	for i := 0; i < 2; i++ {
		go func(i int) {
			// Read the other key, write key i.
			readKey := []byte(fmt.Sprintf(testUser+"/value-%d", (i+1)%2))
			writeKey := []byte(fmt.Sprintf(testUser+"/value-%d", i))
			defer wgEnd.Done()
			wgStart.Done()
			// Wait until the other goroutines are running.
			wgStart.Wait()

			if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
				txn.SetDebugName(fmt.Sprintf("test-%d", i), 0)

				// Retrieve the other key.
				gr, pErr := txn.Get(readKey)
				if pErr != nil {
					return pErr
				}

				otherValue := int64(0)
				if gr.Value != nil {
					otherValue = gr.ValueInt()
				}

				_, pErr = txn.Inc(writeKey, 1+otherValue)
				return pErr
			}); pErr != nil {
				t.Error(pErr)
			}
		}(i)
	}

	// Kick the goroutines loose.
	wgStart.Done()
	// Wait for the goroutines to finish.
	wgEnd.Wait()
	// Verify that both keys contain something and, more importantly, that
	// one key actually contains the value of the first writer and not only
	// its own.
	total := int64(0)
	results := []int64(nil)
	for i := 0; i < 2; i++ {
		readKey := []byte(fmt.Sprintf(testUser+"/value-%d", i))
		gr, pErr := db.Get(readKey)
		if pErr != nil {
			t.Fatal(pErr)
		}
		if gr.Value == nil {
			t.Fatalf("unexpected empty key: %s=%v", readKey, gr.Value)
		}
		total += gr.ValueInt()
		results = append(results, gr.ValueInt())
	}

	// Exactly one writer committed first: it saw 0 and wrote 1; the other
	// then saw 1 and wrote 2, so the two values must sum to 3.
	if total != 3 {
		t.Fatalf("got unserializable values %v", results)
	}
}