Example #1
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(db *client.DB) (roachpb.NodeID, error) {
	r, err := db.Inc(keys.NodeIDGenerator, 1)
	if err != nil {
		return 0, errors.Errorf("unable to allocate node ID: %s", err)
	}
	return roachpb.NodeID(r.ValueInt()), nil
}
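A minimal usage sketch, assuming a *client.DB named db that is already connected to a running cluster (the helper below is hypothetical, not part of the source):

func bootstrapNodeID(db *client.DB) error {
	// Hypothetical caller: allocate a fresh, unique node ID and report it.
	nodeID, err := allocateNodeID(db)
	if err != nil {
		return err
	}
	fmt.Printf("allocated node ID %d\n", nodeID)
	return nil
}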
Example #2
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(nodeID roachpb.NodeID, inc int64, db *client.DB) (roachpb.StoreID, error) {
	r, err := db.Inc(keys.StoreIDGenerator, inc)
	if err != nil {
		return 0, errors.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
	}
	return roachpb.StoreID(r.ValueInt() - inc + 1), nil
}
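The contiguous-range contract is easiest to see at a call site. A hedged sketch (the helper is hypothetical; only allocateStoreIDs comes from the example above):

func printNewStoreIDs(nodeID roachpb.NodeID, db *client.DB) error {
	// Allocate three store IDs; the returned value is the first of a
	// contiguous block, so the IDs are first, first+1 and first+2.
	first, err := allocateStoreIDs(nodeID, 3, db)
	if err != nil {
		return err
	}
	for i := roachpb.StoreID(0); i < 3; i++ {
		fmt.Printf("allocated store ID %d\n", first+i)
	}
	return nil
}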
Example #3
func countRangeReplicas(db *client.DB) (int, error) {
	desc := &roachpb.RangeDescriptor{}
	if err := db.GetProto(keys.RangeDescriptorKey(roachpb.RKeyMin), desc); err != nil {
		return 0, err
	}
	return len(desc.Replicas), nil
}
Example #4
// getRangeKeys returns the end keys of all ranges.
func getRangeKeys(db *client.DB) ([]roachpb.Key, error) {
	rows, err := db.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
	if err != nil {
		return nil, err
	}
	ret := make([]roachpb.Key, len(rows))
	for i := 0; i < len(rows); i++ {
		ret[i] = bytes.TrimPrefix(rows[i].Key, keys.Meta2Prefix)
	}
	return ret, nil
}
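A short hedged sketch of a caller that logs the end key of every range (hypothetical helper; only getRangeKeys is from the example above):

func logRangeEndKeys(db *client.DB) error {
	endKeys, err := getRangeKeys(db)
	if err != nil {
		return err
	}
	for i, k := range endKeys {
		fmt.Printf("range %d ends at %s\n", i, k)
	}
	return nil
}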
Example #5
// loadTree loads the tree root and all of its nodes. It puts all of the nodes
// into a map.
func loadTree(t *testing.T, db *client.DB) (storage.RangeTree, map[string]storage.RangeTreeNode) {
	var tree storage.RangeTree
	if err := db.GetProto(keys.RangeTreeRoot, &tree); err != nil {
		t.Fatal(err)
	}
	nodes := make(map[string]storage.RangeTreeNode)
	if tree.RootKey != nil {
		loadNodes(t, db, tree.RootKey, nodes)
	}
	return tree, nodes
}
Example #6
// loadNodes fetches a node and recursively all of its children.
func loadNodes(t *testing.T, db *client.DB, key roachpb.RKey, nodes map[string]storage.RangeTreeNode) {
	var node storage.RangeTreeNode
	if err := db.GetProto(keys.RangeTreeNodeKey(key), &node); err != nil {
		t.Fatal(err)
	}
	nodes[node.Key.String()] = node
	if node.LeftKey != nil {
		loadNodes(t, db, node.LeftKey, nodes)
	}
	if node.RightKey != nil {
		loadNodes(t, db, node.RightKey, nodes)
	}
}
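Taken together, loadTree and loadNodes can be used from a test to inspect the whole range tree. A minimal hedged sketch (the helper is hypothetical):

func logRangeTreeSize(t *testing.T, db *client.DB) {
	// Load the root and all reachable nodes, then report how many there are.
	_, nodes := loadTree(t, db)
	t.Logf("range tree has %d nodes", len(nodes))
}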
Example #7
// WaitForInitialSplits waits for the expected number of initial ranges to be
// populated in the meta2 table. If the expected range count is not reached
// within a configured timeout, an error is returned.
func WaitForInitialSplits(db *client.DB) error {
	expectedRanges := ExpectedInitialRangeCount()
	return util.RetryForDuration(initialSplitsTimeout, func() error {
		// Scan all keys in the Meta2Prefix; we only need a count.
		rows, err := db.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return err
		}
		if a, e := len(rows), expectedRanges; a != e {
			return errors.Errorf("had %d ranges at startup, expected %d", a, e)
		}
		return nil
	})
}
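In tests this is typically called right after starting a server, before asserting anything about range counts. A minimal hedged sketch (assuming db comes from a freshly started test server):

func waitForSplitsOrFail(t *testing.T, db *client.DB) {
	// Hypothetical helper: fail the test if the initial splits do not appear
	// within the configured timeout.
	if err := WaitForInitialSplits(db); err != nil {
		t.Fatal(err)
	}
}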
Example #8
func (hv *historyVerifier) runTxn(txnIdx int, priority int32,
	isolation enginepb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T) error {
	var retry int
	txnName := fmt.Sprintf("txn %d", txnIdx+1)
	cmdIdx := -1

	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		// If this is the 2nd attempt and a retry wasn't expected, return a
		// retry error, which results in further histories being enumerated.
		if retry++; retry > 1 {
			if !cmds[cmdIdx].expRetry {
				// Propagate retry error to history execution to enumerate all
				// histories where this txn retries at this command.
				return &retryError{txnIdx: txnIdx, cmdIdx: cmdIdx}
			}
			// We're expecting a retry, so just send nil down the done channel.
			cmds[cmdIdx].done(nil)
		}

		txn.SetDebugName(txnName, 0)
		if isolation == enginepb.SNAPSHOT {
			if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {
				return err
			}
		}
		txn.InternalSetPriority(priority)

		env := map[string]int64{}
		for cmdIdx+1 < len(cmds) {
			cmdIdx++
			cmds[cmdIdx].env = env
			_, err := hv.runCmd(txn, txnIdx, retry, cmds[cmdIdx], t)
			if err != nil {
				if log.V(1) {
					log.Infof(context.Background(), "%s: failed running %s: %s", txnName, cmds[cmdIdx], err)
				}
				return err
			}
		}
		return nil
	})
	if err != nil {
		for _, c := range cmds[cmdIdx:] {
			c.done(err)
		}
	}
	return err
}
Example #9
func (hv *historyVerifier) runCmds(cmds []*cmd, db *client.DB, t *testing.T) (string, map[string]int64, error) {
	var strs []string
	env := map[string]int64{}
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		for _, c := range cmds {
			c.historyIdx = hv.idx
			c.env = env
			c.init(nil)
			fmtStr, err := c.execute(txn, t)
			if err != nil {
				return err
			}
			strs = append(strs, fmt.Sprintf(fmtStr, 0, 0))
		}
		return nil
	})
	return strings.Join(strs, " "), env, err
}
Example #10
// purgeOldLeases refreshes the leases on a table. Unused leases older than
// minVersion will be released.
// If deleted is set, minVersion is ignored; no lease is acquired and all
// existing unused leases are released. The table is further marked for
// deletion, which will cause existing in-use leases to be eagerly released once
// they're not in use any more.
// If t has no active leases, nothing is done.
func (t *tableState) purgeOldLeases(
	db *client.DB, deleted bool, minVersion sqlbase.DescriptorVersion, store LeaseStore,
) error {
	t.mu.Lock()
	empty := len(t.active.data) == 0
	t.mu.Unlock()
	if empty {
		// We don't currently have a lease on this table, so no need to refresh
		// anything.
		return nil
	}

	// Acquire and release a lease on the table at a version >= minVersion.
	var lease *LeaseState
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		if !deleted {
			lease, err = t.acquire(txn, minVersion, store)
			if err == errTableDeleted {
				deleted = true
			}
		}
		if err == nil || deleted {
			t.mu.Lock()
			defer t.mu.Unlock()
			var toRelease []*LeaseState
			if deleted {
				t.deleted = true
			}
			toRelease = append([]*LeaseState(nil), t.active.data...)

			t.releaseLeasesIfNotActive(toRelease, store)
			return nil
		}
		return err
	})
	if err != nil {
		return err
	}
	if lease == nil {
		return nil
	}
	return t.release(lease, store)
}
Example #11
// startTestWriter creates a writer which initiates a sequence of
// transactions, each of which writes up to 10 times to random keys with
// random values. If not nil, txnChannel is written to non-blockingly
// every time a new transaction starts.
func startTestWriter(db *client.DB, i int64, valBytes int32, wg *sync.WaitGroup, retries *int32,
	txnChannel chan struct{}, done <-chan struct{}, t *testing.T) {
	src := rand.New(rand.NewSource(i))
	defer func() {
		if wg != nil {
			wg.Done()
		}
	}()

	for j := 0; ; j++ {
		select {
		case <-done:
			return
		default:
			first := true
			err := db.Txn(func(txn *client.Txn) error {
				if first && txnChannel != nil {
					select {
					case txnChannel <- struct{}{}:
					default:
					}
				} else if !first && retries != nil {
					atomic.AddInt32(retries, 1)
				}
				first = false
				for j := 0; j <= int(src.Int31n(10)); j++ {
					key := randutil.RandBytes(src, 10)
					val := randutil.RandBytes(src, int(src.Int31n(valBytes)))
					if err := txn.Put(key, val); err != nil {
						log.Infof("experienced an error in routine %d: %s", i, err)
						return err
					}
				}
				return nil
			})
			if err != nil {
				t.Error(err)
			} else {
				time.Sleep(1 * time.Millisecond)
			}
		}
	}
}
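A hedged sketch of how the writer might be driven from a test; the two-writer count, the value size, and the sleep duration are assumptions, while the parameters mirror the signature above:

func runTwoTestWriters(t *testing.T, db *client.DB) {
	var wg sync.WaitGroup
	var retries int32
	txnChannel := make(chan struct{}, 100)
	done := make(chan struct{})
	for i := int64(0); i < 2; i++ {
		wg.Add(1)
		go startTestWriter(db, i, 1<<10, &wg, &retries, txnChannel, done, t)
	}
	// Let the writers run briefly, then ask them to stop and wait for exit.
	time.Sleep(50 * time.Millisecond)
	close(done)
	wg.Wait()
	t.Logf("observed %d retries", atomic.LoadInt32(&retries))
}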
Example #12
// GetTableDescriptor retrieves a table descriptor directly from the KV layer.
func GetTableDescriptor(kvDB *client.DB, database string, table string) *TableDescriptor {
	dbNameKey := MakeNameMetadataKey(keys.RootNamespaceID, database)
	gr, err := kvDB.Get(dbNameKey)
	if err != nil {
		panic(err)
	}
	if !gr.Exists() {
		panic("database missing")
	}
	dbDescID := ID(gr.ValueInt())

	tableNameKey := MakeNameMetadataKey(dbDescID, table)
	gr, err = kvDB.Get(tableNameKey)
	if err != nil {
		panic(err)
	}
	if !gr.Exists() {
		panic("table missing")
	}

	descKey := MakeDescMetadataKey(ID(gr.ValueInt()))
	desc := &Descriptor{}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		panic("proto missing")
	}
	return desc.GetTable()
}
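Example #15 below calls this helper as sqlbase.GetTableDescriptor(kvDB, "t", "test"). A hedged stand-alone sketch from inside the same package (the surrounding helper is hypothetical):

func lookupTestTableID(kvDB *client.DB) ID {
	// Assumes a database "t" with a table "test" was already created through
	// SQL, as in the schema-change example below.
	tableDesc := GetTableDescriptor(kvDB, "t", "test")
	return tableDesc.ID
}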
Example #13
// pushTxn attempts to abort the txn via push.
func pushTxn(db *client.DB, now hlc.Timestamp, txn *roachpb.Transaction,
	typ roachpb.PushTxnType) {

	// Attempt to push the transaction which created the intent.
	pushArgs := &roachpb.PushTxnRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Now:       now,
		PusherTxn: roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Priority: math.MaxInt32}},
		PusheeTxn: txn.TxnMeta,
		PushType:  typ,
	}
	b := &client.Batch{}
	b.AddRawRequest(pushArgs)
	if err := db.Run(b); err != nil {
		log.Warningf(context.TODO(), "push of txn %s failed: %s", txn, err)
		return
	}
	br := b.RawResponse()
	// Update the supplied txn on successful push.
	*txn = br.Responses[0].GetInner().(*roachpb.PushTxnResponse).PusheeTxn
}
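A hedged usage sketch; the clock and the conflicting transaction are assumed to come from the caller, and roachpb.PUSH_ABORT is the push type used when the goal is to abort the pushee:

func abortConflictingTxn(db *client.DB, clock *hlc.Clock, conflict *roachpb.Transaction) {
	// Hypothetical caller: try to abort the transaction that owns an intent
	// we ran into.
	pushTxn(db, clock.Now(), conflict, roachpb.PUSH_ABORT)
}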
Example #14
// concurrentIncrements starts two Goroutines in parallel, both of which
// read the integer stored at the other's key and add it onto their own.
// It is checked that the outcome is serializable, i.e. exactly one of the
// two Goroutines (the later writer) sees the previous write by the other.
func concurrentIncrements(db *client.DB, t *testing.T) {
	// wgStart waits for all transactions to line up, wgEnd has the main
	// function wait for them to finish.
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(2 + 1)
	wgEnd.Add(2)

	for i := 0; i < 2; i++ {
		go func(i int) {
			// Read the other key, write key i.
			readKey := []byte(fmt.Sprintf(testUser+"/value-%d", (i+1)%2))
			writeKey := []byte(fmt.Sprintf(testUser+"/value-%d", i))
			defer wgEnd.Done()
			wgStart.Done()
			// Wait until the other goroutines are running.
			wgStart.Wait()

			if err := db.Txn(func(txn *client.Txn) error {
				txn.SetDebugName(fmt.Sprintf("test-%d", i), 0)

				// Retrieve the other key.
				gr, err := txn.Get(readKey)
				if err != nil {
					return err
				}

				otherValue := int64(0)
				if gr.Value != nil {
					otherValue = gr.ValueInt()
				}

				_, err = txn.Inc(writeKey, 1+otherValue)
				return err
			}); err != nil {
				t.Error(err)
			}
		}(i)
	}

	// Kick the goroutines loose.
	wgStart.Done()
	// Wait for the goroutines to finish.
	wgEnd.Wait()
	// Verify that both keys contain something and, more importantly, that
	// one key actually contains the value of the first writer and not only
	// its own.
	total := int64(0)
	results := []int64(nil)
	for i := 0; i < 2; i++ {
		readKey := []byte(fmt.Sprintf(testUser+"/value-%d", i))
		gr, err := db.Get(readKey)
		if err != nil {
			t.Fatal(err)
		}
		if gr.Value == nil {
			t.Fatalf("unexpected empty key: %s=%v", readKey, gr.Value)
		}
		total += gr.ValueInt()
		results = append(results, gr.ValueInt())
	}

	// First writer should have 1, second one 2
	if total != 3 {
		t.Fatalf("got unserializable values %v", results)
	}
}
Example #15
// runSchemaChangeWithOperations runs a schema change and, as soon as the
// schema change starts executing its backfill, runs some OLTP operations in
// parallel.
func runSchemaChangeWithOperations(
	t *testing.T,
	sqlDB *gosql.DB,
	kvDB *client.DB,
	schemaChange string,
	maxValue int,
	keyMultiple int,
	backfillNotification chan bool,
) {
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Run the schema change in a separate goroutine.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		start := timeutil.Now()
		// Start schema change that eventually runs a backfill.
		if _, err := sqlDB.Exec(schemaChange); err != nil {
			t.Error(err)
		}
		t.Logf("schema change %s took %v", schemaChange, timeutil.Since(start))
		wg.Done()
	}()

	// Wait until the schema change backfill starts.
	<-backfillNotification

	// Run a variety of operations during the backfill.

	// Grabbing a schema change lease on the table will fail, disallowing
	// another schema change from being simultaneously executed.
	sc := csql.NewSchemaChangerForTesting(tableDesc.ID, 0, 0, *kvDB, nil)
	if l, err := sc.AcquireLease(); err == nil {
		t.Fatalf("schema change lease acquisition on table %d succeeded: %v", tableDesc.ID, l)
	}

	// Update some rows.
	var updatedKeys []int
	for i := 0; i < 10; i++ {
		k := rand.Intn(maxValue)
		v := maxValue + i + 1
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = $2 WHERE k = $1`, k, v); err != nil {
			t.Fatal(err)
		}
		updatedKeys = append(updatedKeys, k)
	}

	// Update the modified rows back to their original values.
	for _, k := range updatedKeys {
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = $2 WHERE k = $1`, k, maxValue-k); err != nil {
			t.Fatal(err)
		}
	}

	// Delete some rows.
	deleteStartKey := rand.Intn(maxValue - 10)
	for i := 0; i < 10; i++ {
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, deleteStartKey+i); err != nil {
			t.Fatal(err)
		}
	}
	// Reinsert deleted rows.
	for i := 0; i < 10; i++ {
		k := deleteStartKey + i
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, k, maxValue-k); err != nil {
			t.Fatal(err)
		}
	}

	// Insert some new rows.
	numInserts := 10
	for i := 0; i < numInserts; i++ {
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, maxValue+i+1, maxValue+i+1); err != nil {
			t.Fatal(err)
		}
	}

	wg.Wait() // for schema change to complete.

	// Verify the number of keys left behind in the table to validate schema
	// change operations.
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := keyMultiple * (maxValue + numInserts + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Delete the rows inserted.
	for i := 0; i < numInserts; i++ {
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, maxValue+i+1); err != nil {
			t.Fatal(err)
		}
	}
}