Example #1
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		inner := union.GetInner()
		if _, ok := inner.(*roachpb.NoopRequest); ok {
			continue
		}
		h := inner.Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		eAddr, err := keys.AddrUpperBound(h.EndKey)
		if err != nil {
			return nil, err
		}
		if len(eAddr) == 0 {
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k, nil
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate: the right-most end key
		// of any request lying entirely below k.
		if candidate.Less(eAddr) {
			candidate = eAddr
		}
	}
	return candidate, nil
}
Example #2
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func next(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMax
	for _, union := range ba.Requests {
		inner := union.GetInner()
		if _, ok := inner.(*roachpb.NoopRequest); ok {
			continue
		}
		h := inner.Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		if addr.Less(k) {
			eAddr, err := keys.AddrUpperBound(h.EndKey)
			if err != nil {
				return nil, err
			}
			if k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k, nil
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate, nil
}
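
Taken together, prev and next bracket a batch against a key k: prev finds how far below k the batch still reaches, next how far above. Below is a minimal, self-contained sketch of both on plain byte-slice keys; the span type, sentinel values, and sample data are illustrative stand-ins for the roachpb types, and bytes.Compare stands in for RKey.Less.

package main

import (
	"bytes"
	"fmt"
)

// span is a hypothetical stand-in for a request's key range; endKey is nil
// for point requests.
type span struct{ key, endKey []byte }

// end returns the exclusive upper bound of s, treating a point request at
// key as the one-key span [key, key+"\x00"), as in prev above.
func (s span) end() []byte {
	if len(s.endKey) > 0 {
		return s.endKey
	}
	return append(append([]byte{}, s.key...), 0)
}

// prevBoundary mirrors prev above: the right-most end key among requests
// lying entirely below k, or k itself if a request contains k.
func prevBoundary(spans []span, k []byte) []byte {
	candidate := []byte{} // stands in for roachpb.RKeyMin
	for _, s := range spans {
		if e := s.end(); bytes.Compare(e, k) >= 0 {
			if bytes.Compare(k, s.key) >= 0 {
				return k // request contains k; can't go lower
			}
			continue // request lies entirely at or above k
		} else if bytes.Compare(candidate, e) < 0 {
			candidate = e
		}
	}
	return candidate
}

// nextBoundary mirrors next above: the left-most start key among requests
// lying entirely at or above k, or k itself if a request straddles k.
func nextBoundary(spans []span, k []byte) []byte {
	candidate := []byte("\xff\xff") // stands in for roachpb.RKeyMax
	for _, s := range spans {
		if bytes.Compare(s.key, k) < 0 {
			if bytes.Compare(k, s.end()) < 0 {
				return k // starts below k but continues beyond it
			}
			continue // affects only keys below k
		}
		if bytes.Compare(s.key, candidate) < 0 {
			candidate = s.key
		}
	}
	return candidate
}

func main() {
	spans := []span{
		{key: []byte("b"), endKey: []byte("d")},
		{key: []byte("p"), endKey: []byte("q")},
	}
	fmt.Printf("%q\n", prevBoundary(spans, []byte("m"))) // "d": [b,d) ends below m
	fmt.Printf("%q\n", nextBoundary(spans, []byte("m"))) // "p": [p,q) starts above m
	fmt.Printf("%q\n", nextBoundary(spans, []byte("c"))) // "c": [b,d) straddles c
}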
Example #3
func metaKey(key roachpb.RKey) []byte {
	rk, err := keys.Addr(keys.RangeMetaKey(key))
	if err != nil {
		panic(err)
	}
	return rk
}
Example #4
func TestKeyAddress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		key roachpb.Key
	}{
		{MakeNameMetadataKey(0, "BAR")},
		{MakeNameMetadataKey(1, "BAR")},
		{MakeNameMetadataKey(1, "foo")},
		{MakeNameMetadataKey(2, "foo")},
		{MakeDescMetadataKey(123)},
		{MakeDescMetadataKey(124)},
	}
	var lastKey roachpb.Key
	for i, test := range testCases {
		resultAddr, err := keys.Addr(test.key)
		if err != nil {
			t.Fatal(err)
		}
		result := resultAddr.AsRawKey()
		if result.Compare(lastKey) <= 0 {
			t.Errorf("%d: key address %q is <= %q", i, result, lastKey)
		}
		lastKey = result
	}
}
Example #5
// Seek positions the iterator on the start of a span (span.Key or span.EndKey,
// depending on scanDir). Note that span.EndKey is exclusive, regardless of
// scanDir.
// After calling this, ReplicaInfo() will return information about the range
// containing the start key of the span (or the end key, if the direction is
// Descending).
// NeedAnother() will return true until the iterator is positioned on or after
// the end of the span.
// Possible errors encountered should be checked for with Valid().
//
// Seek can be called repeatedly on the same iterator. To make optimal use of
// caches, Seek()s should be performed on spans sorted according to the
// scanDir (if Descending, then the span with the highest keys should be
// Seek()ed first).
//
// scanDir changes the direction in which Next() will advance the iterator.
func (it *SpanResolverIterator) Seek(
	ctx context.Context, span roachpb.Span, scanDir kv.ScanDirection,
) {
	var key, endKey roachpb.RKey
	var err error
	if key, err = keys.Addr(span.Key); err != nil {
		it.err = err
		return
	}
	if endKey, err = keys.Addr(span.EndKey); err != nil {
		it.err = err
		return
	}
	oldSpan := it.curSpan
	oldDir := it.dir
	it.curSpan = roachpb.RSpan{
		Key:    key,
		EndKey: endKey,
	}
	it.dir = scanDir

	var seekKey roachpb.RKey
	if scanDir == kv.Ascending {
		seekKey = it.curSpan.Key
	} else {
		seekKey = it.curSpan.EndKey
	}

	// Check if the start of the span falls within the descriptor on which we're
	// already positioned. If so, and if the direction also corresponds, there's
	// no need to change the underlying iterator's state.
	if it.dir == oldDir && it.it.Valid() {
		if it.dir == kv.Ascending && oldSpan.ContainsKey(seekKey) {
			return
		}
		if it.dir == kv.Descending && oldSpan.ContainsExclusiveEndKey(seekKey) {
			return
		}
	}
	it.it.Seek(ctx, seekKey, scanDir)
}
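
The Seek/Valid/NeedAnother protocol the comment prescribes looks like the loop below. This is a hedged sketch: visitRanges is a hypothetical helper that assumes the surrounding package's imports, but every iterator method it calls (Seek, Valid, Error, Desc, NeedAnother, Next) is used with exactly these signatures in the partitionSpans example later in this listing (Example #12).

// Hypothetical helper sketching the iteration protocol described above.
func visitRanges(ctx context.Context, it *SpanResolverIterator, span roachpb.Span) error {
	for it.Seek(ctx, span, kv.Ascending); ; it.Next(ctx) {
		if !it.Valid() {
			return it.Error() // errors from Seek or Next surface here
		}
		desc := it.Desc()
		fmt.Printf("range [%s, %s)\n", desc.StartKey, desc.EndKey)
		if !it.NeedAnother() {
			return nil // positioned on or after the end of the span
		}
	}
}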
Example #6
// SplitRange splits the range containing splitKey.
// The right range created by the split starts at the split key and extends to the
// original range's end key.
// Returns the new descriptors of the left and right ranges.
//
// splitKey must correspond to a SQL table key (it must end with a family ID /
// col ID).
func (ts *TestServer) SplitRange(
	splitKey roachpb.Key,
) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) {
	splitRKey, err := keys.Addr(splitKey)
	if err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
	}
	origRangeDesc, err := ts.LookupRange(splitKey)
	if err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
	}
	if origRangeDesc.StartKey.Equal(splitRKey) {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Errorf(
				"cannot split range %+v at start key %q", origRangeDesc, splitKey)
	}
	splitReq := roachpb.AdminSplitRequest{
		Span: roachpb.Span{
			Key: splitKey,
		},
		SplitKey: splitKey,
	}
	_, pErr := client.SendWrapped(context.Background(), ts.DistSender(), &splitReq)
	if pErr != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Errorf(
				"%q: split unexpected error: %s", splitReq.SplitKey, pErr)
	}

	var leftRangeDesc, rightRangeDesc roachpb.RangeDescriptor
	if err := ts.DB().GetProto(context.TODO(),
		keys.RangeDescriptorKey(origRangeDesc.StartKey), &leftRangeDesc); err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Wrap(err, "could not look up left-hand side descriptor")
	}
	// The split point might not be exactly the one we requested (it can be
	// adjusted slightly so we don't split in the middle of SQL rows). Update it
	// to the real point.
	splitRKey = leftRangeDesc.EndKey
	if err := ts.DB().GetProto(context.TODO(),
		keys.RangeDescriptorKey(splitRKey), &rightRangeDesc); err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Wrap(err, "could not look up right-hand side descriptor")
	}
	return leftRangeDesc, rightRangeDesc, nil
}
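
A hedged usage sketch follows: splitAtTableKey is a hypothetical helper, relying only on the SplitRange signature above and the RKey.Equal comparison it already uses.

// Hypothetical usage sketch: split, then sanity-check that the two
// resulting descriptors are adjacent at the (possibly adjusted) split point.
func splitAtTableKey(ts *TestServer, splitKey roachpb.Key) error {
	left, right, err := ts.SplitRange(splitKey)
	if err != nil {
		return err
	}
	if !left.EndKey.Equal(right.StartKey) {
		return errors.Errorf("split produced non-adjacent ranges %v and %v", left, right)
	}
	return nil
}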
Example #7
func runLsRanges(cmd *cobra.Command, args []string) error {
	if len(args) > 1 {
		return usageAndError(cmd)
	}

	var startKey roachpb.Key
	{
		k := roachpb.KeyMin.Next()
		if len(args) > 0 {
			k = roachpb.Key(args[0])
		}
		rk, err := keys.Addr(k)
		if err != nil {
			panic(err)
		}
		startKey = keys.RangeMetaKey(rk)
	}
	endKey := keys.Meta2Prefix.PrefixEnd()

	kvDB, stopper, err := MakeDBClient()
	if err != nil {
		return err
	}
	defer stopper.Stop()

	rows, err := kvDB.Scan(context.Background(), startKey, endKey, maxResults)
	if err != nil {
		return err
	}

	for _, row := range rows {
		desc := &roachpb.RangeDescriptor{}
		if err := row.ValueProto(desc); err != nil {
			return errors.Wrapf(err, "unable to unmarshal range descriptor at %s", row.Key)
		}
		fmt.Printf("%s-%s [%d]\n", desc.StartKey, desc.EndKey, desc.RangeID)
		for i, replica := range desc.Replicas {
			fmt.Printf("\t%d: node-id=%d store-id=%d\n",
				i, replica.NodeID, replica.StoreID)
		}
	}
	fmt.Printf("%d result(s)\n", len(rows))
	return nil
}
Example #8
// checkEndTransactionTrigger verifies that an EndTransactionRequest
// that includes intents for the SystemDB keys sets the proper trigger.
func checkEndTransactionTrigger(args storagebase.FilterArgs) *roachpb.Error {
	req, ok := args.Req.(*roachpb.EndTransactionRequest)
	if !ok {
		return nil
	}

	if !req.Commit {
		// This is a rollback: skip trigger verification.
		return nil
	}

	modifiedSpanTrigger := req.InternalCommitTrigger.GetModifiedSpanTrigger()
	modifiedSystemConfigSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemConfigSpan

	var hasSystemKey bool
	for _, span := range req.IntentSpans {
		keyAddr, err := keys.Addr(span.Key)
		if err != nil {
			return roachpb.NewError(err)
		}
		if bytes.Compare(keyAddr, keys.SystemConfigSpan.Key) >= 0 &&
			bytes.Compare(keyAddr, keys.SystemConfigSpan.EndKey) < 0 {
			hasSystemKey = true
			break
		}
	}
	// If the transaction in question has intents in the system span, then
	// modifiedSystemConfigSpan should always be true. However, it is possible
	// for modifiedSystemConfigSpan to be set, even though no system keys are
	// present. This can occur with certain conditional DDL statements (e.g.
	// "CREATE TABLE IF NOT EXISTS"), which set the SystemConfigTrigger
	// aggressively but may not actually end up changing the system DB depending
	// on the current state.
	// For more information, see the related comment at the beginning of
	// planner.makePlan().
	if hasSystemKey && !modifiedSystemConfigSpan {
		return roachpb.NewError(errors.Errorf("EndTransaction hasSystemKey=%t, but hasSystemConfigTrigger=%t",
			hasSystemKey, modifiedSystemConfigSpan))
	}

	return nil
}
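
This filter plugs into the same StoreTestingKnobs hook used by the lease test later in this listing (Example #14). A wiring sketch, with the test name and body hypothetical:

// Hypothetical wiring sketch, mirroring the knob setup in Example #14.
func TestSystemConfigTriggerIsSet(t *testing.T) {
	defer leaktest.AfterTest(t)()
	srv, _, _ := serverutils.StartServer(t, base.TestServerArgs{
		Knobs: base.TestingKnobs{
			Store: &storage.StoreTestingKnobs{
				TestingCommandFilter: checkEndTransactionTrigger,
			},
		},
	})
	s := srv.(*server.TestServer)
	defer s.Stopper().Stop()
	// ... execute DDL here; the filter now vets every EndTransaction commit.
}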
Example #9
// truncate restricts all requests in the batch to the given key span and
// returns a new BatchRequest. Requests left without any keys to operate on
// are replaced by NoopRequests. The number of non-noop requests remaining
// after truncation is also returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, errors.Errorf("%T is not a range command, but EndKey is set", args)
			}
			keyAddr, err := keys.Addr(header.Key)
			if err != nil {
				return false, emptySpan, err
			}
			if !rs.ContainsKey(keyAddr) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		local := false
		keyAddr, err := keys.Addr(header.Key)
		if err != nil {
			return false, emptySpan, err
		}
		endKeyAddr, err := keys.Addr(header.EndKey)
		if err != nil {
			return false, emptySpan, err
		}
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !l || !r {
				return false, emptySpan, errors.Errorf("local key mixed with global key in range")
			}
			local = true
		}
		if keyAddr.Less(rs.Key) {
			// rs.Key can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.Key = rs.Key.AsRawKey()
			} else {
				// The local start key should be truncated to the boundary of local keys which
				// address to rs.Key.
				header.Key = keys.MakeRangeKeyPrefix(rs.Key)
			}
		}
		if !endKeyAddr.Less(rs.EndKey) {
			// rs.EndKey can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.EndKey = rs.EndKey.AsRawKey()
			} else {
				// The local end key should be truncated to the boundary of local keys which
				// address to rs.EndKey.
				header.EndKey = keys.MakeRangeKeyPrefix(rs.EndKey)
			}
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if header.Key.Compare(header.EndKey) >= 0 {
			return false, emptySpan, nil
		}
		return true, header, nil
	}

	var numNoop int
	truncBA := ba
	truncBA.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range ba.Requests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			union := roachpb.RequestUnion{}
			union.MustSetInner(&noopRequest)
			truncBA.Requests[pos] = union
		} else {
			// Keep the old request; if the header needs adjusting, we must copy.
			if inner := ba.Requests[pos].GetInner(); newHeader.Equal(inner.Header()) {
				truncBA.Requests[pos] = ba.Requests[pos]
			} else {
				shallowCopy := inner.ShallowCopy()
				shallowCopy.SetHeader(newHeader)
				union := &truncBA.Requests[pos] // avoid operating on copy
				union.MustSetInner(shallowCopy)
			}
		}
		if err != nil {
			return roachpb.BatchRequest{}, 0, err
		}
	}
	return truncBA, len(ba.Requests) - numNoop, nil
}
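
At its core, the range-spanning branch of truncateOne clamps an interval to the range span. Below is a self-contained toy version over byte-slice keys; all names are illustrative, and the local-key and point-request branches are omitted.

package main

import (
	"bytes"
	"fmt"
)

// clamp restricts [key, endKey) to [rsKey, rsEndKey), returning ok=false if
// nothing remains, mirroring the non-local range-spanning branch above.
func clamp(key, endKey, rsKey, rsEndKey []byte) (k, e []byte, ok bool) {
	k, e = key, endKey
	if bytes.Compare(k, rsKey) < 0 {
		k = rsKey
	}
	if bytes.Compare(e, rsEndKey) > 0 {
		e = rsEndKey
	}
	if bytes.Compare(k, e) >= 0 {
		return nil, nil, false // truncation left no keys in the range
	}
	return k, e, true
}

func main() {
	k, e, ok := clamp([]byte("a"), []byte("z"), []byte("f"), []byte("m"))
	fmt.Printf("%q %q %v\n", k, e, ok) // "f" "m" true
	_, _, ok = clamp([]byte("a"), []byte("c"), []byte("f"), []byte("m"))
	fmt.Println(ok) // false: span lies entirely outside the range
}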
Example #10
// TableStats is an endpoint that returns size and replication statistics
// for the specified table, aggregated from every node holding one of its
// replicas.
func (s *adminServer) TableStats(
	ctx context.Context, req *serverpb.TableStatsRequest,
) (*serverpb.TableStatsResponse, error) {
	escDBName := parser.Name(req.Database).String()
	if err := s.assertNotVirtualSchema(escDBName); err != nil {
		return nil, err
	}

	// Get table span.
	var tableSpan roachpb.Span
	iexecutor := sql.InternalExecutor{LeaseManager: s.server.leaseMgr}
	if err := s.server.db.Txn(ctx, func(txn *client.Txn) error {
		var err error
		tableSpan, err = iexecutor.GetTableSpan(s.getUser(req), txn, req.Database, req.Table)
		return err
	}); err != nil {
		return nil, s.serverError(err)
	}

	startKey, err := keys.Addr(tableSpan.Key)
	if err != nil {
		return nil, s.serverError(err)
	}
	endKey, err := keys.Addr(tableSpan.EndKey)
	if err != nil {
		return nil, s.serverError(err)
	}

	// Get current range descriptors for table. This is done by scanning over
	// meta2 keys for the range.
	rangeDescKVs, err := s.server.db.Scan(ctx, keys.RangeMetaKey(startKey), keys.RangeMetaKey(endKey), 0)
	if err != nil {
		return nil, s.serverError(err)
	}

	// Extract a list of node IDs from the response.
	nodeIDs := make(map[roachpb.NodeID]struct{})
	for _, kv := range rangeDescKVs {
		var rng roachpb.RangeDescriptor
		if err := kv.Value.GetProto(&rng); err != nil {
			return nil, s.serverError(err)
		}
		for _, repl := range rng.Replicas {
			nodeIDs[repl.NodeID] = struct{}{}
		}
	}

	// Construct TableStatsResponse by sending an RPC to every node involved.
	tableStatResponse := serverpb.TableStatsResponse{
		NodeCount: int64(len(nodeIDs)),
		// TODO(mrtracy): The "RangeCount" returned by TableStats is more
		// accurate than the "RangeCount" returned by TableDetails, because this
		// method always consistently queries the meta2 key range for the table;
		// in contrast, TableDetails uses a method on the DistSender, which
		// queries using a range metadata cache and thus may return stale data
		// for tables that are rapidly splitting. However, one potential
		// *advantage* of using the DistSender is that it will populate the
		// DistSender's range metadata cache in the case where meta2 information
		// for this table is not already present; the query used by TableStats
		// does not populate the DistSender cache. We should consider plumbing
		// TableStats' meta2 query through the DistSender so that it will share
		// the advantage of populating the cache (without the disadvantage of
		// potentially returning stale data).
		// See Github #5435 for some discussion.
		RangeCount: int64(len(rangeDescKVs)),
	}
	type nodeResponse struct {
		nodeID roachpb.NodeID
		resp   *serverpb.SpanStatsResponse
		err    error
	}

	// Send a SpanStats query to each node. Set a timeout on the context for
	// these queries.
	responses := make(chan nodeResponse)
	nodeCtx, cancel := context.WithTimeout(ctx, base.NetworkTimeout)
	defer cancel()
	for nodeID := range nodeIDs {
		nodeID := nodeID
		if err := s.server.stopper.RunAsyncTask(nodeCtx, func(ctx context.Context) {
			var spanResponse *serverpb.SpanStatsResponse
			client, err := s.server.status.dialNode(nodeID)
			if err == nil {
				req := serverpb.SpanStatsRequest{
					StartKey: startKey,
					EndKey:   endKey,
					NodeID:   nodeID.String(),
				}
				spanResponse, err = client.SpanStats(ctx, &req)
			}

			response := nodeResponse{
				nodeID: nodeID,
				resp:   spanResponse,
				err:    err,
			}
			select {
			case responses <- response:
				// Response processed.
			case <-ctx.Done():
				// Context completed, response no longer needed.
			}
		}); err != nil {
			return nil, err
		}
	}
	for remainingResponses := len(nodeIDs); remainingResponses > 0; remainingResponses-- {
		select {
		case resp := <-responses:
			// For nodes which returned an error, note that the node's data
			// is missing. For successful calls, aggregate statistics.
			if resp.err != nil {
				tableStatResponse.MissingNodes = append(
					tableStatResponse.MissingNodes,
					serverpb.TableStatsResponse_MissingNode{
						NodeID:       resp.nodeID.String(),
						ErrorMessage: resp.err.Error(),
					},
				)
			} else {
				tableStatResponse.Stats.Add(resp.resp.TotalStats)
				tableStatResponse.ReplicaCount += int64(resp.resp.RangeCount)
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return &tableStatResponse, nil
}
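
The per-node RPC fan-out above follows a reusable shape: a shared timeout, one goroutine per node, results funneled through a channel, per-node errors tolerated rather than fatal. A self-contained sketch of just that shape; result, fanOut, and the fake fetch are hypothetical stand-ins for the SpanStats machinery.

package main

import (
	"context"
	"fmt"
	"time"
)

type result struct {
	node int
	err  error
}

// fanOut queries every node under a shared timeout and collects one result
// per node, mirroring the pattern in TableStats above.
func fanOut(ctx context.Context, nodes []int, fetch func(context.Context, int) error) []result {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
	defer cancel()
	ch := make(chan result)
	for _, n := range nodes {
		n := n // capture the loop variable, as in the original
		go func() {
			r := result{node: n, err: fetch(ctx, n)}
			select {
			case ch <- r:
			case <-ctx.Done(): // receiver gave up; don't leak the goroutine
			}
		}()
	}
	var out []result
	for range nodes {
		select {
		case r := <-ch:
			out = append(out, r)
		case <-ctx.Done():
			return out
		}
	}
	return out
}

func main() {
	res := fanOut(context.Background(), []int{1, 2, 3}, func(ctx context.Context, n int) error {
		return nil // pretend RPC
	})
	fmt.Println(len(res)) // 3
}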
Example #11
// TableDetails is an endpoint that returns columns, indices, and other
// relevant details for the specified table.
func (s *adminServer) TableDetails(
	ctx context.Context, req *serverpb.TableDetailsRequest,
) (*serverpb.TableDetailsResponse, error) {
	args := sql.SessionArgs{User: s.getUser(req)}
	session := s.NewSessionForRPC(ctx, args)
	defer session.Finish(s.server.sqlExecutor)

	escDBName := parser.Name(req.Database).String()
	if err := s.assertNotVirtualSchema(escDBName); err != nil {
		return nil, err
	}

	// TODO(cdo): Use real placeholders for the table and database names when we've extended our SQL
	// grammar to allow that.
	escTableName := parser.Name(req.Table).String()
	escQualTable := fmt.Sprintf("%s.%s", escDBName, escTableName)
	query := fmt.Sprintf("SHOW COLUMNS FROM %s; SHOW INDEX FROM %s; SHOW GRANTS ON TABLE %s; SHOW CREATE TABLE %s;",
		escQualTable, escQualTable, escQualTable, escQualTable)
	r := s.server.sqlExecutor.ExecuteStatements(session, query, nil)
	defer r.Close()
	if err := s.firstNotFoundError(r.ResultList); err != nil {
		return nil, grpc.Errorf(codes.NotFound, "%s", err)
	}
	if err := s.checkQueryResults(r.ResultList, 4); err != nil {
		return nil, err
	}

	var resp serverpb.TableDetailsResponse

	// Marshal SHOW COLUMNS result.
	//
	// TODO(cdo): protobuf v3's default behavior for fields with zero values (e.g. empty strings)
	// is to suppress them. So, if protobuf field "foo" is an empty string, "foo" won't show
	// up in the marshalled JSON. I feel that this is counterintuitive, and this should be fixed
	// for our API.
	{
		const (
			fieldCol   = "Field" // column name
			typeCol    = "Type"
			nullCol    = "Null"
			defaultCol = "Default"
		)
		scanner := makeResultScanner(r.ResultList[0].Columns)
		for i, nRows := 0, r.ResultList[0].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[0].Rows.At(i)
			var col serverpb.TableDetailsResponse_Column
			if err := scanner.Scan(row, fieldCol, &col.Name); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, typeCol, &col.Type); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, nullCol, &col.Nullable); err != nil {
				return nil, err
			}
			isDefaultNull, err := scanner.IsNull(row, defaultCol)
			if err != nil {
				return nil, err
			}
			if !isDefaultNull {
				if err := scanner.Scan(row, defaultCol, &col.DefaultValue); err != nil {
					return nil, err
				}
			}
			resp.Columns = append(resp.Columns, col)
		}
	}

	// Marshal SHOW INDEX result.
	{
		const (
			nameCol      = "Name"
			uniqueCol    = "Unique"
			seqCol       = "Seq"
			columnCol    = "Column"
			directionCol = "Direction"
			storingCol   = "Storing"
			implicitCol  = "Implicit"
		)
		scanner := makeResultScanner(r.ResultList[1].Columns)
		for i, nRows := 0, r.ResultList[1].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[1].Rows.At(i)
			// Marshal index metadata.
			var index serverpb.TableDetailsResponse_Index
			if err := scanner.Scan(row, nameCol, &index.Name); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, uniqueCol, &index.Unique); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, seqCol, &index.Seq); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, columnCol, &index.Column); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, directionCol, &index.Direction); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, storingCol, &index.Storing); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, implicitCol, &index.Implicit); err != nil {
				return nil, err
			}
			resp.Indexes = append(resp.Indexes, index)
		}
	}

	// Marshal SHOW GRANTS result.
	{
		const (
			userCol       = "User"
			privilegesCol = "Privileges"
		)
		scanner := makeResultScanner(r.ResultList[2].Columns)
		for i, nRows := 0, r.ResultList[2].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[2].Rows.At(i)
			// Marshal grant, splitting comma-separated privileges into a proper slice.
			var grant serverpb.TableDetailsResponse_Grant
			var privileges string
			if err := scanner.Scan(row, userCol, &grant.User); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, privilegesCol, &privileges); err != nil {
				return nil, err
			}
			grant.Privileges = strings.Split(privileges, ",")
			resp.Grants = append(resp.Grants, grant)
		}
	}

	// Marshal SHOW CREATE TABLE result.
	{
		const createTableCol = "CreateTable"
		showResult := r.ResultList[3]
		if showResult.Rows.Len() != 1 {
			return nil, s.serverErrorf("CreateTable response not available.")
		}

		scanner := makeResultScanner(showResult.Columns)
		var createStmt string
		if err := scanner.Scan(showResult.Rows.At(0), createTableCol, &createStmt); err != nil {
			return nil, err
		}

		resp.CreateTableStatement = createStmt
	}

	// Get the number of ranges in the table. We get the key span for the table
	// data. Then, we count the number of ranges that make up that key span.
	{
		iexecutor := sql.InternalExecutor{LeaseManager: s.server.leaseMgr}
		var tableSpan roachpb.Span
		if err := s.server.db.Txn(ctx, func(txn *client.Txn) error {
			var err error
			tableSpan, err = iexecutor.GetTableSpan(
				s.getUser(req), txn, req.Database, req.Table,
			)
			return err
		}); err != nil {
			return nil, s.serverError(err)
		}
		tableRSpan := roachpb.RSpan{}
		var err error
		tableRSpan.Key, err = keys.Addr(tableSpan.Key)
		if err != nil {
			return nil, s.serverError(err)
		}
		tableRSpan.EndKey, err = keys.Addr(tableSpan.EndKey)
		if err != nil {
			return nil, s.serverError(err)
		}
		rangeCount, err := s.server.distSender.CountRanges(ctx, tableRSpan)
		if err != nil {
			return nil, s.serverError(err)
		}
		resp.RangeCount = rangeCount
	}

	// Query the descriptor ID and zone configuration for this table.
	{
		path, err := s.queryDescriptorIDPath(session, []string{req.Database, req.Table})
		if err != nil {
			return nil, s.serverError(err)
		}
		resp.DescriptorID = int64(path[2])

		id, zone, zoneExists, err := s.queryZonePath(session, path)
		if err != nil {
			return nil, s.serverError(err)
		}

		if !zoneExists {
			zone = config.DefaultZoneConfig()
		}
		resp.ZoneConfig = zone

		switch id {
		case path[1]:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_DATABASE
		case path[2]:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_TABLE
		default:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_CLUSTER
		}
	}

	return &resp, nil
}
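
The repeated scanner.Scan calls above follow a small pattern: resolve a column name to an index once, then copy values out of each row by name. A self-contained toy version; rowScanner and its methods are illustrative stand-ins for the real makeResultScanner, which works over typed SQL result columns rather than strings.

package main

import "fmt"

type rowScanner struct{ colIdx map[string]int }

// makeRowScanner indexes column names once so per-row lookups are cheap.
func makeRowScanner(cols []string) rowScanner {
	idx := make(map[string]int, len(cols))
	for i, c := range cols {
		idx[c] = i
	}
	return rowScanner{colIdx: idx}
}

// scan copies the named column of row into dst, erroring on unknown columns.
func (s rowScanner) scan(row []string, col string, dst *string) error {
	i, ok := s.colIdx[col]
	if !ok {
		return fmt.Errorf("column %q not found", col)
	}
	*dst = row[i]
	return nil
}

func main() {
	sc := makeRowScanner([]string{"Field", "Type", "Null"})
	var name string
	if err := sc.scan([]string{"id", "INT", "false"}, "Field", &name); err != nil {
		panic(err)
	}
	fmt.Println(name) // id
}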
Example #12
// partitionSpans finds out which nodes are owners for ranges touching the given
// spans, and splits the spans according to owning nodes. The result is a set of
// spanPartitions (one for each relevant node), which form a partitioning of the
// spans (i.e. they are non-overlapping and their union is exactly the original
// set of spans).
func (dsp *distSQLPlanner) partitionSpans(
	planCtx *planningCtx, spans roachpb.Spans,
) ([]spanPartition, error) {
	if len(spans) == 0 {
		panic("no spans")
	}
	ctx := planCtx.ctx
	splits := make([]spanPartition, 0, 1)
	// nodeMap maps a nodeID to an index inside the splits array.
	nodeMap := make(map[roachpb.NodeID]int)
	it := planCtx.spanIter
	for _, span := range spans {
		var rspan roachpb.RSpan
		var err error
		if rspan.Key, err = keys.Addr(span.Key); err != nil {
			return nil, err
		}
		if rspan.EndKey, err = keys.Addr(span.EndKey); err != nil {
			return nil, err
		}

		var lastNodeID roachpb.NodeID
		for it.Seek(ctx, span, kv.Ascending); ; it.Next(ctx) {
			if !it.Valid() {
				return nil, it.Error()
			}
			replInfo, err := it.ReplicaInfo(ctx)
			if err != nil {
				return nil, err
			}
			desc := it.Desc()

			var trimmedSpan roachpb.Span
			if rspan.Key.Less(desc.StartKey) {
				trimmedSpan.Key = desc.StartKey.AsRawKey()
			} else {
				trimmedSpan.Key = span.Key
			}
			if desc.EndKey.Less(rspan.EndKey) {
				trimmedSpan.EndKey = desc.EndKey.AsRawKey()
			} else {
				trimmedSpan.EndKey = span.EndKey
			}

			nodeID := replInfo.NodeDesc.NodeID
			idx, ok := nodeMap[nodeID]
			if !ok {
				idx = len(splits)
				splits = append(splits, spanPartition{node: nodeID})
				nodeMap[nodeID] = idx
				if _, ok := planCtx.nodeAddresses[nodeID]; !ok {
					planCtx.nodeAddresses[nodeID] = replInfo.NodeDesc.Address.String()
				}
			}
			split := &splits[idx]

			if lastNodeID == nodeID {
				// Two consecutive ranges on the same node, merge the spans.
				if !split.spans[len(split.spans)-1].EndKey.Equal(trimmedSpan.Key) {
					log.Fatalf(ctx, "expected consecutive span pieces %v %v", split.spans, trimmedSpan)
				}
				split.spans[len(split.spans)-1].EndKey = trimmedSpan.EndKey
			} else {
				split.spans = append(split.spans, trimmedSpan)
			}

			lastNodeID = nodeID
			if !it.NeedAnother() {
				break
			}
		}
	}
	return splits, nil
}
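
The partitioning idea reduces to carving each span along range boundaries and grouping the trimmed pieces by owning node. A self-contained toy over integer intervals; all types are illustrative, and the merging of consecutive same-node pieces done above is omitted.

package main

import "fmt"

// rng models a range as the interval [start, end) owned by one node.
type rng struct{ start, end, owner int }

// partition trims span to each overlapping range and groups the pieces by
// owner, mirroring the trimmedSpan logic in partitionSpans above.
func partition(span [2]int, ranges []rng) map[int][][2]int {
	out := make(map[int][][2]int)
	for _, r := range ranges {
		lo, hi := span[0], span[1]
		if r.start > lo {
			lo = r.start
		}
		if r.end < hi {
			hi = r.end
		}
		if lo < hi { // the span overlaps this range; keep the overlap
			out[r.owner] = append(out[r.owner], [2]int{lo, hi})
		}
	}
	return out
}

func main() {
	ranges := []rng{{0, 10, 1}, {10, 20, 2}, {20, 30, 1}}
	fmt.Println(partition([2]int{5, 25}, ranges))
	// map[1:[[5 10] [20 25]] 2:[[10 20]]]
}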
Example #13
// TableStats is an endpoint that returns size and replication statistics
// for the specified table, aggregated from every node holding one of its
// replicas.
func (s *adminServer) TableStats(
	ctx context.Context, req *serverpb.TableStatsRequest,
) (*serverpb.TableStatsResponse, error) {
	// Get table span.
	var tableSpan roachpb.Span
	iexecutor := sql.InternalExecutor{LeaseManager: s.server.leaseMgr}
	if err := s.server.db.Txn(ctx, func(txn *client.Txn) error {
		var err error
		tableSpan, err = iexecutor.GetTableSpan(s.getUser(req), txn, req.Database, req.Table)
		return err
	}); err != nil {
		return nil, s.serverError(err)
	}

	startKey, err := keys.Addr(tableSpan.Key)
	if err != nil {
		return nil, s.serverError(err)
	}
	endKey, err := keys.Addr(tableSpan.EndKey)
	if err != nil {
		return nil, s.serverError(err)
	}

	// Get current range descriptors for table. This is done by scanning over
	// meta2 keys for the range.
	rangeDescKVs, err := s.server.db.Scan(ctx, keys.RangeMetaKey(startKey), keys.RangeMetaKey(endKey), 0)
	if err != nil {
		return nil, s.serverError(err)
	}

	// Extract a list of node IDs from the response.
	nodeIDs := make(map[roachpb.NodeID]struct{})
	for _, kv := range rangeDescKVs {
		var rng roachpb.RangeDescriptor
		if err := kv.Value.GetProto(&rng); err != nil {
			return nil, s.serverError(err)
		}
		for _, repl := range rng.Replicas {
			nodeIDs[repl.NodeID] = struct{}{}
		}
	}

	// Construct TableStatsResponse by sending an RPC to every node involved.
	tableStatResponse := serverpb.TableStatsResponse{
		NodeCount:  int64(len(nodeIDs)),
		RangeCount: int64(len(rangeDescKVs)),
	}
	type nodeResponse struct {
		nodeID roachpb.NodeID
		resp   *serverpb.SpanStatsResponse
		err    error
	}

	// Send a SpanStats query to each node. Set a timeout on the context for
	// these queries.
	responses := make(chan nodeResponse)
	nodeCtx, cancel := context.WithTimeout(ctx, base.NetworkTimeout)
	defer cancel()
	for nodeID := range nodeIDs {
		nodeID := nodeID
		if err := s.server.stopper.RunAsyncTask(nodeCtx, func(ctx context.Context) {
			var spanResponse *serverpb.SpanStatsResponse
			client, err := s.server.status.dialNode(nodeID)
			if err == nil {
				req := serverpb.SpanStatsRequest{
					StartKey: startKey,
					EndKey:   endKey,
					NodeID:   nodeID.String(),
				}
				spanResponse, err = client.SpanStats(ctx, &req)
			}

			response := nodeResponse{
				nodeID: nodeID,
				resp:   spanResponse,
				err:    err,
			}
			select {
			case responses <- response:
				// Response processed.
			case <-ctx.Done():
				// Context completed, response no longer needed.
			}
		}); err != nil {
			return nil, err
		}
	}
	for remainingResponses := len(nodeIDs); remainingResponses > 0; remainingResponses-- {
		select {
		case resp := <-responses:
			// For nodes which returned an error, note that the node's data
			// is missing. For successful calls, aggregate statistics.
			if resp.err != nil {
				tableStatResponse.MissingNodes = append(
					tableStatResponse.MissingNodes,
					serverpb.TableStatsResponse_MissingNode{
						NodeID:       resp.nodeID.String(),
						ErrorMessage: resp.err.Error(),
					},
				)
			} else {
				tableStatResponse.Stats.Add(resp.resp.TotalStats)
				tableStatResponse.ReplicaCount += int64(resp.resp.RangeCount)
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return &tableStatResponse, nil
}
Example #14
// Test that a lease extension (a RequestLeaseRequest that doesn't change the
// lease holder) is not blocked by ongoing reads.
// The test relies on two things:
// 1) Lease extensions, unlike lease transfers, do not set
// PostCommitTrigger.noConcurrentReads and thus are not blocked by reads.
// 2) Requests with the non-KV flag, such as RequestLeaseRequest, do not
// go through the command queue.
func TestLeaseExtensionNotBlockedByRead(t *testing.T) {
	defer leaktest.AfterTest(t)()
	readBlocked := make(chan struct{})
	cmdFilter := func(fArgs storagebase.FilterArgs) *roachpb.Error {
		if fArgs.Hdr.UserPriority == 42 {
			// Signal that the read is blocked.
			readBlocked <- struct{}{}
			// Wait for read to be unblocked.
			<-readBlocked
		}
		return nil
	}
	srv, _, _ := serverutils.StartServer(t,
		base.TestServerArgs{
			Knobs: base.TestingKnobs{
				Store: &storage.StoreTestingKnobs{
					TestingCommandFilter: cmdFilter,
				},
			},
		})
	s := srv.(*server.TestServer)
	defer s.Stopper().Stop()

	// Start a read and wait for it to block.
	key := roachpb.Key("a")
	errChan := make(chan error)
	go func() {
		getReq := roachpb.GetRequest{
			Span: roachpb.Span{
				Key: key,
			},
		}
		if _, pErr := client.SendWrappedWith(context.Background(), s.DistSender(),
			roachpb.Header{UserPriority: 42},
			&getReq); pErr != nil {
			errChan <- pErr.GoError()
		}
	}()

	select {
	case err := <-errChan:
		t.Fatal(err)
	case <-readBlocked:
		// Send the lease request.
		rKey, err := keys.Addr(key)
		if err != nil {
			t.Fatal(err)
		}
		_, repDesc, err := s.Stores().LookupReplica(rKey, nil)
		if err != nil {
			t.Fatal(err)
		}
		leaseReq := roachpb.RequestLeaseRequest{
			Span: roachpb.Span{
				Key: key,
			},
			Lease: roachpb.Lease{
				Start:       s.Clock().Now(),
				StartStasis: s.Clock().Now().Add(time.Second.Nanoseconds(), 0),
				Expiration:  s.Clock().Now().Add(2*time.Second.Nanoseconds(), 0),
				Replica:     repDesc,
			},
		}
		if _, pErr := client.SendWrapped(context.Background(), s.DistSender(), &leaseReq); pErr != nil {
			t.Fatal(pErr)
		}
		// Unblock the read.
		readBlocked <- struct{}{}
	}
}
Example #15
func meta(k roachpb.RKey) (roachpb.RKey, error) {
	return keys.Addr(keys.RangeMetaKey(k))
}