Code example #1
File: db.go Project: csdigi/cockroach
// StoreData writes the supplied time series data to the cockroach server.
// Stored data will be sampled at the supplied resolution.
func (db *DB) StoreData(r Resolution, data []tspb.TimeSeriesData) error {
	var kvs []roachpb.KeyValue

	// Process data collection: data is converted to internal format, and a key
	// is generated for each internal message.
	for _, d := range data {
		idatas, err := d.ToInternal(r.KeyDuration(), r.SampleDuration())
		if err != nil {
			return err
		}
		for _, idata := range idatas {
			var value roachpb.Value
			if err := value.SetProto(&idata); err != nil {
				return err
			}
			kvs = append(kvs, roachpb.KeyValue{
				Key:   MakeDataKey(d.Name, d.Source, r, idata.StartTimestampNanos),
				Value: value,
			})
		}
	}

	// Send the individual internal merge requests.
	b := client.Batch{}
	for _, kv := range kvs {
		b.AddRawRequest(&roachpb.MergeRequest{
			Span: roachpb.Span{
				Key: kv.Key,
			},
			Value: kv.Value,
		})
	}

	return db.db.Run(&b)
}
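
Not shown above is the caller's side of StoreData. As a minimal, hypothetical sketch within the same package (metric name, source, timestamps, and the storeExample helper are placeholders; Resolution10s is the constant used by the test helper in example #4), it could look like:

// storeExample is a hypothetical caller of StoreData: it builds one series
// with two datapoints and stores it at Resolution10s. All values are placeholders.
func storeExample(db *DB) error {
	data := []tspb.TimeSeriesData{
		{
			Name:   "test.random.metric",
			Source: "cpu01",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{TimestampNanos: 1000000000, Value: 42.0},
				{TimestampNanos: 11000000000, Value: 43.5},
			},
		},
	}
	return db.StoreData(Resolution10s, data)
}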
Code example #2
File: metadata.go Project: mrtracy/cockroach
// GetInitialValues returns the set of initial K/V values which should be added to
// a bootstrapping CockroachDB cluster in order to create the tables contained
// in the schema.
func (ms MetadataSchema) GetInitialValues() []roachpb.KeyValue {
	var ret []roachpb.KeyValue

	// Save the ID generator value, which will generate descriptor IDs for user
	// objects.
	value := roachpb.Value{}
	value.SetInt(int64(keys.MaxReservedDescID + 1))
	ret = append(ret, roachpb.KeyValue{
		Key:   keys.DescIDGenerator,
		Value: value,
	})

	// addDescriptor generates the needed KeyValue objects to install a
	// descriptor on a new cluster.
	addDescriptor := func(parentID ID, desc descriptorProto) {
		// Create name metadata key.
		value := roachpb.Value{}
		value.SetInt(int64(desc.GetID()))
		ret = append(ret, roachpb.KeyValue{
			Key:   MakeNameMetadataKey(parentID, desc.GetName()),
			Value: value,
		})

		// Create descriptor metadata key.
		value = roachpb.Value{}
		wrappedDesc := wrapDescriptor(desc)
		if err := value.SetProto(wrappedDesc); err != nil {
			log.Fatalf("could not marshal %v", desc)
		}
		ret = append(ret, roachpb.KeyValue{
			Key:   MakeDescMetadataKey(desc.GetID()),
			Value: value,
		})
	}

	// Generate initial values for system databases and tables, which have
	// static descriptors that were generated elsewhere.
	for _, sysObj := range ms.descs {
		addDescriptor(sysObj.parentID, sysObj.desc)
	}

	for _, tbl := range ms.tables {
		dbID := ID(keys.SystemDatabaseID)
		desc := createTableDescriptor(tbl.id, dbID, tbl.definition, tbl.privileges)
		addDescriptor(dbID, &desc)
	}

	// Other key/value generation that doesn't fit into databases and
	// tables. This can be used to add initial entries to a table.
	ret = append(ret, ms.otherKV...)

	// Sort returned key values; this is valuable because it matches the way the
	// objects would be sorted if read from the engine.
	sort.Sort(roachpb.KeyValueByKey(ret))
	return ret
}
Code example #3
File: system.go Project: mbertschler/cockroach
// GetInitialSystemValues returns a list of key/value pairs.
// They are written at cluster bootstrap time (see storage/node.go:BootstrapCluster).
func GetInitialSystemValues() []roachpb.KeyValue {
	systemData := []struct {
		parentID ID
		desc     descriptorProto
	}{
		{keys.RootNamespaceID, &SystemDB},
		{SystemDB.ID, &NamespaceTable},
		{SystemDB.ID, &DescriptorTable},
		{SystemDB.ID, &LeaseTable},
		{SystemDB.ID, &UsersTable},
		{SystemDB.ID, &ZonesTable},
	}

	// Initial kv pairs:
	// - ID generator
	// - 2 per table/database
	numEntries := 1 + len(systemData)*2
	ret := make([]roachpb.KeyValue, numEntries)
	i := 0

	// Descriptor ID generator.
	value := roachpb.Value{}
	value.SetInt(int64(keys.MaxReservedDescID + 1))
	ret[i] = roachpb.KeyValue{
		Key:   keys.DescIDGenerator,
		Value: value,
	}
	i++

	// System database and tables.
	for _, d := range systemData {
		value = roachpb.Value{}
		value.SetInt(int64(d.desc.GetID()))
		ret[i] = roachpb.KeyValue{
			Key:   MakeNameMetadataKey(d.parentID, d.desc.GetName()),
			Value: value,
		}
		i++

		value = roachpb.Value{}
		desc := wrapDescriptor(d.desc)
		if err := value.SetProto(desc); err != nil {
			log.Fatalf("could not marshal %v", desc)
		}
		ret[i] = roachpb.KeyValue{
			Key:   MakeDescMetadataKey(d.desc.GetID()),
			Value: value,
		}
		i++
	}

	return ret
}
Code example #4
func writeRandomTimeSeriesDataToRange(
	t testing.TB,
	store *storage.Store,
	rangeID roachpb.RangeID,
	keyPrefix []byte,
) (midpoint []byte) {
	src := rand.New(rand.NewSource(0))
	r := ts.Resolution10s
	for i := 0; i < 20; i++ {
		var data []tspb.TimeSeriesData
		for j := int64(0); j <= src.Int63n(5); j++ {
			d := tspb.TimeSeriesData{
				Name:   "test.random.metric",
				Source: "cpu01",
			}
			for k := int64(0); k <= src.Int63n(10); k++ {
				d.Datapoints = append(d.Datapoints, tspb.TimeSeriesDatapoint{
					TimestampNanos: src.Int63n(200) * r.KeyDuration(),
					Value:          src.Float64(),
				})
			}
			data = append(data, d)
		}
		for _, d := range data {
			idatas, err := d.ToInternal(r.KeyDuration(), r.SampleDuration())
			if err != nil {
				t.Fatal(err)
			}
			for _, idata := range idatas {
				var value roachpb.Value
				if err := value.SetProto(&idata); err != nil {
					t.Fatal(err)
				}
				mArgs := roachpb.MergeRequest{
					Span: roachpb.Span{
						Key: encoding.EncodeVarintAscending(keyPrefix, idata.StartTimestampNanos),
					},
					Value: value,
				}
				if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
					RangeID: rangeID,
				}, &mArgs); pErr != nil {
					t.Fatal(pErr)
				}
			}
		}
	}
	// Return approximate midway point (100 is midway between random timestamps in range [0,200)).
	midKey := append([]byte(nil), keyPrefix...)
	midKey = encoding.EncodeVarintAscending(midKey, 100*r.KeyDuration())
	return keys.MakeRowSentinelKey(midKey)
}
Code example #5
File: system.go Project: liugangnhm/cockroach
// Create the key/value pairs for the default zone config entry.
func createDefaultZoneConfig() []roachpb.KeyValue {
	var ret []roachpb.KeyValue
	value := roachpb.Value{}
	desc := config.DefaultZoneConfig()
	if err := value.SetProto(&desc); err != nil {
		log.Fatalf("could not marshal %v", desc)
	}
	ret = append(ret, roachpb.KeyValue{
		Key:   MakeZoneKey(keys.RootNamespaceID),
		Value: value,
	})
	return ret
}
Code example #6
// BenchmarkMVCCMergeTimeSeries computes performance of merging time series data.
func BenchmarkMVCCMergeTimeSeries(b *testing.B) {
	ts := &roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 0,
		SampleDurationNanos: 1000,
		Samples: []*roachpb.InternalTimeSeriesSample{
			{Offset: 0, Count: 1, Sum: 5.0},
		},
	}
	var value roachpb.Value
	if err := value.SetProto(ts); err != nil {
		b.Fatal(err)
	}
	runMVCCMerge(&value, 1024, b)
}
Code example #7
// append the given entries to the raft log. Takes the previous values of
// r.mu.lastIndex and r.mu.raftLogSize, and returns new values. We do this
// rather than modifying them directly because these modifications need to be
// atomic with the commit of the batch.
func (r *Replica) append(
	ctx context.Context,
	batch engine.ReadWriter,
	prevLastIndex uint64,
	prevRaftLogSize int64,
	entries []raftpb.Entry,
) (uint64, int64, error) {
	if len(entries) == 0 {
		return prevLastIndex, prevRaftLogSize, nil
	}
	var diff enginepb.MVCCStats
	var value roachpb.Value
	for i := range entries {
		ent := &entries[i]
		key := keys.RaftLogKey(r.RangeID, ent.Index)
		if err := value.SetProto(ent); err != nil {
			return 0, 0, err
		}
		value.InitChecksum(key)
		var err error
		if ent.Index > prevLastIndex {
			err = engine.MVCCBlindPut(ctx, batch, &diff, key, hlc.ZeroTimestamp, value, nil /* txn */)
		} else {
			err = engine.MVCCPut(ctx, batch, &diff, key, hlc.ZeroTimestamp, value, nil /* txn */)
		}
		if err != nil {
			return 0, 0, err
		}
	}

	// Delete any previously appended log entries which never committed.
	lastIndex := entries[len(entries)-1].Index
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(ctx, batch, &diff, keys.RaftLogKey(r.RangeID, i),
			hlc.ZeroTimestamp, nil /* txn */)
		if err != nil {
			return 0, 0, err
		}
	}

	if err := setLastIndex(ctx, batch, r.RangeID, lastIndex); err != nil {
		return 0, 0, err
	}

	raftLogSize := prevRaftLogSize + diff.SysBytes

	return lastIndex, raftLogSize, nil
}
Code example #8
File: db.go Project: nporsche/cockroach
// StoreData writes the supplied time series data to the cockroach server.
// Stored data will be sampled at the supplied resolution.
func (db *DB) StoreData(r Resolution, data []TimeSeriesData) error {
	var kvs []roachpb.KeyValue

	// Process data collection: data is converted to internal format, and a key
	// is generated for each internal message.
	for _, d := range data {
		idatas, err := d.ToInternal(r.KeyDuration(), r.SampleDuration())
		if err != nil {
			return err
		}
		for _, idata := range idatas {
			var value roachpb.Value
			if err := value.SetProto(idata); err != nil {
				return err
			}
			kvs = append(kvs, roachpb.KeyValue{
				Key:   MakeDataKey(d.Name, d.Source, r, idata.StartTimestampNanos),
				Value: value,
			})
		}
	}

	// Send the individual internal merge requests.
	// TODO(mrtracy): In the likely event that there are multiple values to
	// merge, they should be batched together instead of being called
	// individually. However, BatchRequest currently does not support
	// MergeRequest, probably because it cannot be part of a
	// transaction. Look into batching this.
	for _, kv := range kvs {
		// Note, this looks like a batch, but isn't a batch because we only add a
		// single request to it.
		b := &client.Batch{}
		b.InternalAddRequest(&roachpb.MergeRequest{
			Span: roachpb.Span{
				Key: kv.Key,
			},
			Value: kv.Value,
		})
		if err := db.db.Run(b); err != nil {
			return err
		}
	}

	return nil
}
Code example #9
File: merge.go Project: GitGoldie/cockroach
// MergeInternalTimeSeriesData exports the engine's C++ merge logic for
// InternalTimeSeriesData to higher level packages. This is intended primarily
// for consumption by high level testing of time series functionality.
func MergeInternalTimeSeriesData(
	sources ...roachpb.InternalTimeSeriesData,
) (roachpb.InternalTimeSeriesData, error) {
	// Wrap each proto in an inlined MVCC value, and marshal each wrapped value
	// to bytes. This is the format required by the engine.
	srcBytes := make([][]byte, 0, len(sources))
	for _, src := range sources {
		var val roachpb.Value
		if err := val.SetProto(&src); err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		bytes, err := protoutil.Marshal(&MVCCMetadata{
			RawBytes: val.RawBytes,
		})
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		srcBytes = append(srcBytes, bytes)
	}

	// Merge every element into a nil byte slice, one at a time.
	var (
		mergedBytes []byte
		err         error
	)
	for _, bytes := range srcBytes {
		mergedBytes, err = goMerge(mergedBytes, bytes)
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
	}

	// Unmarshal merged bytes and extract the time series value within.
	var meta MVCCMetadata
	if err := proto.Unmarshal(mergedBytes, &meta); err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	mergedTS, err := meta.Value().GetTimeseries()
	if err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	return mergedTS, nil
}
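
As a quick orientation for callers outside the engine package (in the style of example #10), a hypothetical merge of two slabs of the same series might look like the following; the sample values are placeholders:

// mergeExample is a hypothetical caller of engine.MergeInternalTimeSeriesData.
// It merges two slabs of the same series (struct layout as in examples #6 and #11).
func mergeExample() (roachpb.InternalTimeSeriesData, error) {
	first := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 0,
		SampleDurationNanos: 1000,
		Samples: []*roachpb.InternalTimeSeriesSample{
			{Offset: 0, Count: 1, Sum: 5.0},
		},
	}
	second := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 0,
		SampleDurationNanos: 1000,
		Samples: []*roachpb.InternalTimeSeriesSample{
			{Offset: 1, Count: 1, Sum: 10.0},
		},
	}
	return engine.MergeInternalTimeSeriesData(first, second)
}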
Code example #10
File: db_test.go Project: chzyer-dev/cockroach
func (tm *testModel) storeInModel(r Resolution, data TimeSeriesData) {
	// Note the source, used to construct keys for model queries.
	tm.seenSources[data.Source] = struct{}{}

	// Process and store data in the model.
	internalData, err := data.ToInternal(r.KeyDuration(), r.SampleDuration())
	if err != nil {
		tm.t.Fatalf("test could not convert time series to internal format: %s", err.Error())
	}

	for _, idata := range internalData {
		key := MakeDataKey(data.Name, data.Source, r, idata.StartTimestampNanos)
		keyStr := string(key)

		existing, ok := tm.modelData[keyStr]
		var newTs roachpb.InternalTimeSeriesData
		if ok {
			existingTs, err := existing.GetTimeseries()
			if err != nil {
				tm.t.Fatalf("test could not extract time series from existing model value: %s", err.Error())
			}
			newTs, err = engine.MergeInternalTimeSeriesData(existingTs, idata)
			if err != nil {
				tm.t.Fatalf("test could not merge time series into model value: %s", err.Error())
			}
		} else {
			newTs, err = engine.MergeInternalTimeSeriesData(idata)
			if err != nil {
				tm.t.Fatalf("test could not merge time series into model value: %s", err.Error())
			}
		}
		var val roachpb.Value
		if err := val.SetProto(&newTs); err != nil {
			tm.t.Fatal(err)
		}
		tm.modelData[keyStr] = val
	}
}
Code example #11
File: merge_test.go Project: mbertschler/cockroach
// timeSeries generates a simple InternalTimeSeriesData object which starts
// at the given timestamp and has samples of the given duration.
func timeSeries(start int64, duration int64, samples ...tsSample) []byte {
	ts := &roachpb.InternalTimeSeriesData{
		StartTimestampNanos: start,
		SampleDurationNanos: duration,
	}
	for _, sample := range samples {
		newSample := &roachpb.InternalTimeSeriesSample{
			Offset: sample.offset,
			Count:  sample.count,
			Sum:    sample.sum,
		}
		if sample.count > 1 {
			newSample.Max = proto.Float64(sample.max)
			newSample.Min = proto.Float64(sample.min)
		}
		ts.Samples = append(ts.Samples, newSample)
	}
	var v roachpb.Value
	if err := v.SetProto(ts); err != nil {
		panic(err)
	}
	return mustMarshal(&MVCCMetadata{Value: &v})
}
Code example #12
File: util.go Project: GitGoldie/cockroach
// marshalValue returns a roachpb.Value initialized from the source
// interface{}, returning an error if the types are not compatible.
func marshalValue(v interface{}) (roachpb.Value, error) {
	var r roachpb.Value

	// Handle a few common types via a type switch.
	switch t := v.(type) {
	case *roachpb.Value:
		return *t, nil

	case nil:
		return r, nil

	case bool:
		i := int64(0)
		if t {
			i = 1
		}
		r.SetInt(i)
		return r, nil

	case string:
		r.SetBytes([]byte(t))
		return r, nil

	case []byte:
		r.SetBytes(t)
		return r, nil

	case inf.Dec:
		err := r.SetDecimal(&t)
		return r, err

	case roachpb.Key:
		r.SetBytes([]byte(t))
		return r, nil

	case time.Time:
		r.SetTime(t)
		return r, nil

	case duration.Duration:
		err := r.SetDuration(t)
		return r, err

	case proto.Message:
		err := r.SetProto(t)
		return r, err
	}

	// Handle all of the Go primitive types besides struct and pointers. This
	// switch also handles types based on a primitive type (e.g. "type MyInt
	// int").
	switch v := reflect.ValueOf(v); v.Kind() {
	case reflect.Bool:
		i := int64(0)
		if v.Bool() {
			i = 1
		}
		r.SetInt(i)
		return r, nil

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		r.SetInt(v.Int())
		return r, nil

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		r.SetInt(int64(v.Uint()))
		return r, nil

	case reflect.Float32, reflect.Float64:
		r.SetFloat(v.Float())
		return r, nil

	case reflect.String:
		r.SetBytes([]byte(v.String()))
		return r, nil
	}

	return r, fmt.Errorf("unable to marshal %T: %v", v, v)
}
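
A hypothetical in-package illustration of a few inputs handled by the type switch above, plus two values that fall through to the reflect-based fallback; the values themselves are arbitrary:

// marshalValueExamples is a hypothetical helper that runs marshalValue over a
// few representative inputs to show which branch each one takes.
func marshalValueExamples() error {
	for _, v := range []interface{}{
		"stored as bytes",  // string case
		[]byte{0x01, 0x02}, // []byte case
		true,               // bool case
		int64(17),          // reflect.Int64 fallback
		3.14,               // reflect.Float64 fallback
	} {
		if _, err := marshalValue(v); err != nil {
			return err
		}
	}
	return nil
}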
Code example #13
File: metadata.go Project: kaustubhkurve/cockroach
// GetInitialValues returns the set of initial K/V values which should be added to
// a bootstrapping CockroachDB cluster in order to create the tables contained
// in the schema.
func (ms MetadataSchema) GetInitialValues() []roachpb.KeyValue {
	var ret []roachpb.KeyValue

	// Save the ID generator value, which will generate descriptor IDs for user
	// objects.
	value := roachpb.Value{}
	value.SetInt(int64(keys.MaxReservedDescID + 1))
	ret = append(ret, roachpb.KeyValue{
		Key:   keys.DescIDGenerator,
		Value: value,
	})

	// addDescriptor generates the needed KeyValue objects to install a
	// descriptor on a new cluster.
	addDescriptor := func(parentID ID, desc descriptorProto) {
		// Create name metadata key.
		value := roachpb.Value{}
		value.SetInt(int64(desc.GetID()))
		ret = append(ret, roachpb.KeyValue{
			Key:   MakeNameMetadataKey(parentID, desc.GetName()),
			Value: value,
		})

		// Create descriptor metadata key.
		value = roachpb.Value{}
		wrappedDesc := wrapDescriptor(desc)
		if err := value.SetProto(wrappedDesc); err != nil {
			log.Fatalf("could not marshal %v", desc)
		}
		ret = append(ret, roachpb.KeyValue{
			Key:   MakeDescMetadataKey(desc.GetID()),
			Value: value,
		})
	}

	// Generate initial values for system databases and tables, which have
	// static descriptors that were generated elsewhere.
	for _, sysObj := range ms.systemObjects {
		addDescriptor(sysObj.parentID, sysObj.desc)
	}

	// Descriptor IDs for non-system databases and objects will be generated
	// sequentially within the non-system reserved range.
	initialDescID := keys.MaxSystemDescID + 1
	nextID := func() ID {
		next := initialDescID
		initialDescID++
		return ID(next)
	}

	// Generate initial values for non-system metadata tables, which do not need
	// well-known IDs.
	for _, db := range ms.databases {
		dbID := nextID()
		addDescriptor(keys.RootNamespaceID, &DatabaseDescriptor{
			Name:       db.name,
			ID:         dbID,
			Privileges: db.privileges,
		})

		for _, tbl := range db.tables {
			desc := createTableDescriptor(nextID(), dbID, tbl.definition, tbl.privileges)
			addDescriptor(dbID, &desc)
		}
	}

	// Sort returned key values; this is valuable because it matches the way the
	// objects would be sorted if read from the engine.
	sort.Sort(roachpb.KeyValueByKey(ret))
	return ret
}
Code example #14
func TestValidateCrossTableReferences(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	tests := []struct {
		err        string
		desc       TableDescriptor
		referenced []TableDescriptor
	}{
		// Foreign keys
		{
			err: `invalid foreign key: missing table=52 index=2: descriptor not found`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID:         1,
					ForeignKey: ForeignKeyReference{Table: 52, Index: 2},
				},
			},
			referenced: nil,
		},
		{
			err: `invalid foreign key: missing table=baz index=2: index-id "2" does not exist`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID:         1,
					ForeignKey: ForeignKeyReference{Table: 52, Index: 2},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
			}},
		},
		{
			err: `missing fk back reference to foo.bar from baz.qux`,
			desc: TableDescriptor{
				ID:   51,
				Name: "foo",
				PrimaryIndex: IndexDescriptor{
					ID:         1,
					Name:       "bar",
					ForeignKey: ForeignKeyReference{Table: 52, Index: 2},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
				PrimaryIndex: IndexDescriptor{
					ID:   2,
					Name: "qux",
				},
			}},
		},
		{
			err: `invalid fk backreference table=52 index=2: descriptor not found`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID:           1,
					ReferencedBy: []ForeignKeyReference{{Table: 52, Index: 2}},
				},
			},
		},
		{
			err: `invalid fk backreference table=baz index=2: index-id "2" does not exist`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID:           1,
					ReferencedBy: []ForeignKeyReference{{Table: 52, Index: 2}},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
			}},
		},
		{
			err: `broken fk backward reference from foo.bar to baz.qux`,
			desc: TableDescriptor{
				ID:   51,
				Name: "foo",
				PrimaryIndex: IndexDescriptor{
					ID:           1,
					Name:         "bar",
					ReferencedBy: []ForeignKeyReference{{Table: 52, Index: 2}},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
				PrimaryIndex: IndexDescriptor{
					ID:   2,
					Name: "qux",
				},
			}},
		},

		// Interleaves
		{
			err: `invalid interleave: missing table=52 index=2: descriptor not found`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID: 1,
					Interleave: InterleaveDescriptor{Ancestors: []InterleaveDescriptor_Ancestor{
						{TableID: 52, IndexID: 2},
					}},
				},
			},
			referenced: nil,
		},
		{
			err: `invalid interleave: missing table=baz index=2: index-id "2" does not exist`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID: 1,
					Interleave: InterleaveDescriptor{Ancestors: []InterleaveDescriptor_Ancestor{
						{TableID: 52, IndexID: 2},
					}},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
			}},
		},
		{
			err: `missing interleave back reference to foo.bar from baz.qux`,
			desc: TableDescriptor{
				ID:   51,
				Name: "foo",
				PrimaryIndex: IndexDescriptor{
					ID:   1,
					Name: "bar",
					Interleave: InterleaveDescriptor{Ancestors: []InterleaveDescriptor_Ancestor{
						{TableID: 52, IndexID: 2},
					}},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
				PrimaryIndex: IndexDescriptor{
					ID:   2,
					Name: "qux",
				},
			}},
		},
		{
			err: `invalid interleave backreference table=52 index=2: descriptor not found`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID:            1,
					InterleavedBy: []ForeignKeyReference{{Table: 52, Index: 2}},
				},
			},
		},
		{
			err: `invalid interleave backreference table=baz index=2: index-id "2" does not exist`,
			desc: TableDescriptor{
				ID: 51,
				PrimaryIndex: IndexDescriptor{
					ID:            1,
					InterleavedBy: []ForeignKeyReference{{Table: 52, Index: 2}},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
			}},
		},
		{
			err: `broken interleave backward reference from foo.bar to baz.qux`,
			desc: TableDescriptor{
				ID:   51,
				Name: "foo",
				PrimaryIndex: IndexDescriptor{
					ID:            1,
					Name:          "bar",
					InterleavedBy: []ForeignKeyReference{{Table: 52, Index: 2}},
				},
			},
			referenced: []TableDescriptor{{
				ID:   52,
				Name: "baz",
				PrimaryIndex: IndexDescriptor{
					ID:   2,
					Name: "qux",
				},
			}},
		},
	}

	for i, test := range tests {
		for _, referencedDesc := range test.referenced {
			var v roachpb.Value
			desc := &Descriptor{Union: &Descriptor_Table{Table: &referencedDesc}}
			if err := v.SetProto(desc); err != nil {
				t.Fatal(err)
			}
			if err := kvDB.Put(MakeDescMetadataKey(referencedDesc.ID), &v); err != nil {
				t.Fatal(err)
			}
		}
		txn := client.NewTxn(context.Background(), *kvDB)
		if err := test.desc.validateCrossReferences(txn); err == nil {
			t.Errorf("%d: expected \"%s\", but found success: %+v", i, test.err, test.desc)
		} else if test.err != err.Error() {
			t.Errorf("%d: expected \"%s\", but found \"%s\"", i, test.err, err.Error())
		}
		for _, referencedDesc := range test.referenced {
			if err := kvDB.Del(MakeDescMetadataKey(referencedDesc.ID)); err != nil {
				t.Fatal(err)
			}
		}
	}
}