Example #1
func mustMarshal(m proto.Message) []byte {
	b, err := protoutil.Marshal(m)
	if err != nil {
		panic(err)
	}
	return b
}
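A minimal usage sketch (editor's illustration, not from the source; roachpb.RangeDescriptor stands in for any proto.Message): mustMarshal trades the error return for a panic, which suits package-level variables and tests where a marshal failure can only be a programmer bug.
func exampleMustMarshal() []byte {
	// Hypothetical helper: any proto.Message works here.
	var desc roachpb.RangeDescriptor
	return mustMarshal(&desc) // panics instead of returning an error
}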
Example #2
// AddInfoProto adds or updates an info object. Returns an error if info
// couldn't be added.
func (g *Gossip) AddInfoProto(key string, msg proto.Message, ttl time.Duration) error {
	bytes, err := protoutil.Marshal(msg)
	if err != nil {
		return err
	}
	return g.AddInfo(key, bytes, ttl)
}
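A hedged call sketch, assuming an existing *Gossip handle; the key name and TTL here are illustrative only.
func exampleGossipProto(g *Gossip, msg proto.Message) error {
	// Hypothetical key: share msg cluster-wide, expiring after a minute.
	return g.AddInfoProto("example-key", msg, time.Minute)
}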
Example #3
func TestBelowRaftProtos(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Enable the additional checks in TestMain. NB: running this test by itself
	// will fail those extra checks - such failures are safe to ignore, so long
	// as this test passes when run with the entire package's tests.
	verifyBelowRaftProtos = true

	slice := make([]byte, 1<<20)
	for typ, fix := range belowRaftGoldenProtos {
		if b, err := protoutil.Marshal(reflect.New(typ.Elem()).Interface().(proto.Message)); err != nil {
			t.Fatal(err)
		} else if err := verifyHash(b, fix.emptySum); err != nil {
			t.Errorf("%s (empty): %s\n", typ, err)
		}

		randGen := rand.New(rand.NewSource(goldenSeed))

		bytes := slice
		numBytes := 0
		for i := 0; i < itersPerProto; i++ {
			if n, err := marshalTo(fix.populatedConstructor(randGen), bytes); err != nil {
				t.Fatal(err)
			} else {
				bytes = bytes[n:]
				numBytes += n
			}
		}
		if err := verifyHash(slice[:numBytes], fix.populatedSum); err != nil {
			t.Errorf("%s (populated): %s\n", typ, err)
		}
	}
}
Example #4
// Encode implements gwruntime.Marshaler.
func (e *protoEncoder) Encode(v interface{}) error {
	if p, ok := v.(proto.Message); ok {
		bytes, err := protoutil.Marshal(p)
		if err == nil {
			_, err = e.w.Write(bytes)
		}
		return err
	}
	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}
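A hedged sketch, assuming protoEncoder wraps its destination io.Writer in the w field (as the e.w.Write call above suggests); non-proto values would take the error branch instead.
func exampleEncodeToBuffer(msg proto.Message) ([]byte, error) {
	var buf bytes.Buffer
	enc := &protoEncoder{w: &buf}
	if err := enc.Encode(msg); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}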
Example #5
// PutProto sets the given key to the protobuf-serialized byte string
// of msg and the provided timestamp. Returns the lengths in bytes of
// the key and the value.
func PutProto(engine Writer, key MVCCKey, msg proto.Message) (keyBytes, valBytes int64, err error) {
	bytes, err := protoutil.Marshal(msg)
	if err != nil {
		return 0, 0, err
	}

	if err := engine.Put(key, bytes); err != nil {
		return 0, 0, err
	}

	return int64(key.EncodedSize()), int64(len(bytes)), nil
}
Example #6
// SetProto encodes the specified proto message into the bytes field of the
// receiver and clears the checksum. If the proto message is an
// InternalTimeSeriesData, the tag will be set to TIMESERIES rather than BYTES.
func (v *Value) SetProto(msg proto.Message) error {
	data, err := protoutil.Marshal(msg)
	if err != nil {
		return err
	}
	v.SetBytes(data)
	// Special handling for timeseries data.
	if _, ok := msg.(*InternalTimeSeriesData); ok {
		v.setTag(ValueType_TIMESERIES)
	}
	return nil
}
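A round-trip sketch (editor's illustration): for non-timeseries messages SetProto stores the marshaled bytes under the BYTES tag, so GetBytes plus proto.Unmarshal should recover the original message.
func exampleValueRoundTrip(in, out proto.Message) error {
	var v Value
	if err := v.SetProto(in); err != nil {
		return err
	}
	data, err := v.GetBytes() // the bytes written by SetBytes above
	if err != nil {
		return err
	}
	return proto.Unmarshal(data, out)
}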
Example #7
// TestStoreRangeSplitAtTablePrefix verifies that a range can be split at
// UserTableDataMin and still gossip the SystemConfig properly.
func TestStoreRangeSplitAtTablePrefix(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()

	key := keys.MakeRowSentinelKey(append([]byte(nil), keys.UserTableDataMin...))
	args := adminSplitArgs(key, key)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatalf("%q: split unexpected error: %s", key, pErr)
	}

	var desc sqlbase.TableDescriptor
	descBytes, err := protoutil.Marshal(&desc)
	if err != nil {
		t.Fatal(err)
	}

	// Update SystemConfig to trigger gossip.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemConfigTrigger()
		// We don't care about the values, just the keys.
		k := sqlbase.MakeDescMetadataKey(sqlbase.ID(keys.MaxReservedDescID + 1))
		return txn.Put(k, &desc)
	}); err != nil {
		t.Fatal(err)
	}

	successChan := make(chan struct{}, 1)
	store.Gossip().RegisterCallback(gossip.KeySystemConfig, func(_ string, content roachpb.Value) {
		contentBytes, err := content.GetBytes()
		if err != nil {
			t.Fatal(err)
		}
		if bytes.Contains(contentBytes, descBytes) {
			select {
			case successChan <- struct{}{}:
			default:
			}
		}
	})

	select {
	case <-time.After(time.Second):
		t.Errorf("expected a schema gossip containing %q, but did not see one", descBytes)
	case <-successChan:
	}
}
Example #8
func TestBatchProto(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := NewInMem(roachpb.Attributes{}, 1<<20, stopper)

	b := e.NewBatch()
	defer b.Close()

	val := roachpb.MakeValueFromString("value")
	if _, _, err := PutProto(b, mvccKey("proto"), &val); err != nil {
		t.Fatal(err)
	}
	getVal := &roachpb.Value{}
	ok, keySize, valSize, err := b.GetProto(mvccKey("proto"), getVal)
	if !ok || err != nil {
		t.Fatalf("expected GetProto to success ok=%t: %s", ok, err)
	}
	if keySize != 6 {
		t.Errorf("expected key size 6; got %d", keySize)
	}
	data, err := protoutil.Marshal(&val)
	if err != nil {
		t.Fatal(err)
	}
	if valSize != int64(len(data)) {
		t.Errorf("expected value size %d; got %d", len(data), valSize)
	}
	if !proto.Equal(getVal, &val) {
		t.Errorf("expected %v; got %v", &val, getVal)
	}
	// Before the batch is committed, the proto is not visible via the engine.
	if ok, _, _, err := e.GetProto(mvccKey("proto"), getVal); ok || err != nil {
		t.Fatalf("expected GetProto to fail ok=%t: %s", ok, err)
	}
	// Commit and verify the proto can be read directly from the engine.
	if err := b.Commit(); err != nil {
		t.Fatal(err)
	}
	if ok, _, _, err := e.GetProto(mvccKey("proto"), getVal); !ok || err != nil {
		t.Fatalf("expected GetProto to success ok=%t: %s", ok, err)
	}
	if !proto.Equal(getVal, &val) {
		t.Errorf("expected %v; got %v", &val, getVal)
	}
}
Example #9
// MergeInternalTimeSeriesData exports the engine's C++ merge logic for
// InternalTimeSeriesData to higher level packages. This is intended primarily
// for consumption by high level testing of time series functionality.
func MergeInternalTimeSeriesData(
	sources ...roachpb.InternalTimeSeriesData,
) (roachpb.InternalTimeSeriesData, error) {
	// Wrap each proto in an inlined MVCC value, and marshal each wrapped value
	// to bytes. This is the format required by the engine.
	srcBytes := make([][]byte, 0, len(sources))
	for _, src := range sources {
		var val roachpb.Value
		if err := val.SetProto(&src); err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		bytes, err := protoutil.Marshal(&MVCCMetadata{
			RawBytes: val.RawBytes,
		})
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		srcBytes = append(srcBytes, bytes)
	}

	// Merge every element into a nil byte slice, one at a time.
	var (
		mergedBytes []byte
		err         error
	)
	for _, bytes := range srcBytes {
		mergedBytes, err = goMerge(mergedBytes, bytes)
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
	}

	// Unmarshal merged bytes and extract the time series value within.
	var meta MVCCMetadata
	if err := proto.Unmarshal(mergedBytes, &meta); err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	mergedTS, err := meta.Value().GetTimeseries()
	if err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	return mergedTS, nil
}
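A brief call sketch with hypothetical inputs: merging two populated values is a single variadic call, with the MVCC wrapping and engine merge handled internally as shown above.
func exampleMergeTwo(a, b roachpb.InternalTimeSeriesData) (roachpb.InternalTimeSeriesData, error) {
	return MergeInternalTimeSeriesData(a, b)
}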
Example #10
func doHTTPReq(t *testing.T, client http.Client, method, url string, body proto.Message) (*http.Response, error) {
	var b io.Reader
	if body != nil {
		buf, err := protoutil.Marshal(body)
		if err != nil {
			t.Fatal(err)
		}
		b = bytes.NewReader(buf)
	}
	req, err := http.NewRequest(method, url, b)
	if err != nil {
		t.Fatalf("%s %s: error building request: %s", method, url, err)
	}
	if b != nil {
		req.Header.Add(util.ContentTypeHeader, util.ProtoContentType)
	}

	return client.Do(req)
}
Example #11
func (t *parallelTest) setup(spec *parTestSpec) {
	if spec.ClusterSize == 0 {
		spec.ClusterSize = 1
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Cluster Size: %d", spec.ClusterSize)
	}

	args := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			MaxOffset: logicMaxOffset,
			Knobs: base.TestingKnobs{
				SQLExecutor: &sql.ExecutorTestingKnobs{
					WaitForGossipUpdate:   true,
					CheckStmtStringChange: true,
				},
			},
		},
	}
	t.cluster = serverutils.StartTestCluster(t, spec.ClusterSize, args)
	t.clients = make([][]*gosql.DB, spec.ClusterSize)
	for i := range t.clients {
		t.clients[i] = append(t.clients[i], t.cluster.ServerConn(i))
	}
	r0 := sqlutils.MakeSQLRunner(t, t.clients[0][0])

	if spec.RangeSplitSize != 0 {
		if testing.Verbose() || log.V(1) {
			log.Infof(t.ctx, "Setting range split size: %d", spec.RangeSplitSize)
		}
		zoneCfg := config.DefaultZoneConfig()
		zoneCfg.RangeMaxBytes = int64(spec.RangeSplitSize)
		zoneCfg.RangeMinBytes = zoneCfg.RangeMaxBytes / 2
		buf, err := protoutil.Marshal(&zoneCfg)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		r0.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf)
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Creating database")
	}

	r0.Exec("CREATE DATABASE test")
	for i := range t.clients {
		sqlutils.MakeSQLRunner(t, t.clients[i][0]).Exec("SET DATABASE = test")
	}

	if spec.ClusterSize >= 3 {
		if testing.Verbose() || log.V(1) {
			log.Infof(t.ctx, "Waiting for full replication")
		}
		if err := t.cluster.WaitForFullReplication(); err != nil {
			t.Fatal(err)
		}
	}
	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Test setup done")
	}
}
Example #12
func snapshot(
	ctx context.Context,
	snap engine.Reader,
	rangeID roachpb.RangeID,
	eCache *raftEntryCache,
	startKey roachpb.RKey,
) (raftpb.Snapshot, error) {
	start := timeutil.Now()
	var snapData roachpb.RaftSnapshotData

	truncState, err := loadTruncatedState(ctx, snap, rangeID)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	firstIndex := truncState.Index + 1

	// Read the range metadata from the snapshot instead of the members
	// of the Range struct because they might be changed concurrently.
	appliedIndex, _, err := loadAppliedIndex(ctx, snap, rangeID)
	if err != nil {
		return raftpb.Snapshot{}, err
	}

	var desc roachpb.RangeDescriptor
	// We ignore intents on the range descriptor (consistent=false) because we
	// know they cannot be committed yet; operations that modify range
	// descriptors resolve their own intents when they commit.
	ok, err := engine.MVCCGetProto(ctx, snap, keys.RangeDescriptorKey(startKey),
		hlc.MaxTimestamp, false /* !consistent */, nil, &desc)
	if err != nil {
		return raftpb.Snapshot{}, errors.Errorf("failed to get desc: %s", err)
	}
	if !ok {
		return raftpb.Snapshot{}, errors.Errorf("couldn't find range descriptor")
	}

	// Store the RangeDescriptor as metadata; it will be retrieved by ApplySnapshot().
	snapData.RangeDescriptor = desc

	// Iterate over all the data in the range, including local-only data like
	// the sequence cache.
	iter := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
	defer iter.Close()
	var alloc bufalloc.ByteAllocator
	for ; iter.Valid(); iter.Next() {
		var key engine.MVCCKey
		var value []byte
		alloc, key, value = iter.allocIterKeyValue(alloc)
		snapData.KV = append(snapData.KV,
			roachpb.RaftSnapshotData_KeyValue{
				Key:       key.Key,
				Value:     value,
				Timestamp: key.Timestamp,
			})
	}

	endIndex := appliedIndex + 1
	snapData.LogEntries = make([][]byte, 0, endIndex-firstIndex)

	scanFunc := func(kv roachpb.KeyValue) (bool, error) {
		bytes, err := kv.Value.GetBytes()
		if err == nil {
			snapData.LogEntries = append(snapData.LogEntries, bytes)
		}
		return false, err
	}

	if err := iterateEntries(ctx, snap, rangeID, firstIndex, endIndex, scanFunc); err != nil {
		return raftpb.Snapshot{}, err
	}

	data, err := protoutil.Marshal(&snapData)
	if err != nil {
		return raftpb.Snapshot{}, err
	}

	// Synthesize our raftpb.ConfState from desc.
	var cs raftpb.ConfState
	for _, rep := range desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
	}

	term, err := term(ctx, snap, rangeID, eCache, appliedIndex)
	if err != nil {
		return raftpb.Snapshot{}, errors.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
	}

	log.Infof(ctx, "generated snapshot for range %s at index %d in %s. encoded size=%d, %d KV pairs, %d log entries",
		rangeID, appliedIndex, timeutil.Since(start), len(data), len(snapData.KV), len(snapData.LogEntries))

	return raftpb.Snapshot{
		Data: data,
		Metadata: raftpb.SnapshotMetadata{
			Index:     appliedIndex,
			Term:      term,
			ConfState: cs,
		},
	}, nil
}
Example #13
// MarshalResponse examines the request Accept header to determine the
// client's preferred response encoding. Supported content types
// include JSON, protobuf, and YAML. If the Accept header is not
// available, the Content-Type header specifying the request encoding
// is used. The value parameter is marshalled using the response
// encoding and the resulting body and content type are returned. If
// the encoding could not be determined by either header, the response
// is marshalled using JSON. Falls back to JSON when the protobuf format
// cannot be used for the given value.
func MarshalResponse(r *http.Request, value interface{}, allowed []EncodingType) (
	body []byte, contentType string, err error) {
	// TODO(spencer): until there's a nice (free) way to parse the
	//   Accept header and properly use the request's preference for a
	//   content type, we simply find out which of "json", "protobuf" or
	//   "yaml" appears first in the Accept header. If neither do, we
	//   default to JSON.
	jsonIdx := int32(math.MaxInt32)
	protoIdx := int32(math.MaxInt32)
	yamlIdx := int32(math.MaxInt32)

	accept := r.Header.Get(AcceptHeader)
	if isAllowed(JSONEncoding, allowed) {
		jsonIdx = getEncodingIndex("json", accept)
	}
	if isAllowed(ProtoEncoding, allowed) {
		protoIdx = getEncodingIndex("protobuf", accept)
	}
	if isAllowed(YAMLEncoding, allowed) {
		yamlIdx = getEncodingIndex("yaml", accept)
	}

	if jsonIdx == math.MaxInt32 && yamlIdx == math.MaxInt32 && protoIdx == math.MaxInt32 {
		switch GetContentType(r) {
		case JSONContentType, AltJSONContentType:
			if isAllowed(JSONEncoding, allowed) {
				jsonIdx = 0
			}
		case ProtoContentType, AltProtoContentType:
			if isAllowed(ProtoEncoding, allowed) {
				protoIdx = 0
			}
		case YAMLContentType, AltYAMLContentType:
			if isAllowed(YAMLEncoding, allowed) {
				yamlIdx = 0
			}
		}
	}

	// Reset protoIdx if value cannot be converted to a protocol message
	if protoIdx < math.MaxInt32 {
		if _, ok := value.(proto.Message); !ok {
			protoIdx = int32(math.MaxInt32)
		}
	}

	if protoIdx < jsonIdx && protoIdx < yamlIdx {
		// Protobuf-encode the value.
		contentType = ProtoContentType
		if body, err = protoutil.Marshal(value.(proto.Message)); err != nil {
			err = Errorf("unable to marshal %+v to protobuf: %s", value, err)
		}
	} else if yamlIdx < jsonIdx && yamlIdx < protoIdx {
		// YAML-encode the value.
		contentType = YAMLContentType
		if body, err = yaml.Marshal(value); err != nil {
			err = Errorf("unable to marshal %+v to yaml: %s", value, err)
		} else {
			body = sanitizeYAML(body)
		}
	} else {
		// Always fall back to JSON-encoding the value.
		contentType = JSONContentType
		switch reflect.ValueOf(value).Kind() {
		case reflect.Array, reflect.Slice:
			value = jsonWrapper{Data: value}
		}
		if body, err = json.MarshalIndent(value, "", "  "); err != nil {
			err = Errorf("unable to marshal %+v to json: %s", value, err)
		}
	}
	return
}
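A hedged handler sketch using only names from the snippet above: marshal a value in the client's preferred encoding and echo the negotiated Content-Type back.
func exampleHandler(w http.ResponseWriter, r *http.Request, value proto.Message) {
	allowed := []EncodingType{JSONEncoding, ProtoEncoding, YAMLEncoding}
	body, contentType, err := MarshalResponse(r, value, allowed)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", contentType)
	_, _ = w.Write(body)
}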
Example #14
func TestDropDatabase(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB := setup(t)
	defer cleanup(s, sqlDB)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	dbNameKey := sqlbase.MakeNameMetadataKey(keys.RootNamespaceID, "t")
	r, pErr := kvDB.Get(dbNameKey)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if !r.Exists() {
		t.Fatalf(`database "t" does not exist`)
	}
	dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(r.ValueInt()))
	desc := &sqlbase.Descriptor{}
	if pErr := kvDB.GetProto(dbDescKey, desc); pErr != nil {
		t.Fatal(pErr)
	}
	dbDesc := desc.GetDatabase()

	tbNameKey := sqlbase.MakeNameMetadataKey(dbDesc.ID, "kv")
	gr, pErr := kvDB.Get(tbNameKey)
	if pErr != nil {
		t.Fatal(pErr)
	}
	if !gr.Exists() {
		t.Fatalf(`table "kv" does not exist`)
	}
	tbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
	if pErr := kvDB.GetProto(tbDescKey, desc); pErr != nil {
		t.Fatal(pErr)
	}
	tbDesc := desc.GetTable()

	// Add a zone config for both the table and database.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	tbZoneKey := sqlbase.MakeZoneKey(tbDesc.ID)
	dbZoneKey := sqlbase.MakeZoneKey(dbDesc.ID)
	if gr, err := kvDB.Get(tbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("table zone config entry not found")
	}
	if gr, err := kvDB.Get(dbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("database zone config entry not found")
	}

	tablePrefix := keys.MakeTablePrefix(uint32(tbDesc.ID))
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 6; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP DATABASE t`); err != nil {
		t.Fatal(err)
	}

	if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if gr, err := kvDB.Get(tbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after database is dropped: %q", tbDescKey)
	}

	if gr, err := kvDB.Get(tbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor key still exists after database is dropped")
	}

	if gr, err := kvDB.Get(dbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database descriptor still exists after database is dropped")
	}

	if gr, err := kvDB.Get(dbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database descriptor key still exists after database is dropped")
	}

	if gr, err := kvDB.Get(tbZoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table zone config entry still exists after the database is dropped")
	}

	if gr, err := kvDB.Get(dbZoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database zone config entry still exists after the database is dropped")
	}
}
Example #15
func TestDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB := setup(t)
	defer cleanup(s, sqlDB)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv")
	gr, pErr := kvDB.Get(nameKey)
	if pErr != nil {
		t.Fatal(pErr)
	}

	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}

	descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
	desc := &sqlbase.Descriptor{}
	if pErr := kvDB.GetProto(descKey, desc); pErr != nil {
		t.Fatal(pErr)
	}
	tableDesc := desc.GetTable()

	// Add a zone config for the table.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	zoneKey := sqlbase.MakeZoneKey(tableDesc.ID)
	if gr, err := kvDB.Get(zoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("zone config entry not found")
	}

	tablePrefix := keys.MakeTablePrefix(uint32(tableDesc.ID))
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 6; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil {
		t.Fatal(err)
	}

	// Test that the deleted table cannot be used. This prevents regressions where
	// name -> descriptor ID caches might make this statement erroneously work.
	if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) {
		t.Fatalf("different error than expected: %s", err)
	}

	if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if gr, err := kvDB.Get(descKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(nameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table namekey still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(zoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("zone config entry still exists after the table is dropped")
	}
}
Example #16
func init() {
	var err error
	if protobufConfig, err = protoutil.Marshal(&testConfig); err != nil {
		log.Fatalf("unable to marshal test config %+v: %s", testConfig, err)
	}
}
Example #17
// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable splitting. We're using bad attributes in zone configs
	// to be able to match.
	defer config.TestingDisableTableSplits()()
	s, sqlDB, _ := setup(t)
	defer cleanup(s, sqlDB)

	expectedCounter := uint32(keys.MaxReservedDescID + 1)

	defaultZoneConfig := config.DefaultZoneConfig()
	defaultZoneConfig.RangeMinBytes = 1 << 20
	defaultZoneConfig.RangeMaxBytes = 1 << 20
	defaultZoneConfig.GC.TTLSeconds = 60

	{
		buf, err := protoutil.Marshal(&defaultZoneConfig)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		if _, err = sqlDB.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", defaultZoneConfig, err)
		}
	}

	// Naming scheme for database and tables:
	// db1 has tables tb11 and tb12
	// db2 has tables tb21 and tb22

	db1 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	db2 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb11 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb12 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb21 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb22 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	cfg := forceNewConfig(t, s)

	// We have no custom zone configs.
	testCases := []struct {
		key     roachpb.RKey
		zoneCfg *config.ZoneConfig
	}{
		{roachpb.RKeyMin, &defaultZoneConfig},
		{keys.MakeTablePrefix(0), &defaultZoneConfig},
		{keys.MakeTablePrefix(1), &defaultZoneConfig},
		{keys.MakeTablePrefix(keys.MaxReservedDescID), &defaultZoneConfig},
		{keys.MakeTablePrefix(db1), &defaultZoneConfig},
		{keys.MakeTablePrefix(db2), &defaultZoneConfig},
		{keys.MakeTablePrefix(tb11), &defaultZoneConfig},
		{keys.MakeTablePrefix(tb12), &defaultZoneConfig},
		{keys.MakeTablePrefix(tb21), &defaultZoneConfig},
		{keys.MakeTablePrefix(tb22), &defaultZoneConfig},
	}

	for tcNum, tc := range testCases {
		zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
		if err != nil {
			t.Fatalf("#%d: err=%s", tcNum, err)
		}

		if !reflect.DeepEqual(zoneCfg, tc.zoneCfg) {
			t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
		}
	}

	// Now set some zone configs. We don't have a nice way of using table
	// names for this, so we do raw puts.
	// Here is the list of dbs/tables and whether they have a custom zone config:
	// db1: true
	//   tb1: true
	//   tb2: false
	// db2: false
	//   tb1: true
	//   tb2: false
	db1Cfg := config.ZoneConfig{ReplicaAttrs: []roachpb.Attributes{{[]string{"db1"}}}}
	tb11Cfg := config.ZoneConfig{ReplicaAttrs: []roachpb.Attributes{{[]string{"db1.tb1"}}}}
	tb21Cfg := config.ZoneConfig{ReplicaAttrs: []roachpb.Attributes{{[]string{"db2.tb1"}}}}
	for objID, objZone := range map[uint32]config.ZoneConfig{
		db1:  db1Cfg,
		tb11: tb11Cfg,
		tb21: tb21Cfg,
	} {
		buf, err := protoutil.Marshal(&objZone)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", objZone, err)
		}
	}

	cfg = forceNewConfig(t, s)

	testCases = []struct {
		key     roachpb.RKey
		zoneCfg *config.ZoneConfig
	}{
		{roachpb.RKeyMin, &defaultZoneConfig},
		{keys.MakeTablePrefix(0), &defaultZoneConfig},
		{keys.MakeTablePrefix(1), &defaultZoneConfig},
		{keys.MakeTablePrefix(keys.MaxReservedDescID), &defaultZoneConfig},
		{keys.MakeTablePrefix(db1), &db1Cfg},
		{keys.MakeTablePrefix(db2), &defaultZoneConfig},
		{keys.MakeTablePrefix(tb11), &tb11Cfg},
		{keys.MakeTablePrefix(tb12), &db1Cfg},
		{keys.MakeTablePrefix(tb21), &tb21Cfg},
		{keys.MakeTablePrefix(tb22), &defaultZoneConfig},
	}

	for tcNum, tc := range testCases {
		zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
		if err != nil {
			t.Fatalf("#%d: err=%s", tcNum, err)
		}

		if !reflect.DeepEqual(zoneCfg, tc.zoneCfg) {
			t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
		}
	}
}
Example #18
// Snapshot implements the raft.Storage interface.
// Snapshot requires that the replica lock is held.
func (r *Replica) Snapshot() (raftpb.Snapshot, error) {
	start := timeutil.Now()
	// Copy all the data from a consistent RocksDB snapshot into a RaftSnapshotData.
	snap := r.store.NewSnapshot()
	defer snap.Close()
	var snapData roachpb.RaftSnapshotData

	firstIndex, err := r.FirstIndex()
	if err != nil {
		return raftpb.Snapshot{}, err
	}

	// Read the range metadata from the snapshot instead of the members
	// of the Range struct because they might be changed concurrently.
	appliedIndex, err := r.loadAppliedIndexLocked(snap)
	if err != nil {
		return raftpb.Snapshot{}, err
	}

	var desc roachpb.RangeDescriptor
	// We ignore intents on the range descriptor (consistent=false) because we
	// know they cannot be committed yet; operations that modify range
	// descriptors resolve their own intents when they commit.
	ok, err := engine.MVCCGetProto(context.Background(), snap, keys.RangeDescriptorKey(r.mu.desc.StartKey),
		r.store.Clock().Now(), false /* !consistent */, nil, &desc)
	if err != nil {
		return raftpb.Snapshot{}, util.Errorf("failed to get desc: %s", err)
	}
	if !ok {
		return raftpb.Snapshot{}, util.Errorf("couldn't find range descriptor")
	}

	// Store the RangeDescriptor as metadata; it will be retrieved by ApplySnapshot().
	snapData.RangeDescriptor = desc

	// Iterate over all the data in the range, including local-only data like
	// the sequence cache.
	iter := newReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		key := iter.Key()
		snapData.KV = append(snapData.KV,
			roachpb.RaftSnapshotData_KeyValue{
				Key:       key.Key,
				Value:     iter.Value(),
				Timestamp: key.Timestamp,
			})
	}

	entries, err := r.entries(snap, firstIndex, appliedIndex+1, 0)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	snapData.LogEntries = entries

	data, err := protoutil.Marshal(&snapData)
	if err != nil {
		return raftpb.Snapshot{}, err
	}

	// Synthesize our raftpb.ConfState from desc.
	var cs raftpb.ConfState
	for _, rep := range desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
	}

	term, err := r.Term(appliedIndex)
	if err != nil {
		return raftpb.Snapshot{}, util.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
	}

	log.Infof("generated snapshot for range %s at index %d in %s. encoded size=%d, %d KV pairs, %d log entries",
		r.RangeID, appliedIndex, timeutil.Now().Sub(start), len(data), len(snapData.KV), len(snapData.LogEntries))

	return raftpb.Snapshot{
		Data: data,
		Metadata: raftpb.SnapshotMetadata{
			Index:     appliedIndex,
			Term:      term,
			ConfState: cs,
		},
	}, nil
}
Example #19
// runSetZone parses the yaml input file, converts it to proto, and inserts it
// in the system.zones table.
func runSetZone(cmd *cobra.Command, args []string) error {
	if len(args) != 2 {
		mustUsage(cmd)
		return nil
	}

	conn, err := makeSQLClient()
	if err != nil {
		return err
	}
	defer conn.Close()

	names, err := parseZoneName(args[0])
	if err != nil {
		return err
	}

	if err := conn.Exec(`BEGIN`, nil); err != nil {
		return err
	}

	path, err := queryDescriptorIDPath(conn, names)
	if err != nil {
		if err == io.EOF {
			fmt.Printf("%s not found\n", args[0])
			return nil
		}
		return err
	}

	zoneID, zone, err := queryZonePath(conn, path)
	if err != nil {
		return err
	}

	// Convert it to proto and marshal it again to put into the table. This is a
	// bit more tedious than taking protos directly, but YAML is a more widely
	// understood format.
	origReplicaAttrs := zone.ReplicaAttrs
	zone.ReplicaAttrs = nil
	if err := yaml.Unmarshal([]byte(args[1]), zone); err != nil {
		return fmt.Errorf("unable to parse zone config file %q: %s", args[1], err)
	}
	if zone.ReplicaAttrs == nil {
		zone.ReplicaAttrs = origReplicaAttrs
	}

	if err := zone.Validate(); err != nil {
		return err
	}

	buf, err := protoutil.Marshal(zone)
	if err != nil {
		return fmt.Errorf("unable to parse zone config file %q: %s", args[1], err)
	}

	id := path[len(path)-1]
	if id == zoneID {
		err = runQueryAndFormatResults(conn, os.Stdout,
			makeQuery(`UPDATE system.zones SET config = $2 WHERE id = $1`, id, buf), true)
	} else {
		err = runQueryAndFormatResults(conn, os.Stdout,
			makeQuery(`INSERT INTO system.zones VALUES ($1, $2)`, id, buf), true)
	}
	if err != nil {
		return err
	}
	if err := conn.Exec(`COMMIT`, nil); err != nil {
		return err
	}

	res, err := yaml.Marshal(zone)
	if err != nil {
		return err
	}
	fmt.Print(string(res))
	return nil
}
Example #20
// Marshal implements gwruntime.Marshaler.
func (*ProtoPb) Marshal(v interface{}) ([]byte, error) {
	if p, ok := v.(proto.Message); ok {
		return protoutil.Marshal(p)
	}
	return nil, errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}
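A closing sketch (editor's illustration): the type assertion means plain values are rejected, so callers can rely on the returned error to catch non-proto payloads.
func exampleProtoPbMarshal(v interface{}) ([]byte, error) {
	var m ProtoPb
	// Succeeds only when v implements proto.Message; anything else errors.
	return m.Marshal(v)
}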