Example 1
func TestBelowRaftProtos(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Enable the additional checks in TestMain. NB: running this test by itself
	// will fail those extra checks - such failures are safe to ignore, so long
	// as this test passes when run with the entire package's tests.
	verifyBelowRaftProtos = true

	slice := make([]byte, 1<<20)
	for typ, fix := range belowRaftGoldenProtos {
		if b, err := protoutil.Marshal(reflect.New(typ.Elem()).Interface().(proto.Message)); err != nil {
			t.Fatal(err)
		} else if err := verifyHash(b, fix.emptySum); err != nil {
			t.Errorf("%s (empty): %s\n", typ, err)
		}

		randGen := rand.New(rand.NewSource(goldenSeed))

		bytes := slice
		numBytes := 0
		for i := 0; i < itersPerProto; i++ {
			if n, err := marshalTo(fix.populatedConstructor(randGen), bytes); err != nil {
				t.Fatal(err)
			} else {
				bytes = bytes[n:]
				numBytes += n
			}
		}
		if err := verifyHash(slice[:numBytes], fix.populatedSum); err != nil {
			t.Errorf("%s (populated): %s\n", typ, err)
		}
	}
}
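For orientation, here is a minimal, self-contained sketch of the golden-hash technique the test above relies on: encode a value deterministically, hash the bytes, and compare against a recorded constant so that silent changes to the encoding are caught. The encoding, the CRC-32 hash, and the way the golden value is obtained are illustrative assumptions, not what the test itself uses.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// verifyGolden compares the checksum of encoded bytes against a recorded value.
func verifyGolden(b []byte, golden uint32) error {
	if sum := crc32.ChecksumIEEE(b); sum != golden {
		return fmt.Errorf("expected checksum %x, got %x", golden, sum)
	}
	return nil
}

func main() {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, 42) // deterministic stand-in for a marshaled proto
	golden := crc32.ChecksumIEEE(buf)      // in a real test this would be a hard-coded constant
	if err := verifyGolden(buf, golden); err != nil {
		panic(err)
	}
	fmt.Println("golden checksum matches")
}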
Example 2
// AddInfoProto adds or updates an info object. Returns an error if info
// couldn't be added.
func (g *Gossip) AddInfoProto(key string, msg proto.Message, ttl time.Duration) error {
	bytes, err := protoutil.Marshal(msg)
	if err != nil {
		return err
	}
	return g.AddInfo(key, bytes, ttl)
}
Example 3
// SetProto encodes the specified proto message into the bytes field of the
// receiver and clears the checksum. If the proto message is an
// InternalTimeSeriesData, the tag will be set to TIMESERIES rather than BYTES.
func (v *Value) SetProto(msg proto.Message) error {
	// Fast-path for when the proto implements MarshalTo and Size (i.e. all of
	// our protos). The fast-path marshals directly into the Value.RawBytes field
	// instead of allocating a separate []byte and copying.
	type marshalTo interface {
		MarshalTo(data []byte) (int, error)
		Size() int
	}
	if m, ok := msg.(marshalTo); ok {
		size := m.Size()
		v.RawBytes = make([]byte, headerSize+size)
		// NB: This call to protoutil.Interceptor would more naturally be
		// encapsulated in protoutil.MarshalTo, yet that approach imposes a
		// significant (~30%) slowdown. It is unclear why. See
		// BenchmarkValueSetProto.
		protoutil.Interceptor(msg)
		if _, err := m.MarshalTo(v.RawBytes[headerSize:]); err != nil {
			return err
		}
		// Special handling for timeseries data.
		if _, ok := msg.(*InternalTimeSeriesData); ok {
			v.setTag(ValueType_TIMESERIES)
		} else {
			v.setTag(ValueType_BYTES)
		}
		return nil
	}

	data, err := protoutil.Marshal(msg)
	if err != nil {
		return err
	}
	v.SetBytes(data)
	return nil
}
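As an aside, here is a minimal, self-contained sketch of the Size/MarshalTo fast path described in the comment above: assert the message to an interface exposing those methods and marshal directly into a preallocated buffer at a fixed header offset, avoiding a separate allocation and copy. The fakeMsg type, header size, and helper name are hypothetical, not from the source.

package main

import "fmt"

// sizerMarshaler mirrors the interface asserted in SetProto above.
type sizerMarshaler interface {
	Size() int
	MarshalTo(data []byte) (int, error)
}

// fakeMsg is a hypothetical message with hand-written Size/MarshalTo methods.
type fakeMsg struct{ payload []byte }

func (m fakeMsg) Size() int                         { return len(m.payload) }
func (m fakeMsg) MarshalTo(data []byte) (int, error) { return copy(data, m.payload), nil }

// marshalWithHeader marshals msg directly after headerSize reserved bytes when
// the fast path is available, instead of allocating a separate slice and copying.
func marshalWithHeader(msg interface{}, headerSize int) ([]byte, error) {
	if m, ok := msg.(sizerMarshaler); ok {
		buf := make([]byte, headerSize+m.Size())
		if _, err := m.MarshalTo(buf[headerSize:]); err != nil {
			return nil, err
		}
		return buf, nil
	}
	return nil, fmt.Errorf("message lacks Size/MarshalTo")
}

func main() {
	buf, err := marshalWithHeader(fakeMsg{payload: []byte("hello")}, 5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d header bytes + %q\n", 5, buf[5:])
}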
Example 4
func mustMarshal(m proto.Message) []byte {
	b, err := protoutil.Marshal(m)
	if err != nil {
		panic(err)
	}
	return b
}
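For context, a brief, hypothetical sketch of the usual reason for a panicking must-style helper like the one above: package-level initializers cannot return errors, so the helper fails loudly at startup instead. JSON stands in for protobuf here to keep the sketch dependency-free; mustEncode and defaultPayload are illustrative names, not from the source.

package main

import (
	"encoding/json"
	"fmt"
)

// mustEncode panics instead of returning an error, mirroring the must-style helper.
func mustEncode(v interface{}) []byte {
	b, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	return b
}

// Package-level initializers cannot return errors, which is the usual reason
// for a must-style helper: it fails loudly at startup if encoding breaks.
var defaultPayload = mustEncode(map[string]string{"name": "default"})

func main() {
	fmt.Println(string(defaultPayload))
}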
Example 5
func (t *parallelTest) setup(spec *parTestSpec) {
	if spec.ClusterSize == 0 {
		spec.ClusterSize = 1
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Cluster Size: %d", spec.ClusterSize)
	}

	args := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{
				SQLExecutor: &sql.ExecutorTestingKnobs{
					WaitForGossipUpdate:   true,
					CheckStmtStringChange: true,
				},
			},
		},
	}
	t.cluster = serverutils.StartTestCluster(t, spec.ClusterSize, args)
	t.clients = make([][]*gosql.DB, spec.ClusterSize)
	for i := range t.clients {
		t.clients[i] = append(t.clients[i], t.cluster.ServerConn(i))
	}
	r0 := sqlutils.MakeSQLRunner(t, t.clients[0][0])

	if spec.RangeSplitSize != 0 {
		if testing.Verbose() || log.V(1) {
			log.Infof(t.ctx, "Setting range split size: %d", spec.RangeSplitSize)
		}
		zoneCfg := config.DefaultZoneConfig()
		zoneCfg.RangeMaxBytes = int64(spec.RangeSplitSize)
		zoneCfg.RangeMinBytes = zoneCfg.RangeMaxBytes / 2
		buf, err := protoutil.Marshal(&zoneCfg)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		r0.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf)
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Creating database")
	}

	r0.Exec("CREATE DATABASE test")
	for i := range t.clients {
		sqlutils.MakeSQLRunner(t, t.clients[i][0]).Exec("SET DATABASE = test")
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Test setup done")
	}
}
Example 6
// PutProto sets the given key to the protobuf-serialized byte string
// of msg and the provided timestamp. Returns the length in bytes of
// key and the value.
func PutProto(engine Writer, key MVCCKey, msg proto.Message) (keyBytes, valBytes int64, err error) {
	bytes, err := protoutil.Marshal(msg)
	if err != nil {
		return 0, 0, err
	}

	if err := engine.Put(key, bytes); err != nil {
		return 0, 0, err
	}

	return int64(key.EncodedSize()), int64(len(bytes)), nil
}
Example 7
// UpdateZoneConfig updates the default zone config for the cluster.
func (c *Cluster) UpdateZoneConfig(rangeMinBytes, rangeMaxBytes int64) {
	zone := config.DefaultZoneConfig()
	zone.RangeMinBytes = rangeMinBytes
	zone.RangeMaxBytes = rangeMaxBytes

	buf, err := protoutil.Marshal(&zone)
	if err != nil {
		log.Fatal(context.Background(), err)
	}
	_, err = c.DB[0].Exec(`UPSERT INTO system.zones (id, config) VALUES (0, $1)`, buf)
	if err != nil {
		log.Fatal(context.Background(), err)
	}
}
Example 8
func TestBatchProto(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := NewInMem(roachpb.Attributes{}, 1<<20)
	stopper.AddCloser(e)

	b := e.NewBatch()
	defer b.Close()

	val := roachpb.MakeValueFromString("value")
	if _, _, err := PutProto(b, mvccKey("proto"), &val); err != nil {
		t.Fatal(err)
	}
	getVal := &roachpb.Value{}
	ok, keySize, valSize, err := b.GetProto(mvccKey("proto"), getVal)
	if !ok || err != nil {
		t.Fatalf("expected GetProto to success ok=%t: %s", ok, err)
	}
	if keySize != 6 {
		t.Errorf("expected key size 6; got %d", keySize)
	}
	data, err := protoutil.Marshal(&val)
	if err != nil {
		t.Fatal(err)
	}
	if valSize != int64(len(data)) {
		t.Errorf("expected value size %d; got %d", len(data), valSize)
	}
	if !proto.Equal(getVal, &val) {
		t.Errorf("expected %v; got %v", &val, getVal)
	}
	// Before commit, proto will not be available via engine.
	if ok, _, _, err := e.GetProto(mvccKey("proto"), getVal); ok || err != nil {
		t.Fatalf("expected GetProto to fail ok=%t: %s", ok, err)
	}
	// Commit and verify the proto can be read directly from the engine.
	if err := b.Commit(); err != nil {
		t.Fatal(err)
	}
	if ok, _, _, err := e.GetProto(mvccKey("proto"), getVal); !ok || err != nil {
		t.Fatalf("expected GetProto to success ok=%t: %s", ok, err)
	}
	if !proto.Equal(getVal, &val) {
		t.Errorf("expected %v; got %v", &val, getVal)
	}
}
Example 9
// MergeInternalTimeSeriesData exports the engine's C++ merge logic for
// InternalTimeSeriesData to higher level packages. This is intended primarily
// for consumption by high level testing of time series functionality.
func MergeInternalTimeSeriesData(
	sources ...roachpb.InternalTimeSeriesData,
) (roachpb.InternalTimeSeriesData, error) {
	// Wrap each proto in an inlined MVCC value, and marshal each wrapped value
	// to bytes. This is the format required by the engine.
	srcBytes := make([][]byte, 0, len(sources))
	for _, src := range sources {
		var val roachpb.Value
		if err := val.SetProto(&src); err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		bytes, err := protoutil.Marshal(&enginepb.MVCCMetadata{
			RawBytes: val.RawBytes,
		})
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		srcBytes = append(srcBytes, bytes)
	}

	// Merge every element into a nil byte slice, one at a time.
	var (
		mergedBytes []byte
		err         error
	)
	for _, bytes := range srcBytes {
		mergedBytes, err = goMerge(mergedBytes, bytes)
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
	}

	// Unmarshal merged bytes and extract the time series value within.
	var meta enginepb.MVCCMetadata
	if err := proto.Unmarshal(mergedBytes, &meta); err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	mergedTS, err := MakeValue(meta).GetTimeseries()
	if err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	return mergedTS, nil
}
Example 10
func doHTTPReq(
	t *testing.T, client http.Client, method, url string, body proto.Message,
) (*http.Response, error) {
	var b io.Reader
	if body != nil {
		buf, err := protoutil.Marshal(body)
		if err != nil {
			t.Fatal(err)
		}
		b = bytes.NewReader(buf)
	}
	req, err := http.NewRequest(method, url, b)
	if err != nil {
		t.Fatalf("%s %s: error building request: %s", method, url, err)
	}
	if b != nil {
		req.Header.Add(httputil.ContentTypeHeader, httputil.ProtoContentType)
	}

	return client.Do(req)
}
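Below is a minimal, self-contained sketch of the request-building pattern above: serialize a body, wrap it in a bytes.Reader, and set the protobuf content type only when a body is present. The content-type string, the byte payload, and the httptest server are assumptions for illustration; the real code uses httputil constants and a proto marshaled with protoutil.Marshal.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A throwaway server that echoes what it received, so the sketch is runnable.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b, _ := ioutil.ReadAll(r.Body)
		fmt.Fprintf(w, "got %d bytes, content-type %q", len(b), r.Header.Get("Content-Type"))
	}))
	defer srv.Close()

	payload := []byte{0x0a, 0x03, 'f', 'o', 'o'} // stand-in for marshaled proto bytes
	req, err := http.NewRequest("POST", srv.URL, bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// Only set the protobuf content type when a body is actually attached.
	req.Header.Add("Content-Type", "application/x-protobuf")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out))
}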
Example 11
func TestDropDatabase(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	ctx := context.TODO()

	// Fix the column families so the key counts below don't change if the
	// family heuristics are updated.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR, FAMILY (k), FAMILY (v));
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	dbNameKey := sqlbase.MakeNameMetadataKey(keys.RootNamespaceID, "t")
	r, err := kvDB.Get(ctx, dbNameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !r.Exists() {
		t.Fatalf(`database "t" does not exist`)
	}
	dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(r.ValueInt()))
	desc := &sqlbase.Descriptor{}
	if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil {
		t.Fatal(err)
	}
	dbDesc := desc.GetDatabase()

	tbNameKey := sqlbase.MakeNameMetadataKey(dbDesc.ID, "kv")
	gr, err := kvDB.Get(ctx, tbNameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf(`table "kv" does not exist`)
	}
	tbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
	if err := kvDB.GetProto(ctx, tbDescKey, desc); err != nil {
		t.Fatal(err)
	}
	tbDesc := desc.GetTable()

	// Add a zone config for both the table and database.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	tbZoneKey := sqlbase.MakeZoneKey(tbDesc.ID)
	dbZoneKey := sqlbase.MakeZoneKey(dbDesc.ID)
	if gr, err := kvDB.Get(ctx, tbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("table zone config entry not found")
	}
	if gr, err := kvDB.Get(ctx, dbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("database zone config entry not found")
	}

	tablePrefix := keys.MakeTablePrefix(uint32(tbDesc.ID))
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(ctx, tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 6; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP DATABASE t`); err != nil {
		t.Fatal(err)
	}

	if kvs, err := kvDB.Scan(ctx, tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if gr, err := kvDB.Get(ctx, tbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after database is dropped: %q", tbDescKey)
	}

	if gr, err := kvDB.Get(ctx, tbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor key still exists after database is dropped")
	}

	if gr, err := kvDB.Get(ctx, dbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database descriptor still exists after database is dropped")
	}

	if gr, err := kvDB.Get(ctx, dbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database descriptor key still exists after database is dropped")
	}

	if gr, err := kvDB.Get(ctx, tbZoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table zone config entry still exists after the database is dropped")
	}

	if gr, err := kvDB.Get(ctx, dbZoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database zone config entry still exists after the database is dropped")
	}
}
Example 12
func TestDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	ctx := context.TODO()

	numRows := 2*sql.TableTruncateChunkSize + 1
	createKVTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv")
	gr, err := kvDB.Get(ctx, nameKey)

	if err != nil {
		t.Fatal(err)
	}

	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}

	descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))

	// Add a zone config for the table.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	zoneKey := sqlbase.MakeZoneKey(tableDesc.ID)
	if gr, err := kvDB.Get(ctx, zoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("zone config entry not found")
	}

	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))

	checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
	if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil {
		t.Fatal(err)
	}
	checkKeyCount(t, kvDB, tablePrefix, 0)

	// Test that the deleted table cannot be used. This prevents regressions where
	// name -> descriptor ID caches might make this statement erroneously work.
	if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) {
		t.Fatalf("different error than expected: %v", err)
	}

	if gr, err := kvDB.Get(ctx, descKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(ctx, nameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table namekey still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(ctx, zoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("zone config entry still exists after the table is dropped")
	}
}
Example 13
// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	srv, sqlDB, _ := serverutils.StartServer(t, params)
	defer srv.Stopper().Stop()
	s := srv.(*server.TestServer)

	expectedCounter := uint32(keys.MaxReservedDescID)

	defaultZoneConfig := config.DefaultZoneConfig()
	defaultZoneConfig.RangeMinBytes = 1 << 20
	defaultZoneConfig.RangeMaxBytes = 1 << 20
	defaultZoneConfig.GC.TTLSeconds = 60

	{
		buf, err := protoutil.Marshal(&defaultZoneConfig)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		if _, err = sqlDB.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", defaultZoneConfig, err)
		}
	}

	// Naming scheme for database and tables:
	// db1 has tables tb11 and tb12
	// db2 has tables tb21 and tb22

	expectedCounter++
	db1 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	db2 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb11 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb12 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb21 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb22 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	{
		cfg := forceNewConfig(t, s)

		// We have no custom zone configs.
		testCases := []struct {
			key     roachpb.RKey
			zoneCfg config.ZoneConfig
		}{
			{roachpb.RKeyMin, defaultZoneConfig},
			{keys.MakeTablePrefix(0), defaultZoneConfig},
			{keys.MakeTablePrefix(1), defaultZoneConfig},
			{keys.MakeTablePrefix(keys.MaxReservedDescID), defaultZoneConfig},
			{keys.MakeTablePrefix(db1), defaultZoneConfig},
			{keys.MakeTablePrefix(db2), defaultZoneConfig},
			{keys.MakeTablePrefix(tb11), defaultZoneConfig},
			{keys.MakeTablePrefix(tb12), defaultZoneConfig},
			{keys.MakeTablePrefix(tb21), defaultZoneConfig},
			{keys.MakeTablePrefix(tb22), defaultZoneConfig},
		}

		for tcNum, tc := range testCases {
			zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
			if err != nil {
				t.Fatalf("#%d: err=%s", tcNum, err)
			}

			if !proto.Equal(&zoneCfg, &tc.zoneCfg) {
				t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
			}
		}
	}

	// Now set some zone configs. We don't have a nice way of using table
	// names for this, so we do raw puts.
	// Here is the list of dbs/tables and whether they have a custom zone config:
	// db1: true
	//   tb1: true
	//   tb2: false
	// db2: false
	//   tb1: true
	//   tb2: false
	db1Cfg := config.ZoneConfig{
		NumReplicas: 1,
		Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db1"}}},
	}
	tb11Cfg := config.ZoneConfig{
		NumReplicas: 1,
		Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db1.tb1"}}},
	}
	tb21Cfg := config.ZoneConfig{
		NumReplicas: 1,
		Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db2.tb1"}}},
	}
	for objID, objZone := range map[uint32]config.ZoneConfig{
		db1:  db1Cfg,
		tb11: tb11Cfg,
		tb21: tb21Cfg,
	} {
		buf, err := protoutil.Marshal(&objZone)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", objZone, err)
		}
	}

	{
		cfg := forceNewConfig(t, s)

		testCases := []struct {
			key     roachpb.RKey
			zoneCfg config.ZoneConfig
		}{
			{roachpb.RKeyMin, defaultZoneConfig},
			{keys.MakeTablePrefix(0), defaultZoneConfig},
			{keys.MakeTablePrefix(1), defaultZoneConfig},
			{keys.MakeTablePrefix(keys.MaxReservedDescID), defaultZoneConfig},
			{keys.MakeTablePrefix(db1), db1Cfg},
			{keys.MakeTablePrefix(db2), defaultZoneConfig},
			{keys.MakeTablePrefix(tb11), tb11Cfg},
			{keys.MakeTablePrefix(tb12), db1Cfg},
			{keys.MakeTablePrefix(tb21), tb21Cfg},
			{keys.MakeTablePrefix(tb22), defaultZoneConfig},
		}

		for tcNum, tc := range testCases {
			zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
			if err != nil {
				t.Fatalf("#%d: err=%s", tcNum, err)
			}

			if !proto.Equal(&zoneCfg, &tc.zoneCfg) {
				t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
			}
		}
	}
}
Example 14
// runSetZone parses the yaml input file, converts it to proto, and inserts it
// in the system.zones table.
func runSetZone(cmd *cobra.Command, args []string) error {
	if len(args) != 1 {
		return usageAndError(cmd)
	}

	conn, err := getPasswordAndMakeSQLClient()
	if err != nil {
		return err
	}
	defer conn.Close()

	names, err := parseZoneName(args[0])
	if err != nil {
		return err
	}

	if err := conn.Exec(`BEGIN`, nil); err != nil {
		return err
	}

	path, err := queryDescriptorIDPath(conn, names)
	if err != nil {
		if err == io.EOF {
			fmt.Printf("%s not found\n", args[0])
			return nil
		}
		return err
	}

	_, zone, err := queryZonePath(conn, path)
	if err != nil {
		return err
	}
	// Convert it to proto and marshal it again to put into the table. This is a
	// bit more tedious than taking protos directly, but yaml is a more widely
	// understood format.
	// Read the zone config file into conf.

	conf, err := readZoneConfig()
	if err != nil {
		return fmt.Errorf("error reading zone config: %s", err)
	}
	if err := yaml.Unmarshal(conf, &zone); err != nil {
		return fmt.Errorf("unable to parse zoneConfig file: %s", err)
	}

	if err := zone.Validate(); err != nil {
		return err
	}

	buf, err := protoutil.Marshal(&zone)
	if err != nil {
		return fmt.Errorf("unable to parse zone config file %q: %s", args[1], err)
	}

	id := path[len(path)-1]
	_, _, _, err = runQuery(conn, makeQuery(
		`UPSERT INTO system.zones (id, config) VALUES ($1, $2)`,
		id, buf), false)
	if err != nil {
		return err
	}
	if err := conn.Exec(`COMMIT`, nil); err != nil {
		return err
	}

	res, err := yaml.Marshal(zone)
	if err != nil {
		return err
	}
	fmt.Print(string(res))
	return nil
}