Example 1
// newTestSender mocks out some of the txn coordinator sender's
// functionality. It responds to PutRequests using testPutResp.
func newTestSender(pre, post func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)) SenderFunc {
	txnKey := roachpb.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		ba.UserPriority = proto.Int32(-1)
		if ba.Txn != nil && len(ba.Txn.ID) == 0 {
			ba.Txn.Key = txnKey
			ba.Txn.ID = txnID
		}

		var br *roachpb.BatchResponse
		var pErr *roachpb.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = ba.CreateReply()
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := roachpb.PENDING
		for i, req := range ba.Requests {
			args := req.GetInner()
			if _, ok := args.(*roachpb.PutRequest); ok {
				if !br.Responses[i].SetValue(proto.Clone(testPutResp).(roachpb.Response)) {
					panic("failed to set put response")
				}
			}
			if roachpb.IsTransactionWrite(args) {
				writing = true
			}
		}
		if args, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := args.(*roachpb.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = roachpb.COMMITTED
			} else {
				status = roachpb.ABORTED
			}
		}
		br.Txn = proto.Clone(ba.Txn).(*roachpb.Transaction)
		if br.Txn != nil && pErr == nil {
			br.Txn.Writing = writing
			br.Txn.Status = status
		}

		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
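A brief usage sketch (not part of the original snippet): the mock sender is meant to be wired into the package-local newDB and NewTxn test helpers, as Example 26 below does; passing nil for pre and post exercises the default reply path.

// Hypothetical wiring of the mock sender; newDB and NewTxn are the
// package-local test helpers that appear in Example 26.
db := newDB(newTestSender(nil, nil))
txn := NewTxn(*db)
_ = txn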
Example 2
func newTestSender(pre, post func(proto.BatchRequest) (*proto.BatchResponse, *proto.Error)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		ba.UserPriority = gogoproto.Int32(-1)
		if ba.Txn != nil && len(ba.Txn.ID) == 0 {
			ba.Txn.Key = txnKey
			ba.Txn.ID = txnID
		}

		var br *proto.BatchResponse
		var pErr *proto.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = &proto.BatchResponse{}
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := proto.PENDING
		if _, ok := ba.GetArg(proto.Put); ok {
			br.Add(gogoproto.Clone(testPutResp).(proto.Response))
			writing = true
		}
		if args, ok := ba.GetArg(proto.EndTransaction); ok {
			et := args.(*proto.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		}
		br.Txn = gogoproto.Clone(ba.Txn).(*proto.Transaction)
		if br.Txn != nil && pErr == nil {
			br.Txn.Writing = writing
			br.Txn.Status = status
		}

		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
Example 3
// Future home of the asynchronous schema changer that picks up
// queued schema changes and processes them.
//
// applyMutations applies the queued mutations for a table.
// TODO(vivek): Eliminate the need to pass in tableName.
func (p *planner) applyMutations(tableDesc *TableDescriptor, tableName *parser.QualifiedName) error {
	if len(tableDesc.Mutations) == 0 {
		return nil
	}
	newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)
	// Make all mutations active.
	for _, mutation := range newTableDesc.Mutations {
		newTableDesc.makeMutationComplete(mutation)
	}
	newTableDesc.Mutations = nil
	if err := newTableDesc.Validate(); err != nil {
		return err
	}

	b := client.Batch{}
	if err := p.backfillBatch(&b, tableName, tableDesc, newTableDesc); err != nil {
		return err
	}
	// TODO(pmattis): This is a hack. Remove when schema change operations work
	// properly.
	p.hackNoteSchemaChange(newTableDesc)

	b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc))

	if err := p.txn.Run(&b); err != nil {
		return convertBatchError(newTableDesc, b, err)
	}
	return nil
}
Example 4
// maybeWrap wraps the given argument in a batch, unless it is already one.
func maybeWrap(args proto.Request) (*proto.BatchRequest, func(*proto.BatchResponse) proto.Response) {
	if ba, ok := args.(*proto.BatchRequest); ok {
		return ba, func(br *proto.BatchResponse) proto.Response { return br }
	}
	ba := &proto.BatchRequest{}
	ba.RequestHeader = *(gogoproto.Clone(args.Header()).(*proto.RequestHeader))
	ba.Add(args)
	return ba, func(br *proto.BatchResponse) proto.Response {
		var unwrappedReply proto.Response
		if len(br.Responses) == 0 {
			unwrappedReply = args.CreateReply()
		} else {
			unwrappedReply = br.Responses[0].GetInner()
		}
		// The reply's Txn is propagated from one response to the next
		// request, so whenever the Txn changes it needs to be set in the
		// reply, for example to ratchet up the transaction timestamp on
		// writes when necessary.
		// This is internally necessary to execute the batch sequentially,
		// so it makes some sense to take over the burden of updating the
		// Txn from TxnCoordSender - it will only need to act on
		// retries/aborts in the future.
		unwrappedReply.Header().Txn = br.Txn
		if unwrappedReply.Header().Error == nil {
			unwrappedReply.Header().Error = br.Error
		}
		return unwrappedReply
	}
}
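A hypothetical sketch (not from the original source) of the wrap/unwrap round trip that the comment above describes; sendBatch stands in for whatever component actually sends the *proto.BatchRequest.

// args is some proto.Request; sendBatch is an assumed stand-in that
// returns a *proto.BatchResponse.
ba, unwrap := maybeWrap(args)
br := sendBatch(ba)
reply := unwrap(br) // a single proto.Response, with Txn and Error carried over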
Example 5
// Send implements the client.Sender interface.
func (rls *retryableLocalSender) Send(_ context.Context, call proto.Call) {
	// Instant retry to handle the case of a range split, which is
	// exposed here as a RangeKeyMismatchError.
	retryOpts := retry.Options{}
	// In local tests, the RPCs are not actually sent over the wire. We
	// need to clone the Txn in order to avoid unexpected sharing
	// between TxnCoordSender and client.Txn.
	if header := call.Args.Header(); header.Txn != nil {
		header.Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
	}

	var err error
	for r := retry.Start(retryOpts); r.Next(); {
		call.Reply.Header().Error = nil
		rls.LocalSender.Send(context.TODO(), call)
		// Check for range key mismatch error (this could happen if
		// range was split between lookup and execution). In this case,
		// reset header.Replica and engage retry loop.
		if err = call.Reply.Header().GoError(); err != nil {
			if _, ok := err.(*proto.RangeKeyMismatchError); ok {
				// Clear request replica.
				call.Args.Header().Replica = proto.Replica{}
				log.Warning(err)
				continue
			}
		}
		return
	}
	panic(fmt.Sprintf("local sender did not succeed: %s", err))
}
Example 6
// getTableLease acquires a lease for the specified table. The lease will be
// released when the planner closes.
func (p *planner) getTableLease(qname *parser.QualifiedName) (*TableDescriptor, *roachpb.Error) {
	if err := qname.NormalizeTableName(p.session.Database); err != nil {
		return nil, roachpb.NewError(err)
	}

	if qname.Database() == systemDB.Name || testDisableTableLeases {
		// We don't go through the normal lease mechanism for system tables. The
		// system.lease and system.descriptor tables, in particular, are problematic
		// because they are themselves used for acquiring leases, creating a
		// chicken-and-egg problem.
		return p.getTableDesc(qname)
	}

	tableID, pErr := p.getTableID(qname)
	if pErr != nil {
		return nil, pErr
	}

	if p.leases == nil {
		p.leases = make(map[ID]*LeaseState)
	}

	lease, ok := p.leases[tableID]
	if !ok {
		var pErr *roachpb.Error
		lease, pErr = p.leaseMgr.Acquire(p.txn, tableID, 0)
		if pErr != nil {
			return nil, pErr
		}
		p.leases[tableID] = lease
	}

	return proto.Clone(&lease.TableDescriptor).(*TableDescriptor), nil
}
Example 7
func newTestSender(handler func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()
		var writing bool
		switch call.Args.(type) {
		case *proto.PutRequest:
			gogoproto.Merge(call.Reply, testPutResp)
			writing = true
		case *proto.EndTransactionRequest:
			writing = true
		default:
			// Do nothing.
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil {
			txn.Writing = writing
		}

		if handler != nil {
			handler(call)
		}
	}
}
Example 8
func TestClone(t *testing.T) {
	m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
	if !proto.Equal(m, cloneTestMessage) {
		t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
	}

	// Verify it was a deep copy.
	*m.Inner.Port++
	if proto.Equal(m, cloneTestMessage) {
		t.Error("Mutating clone changed the original")
	}
	// Byte fields and repeated fields should be copied.
	if &m.Pet[0] == &cloneTestMessage.Pet[0] {
		t.Error("Pet: repeated field not copied")
	}
	if &m.Others[0] == &cloneTestMessage.Others[0] {
		t.Error("Others: repeated field not copied")
	}
	if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
		t.Error("Others[0].Value: bytes field not copied")
	}
	if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
		t.Error("RepBytes: repeated field not copied")
	}
	if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
		t.Error("RepBytes[0]: bytes field not copied")
	}
}
Example 9
// StartWithStopper is the same as Start, but allows passing a stopper
// explicitly.
func (ts *TestServer) StartWithStopper(stopper *stop.Stopper) error {
	if ts.Ctx == nil {
		ts.Ctx = NewTestContext()
	}

	if stopper == nil {
		stopper = stop.NewStopper()
	}

	// Change the replication requirements so we don't get log spam
	// about ranges not being replicated enough.
	// TODO(marc): set this in the zones table when we have an entry
	// for the default cluster-wide zone config and remove these
	// shenanigans about mutating the global default.
	oldDefaultZC := proto.Clone(config.DefaultZoneConfig).(*config.ZoneConfig)
	config.DefaultZoneConfig.ReplicaAttrs = []roachpb.Attributes{{}}
	stopper.AddCloser(stop.CloserFn(func() {
		config.DefaultZoneConfig = oldDefaultZC
	}))

	var err error
	ts.Server, err = NewServer(ts.Ctx, stopper)
	if err != nil {
		return err
	}

	// Ensure we have the correct number of engines, adding in-memory ones where
	// needed. There must be at least one store/engine.
	if ts.StoresPerNode < 1 {
		ts.StoresPerNode = 1
	}
	for i := len(ts.Ctx.Engines); i < ts.StoresPerNode; i++ {
		ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(roachpb.Attributes{}, 100<<20, ts.Server.stopper))
	}

	if !ts.SkipBootstrap {
		stopper := stop.NewStopper()
		_, err := BootstrapCluster("cluster-1", ts.Ctx.Engines, stopper)
		if err != nil {
			return util.Errorf("could not bootstrap cluster: %s", err)
		}
		stopper.Stop()
	}
	if err := ts.Server.Start(true); err != nil {
		return err
	}

	// If enabled, wait for initial splits to complete before returning control.
	// If initial splits do not complete, the server is stopped before
	// returning.
	if config.TestingTableSplitsDisabled() {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
Example 10
func Generate(req *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse {
	// Begin by allocating a generator. The request and response structures are stored there
	// so we can do error handling easily - the response structure contains the field to
	// report failure.
	g := generator.New()
	g.Request = req

	g.CommandLineParameters(g.Request.GetParameter())

	// Create a wrapped version of the Descriptors and EnumDescriptors that
	// point to the file that defines them.
	g.WrapTypes()

	g.SetPackageNames()
	g.BuildTypeNameMap()

	g.GenerateAllFiles()

	if err := goformat(g.Response); err != nil {
		g.Error(err)
	}

	testReq := proto.Clone(req).(*plugin.CodeGeneratorRequest)

	testResp := GeneratePlugin(testReq, testgen.NewPlugin(), "pb_test.go")

	for i := 0; i < len(testResp.File); i++ {
		if strings.Contains(*testResp.File[i].Content, `//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) {
			g.Response.File = append(g.Response.File, testResp.File[i])
		}
	}

	return g.Response
}
Example 11
// Future home of the asynchronous schema changer that picks up
// queued schema changes and processes them.
//
// applyMutations applies the queued mutations for a table.
func (p *planner) applyMutations(tableDesc *TableDescriptor) error {
	if len(tableDesc.Mutations) == 0 {
		return nil
	}
	newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)
	p.applyUpVersion(newTableDesc)
	// Make all mutations active.
	for _, mutation := range newTableDesc.Mutations {
		newTableDesc.makeMutationComplete(mutation)
	}
	newTableDesc.Mutations = nil
	if err := newTableDesc.Validate(); err != nil {
		return err
	}

	b := client.Batch{}
	if err := p.backfillBatch(&b, tableDesc, newTableDesc); err != nil {
		return err
	}

	b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc))

	if err := p.txn.Run(&b); err != nil {
		return convertBatchError(newTableDesc, b, err)
	}
	p.notifyCompletedSchemaChange(newTableDesc.ID)
	return nil
}
Example 12
func TestProto3SetDefaults(t *testing.T) {
	in := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: new(tpb.SubDefaults),
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": new(tpb.SubDefaults),
		},
	}

	got := proto.Clone(in).(*pb.Message)
	proto.SetDefaults(got)

	// There are no defaults in proto3.  Everything should be the zero value, but
	// we need to remember to set defaults for nested proto2 messages.
	want := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": {N: proto.Int64(7)},
		},
	}

	if !proto.Equal(got, want) {
		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
	}
}
Example 13
func TestMerge(t *testing.T) {
	for _, m := range mergeTests {
		got := proto.Clone(m.dst)
		proto.Merge(got, m.src)
		if !proto.Equal(got, m.want) {
			t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
		}
	}
}
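The clone-before-merge pattern above keeps the test fixtures intact across iterations, because proto.Merge mutates its destination in place. A minimal illustration, assuming the pb.MyMessage test type from Example 8 (which has Count and Name fields):

dst := &pb.MyMessage{Count: proto.Int32(1)}
src := &pb.MyMessage{Name: proto.String("merged")}

// Merging into a clone leaves dst untouched for later use.
merged := proto.Clone(dst).(*pb.MyMessage)
proto.Merge(merged, src)
// merged now carries both Count and Name; dst still has only Count.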
Example 14
// Clone uses proto.Clone to return a deep copy of pb. It panics if pb
// recursively contains any instances of types which are known to be
// unsupported by proto.Clone.
//
// This function and its associated lint (see `make check`) exist to ensure we
// do not attempt to proto.Clone types which are not supported by proto.Clone.
// This hackery is necessary because proto.Clone gives no direct indication
// that it has incompletely cloned a type; it merely logs to standard output
// (see https://github.com/golang/protobuf/blob/89238a3/proto/clone.go#L204).
//
// The concrete case against which this is currently guarding may be resolved
// upstream, see https://github.com/gogo/protobuf/issues/147.
func Clone(pb proto.Message) proto.Message {
	for _, verbotenKind := range verbotenKinds {
		if v := findVerboten(reflect.ValueOf(pb), verbotenKind); v != nil {
			panic(fmt.Sprintf("attempt to clone %+v, which contains %+v", pb, v))
		}
	}

	return proto.Clone(pb)
}
Example 15
func sendRPC(opts Options, addrs []net.Addr, rpcContext *Context, name string,
	args, reply gogoproto.Message) ([]gogoproto.Message, error) {
	getArgs := func(addr net.Addr) gogoproto.Message {
		return args
	}
	getReply := func() gogoproto.Message {
		return gogoproto.Clone(reply)
	}
	return Send(opts, name, addrs, getArgs, getReply, rpcContext)
}
Example 16
func newTestSender(pre, post func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()

		if pre != nil {
			pre(call)
		}

		var writing bool
		status := proto.PENDING
		if _, ok := call.Args.(*proto.BatchRequest).GetArg(proto.Put); ok {
			call.Reply.(*proto.BatchResponse).Add(gogoproto.Clone(testPutResp).(proto.Response))
			writing = true
		}
		if args, ok := call.Args.(*proto.BatchRequest).GetArg(proto.EndTransaction); ok {
			et := args.(*proto.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil && call.Reply.Header().GoError() == nil {
			txn.Writing = writing
			txn.Status = status
		}

		if post != nil {
			post(call)
		}
	}
}
Example 17
func sendRPC(opts SendOptions, addrs []net.Addr, rpcContext *rpc.Context, name string,
	args, reply proto.Message) (proto.Message, error) {
	getArgs := func(addr net.Addr) proto.Message {
		return args
	}
	getReply := func() proto.Message {
		return proto.Clone(reply)
	}
	return send(opts, name, addrs, getArgs, getReply, rpcContext)
}
Example 18
func TestCloneProto(t *testing.T) {
	testCases := []struct {
		pb          proto.Message
		shouldPanic bool
	}{
		// Uncloneable types (all contain UUID fields).
		{&roachpb.StoreIdent{}, true},
		{&enginepb.TxnMeta{}, true},
		{&roachpb.Transaction{}, true},
		{&roachpb.Error{}, true},

		// Cloneable types. This includes all types for which a
		// protoutil.Clone call exists in the codebase as of 2016-11-21.
		{&config.ZoneConfig{}, false},
		{&gossip.Info{}, false},
		{&gossip.BootstrapInfo{}, false},
		{&tracing.SpanContextCarrier{}, false},
		{&sqlbase.IndexDescriptor{}, false},
		{&roachpb.SplitTrigger{}, false},
		{&roachpb.Value{}, false},
		{&storagebase.ReplicaState{}, false},
		{&roachpb.RangeDescriptor{}, false},
	}
	for _, tc := range testCases {
		var clone proto.Message
		var panicObj interface{}
		func() {
			defer func() {
				panicObj = recover()
			}()
			clone = protoutil.Clone(tc.pb)
		}()

		if tc.shouldPanic {
			if panicObj == nil {
				t.Errorf("%T: expected panic but didn't get one", tc.pb)
			} else {
				if panicStr := fmt.Sprint(panicObj); !strings.Contains(panicStr, "attempt to clone") {
					t.Errorf("%T: got unexpected panic %s", tc.pb, panicStr)
				}
			}
		} else {
			if panicObj != nil {
				t.Errorf("%T: got unexpected panic %v", tc.pb, panicObj)
			}
		}

		if panicObj == nil {
			realClone := proto.Clone(tc.pb)
			if !reflect.DeepEqual(clone, realClone) {
				t.Errorf("%T: clone did not equal original. expected:\n%+v\ngot:\n%+v", tc.pb, realClone, clone)
			}
		}
	}
}
Example 19
// CreateIndex creates an index.
// Privileges: CREATE on table.
//   notes: postgres requires CREATE on the table.
//          mysql requires INDEX on the table.
func (p *planner) CreateIndex(n *parser.CreateIndex) (planNode, error) {
	tableDesc, err := p.getTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if _, err := tableDesc.FindIndexByName(string(n.Name)); err == nil {
		if n.IfNotExists {
			// Noop.
			return &valuesNode{}, nil
		}
		return nil, fmt.Errorf("index %q already exists", string(n.Name))
	}

	if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil {
		return nil, err
	}

	indexDesc := IndexDescriptor{
		Name:             string(n.Name),
		Unique:           n.Unique,
		ColumnNames:      n.Columns,
		StoreColumnNames: n.Storing,
	}

	newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)

	if err := newTableDesc.AddIndex(indexDesc, false); err != nil {
		return nil, err
	}

	if err := newTableDesc.AllocateIDs(); err != nil {
		return nil, err
	}

	b := client.Batch{}
	if err := p.backfillBatch(&b, n.Table, tableDesc, newTableDesc); err != nil {
		return nil, err
	}

	// TODO(pmattis): This is a hack. Remove when schema change operations work
	// properly.
	p.hackNoteSchemaChange(newTableDesc)

	b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc))

	if err := p.txn.Run(&b); err != nil {
		return nil, convertBatchError(newTableDesc, b, err)
	}

	return &valuesNode{}, nil
}
Example 20
// Clone uses proto.Clone to return a deep copy of pb. It panics if pb
// recursively contains any instances of types which are known to be
// unsupported by proto.Clone.
//
// This function and its associated lint (see `make check`) exist to ensure we
// do not attempt to proto.Clone types which are not supported by proto.Clone.
// This hackery is necessary because proto.Clone gives no direct indication
// that it has incompletely cloned a type; it merely logs to standard output
// (see https://github.com/golang/protobuf/blob/89238a3/proto/clone.go#L204).
//
// The concrete case against which this is currently guarding may be resolved
// upstream, see https://github.com/gogo/protobuf/issues/147.
func Clone(pb proto.Message) proto.Message {
	for _, verbotenKind := range verbotenKinds {
		if typeIsOrContainsVerboten(reflect.TypeOf(pb), verbotenKind) {
			// Try to find an example of the problematic field.
			if v := findVerboten(reflect.ValueOf(pb), verbotenKind); v != nil {
				panic(fmt.Sprintf("attempt to clone %T, which contains uncloneable %T %+v", pb, v, v))
			}
			// If we couldn't find one, panic anyway.
			panic(fmt.Sprintf("attempt to clone %T, which contains uncloneable fields", pb))
		}
	}

	return proto.Clone(pb)
}
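The doc comment above describes a reflective guard against fields that proto.Clone silently mishandles. The walker below is a hypothetical sketch of such a check, not the actual findVerboten/typeIsOrContainsVerboten implementation from the codebase.

// containsKind reports whether v, or anything reachable from it, is of the
// given reflect.Kind. Illustrative only.
func containsKind(v reflect.Value, verboten reflect.Kind) bool {
	if !v.IsValid() {
		return false
	}
	switch v.Kind() {
	case verboten:
		return true
	case reflect.Ptr, reflect.Interface:
		if v.IsNil() {
			return false
		}
		return containsKind(v.Elem(), verboten)
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if containsKind(v.Field(i), verboten) {
				return true
			}
		}
	case reflect.Slice, reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if containsKind(v.Index(i), verboten) {
				return true
			}
		}
	case reflect.Map:
		for _, k := range v.MapKeys() {
			if containsKind(k, verboten) || containsKind(v.MapIndex(k), verboten) {
				return true
			}
		}
	}
	return false
}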
Example 21
func (g *Generator) GetMapKeyField(field, keyField *descriptor.FieldDescriptorProto) *descriptor.FieldDescriptorProto {
	if !gogoproto.IsCastKey(field) {
		return keyField
	}
	keyField = proto.Clone(keyField).(*descriptor.FieldDescriptorProto)
	if keyField.Options == nil {
		keyField.Options = &descriptor.FieldOptions{}
	}
	keyType := gogoproto.GetCastKey(field)
	if err := proto.SetExtension(keyField.Options, gogoproto.E_Casttype, &keyType); err != nil {
		g.Fail(err.Error())
	}
	return keyField
}
Example 22
// TestTxnResetTxnOnAbort verifies transaction is reset on abort.
func TestTxnResetTxnOnAbort(t *testing.T) {
	defer leaktest.AfterTest(t)
	db := newDB(newTestSender(func(call proto.Call) {
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		call.Reply.Header().SetGoError(&proto.TransactionAbortedError{})
	}))

	txn := NewTxn(*db)
	txn.db.Sender.Send(context.Background(),
		proto.Call{Args: testPutReq, Reply: &proto.PutResponse{}})

	if len(txn.Proto.ID) != 0 {
		t.Errorf("expected txn to be cleared")
	}
}
Example 23
func (ls *Stores) updateBootstrapInfo(bi *gossip.BootstrapInfo) error {
	if bi.Timestamp.Less(ls.biLatestTS) {
		return nil
	}
	// Update the latest timestamp and set cached version.
	ls.biLatestTS = bi.Timestamp
	ls.latestBI = proto.Clone(bi).(*gossip.BootstrapInfo)
	// Update all stores.
	for _, s := range ls.storeMap {
		if err := engine.MVCCPutProto(s.engine, nil, keys.StoreGossipKey(), roachpb.ZeroTimestamp, nil, bi); err != nil {
			return err
		}
	}
	return nil
}
Example 24
// DropIndex drops an index.
// Privileges: CREATE on table.
//   Notes: postgres allows only the index owner to DROP an index.
//          mysql requires the INDEX privilege on the table.
func (p *planner) DropIndex(n *parser.DropIndex) (planNode, error) {
	b := client.Batch{}
	for _, indexQualifiedName := range n.Names {
		if err := indexQualifiedName.NormalizeTableName(p.session.Database); err != nil {
			return nil, err
		}

		tableDesc, err := p.getTableDesc(indexQualifiedName)
		if err != nil {
			return nil, err
		}

		if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil {
			return nil, err
		}

		newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)

		idxName := indexQualifiedName.Index()
		i, err := newTableDesc.FindIndexByName(idxName)
		if err != nil {
			if n.IfExists {
				// Noop.
				return &valuesNode{}, nil
			}
			// Index does not exist, but we want it to: error out.
			return nil, err
		}
		newTableDesc.Indexes = append(newTableDesc.Indexes[:i], newTableDesc.Indexes[i+1:]...)

		if err := p.backfillBatch(&b, indexQualifiedName, tableDesc, newTableDesc); err != nil {
			return nil, err
		}

		if err := newTableDesc.Validate(); err != nil {
			return nil, err
		}

		descKey := MakeDescMetadataKey(newTableDesc.GetID())
		b.Put(descKey, wrapDescriptor(newTableDesc))
	}

	if err := p.txn.Run(&b); err != nil {
		return nil, err
	}

	return &valuesNode{}, nil
}
Example 25
// registerOnce returns true if we should attempt another registration later; it is *not*
// guarded by eventLock: all access to mutable members of MesosSchedulerDriver should be
// explicitly synchronized.
func (driver *MesosSchedulerDriver) registerOnce() bool {
	var (
		failover bool
		pid      *upid.UPID
		info     *mesos.FrameworkInfo
	)
	if func() bool {
		driver.eventLock.RLock()
		defer driver.eventLock.RUnlock()

		if driver.stopped() || driver.connected || driver.masterPid == nil || (driver.credential != nil && !driver.authenticated) {
			log.V(1).Infof("skipping registration request: stopped=%v, connected=%v, authenticated=%v",
				driver.stopped(), driver.connected, driver.authenticated)
			return false
		}
		failover = driver.failover
		pid = driver.masterPid
		info = proto.Clone(driver.frameworkInfo).(*mesos.FrameworkInfo)
		return true
	}() {
		// register framework
		var message proto.Message
		if len(info.GetId().GetValue()) > 0 {
			// not the first time, or failing over
			log.V(1).Infof("Reregistering with master: %v", pid)
			message = &mesos.ReregisterFrameworkMessage{
				Framework: info,
				Failover:  proto.Bool(failover),
			}
			fmt.Printf("Reregistering with master: %v\nwith message:\n%v\n", pid, message)
		} else {
			log.V(1).Infof("Registering with master: %v", pid)
			message = &mesos.RegisterFrameworkMessage{
				Framework: info,
			}
			fmt.Printf("Reregistering with master: %v\nwith message:\n%v\n", pid, message)
		}
		if err := driver.send(pid, message); err != nil {
			log.Errorf("failed to send RegisterFramework message: %v", err)
			if _, err = driver.Stop(failover); err != nil {
				log.Errorf("failed to stop scheduler driver: %v", err)
			}
		}
		return true
	}
	return false
}
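A self-contained sketch of the locking idiom used above: take a consistent snapshot of the mutable state (cloning any protobufs) inside a function literal that holds the read lock, and act on the snapshot only after the lock is released. The names below are illustrative, not from mesos-go.

type driverState struct {
	mu   sync.RWMutex
	info *mesos.FrameworkInfo
}

// snapshotInfo clones the framework info under the read lock so the caller
// can use the copy freely once the lock has been dropped.
func (d *driverState) snapshotInfo() *mesos.FrameworkInfo {
	d.mu.RLock()
	defer d.mu.RUnlock()
	return proto.Clone(d.info).(*mesos.FrameworkInfo)
}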
Example 26
// TestTxnResetTxnOnAbort verifies transaction is reset on abort.
func TestTxnResetTxnOnAbort(t *testing.T) {
	defer leaktest.AfterTest(t)
	db := newDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		return nil, roachpb.NewErrorWithTxn(&roachpb.TransactionAbortedError{},
			proto.Clone(ba.Txn).(*roachpb.Transaction))
	}, nil))

	txn := NewTxn(*db)
	_, pErr := txn.db.sender.Send(context.Background(), testPut())
	if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
		t.Fatalf("expected TransactionAbortedError, got %v", pErr)
	}

	if len(txn.Proto.ID) != 0 {
		t.Errorf("expected txn to be cleared")
	}
}
Example 27
func newTestSender(pre, post func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()

		if pre != nil {
			pre(call)
		}

		var writing bool
		var status proto.TransactionStatus
		switch t := call.Args.(type) {
		case *proto.PutRequest:
			gogoproto.Merge(call.Reply, testPutResp)
			writing = true
		case *proto.EndTransactionRequest:
			writing = true
			if t.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		default:
			// Do nothing.
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil && call.Reply.Header().GoError() == nil {
			txn.Writing = writing
			txn.Status = status
		}

		if post != nil {
			post(call)
		}
	}
}
Example 28
func TestCloneProto(t *testing.T) {
	u := uuid.MakeV4()

	testCases := []struct {
		pb          proto.Message
		shouldPanic bool
	}{
		{&roachpb.StoreIdent{}, false},
		{&roachpb.StoreIdent{ClusterID: uuid.MakeV4()}, true},
		{&enginepb.TxnMeta{}, false},
		{&enginepb.TxnMeta{ID: &u}, true},
		{&roachpb.Transaction{}, false},
		{&config.ZoneConfig{RangeMinBytes: 123, RangeMaxBytes: 456}, false},
	}
	for _, tc := range testCases {
		var clone proto.Message
		var panicObj interface{}
		func() {
			defer func() {
				panicObj = recover()
			}()
			clone = protoutil.Clone(tc.pb)
		}()

		if tc.shouldPanic {
			if panicObj == nil {
				t.Errorf("%T: expected panic but didn't get one", tc.pb)
			}
		} else {
			if panicObj != nil {
				if panicStr := fmt.Sprint(panicObj); !strings.Contains(panicStr, "attempt to clone") {
					t.Errorf("%T: got unexpected panic %s", tc.pb, panicStr)
				}
			}
		}

		if panicObj == nil {
			realClone := proto.Clone(tc.pb)
			if !reflect.DeepEqual(clone, realClone) {
				t.Errorf("%T: clone did not equal original. expected:\n%+v\ngot:\n%+v", tc.pb, realClone, clone)
			}
		}
	}
}
Example 29
// New creates a customized ExecutorInfo for a host
//
// Note: New modifies Command.Arguments and Resources and intentionally
// does not update the executor id (although that originally depended on the
// command arguments and the resources). But as the hostname is constant for a
// given host, and the resources are kept compatible by the registry logic here,
// this will not weaken our litmus test of comparing the prototype ExecutorId with the
// id of running executors when an offer comes in.
func (r *registry) New(
	hostname string,
	resources []*mesosproto.Resource,
) *mesosproto.ExecutorInfo {
	e := proto.Clone(r.prototype).(*mesosproto.ExecutorInfo)
	e.Resources = resources
	setCommandArgument(e, "--hostname-override", hostname)

	r.mu.Lock()
	defer r.mu.Unlock()

	cached, ok := r.cache.Get(hostname)
	if ok {
		return cached
	}

	r.cache.Add(hostname, e)
	return e
}
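setCommandArgument is not shown in this snippet; the helper below is a hypothetical sketch of what such a helper might do (replace an existing "--flag=value" argument or append a new one), not the actual implementation.

// setCommandArgumentSketch replaces an existing "--flag=value" entry in the
// executor's command arguments, or appends one if the flag is absent.
// Hypothetical stand-in for illustration only.
func setCommandArgumentSketch(e *mesosproto.ExecutorInfo, flag, value string) {
	if e.Command == nil {
		e.Command = &mesosproto.CommandInfo{}
	}
	arg := flag + "=" + value
	for i, a := range e.Command.Arguments {
		if strings.HasPrefix(a, flag+"=") {
			e.Command.Arguments[i] = arg
			return
		}
	}
	e.Command.Arguments = append(e.Command.Arguments, arg)
}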
Example 30
func TestUnmarshalling(t *testing.T) {
	for _, tt := range unmarshallingTests {
		// Make a new instance of the type of our expected object.
		p := proto.Clone(tt.pb)
		p.Reset()

		err := UnmarshalString(tt.json, p)
		if err != nil {
			t.Error(err)
			continue
		}

		// For easier diffs, compare text strings of the protos.
		exp := proto.MarshalTextString(tt.pb)
		act := proto.MarshalTextString(p)
		if exp != act {
			t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
		}
	}
}
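The Clone-then-Reset idiom at the top of the loop above generalizes to any proto.Message: it yields a fresh, empty message of the same concrete type without knowing that type statically. A minimal sketch, not part of the original test:

// newEmptyLike returns an empty message of the same concrete type as m.
func newEmptyLike(m proto.Message) proto.Message {
	fresh := proto.Clone(m) // deep copy with the same dynamic type
	fresh.Reset()           // discard the copied contents
	return fresh
}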