Example No. 1
func newTestSender(handler func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()
		var writing bool
		switch call.Args.(type) {
		case *proto.PutRequest:
			gogoproto.Merge(call.Reply, testPutResp)
			writing = true
		case *proto.EndTransactionRequest:
			writing = true
		default:
			// Do nothing.
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil {
			txn.Writing = writing
		}

		if handler != nil {
			handler(call)
		}
	}
}
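The SenderFunc above stubs out the transport for transaction tests: it stamps a fixed transaction key/ID onto the request, merges a canned response into the reply, and hands the call to an optional handler. Below is a minimal, self-contained sketch of the same pattern; the Request, Reply, Call, and SenderFunc types here are simplified stand-ins invented for illustration, not CockroachDB's proto.Call machinery.

package main

import "fmt"

// Simplified stand-ins for the request/reply plumbing; hypothetical types,
// not the real proto.Call machinery.
type Request struct {
	Method string
	Key    string
}

type Reply struct {
	TxnID string
	Value string
}

type Call struct {
	Args  *Request
	Reply *Reply
}

// SenderFunc lets a plain function act as the transport, mirroring the test
// sender above.
type SenderFunc func(Call)

// newStubSender returns a sender that assigns a fixed transaction ID, fills
// in canned replies, and invokes an optional handler so tests can observe or
// mutate each call.
func newStubSender(handler func(Call)) SenderFunc {
	txnID := "test-txn-1"
	return func(call Call) {
		call.Reply.TxnID = txnID
		if call.Args.Method == "Put" {
			call.Reply.Value = "ok"
		}
		if handler != nil {
			handler(call)
		}
	}
}

func main() {
	var seen []string
	send := newStubSender(func(c Call) { seen = append(seen, c.Args.Method) })

	call := Call{Args: &Request{Method: "Put", Key: "a"}, Reply: &Reply{}}
	send(call)
	fmt.Println(call.Reply.TxnID, call.Reply.Value, seen) // test-txn-1 ok [Put]
}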
Example No. 2
// AddCmd adds a command for execution on this range. The command's
// affected keys are verified to be contained within the range and the
// range's leadership is confirmed. The command is then dispatched
// either along the read-only execution path or the read-write Raft
// command queue.
func (r *Range) AddCmd(ctx context.Context, call proto.Call) error {
	args := call.Args
	// TODO(tschottdorf) Some (internal) requests go here directly, so they
	// won't be traced.
	trace := tracer.FromCtx(ctx)
	// Differentiate between admin, read-only and read-write.
	var reply proto.Response
	var err error
	if proto.IsAdmin(args) {
		defer trace.Epoch("admin path")()
		reply, err = r.addAdminCmd(ctx, args)
	} else if proto.IsReadOnly(args) {
		defer trace.Epoch("read-only path")()
		reply, err = r.addReadOnlyCmd(ctx, args)
	} else if proto.IsWrite(args) {
		defer trace.Epoch("read-write path")()
		reply, err = r.addWriteCmd(ctx, args, nil)
	} else {
		panic(fmt.Sprintf("don't know how to handle command %T", args))
	}

	if reply != nil {
		gogoproto.Merge(call.Reply, reply)
	}

	if err != nil {
		replyHeader := call.Reply.Header()
		if replyHeader.Error != nil {
			panic("the world is on fire")
		}
		replyHeader.SetGoError(err)
	}

	return err
}
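AddCmd's job is routing: admin commands, read-only commands, and writes each take a different execution path. A rough, runnable sketch of that dispatch shape, using a plain type switch on hypothetical request types in place of the real proto.IsAdmin/IsReadOnly/IsWrite predicates:

package main

import "fmt"

// Hypothetical request types standing in for the proto request messages.
type GetRequest struct{ Key string }
type PutRequest struct{ Key, Value string }
type AdminSplitRequest struct{ SplitKey string }

// addCmd dispatches a request along the appropriate execution path,
// mirroring the structure of Range.AddCmd above.
func addCmd(req interface{}) (string, error) {
	switch r := req.(type) {
	case *AdminSplitRequest:
		return "admin path: split at " + r.SplitKey, nil
	case *GetRequest:
		return "read-only path: get " + r.Key, nil
	case *PutRequest:
		return "read-write path: put " + r.Key, nil
	default:
		return "", fmt.Errorf("don't know how to handle command %T", r)
	}
}

func main() {
	for _, req := range []interface{}{
		&PutRequest{Key: "a", Value: "1"},
		&GetRequest{Key: "a"},
		&AdminSplitRequest{SplitKey: "m"},
	} {
		fmt.Println(addCmd(req))
	}
}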
Example No. 3
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from
// the coordinator. Returns the updated transaction.
func (r *Range) InternalHeartbeatTxn(batch engine.Engine, ms *engine.MVCCStats,
	args *proto.InternalHeartbeatTxnRequest, reply *proto.InternalHeartbeatTxnResponse) {
	key := keys.TransactionKey(args.Txn.Key, args.Txn.ID)

	var txn proto.Transaction
	ok, err := engine.MVCCGetProto(batch, key, proto.ZeroTimestamp, true, nil, &txn)
	if err != nil {
		reply.SetGoError(err)
		return
	}
	// If no existing transaction record was found, initialize
	// to the transaction in the request header.
	if !ok {
		gogoproto.Merge(&txn, args.Txn)
	}
	if txn.Status == proto.PENDING {
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &proto.Timestamp{}
		}
		if txn.LastHeartbeat.Less(args.Header().Timestamp) {
			*txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &txn); err != nil {
			reply.SetGoError(err)
			return
		}
	}
	reply.Txn = &txn
}
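The heartbeat handler above follows a small load-or-initialize pattern: fetch the transaction record, seed it from the request if it does not exist yet, advance LastHeartbeat only forward, and persist it only while the transaction is still PENDING. A compact sketch of that logic against an in-memory map instead of the MVCC engine (the txnRecord type and store are invented for illustration):

package main

import (
	"fmt"
	"time"
)

// txnRecord is a hypothetical stand-in for the persisted transaction proto.
type txnRecord struct {
	Status        string
	LastHeartbeat time.Time
}

// records stands in for the MVCC engine used by InternalHeartbeatTxn above.
var records = map[string]*txnRecord{}

// heartbeatTxn loads the record (or initializes it), advances LastHeartbeat
// monotonically, and only does so while the transaction is still PENDING.
func heartbeatTxn(id string, now time.Time) *txnRecord {
	rec, ok := records[id]
	if !ok {
		rec = &txnRecord{Status: "PENDING"}
		records[id] = rec
	}
	if rec.Status == "PENDING" && rec.LastHeartbeat.Before(now) {
		rec.LastHeartbeat = now
	}
	return rec
}

func main() {
	t1 := time.Now()
	heartbeatTxn("txn-1", t1)
	// A stale heartbeat must not move the timestamp backwards.
	rec := heartbeatTxn("txn-1", t1.Add(-time.Second))
	fmt.Println(rec.LastHeartbeat.Equal(t1)) // true
}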
Example No. 4
// SendCallConverted is a wrapper to go from the (ctx,call) interface to the
// one used by batch.Sender.
// TODO(tschottdorf): remove when new proto.Call is gone.
func SendCallConverted(sender BatchSender, ctx context.Context, call proto.Call) {
	call, unwrap := MaybeWrapCall(call)
	defer unwrap(call)

	{
		br := *call.Args.(*proto.BatchRequest)
		if len(br.Requests) == 0 {
			panic(br)
		}
		br.Key, br.EndKey = keys.Range(br)
		if bytes.Equal(br.Key, proto.KeyMax) {
			panic(br)
		}
	}

	reply, err := sender.SendBatch(ctx, *call.Args.(*proto.BatchRequest))

	if reply != nil {
		call.Reply.Reset() // required for BatchRequest (concats response otherwise)
		gogoproto.Merge(call.Reply, reply)
	}
	if call.Reply.Header().GoError() != nil {
		panic(proto.ErrorUnexpectedlySet)
	}
	if err != nil {
		call.Reply.Header().SetGoError(err)
	}
}
Example No. 5
func (db *testSender) sendOne(call proto.Call) {
	switch call.Args.(type) {
	case *proto.EndTransactionRequest:
		safeSetGoError(call.Reply, util.Errorf("%s method not supported", call.Method()))
		return
	}
	// Lookup range and direct request.
	header := call.Args.Header()
	if rng := db.store.LookupRange(header.Key, header.EndKey); rng != nil {
		header.RangeID = rng.Desc().RangeID
		replica := rng.GetReplica()
		if replica == nil {
			safeSetGoError(call.Reply, util.Errorf("own replica missing in range"))
			return
		}
		header.Replica = *replica
		reply, err := db.store.ExecuteCmd(context.Background(), call.Args)
		if reply != nil {
			gogoproto.Merge(call.Reply, reply)
		}
		if call.Reply.Header().Error != nil {
			panic(proto.ErrorUnexpectedlySet)
		}
		if err != nil {
			call.Reply.Header().SetGoError(err)
		}
	} else {
		safeSetGoError(call.Reply, proto.NewRangeKeyMismatchError(header.Key, header.EndKey, nil))
	}
}
Example No. 6
// send runs the specified calls synchronously in a single batch and
// returns any errors.
func (db *DB) send(calls ...proto.Call) (pErr *proto.Error) {
	if len(calls) == 0 {
		return nil
	}

	if len(calls) == 1 {
		c := calls[0]
		// We only send BatchRequest. Everything else needs to go into one.
		if _, ok := calls[0].Args.(*proto.BatchRequest); ok {
			if c.Args.Header().UserPriority == nil && db.userPriority != 0 {
				c.Args.Header().UserPriority = gogoproto.Int32(db.userPriority)
			}
			resetClientCmdID(c.Args)
			_ = SendCall(db.sender, c)
			pErr = c.Reply.Header().Error
			if pErr != nil {
				if log.V(1) {
					log.Infof("failed %s: %s", c.Method(), pErr)
				}
			} else if c.Post != nil {
				pErr = proto.NewError(c.Post())
			}
			return pErr
		}
	}

	ba, br := &proto.BatchRequest{}, &proto.BatchResponse{}
	for _, call := range calls {
		ba.Add(call.Args)
	}
	pErr = db.send(proto.Call{Args: ba, Reply: br})

	// Recover from protobuf merge panics.
	defer func() {
		if r := recover(); r != nil {
			// Take care to log merge error and to return it if no error has
			// already been set.
			mergeErr := util.Errorf("unable to merge response: %s", r)
			log.Error(mergeErr)
			if pErr == nil {
				pErr = proto.NewError(mergeErr)
			}
		}
	}()

	// Transfer individual responses from batch response to prepared replies.
	for i, reply := range br.Responses {
		c := calls[i]
		gogoproto.Merge(c.Reply, reply.GetInner())
		if c.Post != nil {
			if e := c.Post(); e != nil && pErr == nil {
				pErr = proto.NewError(e)
			}
		}
	}
	return
}
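The multi-call branch of DB.send follows a gather/scatter pattern: fold every call into one BatchRequest, send it in a single round trip, then copy each element of the batch response back into the reply struct the caller handed in. A self-contained sketch of that shape with invented request/response types (the real code does the copy with gogoproto.Merge):

package main

import "fmt"

// Hypothetical single-call plumbing; the real code batches proto requests.
type request struct{ Key string }
type response struct{ Value string }

type call struct {
	Args  *request
	Reply *response
}

// sendBatch stands in for the transport: it answers all requests in one trip.
func sendBatch(reqs []*request) []*response {
	out := make([]*response, len(reqs))
	for i, r := range reqs {
		out[i] = &response{Value: "val-for-" + r.Key}
	}
	return out
}

// send mirrors DB.send above: gather the calls into one batch, ship it, then
// demultiplex the batched responses into the caller-provided replies.
func send(calls ...call) {
	reqs := make([]*request, len(calls))
	for i, c := range calls {
		reqs[i] = c.Args
	}
	resps := sendBatch(reqs)
	for i, c := range calls {
		*c.Reply = *resps[i] // the real code uses gogoproto.Merge here
	}
}

func main() {
	a := call{Args: &request{Key: "a"}, Reply: &response{}}
	b := call{Args: &request{Key: "b"}, Reply: &response{}}
	send(a, b)
	fmt.Println(a.Reply.Value, b.Reply.Value) // val-for-a val-for-b
}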
Example No. 7
func TestMerge(t *testing.T) {
	for _, m := range mergeTests {
		got := proto.Clone(m.dst)
		proto.Merge(got, m.src)
		if !proto.Equal(got, m.want) {
			t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
		}
	}
}
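For readers unfamiliar with the merge semantics the table-driven test above exercises: proto.Merge copies set fields from src into dst, appends repeated fields, and copies map entries (replacing entries dst already has for the same key). A minimal runnable illustration, assuming the current google.golang.org/protobuf module and using the well-known Struct type only because it needs no generated test protos:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// structpb.Struct is used here only as a convenient concrete message;
	// its single field is a map<string, Value>.
	dst, _ := structpb.NewStruct(map[string]interface{}{"a": 1, "b": "old"})
	src, _ := structpb.NewStruct(map[string]interface{}{"b": "new", "c": true})

	// Entries from src are copied into dst: "b" is replaced, "c" is added,
	// and "a", which src does not set, is left alone.
	proto.Merge(dst, src)

	fmt.Println(dst.AsMap()) // map[a:1 b:new c:true]
}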
Example No. 8
// send runs the specified calls synchronously in a single batch and
// returns any errors.
func (db *DB) send(calls ...proto.Call) (err error) {
	if len(calls) == 0 {
		return nil
	}

	if len(calls) == 1 {
		c := calls[0]
		if c.Args.Header().UserPriority == nil && db.userPriority != 0 {
			c.Args.Header().UserPriority = gogoproto.Int32(db.userPriority)
		}
		resetClientCmdID(c.Args)
		db.sender.Send(context.TODO(), c)
		err = c.Reply.Header().GoError()
		if err != nil {
			if log.V(1) {
				log.Infof("failed %s: %s", c.Method(), err)
			}
		} else if c.Post != nil {
			err = c.Post()
		}
		return
	}

	bArgs, bReply := &proto.BatchRequest{}, &proto.BatchResponse{}
	for _, call := range calls {
		bArgs.Add(call.Args)
	}
	err = db.send(proto.Call{Args: bArgs, Reply: bReply})

	// Recover from protobuf merge panics.
	defer func() {
		if r := recover(); r != nil {
			// Take care to log merge error and to return it if no error has
			// already been set.
			mergeErr := util.Errorf("unable to merge response: %s", r)
			log.Error(mergeErr)
			if err == nil {
				err = mergeErr
			}
		}
	}()

	// Transfer individual responses from batch response to prepared replies.
	for i, reply := range bReply.Responses {
		c := calls[i]
		gogoproto.Merge(c.Reply, reply.GetValue().(gogoproto.Message))
		if c.Post != nil {
			if e := c.Post(); e != nil && err == nil {
				err = e
			}
		}
	}
	return
}
Example No. 9
// Send implements the client.Sender interface. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each store's LookupRange method.
func (ls *LocalSender) Send(ctx context.Context, call proto.Call) {
	var err error
	var store *storage.Store

	trace := tracer.FromCtx(ctx)

	// If we aren't given a Replica, we have to bend over backwards a
	// little here. This case applies exclusively to unittests.
	header := call.Args.Header()
	if header.RaftID == 0 || header.Replica.StoreID == 0 {
		var repl *proto.Replica
		var raftID proto.RaftID
		raftID, repl, err = ls.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.RaftID = raftID
			header.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, call.Method(),
		log.Key, header.Key,
		log.RaftID, header.RaftID)

	if err == nil {
		store, err = ls.GetStore(header.Replica.StoreID)
	}
	var reply proto.Response
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if header.Txn != nil && header.Txn.CertainNodes.Contains(header.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			header.Txn.MaxTimestamp = header.Txn.Timestamp
		}
		reply, err = store.ExecuteCmd(ctx, call.Args)
	}
	if reply != nil {
		gogoproto.Merge(call.Reply, reply)
	}
	if call.Reply.Header().Error != nil {
		panic(proto.ErrorUnexpectedlySet)
	}
	if err != nil {
		call.Reply.Header().SetGoError(err)
	}
}
Example No. 10
// GetResponse looks up a response matching the specified cmdID and
// returns true if found. The response is deserialized into the
// supplied reply parameter. If no response is found, returns
// false. If a command is pending already for the cmdID, then this
// method will block until the command is completed or the
// response cache is cleared.
func (rc *ResponseCache) GetResponse(e engine.Engine, cmdID proto.ClientCmdID, reply proto.Response) (bool, error) {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return false, nil
	}

	// Pull response from the cache and read into reply if available.
	rwResp := proto.ReadWriteCmdResponse{}
	key := keys.ResponseCacheKey(rc.raftID, &cmdID)
	ok, err := engine.MVCCGetProto(e, key, proto.ZeroTimestamp, true, nil, &rwResp)
	if ok && err == nil {
		gogoproto.Merge(reply, rwResp.GetValue().(gogoproto.Message))
	}
	return ok, err
}
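What the response cache buys is idempotency for replayed commands: before executing, look the client command ID up, and if a reply was already persisted, copy it into the caller's reply instead of re-running the command. A tiny in-memory sketch of that lookup (the types here are invented stand-ins, not the MVCC-backed cache above):

package main

import "fmt"

// response is a hypothetical reply type; the real cache stores serialized
// proto responses under a per-range key.
type response struct{ Value string }

type responseCache struct {
	byCmdID map[string]response
}

// getResponse reports whether cmdID already has a cached reply and, if so,
// copies it into the supplied reply, mirroring ResponseCache.GetResponse.
func (rc *responseCache) getResponse(cmdID string, reply *response) bool {
	if cmdID == "" {
		return false // nothing to do for empty command IDs
	}
	cached, ok := rc.byCmdID[cmdID]
	if ok {
		*reply = cached // the real code uses gogoproto.Merge here
	}
	return ok
}

func main() {
	rc := &responseCache{byCmdID: map[string]response{"cmd-1": {Value: "done"}}}
	var reply response
	fmt.Println(rc.getResponse("cmd-1", &reply), reply.Value) // true done
	fmt.Println(rc.getResponse("cmd-2", &reply))              // false
}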
Example No. 11
// GetResponse looks up a response matching the specified cmdID and
// returns true if found. The response is deserialized into the
// supplied reply parameter. If no response is found, returns
// false. If a command is pending already for the cmdID, then this
// method will block until the command is completed or the
// response cache is cleared.
func (rc *ResponseCache) GetResponse(cmdID proto.ClientCmdID, reply proto.Response) (bool, error) {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return false, nil
	}

	// If the response is in the cache or we experienced an error, return.
	rwResp := proto.ReadWriteCmdResponse{}
	key := keys.ResponseCacheKey(rc.raftID, &cmdID)
	ok, err := engine.MVCCGetProto(rc.engine, key, proto.ZeroTimestamp, true, nil, &rwResp)
	if ok && err == nil {
		gogoproto.Merge(reply, rwResp.GetValue().(gogoproto.Message))
	}
	return ok, err
}
Example No. 12
// MaybeWrapCall returns a new call which wraps the original Args and Reply
// in a batch, if necessary.
// TODO(tschottdorf): will go when proto.Call does.
func MaybeWrapCall(call proto.Call) (proto.Call, func(proto.Call) proto.Call) {
	var unwrap func(proto.Response) proto.Response
	call.Args, unwrap = MaybeWrap(call.Args)
	newUnwrap := func(origReply proto.Response) func(proto.Call) proto.Call {
		return func(newCall proto.Call) proto.Call {
			origReply.Reset()
			gogoproto.Merge(origReply, unwrap(newCall.Reply))
			*origReply.Header() = *newCall.Reply.Header()
			newCall.Reply = origReply
			return newCall
		}
	}(call.Reply)
	call.Reply = call.Args.CreateReply()
	return call, newUnwrap
}
Example No. 13
func newTestSender(pre, post func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()

		if pre != nil {
			pre(call)
		}

		var writing bool
		var status proto.TransactionStatus
		switch t := call.Args.(type) {
		case *proto.PutRequest:
			gogoproto.Merge(call.Reply, testPutResp)
			writing = true
		case *proto.EndTransactionRequest:
			writing = true
			if t.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		default:
			// Do nothing.
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil && call.Reply.Header().GoError() == nil {
			txn.Writing = writing
			txn.Status = status
		}

		if post != nil {
			post(call)
		}
	}
}
Example No. 14
// Flush sends all previously prepared calls, buffered by invocations
// of Prepare(). The calls are organized into a single batch command
// and sent together. Flush returns nil if all prepared calls are
// executed successfully. Otherwise, Flush returns the first error,
// where calls are executed in the order in which they were prepared.
// After Flush returns, all prepared reply structs will be valid.
func (kv *KV) Flush() (err error) {
	if len(kv.prepared) == 0 {
		return
	} else if len(kv.prepared) == 1 {
		call := kv.prepared[0]
		kv.prepared = []*Call{}
		err = kv.Call(call.Method, call.Args, call.Reply)
		return
	}
	replies := make([]proto.Response, 0, len(kv.prepared))
	bArgs, bReply := &proto.BatchRequest{}, &proto.BatchResponse{}
	for _, call := range kv.prepared {
		bArgs.Add(call.Args)
		replies = append(replies, call.Reply)
	}
	kv.prepared = []*Call{}
	err = kv.Call(proto.Batch, bArgs, bReply)

	// Recover from protobuf merge panics.
	defer func() {
		if r := recover(); r != nil {
			// Take care to log merge error and to return it if no error has
			// already been set.
			mergeErr := util.Errorf("unable to merge response: %s", r)
			log.Error(mergeErr)
			if err == nil {
				err = mergeErr
			}
		}
	}()

	// Transfer individual responses from batch response to prepared replies.
	for i, reply := range bReply.Responses {
		replies[i].Reset()
		gogoproto.Merge(replies[i], reply.GetValue().(gogoproto.Message))
	}
	return
}
Example No. 15
// GetResponse looks up a response matching the specified cmdID and
// returns true if found. The response is deserialized into the
// supplied reply parameter. If no response is found, returns
// false. If a command is pending already for the cmdID, then this
// method will block until the command is completed or the
// response cache is cleared.
func (rc *ResponseCache) GetResponse(cmdID proto.ClientCmdID, reply proto.Response) (bool, error) {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return false, nil
	}
	// If the command is inflight, wait for it to complete.
	rc.Lock()
	for {
		if cond, ok := rc.inflight[makeCmdIDKey(cmdID)]; ok {
			log.Infof("waiting on cmdID: %s", &cmdID)
			cond.Wait()
		} else {
			break
		}
	}
	// Adding inflight here is preemptive; we don't want to hold lock
	// while fetching from the on-disk cache. The vast, vast majority of
	// calls to GetResponse will be cache misses, so this saves us
	// from acquiring the lock twice: once here and once below in the
	// event we experience a cache miss.
	rc.addInflightLocked(cmdID)
	rc.Unlock()

	// If the response is in the cache or we experienced an error, return.
	rwResp := proto.ReadWriteCmdResponse{}
	key := engine.ResponseCacheKey(rc.raftID, &cmdID)
	if ok, err := engine.MVCCGetProto(rc.engine, key, proto.ZeroTimestamp, nil, &rwResp); ok || err != nil {
		rc.Lock() // Take lock after fetching response from cache.
		defer rc.Unlock()
		rc.removeInflightLocked(cmdID)
		if err == nil && rwResp.GetValue() != nil {
			gogoproto.Merge(reply.(gogoproto.Message), rwResp.GetValue().(gogoproto.Message))
		}
		return ok, err
	}
	// There's no command result cached for this ID; but inflight was added above.
	return false, nil
}
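The inflight bookkeeping above is what makes duplicate submissions safe: a second caller with the same command ID waits on a condition variable until the first finishes, then reads the cached result instead of re-executing. A compact, self-contained sketch of that wait-or-claim pattern (the cache and result types are simplified stand-ins):

package main

import (
	"fmt"
	"sync"
)

// inflightCache lets exactly one caller execute a given command ID at a time;
// duplicates block until the first execution finishes, then read its result.
type inflightCache struct {
	mu       sync.Mutex
	inflight map[string]*sync.Cond
	results  map[string]string
}

func newInflightCache() *inflightCache {
	return &inflightCache{
		inflight: map[string]*sync.Cond{},
		results:  map[string]string{},
	}
}

// getOrClaim returns a cached result, or claims the ID for execution when
// there is none. A caller that claims must call done once it has executed.
func (c *inflightCache) getOrClaim(id string) (result string, cached bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for {
		if cond, ok := c.inflight[id]; ok {
			cond.Wait() // another goroutine is executing this command
			continue
		}
		if res, ok := c.results[id]; ok {
			return res, true
		}
		c.inflight[id] = sync.NewCond(&c.mu) // claim it preemptively
		return "", false
	}
}

// done records the result and wakes any duplicate callers.
func (c *inflightCache) done(id, result string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.results[id] = result
	c.inflight[id].Broadcast()
	delete(c.inflight, id)
}

func main() {
	c := newInflightCache()
	if _, cached := c.getOrClaim("cmd-1"); !cached {
		c.done("cmd-1", "reply-1") // execute the command, then publish the reply
	}
	// A retry with the same command ID now sees the cached reply.
	res, cached := c.getOrClaim("cmd-1")
	fmt.Println(res, cached) // reply-1 true
}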
Example No. 16
func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
	// We may see multiple instances of the same extension in the wire
	// format. For example, the proto compiler may encode custom options in
	// this way. Here, we verify that we merge the extensions together.
	tests := []struct {
		name string
		ext  []*pb.ComplexExtension
	}{
		{
			"two fields",
			[]*pb.ComplexExtension{
				{First: proto.Int32(7)},
				{Second: proto.Int32(11)},
			},
		},
		{
			"repeated field",
			[]*pb.ComplexExtension{
				{Third: []int32{1000}},
				{Third: []int32{2000}},
			},
		},
		{
			"two fields and repeated field",
			[]*pb.ComplexExtension{
				{Third: []int32{1000}},
				{First: proto.Int32(9)},
				{Second: proto.Int32(21)},
				{Third: []int32{2000}},
			},
		},
	}
	for _, test := range tests {
		var buf bytes.Buffer
		var want pb.ComplexExtension

		// Generate a serialized representation of a repeated extension
		// by catenating bytes together.
		for i, e := range test.ext {
			// Merge to create the wanted proto.
			proto.Merge(&want, e)

			// serialize the message
			msg := new(pb.OtherMessage)
			err := proto.SetExtension(msg, pb.E_Complex, e)
			if err != nil {
				t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
			}
			b, err := proto.Marshal(msg)
			if err != nil {
				t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
			}
			buf.Write(b)
		}

		// Unmarshal and read the merged proto.
		msg2 := new(pb.OtherMessage)
		err := proto.Unmarshal(buf.Bytes(), msg2)
		if err != nil {
			t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
		}
		e, err := proto.GetExtension(msg2, pb.E_Complex)
		if err != nil {
			t.Fatalf("[%s] Error getting extension: %v", test.name, err)
		}
		ext := e.(*pb.ComplexExtension)
		if ext == nil {
			t.Fatalf("[%s] Invalid extension", test.name)
		}
		if !reflect.DeepEqual(*ext, want) {
			t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, want)
		}
	}
}
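The concatenation behavior this test relies on is a general property of the wire format: parsing the concatenation of two encoded messages is equivalent to parsing each and merging them. A minimal sketch, assuming the current google.golang.org/protobuf module and using the well-known Struct type so no generated test protos are needed:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	first, _ := structpb.NewStruct(map[string]interface{}{"first": 7})
	second, _ := structpb.NewStruct(map[string]interface{}{"second": 11})

	// Serialize each message separately and concatenate the wire bytes,
	// as the test above does for repeated occurrences of the extension.
	b1, _ := proto.Marshal(first)
	b2, _ := proto.Marshal(second)

	// Unmarshaling the concatenation yields the merged message.
	var merged structpb.Struct
	if err := proto.Unmarshal(append(b1, b2...), &merged); err != nil {
		panic(err)
	}
	fmt.Println(merged.AsMap()) // map[first:7 second:11]
}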