Example #1
func (ts *txnSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(ts.wrapped, br))
	}

	// TODO(tschottdorf): see about using only the top-level *proto.Error
	// information for this restart logic (includes adding the Txn).
	err := pErr.GoError()
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to it as
	// well; those update our local state in the same way for the next
	// attempt. The exception is an aborted transaction, which must restart
	// from scratch, and in that case we do just that.
	if err == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if abrtErr, ok := err.(*proto.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = proto.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
			// Acts as a minimum priority on restart.
			Priority: abrtErr.Transaction().GetPriority(),
		}
	} else if txnErr, ok := err.(proto.TransactionRestartError); ok {
		ts.Proto.Update(txnErr.Transaction())
	}
	return nil, pErr
}
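The restart handling above has two distinct cases worth separating: a TransactionAbortedError throws the transaction proto away and starts over (keeping only the name, the isolation level, and the aborted transaction's priority as a floor), while any other TransactionRestartError merely folds the updated transaction into the local proto. A runnable, self-contained sketch of that dispatch, using hypothetical stand-in types rather than the real proto package:

package main

import "fmt"

// Hypothetical local stand-ins for proto.Transaction and the two error
// kinds; only the fields the dispatch below needs are modeled.
type transaction struct {
	Name      string
	Isolation string
	Priority  int32
}

type abortedError struct{ txn transaction }

func (e *abortedError) Error() string { return "txn aborted" }

type restartError struct{ txn transaction }

func (e *restartError) Error() string { return "txn must restart" }

// handleTxnError mirrors the dispatch above: an abort resets the local
// proto to a fresh transaction (keeping name, isolation, and the aborted
// txn's priority as a floor), while a plain restart folds the updated
// state into the existing proto (simplified here to taking the higher
// priority).
func handleTxnError(cur *transaction, err error) {
	switch t := err.(type) {
	case *abortedError:
		*cur = transaction{
			Name:      cur.Name,
			Isolation: cur.Isolation,
			Priority:  t.txn.Priority, // acts as a minimum priority
		}
	case *restartError:
		if t.txn.Priority > cur.Priority {
			cur.Priority = t.txn.Priority
		}
	}
}

func main() {
	txn := transaction{Name: "test", Isolation: "SSI", Priority: 1}
	handleTxnError(&txn, &abortedError{txn: transaction{Priority: 5}})
	fmt.Printf("%+v\n", txn) // {Name:test Isolation:SSI Priority:5}
}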
Example #2
// sendCallConverted is a wrapper that adapts the (ctx, call) interface to
// the one used by batch.Sender.
// TODO(tschottdorf): remove when new proto.Call is gone.
func sendCallConverted(sender Sender, ctx context.Context, call proto.Call) {
	call, unwrap := MaybeWrapCall(call)
	defer unwrap(call)

	{
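		// Sanity checks: the batch must be non-empty, and the key span
		// computed from its requests must not start at KeyMax.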
		br := *call.Args.(*proto.BatchRequest)
		if len(br.Requests) == 0 {
			panic(br)
		}
		br.Key, br.EndKey = keys.Range(br)
		if bytes.Equal(br.Key, proto.KeyMax) {
			panic(br)
		}
	}

	reply, pErr := sender.Send(ctx, *call.Args.(*proto.BatchRequest))

	if reply != nil {
		call.Reply.Reset() // required for BatchRequest (responses would concatenate otherwise)
		gogoproto.Merge(call.Reply, reply)
	}
	if call.Reply.Header().GoError() != nil {
		panic(proto.ErrorUnexpectedlySet(sender, call.Reply))
	}
	if pErr != nil {
		call.Reply.Header().Error = pErr
	}
}
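The wrap/unwrap pair returned by MaybeWrapCall is the transitional glue the TODO refers to: a non-batch call is boxed into a batch on the way in, and the deferred unwrap restores its original shape on the way out. A self-contained sketch of that shape, with hypothetical request and batch types standing in for the proto ones:

package main

import "fmt"

// Hypothetical stand-ins for a single request and a batch of requests.
type request struct{ name string }
type batch struct{ requests []request }

// maybeWrap boxes a single request into a batch and returns a closure
// that recovers the original, mirroring the MaybeWrapCall/unwrap pairing.
func maybeWrap(r request) (batch, func(batch) request) {
	b := batch{requests: []request{r}}
	return b, func(b batch) request {
		// A real unwrap would also split the batched response back out;
		// here we only recover the request.
		return b.requests[0]
	}
}

func main() {
	b, unwrap := maybeWrap(request{name: "Get"})
	defer func() { fmt.Println("unwrapped:", unwrap(b).name) }()
	fmt.Println("requests in batch:", len(b.requests))
}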
Example #3
// Start starts the test cluster by bootstrapping an in-memory store
// (defaulting to a maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)

	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock:                    ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
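A typical test then only needs Start, the DB handle, and Stop. The sketch below assumes a Stop method (referenced in the doc comment but not shown here) and Put/Get methods on client.DB; treat both as assumptions about the surrounding packages rather than confirmed API:

func TestLocalClusterPutGet(t *testing.T) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stop() // assumed per the doc comment above

	// Exercise the full sender stack assembled by Start():
	// TxnCoordSender -> DistSender -> LocalSender -> Store.
	if err := ltc.DB.Put("test-key", "test-value"); err != nil {
		t.Fatal(err)
	}
	if _, err := ltc.DB.Get("test-key"); err != nil {
		t.Fatal(err)
	}
}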
Example #4
func (ss *notifyingSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	br, pErr := ss.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(ss.wrapped, br))
	}
	if ss.waiter != nil {
		ss.waiter.Done()
	}
	return br, pErr
}
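The waiter here is presumably a *sync.WaitGroup that a test arms before issuing a call, so it can block until the wrapped Send has completed. A minimal sketch of that handshake, with a hypothetical setter (the real arming mechanism is not shown above):

// reset arms the sender with a fresh waiter; a hypothetical helper.
func (ss *notifyingSender) reset(waiter *sync.WaitGroup) {
	ss.waiter = waiter
}

// waitForSend blocks until one call issued by send() has passed
// through the notifyingSender.
func waitForSend(ss *notifyingSender, send func()) {
	var wg sync.WaitGroup
	wg.Add(1)
	ss.reset(&wg)
	send()    // e.g. a client.DB call routed through ss
	wg.Wait() // released by ss.waiter.Done() in Send above
}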
Example #5
// shouldCacheResponse returns whether the response should be cached.
// Responses with write-too-old, write-intent and not-leader errors
// are retried on the server, and so are not recorded in the response
// cache in the hope that a retry will eventually succeed.
func (rc *ResponseCache) shouldCacheResponse(replyWithErr proto.ResponseWithError) bool {
	if err := replyWithErr.Reply.Header().Error; err != nil {
		panic(proto.ErrorUnexpectedlySet(rc, replyWithErr.Reply))
	}

	switch replyWithErr.Err.(type) {
	case *proto.WriteTooOldError, *proto.WriteIntentError, *proto.NotLeaderError:
		return false
	}
	return true
}
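A caller would consult this before persisting an outcome keyed by the request's CmdID; the PutResponse name below is a hypothetical stand-in for whatever write method the cache actually exposes:

// Sketch of a call site: cache only outcomes that are final from the
// client's perspective, so a duplicate CmdID replays the stored result
// rather than re-executing the command.
if rc.shouldCacheResponse(replyWithErr) {
	rc.PutResponse(cmdID, replyWithErr) // hypothetical cache write
}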
Example #6
// Send implements the client.Sender interface. The store is looked up from
// the store map if specified by the request; otherwise, the command is
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *LocalSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error

	// If we aren't given a Replica, we have to bend over backwards a
	// little to look one up. This case applies exclusively to unit tests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *proto.Replica
		var rangeID proto.RangeID
		rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}

	ctx = log.Add(ctx,
		log.Method, ba.Method(), // TODO(tschottdorf): Method() always `Batch`.
		log.Key, ba.Key,
		log.RangeID, ba.RangeID)

	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}

	var br *proto.BatchResponse
	if err != nil {
		return nil, proto.NewError(err)
	}
	// For calls that read data within a txn, we can avoid
	// uncertainty-related retries in certain situations. If the node is
	// in "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See proto.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		trace.Event("read has no clock uncertainty")
		ba.Txn.MaxTimestamp = ba.Txn.Timestamp
	}
	// TODO(tschottdorf): &ba -> ba
	tmpR, pErr := store.ExecuteCmd(ctx, &ba)
	// TODO(tschottdorf): remove this dance once BatchResponse is returned.
	if tmpR != nil {
		br = tmpR.(*proto.BatchResponse)
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(store, br))
		}
	}
	return br, pErr
}
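To make the MaxTimestamp=Timestamp trick concrete: a read at timestamp T with maximum clock offset M must treat any value written in the window (T, T+M] as possibly ahead of its own clock, which may force a retry. Shrinking MaxTimestamp down to Timestamp empties that window. A runnable sketch, with a hypothetical timestamp type standing in for proto.Timestamp:

package main

import "fmt"

// hlcTimestamp is a hypothetical stand-in for proto.Timestamp.
type hlcTimestamp struct {
	WallTime int64
	Logical  int32
}

func (t hlcTimestamp) Less(o hlcTimestamp) bool {
	return t.WallTime < o.WallTime ||
		(t.WallTime == o.WallTime && t.Logical < o.Logical)
}

// isUncertain reports whether a value written at writeTS falls into the
// read's uncertainty window (readTS, maxTS]. Setting maxTS = readTS, as
// the sender above does for nodes in CertainNodes, empties the window.
func isUncertain(readTS, maxTS, writeTS hlcTimestamp) bool {
	return readTS.Less(writeTS) && !maxTS.Less(writeTS)
}

func main() {
	read := hlcTimestamp{WallTime: 100}
	max := hlcTimestamp{WallTime: 150} // read timestamp + max clock offset
	write := hlcTimestamp{WallTime: 120}

	fmt.Println(isUncertain(read, max, write))  // true: retry may be needed
	fmt.Println(isUncertain(read, read, write)) // false: window is empty
}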
Example #7
// executeCmd interprets the given message as a *proto.BatchRequest and sends it
// via the local sender.
func (s *DBServer) executeCmd(argsI gogoproto.Message) (gogoproto.Message, error) {
	ba := argsI.(*proto.BatchRequest)
	if err := verifyRequest(ba); err != nil {
		return nil, err
	}
	br, pErr := s.sender.Send(context.TODO(), *ba)
	if pErr != nil {
		br = &proto.BatchResponse{}
	}
	if br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(s.sender, br))
	}
	br.Error = pErr
	return br, nil
}
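The shuffle at the end is deliberate: inside the sender stack, the invariant is that errors travel only in the separate *proto.Error return value (hence the panic when br.Error arrives already set), but the gogoproto RPC layer returns a single message, so pErr is folded into br.Error just before crossing the wire. A sketch of the receiving side undoing the fold to restore the invariant (the function name is hypothetical):

// unfoldError splits a wire-level BatchResponse back into the (reply,
// error) pair used internally, mirroring executeCmd above in reverse.
func unfoldError(msg gogoproto.Message) (*proto.BatchResponse, *proto.Error) {
	br := msg.(*proto.BatchResponse)
	pErr := br.Error
	br.Error = nil // errors travel separately inside the sender stack
	return br, pErr
}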
Example #8
// executeCmd wraps the given request in a batch if necessary and sends it
// via our local sender, unwrapping the response on the way out.
func (s *DBServer) executeCmd(argsI gogoproto.Message) (gogoproto.Message, error) {
	args := argsI.(proto.Request)
	ba, unwrap := client.MaybeWrap(args)
	if err := verifyRequest(args); err != nil {
		return nil, err
	}
	br, pErr := s.sender.Send(context.TODO(), *ba)
	if pErr != nil {
		br = &proto.BatchResponse{}
	}
	if br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(s.sender, br))
	}
	br.Error = pErr
	return unwrap(br), nil
}
Example #9
// executeCmd interprets the given message as a *proto.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI gogoproto.Message) (gogoproto.Message, error) {
	ba := argsI.(*proto.BatchRequest)
	// TODO(tschottdorf): get a hold of the client's ID, add it to the
	// context before dispatching, and create an ID for tracing the request.
	ba.CmdID = ba.GetOrCreateCmdID(n.ctx.Clock.PhysicalNow())
	trace := n.ctx.Tracer.NewTrace(ba)
	defer trace.Finalize()
	defer trace.Epoch("node")()
	ctx := tracer.ToCtx(n.context(), trace)

	br, pErr := n.lSender.Send(ctx, *ba)
	if pErr != nil {
		br = &proto.BatchResponse{}
		trace.Event(fmt.Sprintf("error: %T", pErr.GoError()))
	}
	if br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(n.lSender, br))
	}
	n.feed.CallComplete(*ba, pErr)
	br.Error = pErr
	return br, nil
}