Example #1
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch that completely
// contains the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		return ba.CreateReply().(*proto.BatchResponse), nil
	}), clock, false, nil, stopper)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba proto.BatchRequest
	put := &proto.PutRequest{}
	put.Key = proto.Key("test")
	ba.Add(put)
	ba.Add(&proto.EndTransactionRequest{})
	ba.Txn = &proto.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
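
The senderFn adapter used above is not shown in this example. A minimal sketch, reconstructed from the call site (the project's actual definition may differ in name or location):

// senderFn adapts a plain function to the Sender interface expected by
// NewTxnCoordSender. Reconstructed from usage above; not the verbatim
// project definition.
type senderFn func(context.Context, proto.BatchRequest) (*proto.BatchResponse, *proto.Error)

// Send implements the Sender interface by calling the function itself.
func (f senderFn) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	return f(ctx, ba)
}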
Example #2
// rangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem; it may occasionally add
// latency because further range lookups become necessary. Note also
// that rangeLookup bypasses the DistSender's Send() method, so there
// is no error inspection and retry logic here; this is not an issue
// since the lookup performs only a single inconsistent read.
func (ds *DistSender) rangeLookup(key proto.Key, options lookupOptions,
	desc *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
	ba := proto.BatchRequest{}
	ba.ReadConsistency = proto.INCONSISTENT
	ba.Add(&proto.RangeLookupRequest{
		RequestHeader: proto.RequestHeader{
			Key:             key,
			ReadConsistency: proto.INCONSISTENT,
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	reply, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, &ba)
	if err != nil {
		return nil, err
	}
	br := reply.(*proto.BatchResponse)
	if err := br.GoError(); err != nil {
		return nil, err
	}
	return br.Responses[0].GetInner().(*proto.RangeLookupResponse).Ranges, nil
}
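
For context, a call site might look like the following sketch; the metaKey value, the metaDesc descriptor, and the cache handling are illustrative assumptions, not code from this project:

// Illustrative only: look up the descriptors covering a user key via
// the meta index, given a descriptor for the meta range (metaDesc).
descs, err := ds.rangeLookup(metaKey, lookupOptions{}, metaDesc)
if err != nil {
	return err // e.g., retry against a fresher meta descriptor
}
// descs[0] is the best match; any additional descriptors returned (up
// to rangeLookupMaxRanges) can be used to pre-warm a descriptor cache.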
Example #3
File: db.go Project: kumarh1982/cockroach
// send runs the specified calls synchronously in a single batch and
// returns any errors.
func (db *DB) send(reqs ...proto.Request) (*proto.BatchResponse, *proto.Error) {
	if len(reqs) == 0 {
		return &proto.BatchResponse{}, nil
	}

	if len(reqs) == 1 {
		// We only ever send BatchRequests; any other request type must
		// be wrapped in one.
		if ba, ok := reqs[0].(*proto.BatchRequest); ok {
			if ba.UserPriority == nil && db.userPriority != 0 {
				ba.UserPriority = gogoproto.Int32(db.userPriority)
			}
			resetClientCmdID(ba)
			br, pErr := db.sender.Send(context.TODO(), *ba)
			if pErr != nil {
				if log.V(1) {
					log.Infof("failed %s: %s", ba.Method(), pErr)
				}
				return nil, pErr
			}
			return br, nil
		}
	}

	ba := proto.BatchRequest{}
	ba.Add(reqs...)

	br, pErr := db.send(&ba)

	if pErr != nil {
		return nil, pErr
	}
	return br, nil
}
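
To illustrate the wrapping branch at the end of send, a caller can pass several plain requests and have them travel as one batch. A minimal sketch (keys are placeholders):

// Both requests are combined into a single BatchRequest by send and
// reach the sender in one roundtrip.
put := &proto.PutRequest{}
put.Key = proto.Key("a")
get := &proto.GetRequest{}
get.Key = proto.Key("b")
br, pErr := db.send(put, get)
if pErr != nil {
	return nil, pErr // a single *proto.Error covers the whole batch
}
// br.Responses holds one response per request, in order.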
Example #4
// testPut returns a BatchRequest at testTS containing a single Put of
// testKey.
func testPut() proto.BatchRequest {
	var ba proto.BatchRequest
	ba.Timestamp = testTS
	put := &proto.PutRequest{}
	put.Key = testKey
	ba.Add(put)
	return ba
}
Example #5
func (tc *TxnCoordSender) heartbeat(id string, trace *tracer.Trace, ctx context.Context) bool {
	tc.Lock()
	proceed := true
	txnMeta := tc.txns[id]
	// Before we send a heartbeat, determine whether this transaction
	// should be considered abandoned. If so, exit heartbeat.
	if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
		// TODO(tschottdorf): should we be more proactive here?
		// The client might be continuing the transaction
		// through another coordinator, but in the most likely
		// case it's just gone and the open transaction record
		// could block concurrent operations.
		if log.V(1) {
			log.Infof("transaction %s abandoned; stopping heartbeat",
				txnMeta.txn)
		}
		proceed = false
	}
	// txnMeta.txn is possibly replaced concurrently,
	// so grab a copy before unlocking.
	txn := txnMeta.txn
	tc.Unlock()
	if !proceed {
		return false
	}

	hb := &proto.HeartbeatTxnRequest{}
	hb.Key = txn.Key
	ba := proto.BatchRequest{}
	ba.Timestamp = tc.clock.Now()
	ba.Key = txn.Key
	ba.Txn = &txn
	ba.Add(hb)

	epochEnds := trace.Epoch("heartbeat")
	_, err := tc.wrapped.Send(ctx, ba)
	epochEnds()
	// If the transaction is not in pending state, then we can stop
	// the heartbeat. It's either aborted or committed, and we resolve
	// write intents accordingly.
	if err != nil {
		log.Warningf("heartbeat to %s failed: %s", txn, err)
	}
	// TODO(bdarnell): once we have gotten a heartbeat response with
	// Status != PENDING, future heartbeats are useless. However, we
	// need to continue the heartbeatLoop until the client either
	// commits or abandons the transaction. We could save a little
	// pointless work by restructuring this loop to stop sending
	// heartbeats between the time that the transaction is aborted and
	// the client finds out. Furthermore, we could use this information
	// to send TransactionAbortedErrors to the client so it can restart
	// immediately instead of running until its EndTransaction.
	return true
}
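
The loop that drives heartbeat is not part of this example. A minimal sketch of how its boolean result might be consumed, assuming a heartbeatInterval constant and a stopper field on the coordinator (both assumptions):

// Illustrative driver: send heartbeats until heartbeat reports the
// transaction abandoned, or until the node shuts down.
ticker := time.NewTicker(heartbeatInterval)
defer ticker.Stop()
for {
	select {
	case <-ticker.C:
		if !tc.heartbeat(id, trace, ctx) {
			return
		}
	case <-tc.stopper.ShouldStop():
		return
	}
}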
Example #6
// rangeLookup implements the rangeDescriptorDB interface. It looks up
// the descriptors for the given (meta) key.
func (ls *LocalSender) rangeLookup(key proto.Key, options lookupOptions, _ *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
	ba := proto.BatchRequest{}
	ba.ReadConsistency = proto.INCONSISTENT
	ba.Add(&proto.RangeLookupRequest{
		RequestHeader: proto.RequestHeader{
			Key:             key,
			ReadConsistency: proto.INCONSISTENT,
		},
		MaxRanges:       1,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	br, pErr := ls.Send(context.Background(), ba)
	if pErr != nil {
		return nil, pErr.GoError()
	}
	return br.Responses[0].GetInner().(*proto.RangeLookupResponse).Ranges, nil
}
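
This method and the DistSender version in example #2 share the same signature, so the rangeDescriptorDB interface can be reconstructed roughly as follows (an approximation; the real definition may include further methods, such as a first-range lookup):

// rangeDescriptorDB abstracts range metadata lookups so they can be
// served via RPC (DistSender) or from local stores (LocalSender).
type rangeDescriptorDB interface {
	rangeLookup(key proto.Key, options lookupOptions, desc *proto.RangeDescriptor) ([]proto.RangeDescriptor, error)
}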
Example #7
// TestBatchPrevNext tests batch.{Prev,Next}.
func TestBatchPrevNext(t *testing.T) {
	defer leaktest.AfterTest(t)
	// span builds a []keys.Span from consecutive (start, end) string
	// pairs; an empty end string leaves the entry as a point span.
	span := func(strs ...string) []keys.Span {
		var r []keys.Span
		for i, str := range strs {
			if i%2 == 0 {
				r = append(r, keys.Span{Start: proto.Key(str)})
			} else {
				r[len(r)-1].End = proto.Key(str)
			}
		}
		return r
	}
	max, min := string(proto.KeyMax), string(proto.KeyMin)
	abc := span("a", "", "b", "", "c", "")
	testCases := []struct {
		spans             []keys.Span
		key, expFW, expBW string
	}{
		{spans: span("a", "c", "b", ""), key: "b", expFW: "b", expBW: "b"},
		{spans: span("a", "c", "b", ""), key: "a", expFW: "a", expBW: "a"},
		{spans: span("a", "c", "d", ""), key: "c", expFW: "d", expBW: "c"},
		{spans: span("a", "c\x00", "d", ""), key: "c", expFW: "c", expBW: "c"},
		{spans: abc, key: "b", expFW: "b", expBW: "b"},
		{spans: abc, key: "b\x00", expFW: "c", expBW: "b\x00"},
		{spans: abc, key: "bb", expFW: "c", expBW: "b"},
		{spans: span(), key: "whatevs", expFW: max, expBW: min},
	}

	for i, test := range testCases {
		var ba proto.BatchRequest
		for _, span := range test.spans {
			args := &proto.ScanRequest{}
			args.Key, args.EndKey = span.Start, span.End
			ba.Add(args)
		}
		if next := next(ba, proto.Key(test.key)); !bytes.Equal(next, proto.Key(test.expFW)) {
			t.Errorf("%d: next: expected %q, got %q", i, test.expFW, next)
		}
		if prev := prev(ba, proto.Key(test.key)); !bytes.Equal(prev, proto.Key(test.expBW)) {
			t.Errorf("%d: prev: expected %q, got %q", i, test.expBW, prev)
		}
	}
}
Example #8
// wrap returns a BatchRequest containing the given request as its only
// entry.
func wrap(args proto.Request) proto.BatchRequest {
	var ba proto.BatchRequest
	ba.Add(args)
	return ba
}
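
A hypothetical use in a test (the Get request and key are placeholders):

get := &proto.GetRequest{}
get.Key = proto.Key("a")
ba := wrap(get)
// ba now carries exactly one request and can be handed to any Sender.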