Example 1
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	defer engineStopper.Stop()
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.NewTxnMetrics(metric.NewRegistry())); err != nil {
		t.Fatal(err)
	}

	// Set the cluster ID to the empty UUID.
	sIdent := roachpb.StoreIdent{
		ClusterID: *uuid.EmptyUUID,
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(), roachpb.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	_, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t)
	stopper.Stop()
	if err := node.start(serverAddr, engines, roachpb.Attributes{}); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
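Every example in this collection centers on engine.MVCCPutProto, which marshals a protobuf message and writes it at a key, optionally charging the write to MVCC stats and to a transaction. As a point of reference, here is a minimal sketch of the call shape used in Example 1 above; putStoreIdent is a hypothetical helper, not CockroachDB API.

// putStoreIdent illustrates the MVCCPutProto arguments from Example 1:
// a nil stats pointer skips stats tracking, ZeroTimestamp stores the
// value unversioned, and a nil txn makes the write non-transactional.
func putStoreIdent(e engine.Engine, sIdent *roachpb.StoreIdent) error {
	return engine.MVCCPutProto(context.Background(), e, nil,
		keys.StoreIdentKey(), roachpb.ZeroTimestamp, nil, sIdent)
}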
Example 2
// append the given entries to the raft log. Takes the previous value
// of r.lastIndex and returns a new value. We do this rather than
// modifying r.lastIndex directly because this modification needs to
// be atomic with the commit of the batch.
func (r *Replica) append(batch engine.Engine, prevLastIndex uint64, entries []raftpb.Entry) (uint64, error) {
	if len(entries) == 0 {
		return prevLastIndex, nil
	}
	for i := range entries {
		ent := &entries[i]
		key := keys.RaftLogKey(r.RangeID, ent.Index)
		if err := engine.MVCCPutProto(batch, nil, key, roachpb.ZeroTimestamp, nil, ent); err != nil {
			return 0, err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(batch, nil,
			keys.RaftLogKey(r.RangeID, i), roachpb.ZeroTimestamp, nil)
		if err != nil {
			return 0, err
		}
	}

	// Commit the batch and update the last index.
	if err := setLastIndex(batch, r.RangeID, lastIndex); err != nil {
		return 0, err
	}

	return lastIndex, nil
}
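The doc comment spells out the contract: append stages its writes in the batch and returns the new last index, and the caller publishes that value only after the batch commits. A sketch of such a caller follows, reusing the batch pattern from the final example; commitAppend and the atomic r.lastIndex field are assumptions for illustration.

// commitAppend is a hypothetical caller honoring append's contract:
// stage the log writes in a batch, commit it, and only then publish
// the new in-memory last index.
func (r *Replica) commitAppend(entries []raftpb.Entry) error {
	batch := r.rm.Engine().NewBatch()
	defer batch.Close()
	lastIndex, err := r.append(batch, atomic.LoadUint64(&r.lastIndex), entries)
	if err != nil {
		return err
	}
	if err := batch.Commit(); err != nil {
		return err
	}
	// The in-memory value changes only after the entries are durable.
	atomic.StoreUint64(&r.lastIndex, lastIndex)
	return nil
}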
Example 3
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := server.StartTestServer(t)
	defer s.Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())

	// Create an intent on the meta1 record by writing directly to the
	// engine.
	key := testutils.MakeKey(keys.Meta1Prefix, roachpb.KeyMax)
	now := s.Clock().Now()
	txn := roachpb.NewTransaction("txn", roachpb.Key("foobar"), 0, roachpb.SERIALIZABLE, now, 0)
	if err := engine.MVCCPutProto(s.Ctx.Engines[0], nil, key, now, txn, &roachpb.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}

	// Now, with an intent pending, attempt to read from an arbitrary
	// key. This will cause the distributed sender to do a range lookup,
	// which will encounter the intent. We're verifying here that the
	// range lookup doesn't fail with a write intent error. If it did,
	// it would enter an endless loop attempting to push the transaction,
	// which in turn requires another range lookup, and so on ad nauseam.
	if _, err := db.Get("a"); err != nil {
		t.Fatal(err)
	}
}
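At the MVCC layer, the consistent/inconsistent distinction this test depends on is the boolean passed to MVCCGetProto, which is true everywhere else in this collection. Below is a sketch of the inconsistent read that lets the lookup skip the intent, assuming that argument position.

// Sketch: with consistent=false the read skips the intent written
// above rather than failing with a write intent error; since nothing
// is committed beneath the intent, it simply finds no value.
var desc roachpb.RangeDescriptor
if _, err := engine.MVCCGetProto(s.Ctx.Engines[0], key, now,
	false /* consistent */, nil /* txn */, &desc); err != nil {
	t.Fatal(err)
}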
Example 4
// append the given entries to the raft log. Takes the previous value
// of r.lastIndex and returns a new value. We do this rather than
// modifying r.lastIndex directly because this modification needs to
// be atomic with the commit of the batch.
func (r *Replica) append(batch engine.ReadWriter, prevLastIndex uint64, entries []raftpb.Entry) (uint64, error) {
	if len(entries) == 0 {
		return prevLastIndex, nil
	}
	for i := range entries {
		ent := &entries[i]
		key := keys.RaftLogKey(r.RangeID, ent.Index)
		if err := engine.MVCCPutProto(context.Background(), batch, nil, key, hlc.ZeroTimestamp, nil, ent); err != nil {
			return 0, err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(context.Background(), batch, nil,
			keys.RaftLogKey(r.RangeID, i), hlc.ZeroTimestamp, nil)
		if err != nil {
			return 0, err
		}
	}

	if err := setLastIndex(batch, r.RangeID, lastIndex); err != nil {
		return 0, err
	}

	return lastIndex, nil
}
Example 5
// append the given entries to the raft log.
func (r *Replica) append(batch engine.Engine, entries []raftpb.Entry) error {
	if len(entries) == 0 {
		return nil
	}
	for _, ent := range entries {
		err := engine.MVCCPutProto(batch, nil, keys.RaftLogKey(r.RangeID, ent.Index),
			roachpb.ZeroTimestamp, nil, &ent)
		if err != nil {
			return err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	prevLastIndex := atomic.LoadUint64(&r.lastIndex)
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(batch, nil,
			keys.RaftLogKey(r.RangeID, i), roachpb.ZeroTimestamp, nil)
		if err != nil {
			return err
		}
	}

	// Commit the batch and update the last index.
	if err := setLastIndex(batch, r.RangeID, lastIndex); err != nil {
		return err
	}
	batch.Defer(func() {
		atomic.StoreUint64(&r.lastIndex, lastIndex)
	})

	return nil
}
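In this revision append registers the atomic store with batch.Defer instead of returning the new index, which ties the in-memory update to the commit of the batch. Here is a sketch of the commit side, assuming deferred closures run when the batch commits; appendAndCommit is a hypothetical wrapper.

// appendAndCommit stages the entries and commits the batch; the
// closure registered via batch.Defer then publishes r.lastIndex, so
// the in-memory index never runs ahead of the durable log.
func (r *Replica) appendAndCommit(entries []raftpb.Entry) error {
	batch := r.rm.Engine().NewBatch()
	defer batch.Close()
	if err := r.append(batch, entries); err != nil {
		return err
	}
	return batch.Commit()
}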
Example 6
// append the given entries to the raft log. Takes the previous values of
// r.mu.lastIndex and r.mu.raftLogSize, and returns new values. We do this
// rather than modifying them directly because these modifications need to be
// atomic with the commit of the batch.
func (r *Replica) append(batch engine.ReadWriter, prevLastIndex uint64, prevRaftLogSize int64, entries []raftpb.Entry) (uint64, int64, error) {
	if len(entries) == 0 {
		return prevLastIndex, prevRaftLogSize, nil
	}
	var diff enginepb.MVCCStats
	ctx := context.Background()
	for i := range entries {
		ent := &entries[i]
		key := keys.RaftLogKey(r.RangeID, ent.Index)
		if err := engine.MVCCPutProto(ctx, batch, &diff, key, hlc.ZeroTimestamp, nil /* txn */, ent); err != nil {
			return 0, 0, err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(ctx, batch, &diff, keys.RaftLogKey(r.RangeID, i),
			hlc.ZeroTimestamp, nil /* txn */)
		if err != nil {
			return 0, 0, err
		}
	}

	if err := setLastIndex(batch, r.RangeID, lastIndex); err != nil {
		return 0, 0, err
	}

	raftLogSize := prevRaftLogSize + diff.SysBytes

	return lastIndex, raftLogSize, nil
}
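This revision also threads an enginepb.MVCCStats delta through the MVCC calls, so the size effect of the appends and deletions falls out of diff.SysBytes instead of being computed by hand. A hedged sketch of a caller publishing both results under the replica mutex, as the doc comment prescribes; commitLogAppend is illustrative, not the actual call site.

// commitLogAppend commits the staged writes, then publishes the new
// last index and raft log size under r.mu, per append's contract.
func (r *Replica) commitLogAppend(batch engine.Batch, entries []raftpb.Entry) error {
	r.mu.Lock()
	prevLastIndex, prevLogSize := r.mu.lastIndex, r.mu.raftLogSize
	r.mu.Unlock()
	lastIndex, raftLogSize, err := r.append(batch, prevLastIndex, prevLogSize, entries)
	if err != nil {
		return err
	}
	if err := batch.Commit(); err != nil {
		return err
	}
	// Publish both values only after the batch is durable.
	r.mu.Lock()
	r.mu.lastIndex, r.mu.raftLogSize = lastIndex, raftLogSize
	r.mu.Unlock()
	return nil
}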
Example 7
// InternalTruncateLog discards a prefix of the raft log.
func (r *Range) InternalTruncateLog(batch engine.Engine, ms *engine.MVCCStats, args *proto.InternalTruncateLogRequest, reply *proto.InternalTruncateLogResponse) {
	// args.Index is the first index to keep.
	term, err := r.Term(args.Index - 1)
	if err != nil {
		reply.SetGoError(err)
		return
	}
	start := keys.RaftLogKey(r.Desc().RaftID, 0)
	end := keys.RaftLogKey(r.Desc().RaftID, args.Index)
	err = batch.Iterate(engine.MVCCEncodeKey(start), engine.MVCCEncodeKey(end),
		func(kv proto.RawKeyValue) (bool, error) {
			err := batch.Clear(kv.Key)
			return false, err
		})
	if err != nil {
		reply.SetGoError(err)
		return
	}
	ts := proto.RaftTruncatedState{
		Index: args.Index - 1,
		Term:  term,
	}
	err = engine.MVCCPutProto(batch, ms, keys.RaftTruncatedStateKey(r.Desc().RaftID),
		proto.ZeroTimestamp, nil, &ts)
	reply.SetGoError(err)
}
Example 8
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)
	engineStopper := stop.NewStopper()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	defer engineStopper.Stop()
	if _, err := bootstrapCluster([]engine.Engine{e}); err != nil {
		t.Fatal(err)
	}

	// Set the cluster ID to an empty string.
	sIdent := roachpb.StoreIdent{
		ClusterID: "",
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(e, nil, keys.StoreIdentKey(), roachpb.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	server, serverAddr, _, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	stopper.Stop()
	if err := node.start(server, serverAddr, engines, roachpb.Attributes{}); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
Example 9
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from the
// coordinator. Returns the updated transaction.
func (r *Range) InternalHeartbeatTxn(batch engine.Engine, ms *engine.MVCCStats,
	args *proto.InternalHeartbeatTxnRequest, reply *proto.InternalHeartbeatTxnResponse) {
	key := keys.TransactionKey(args.Txn.Key, args.Txn.ID)

	var txn proto.Transaction
	ok, err := engine.MVCCGetProto(batch, key, proto.ZeroTimestamp, true, nil, &txn)
	if err != nil {
		reply.SetGoError(err)
		return
	}
	// If no existing transaction record was found, initialize
	// to the transaction in the request header.
	if !ok {
		gogoproto.Merge(&txn, args.Txn)
	}
	if txn.Status == proto.PENDING {
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &proto.Timestamp{}
		}
		if txn.LastHeartbeat.Less(args.Header().Timestamp) {
			*txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &txn); err != nil {
			reply.SetGoError(err)
			return
		}
	}
	reply.Txn = &txn
}
Example 10
// SetupRangeTree creates a new RangeTree. This should only be called as part
// of store.BootstrapRange.
func SetupRangeTree(batch engine.Engine, ms *engine.MVCCStats, timestamp roachpb.Timestamp, startKey roachpb.RKey) error {
	tree := &RangeTree{
		RootKey: startKey,
	}
	node := &RangeTreeNode{
		Key:   startKey,
		Black: true,
	}
	if err := engine.MVCCPutProto(batch, ms, keys.RangeTreeRoot, timestamp, nil, tree); err != nil {
		return err
	}
	if err := engine.MVCCPutProto(batch, ms, keys.RangeTreeNodeKey(startKey), timestamp, nil, node); err != nil {
		return err
	}
	return nil
}
Example 11
// setHardState persists the raft HardState for the given range at its
// unversioned local key.
func setHardState(
	ctx context.Context, batch engine.ReadWriter, rangeID roachpb.RangeID, st raftpb.HardState,
) error {
	return engine.MVCCPutProto(ctx, batch, nil,
		keys.RaftHardStateKey(rangeID),
		hlc.ZeroTimestamp, nil, &st)
}
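A plausible call site for setHardState is raft Ready processing, where a non-empty HardState must be persisted before the rest of the Ready is acted on. The guard below comes from the etcd/raft API; the surrounding handler is illustrative, not CockroachDB's actual code.

// Inside a hypothetical Ready handler, with rd a raft.Ready and batch
// the write batch being built for this Ready:
if !raft.IsEmptyHardState(rd.HardState) {
	if err := setHardState(ctx, batch, rangeID, rd.HardState); err != nil {
		return err
	}
}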
Example 12
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)
	eagerStopper := stop.NewStopper()
	e := engine.NewInMem(proto.Attributes{}, 1<<20)
	_, err := BootstrapCluster("cluster-1", []engine.Engine{e}, eagerStopper)
	if err != nil {
		t.Fatal(err)
	}
	eagerStopper.Stop()

	// Set the cluster ID to an empty string.
	sIdent := proto.StoreIdent{
		ClusterID: "",
		NodeID:    1,
		StoreID:   1,
	}
	if err = engine.MVCCPutProto(e, nil, keys.StoreIdentKey(), proto.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	server, _, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	if err := node.start(server, engines, proto.Attributes{}, stopper); err == nil {
		t.Errorf("unexpected success")
	}
	stopper.Stop()
}
Example 13
// readBootstrapInfoLocked reads the most recent gossip bootstrap info
// across the stores and rewrites any store whose copy is stale.
func (ls *Stores) readBootstrapInfoLocked(bi *gossip.BootstrapInfo) error {
	latestTS := roachpb.ZeroTimestamp
	timestamps := map[roachpb.StoreID]roachpb.Timestamp{}

	// Find the most recent bootstrap info, collecting timestamps for
	// each store along the way.
	for id, s := range ls.storeMap {
		var storeBI gossip.BootstrapInfo
		ok, err := engine.MVCCGetProto(s.engine, keys.StoreGossipKey(), roachpb.ZeroTimestamp, true, nil, &storeBI)
		if err != nil {
			return err
		}
		timestamps[id] = storeBI.Timestamp
		if ok && latestTS.Less(storeBI.Timestamp) {
			latestTS = storeBI.Timestamp
			*bi = storeBI
		}
	}

	// Update all stores with an earlier timestamp.
	for id, s := range ls.storeMap {
		if timestamps[id].Less(latestTS) {
			if err := engine.MVCCPutProto(s.engine, nil, keys.StoreGossipKey(), roachpb.ZeroTimestamp, nil, bi); err != nil {
				return err
			}
			log.Infof("updated gossip bootstrap info to %s", s)
		}
	}

	ls.biLatestTS = latestTS
	return nil
}
Example 14
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from the
// coordinator. Returns the updated transaction.
func (r *Range) InternalHeartbeatTxn(batch engine.Engine, ms *engine.MVCCStats, args proto.InternalHeartbeatTxnRequest) (proto.InternalHeartbeatTxnResponse, error) {
	var reply proto.InternalHeartbeatTxnResponse

	key := keys.TransactionKey(args.Txn.Key, args.Txn.ID)

	var txn proto.Transaction
	if ok, err := engine.MVCCGetProto(batch, key, proto.ZeroTimestamp, true, nil, &txn); err != nil {
		return reply, err
	} else if !ok {
		// If no existing transaction record was found, initialize to a
		// shallow copy of the transaction in the request header. We copy
		// to avoid mutating the original below.
		txn = *args.Txn
	}

	if txn.Status == proto.PENDING {
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &proto.Timestamp{}
		}
		if txn.LastHeartbeat.Less(args.Header().Timestamp) {
			*txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &txn); err != nil {
			return reply, err
		}
	}

	reply.Txn = &txn
	return reply, nil
}
Example 15
// TestVerifyQueueShouldQueue verifies that the shouldQueue method
// correctly indicates that a range should be queued for verification
// when the time since the last verification exceeds the threshold.
func TestVerifyQueueShouldQueue(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Put empty verification timestamp
	key := keys.RangeLastVerificationTimestampKey(tc.rng.Desc().RangeID)
	if err := engine.MVCCPutProto(tc.rng.rm.Engine(), nil, key, roachpb.ZeroTimestamp, nil, &roachpb.Timestamp{}); err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		now      roachpb.Timestamp
		shouldQ  bool
		priority float64
	}{
		// No GC'able bytes, no intent bytes, verification interval elapsed.
		{makeTS(verificationInterval.Nanoseconds(), 0), false, 0},
		// No GC'able bytes, no intent bytes, verification interval * 2 elapsed.
		{makeTS(verificationInterval.Nanoseconds()*2, 0), true, 2},
	}

	verifyQ := newVerifyQueue(tc.gossip, nil)

	for i, test := range testCases {
		shouldQ, priority := verifyQ.shouldQueue(test.now, tc.rng, nil /* system config not used */)
		if shouldQ != test.shouldQ {
			t.Errorf("%d: should queue expected %t; got %t", i, test.shouldQ, shouldQ)
		}
		if math.Abs(priority-test.priority) > 0.00001 {
			t.Errorf("%d: priority expected %f; got %f", i, test.priority, priority)
		}
	}
}
Example 16
// Put writes an entry for the specified transaction ID.
func (sc *AbortCache) Put(
	e engine.Engine, ms *engine.MVCCStats, txnID *uuid.UUID, entry *roachpb.AbortCacheEntry) error {
	if txnID == nil {
		return errEmptyTxnID
	}
	key := keys.AbortCacheKey(sc.rangeID, txnID)
	return engine.MVCCPutProto(e, ms, key, roachpb.ZeroTimestamp, nil /* txn */, entry)
}
Example 17
// setTruncatedState persists the raft truncated state for the given
// range, accounting for the write in the supplied MVCC stats.
func setTruncatedState(
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	truncState roachpb.RaftTruncatedState,
) error {
	return engine.MVCCPutProto(context.Background(), eng, ms,
		keys.RaftTruncatedStateKey(rangeID), hlc.ZeroTimestamp, nil, &truncState)
}
Example 18
// setGCThreshold persists the timestamp below which the given range's
// data has been garbage collected.
func setGCThreshold(
	ctx context.Context,
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	threshold *hlc.Timestamp,
) error {
	return engine.MVCCPutProto(ctx, eng, ms,
		keys.RangeLastGCKey(rangeID), hlc.ZeroTimestamp, nil, threshold)
}
Example 19
// InternalGC iterates through the list of keys to garbage collect
// specified in the arguments. MVCCGarbageCollect is invoked on each
// listed key along with the expiration timestamp. The GC metadata
// specified in the args is persisted after GC.
func (r *Range) InternalGC(batch engine.Engine, ms *engine.MVCCStats, args *proto.InternalGCRequest, reply *proto.InternalGCResponse) {
	// Garbage collect the specified keys by expiration timestamps.
	if err := engine.MVCCGarbageCollect(batch, ms, args.Keys, args.Timestamp); err != nil {
		reply.SetGoError(err)
		return
	}

	// Store the GC metadata for this range.
	key := keys.RangeGCMetadataKey(r.Desc().RaftID)
	err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &args.GCMeta)
	reply.SetGoError(err)
}
Example 20
// WriteBootstrapInfo implements the gossip.Storage interface. It
// persists the supplied bootstrap info to every known store, returning
// nil on success or the first error encountered writing to the stores.
func (ls *Stores) WriteBootstrapInfo(bi *gossip.BootstrapInfo) error {
	ls.mu.RLock()
	defer ls.mu.RUnlock()
	ls.biLatestTS = ls.clock.Now()
	bi.Timestamp = ls.biLatestTS
	for _, s := range ls.storeMap {
		if err := engine.MVCCPutProto(s.engine, nil, keys.StoreGossipKey(), roachpb.ZeroTimestamp, nil, bi); err != nil {
			return err
		}
		log.Infof("wrote gossip bootstrap info to %s", s)
	}
	return nil
}
Example 21
// Put writes an entry for the specified transaction ID.
func (sc *AbortCache) Put(
	ctx context.Context,
	e engine.ReadWriter,
	ms *engine.MVCCStats,
	txnID *uuid.UUID,
	entry *roachpb.AbortCacheEntry,
) error {
	if txnID == nil {
		return errEmptyTxnID
	}
	key := keys.AbortCacheKey(sc.rangeID, txnID)
	return engine.MVCCPutProto(ctx, e, ms, key, roachpb.ZeroTimestamp, nil /* txn */, entry)
}
Example 22
// Put writes a sequence number for the specified transaction ID.
func (sc *SequenceCache) Put(e engine.Engine, id []byte, epoch, seq uint32, txnKey roachpb.Key, txnTS roachpb.Timestamp, pErr *roachpb.Error) error {
	if seq <= 0 || len(id) == 0 {
		return errEmptyTxnID
	}
	if !sc.shouldCacheError(pErr) {
		return nil
	}

	// Write the response value to the engine.
	key := keys.SequenceCacheKey(sc.rangeID, id, epoch, seq)
	sc.scratchEntry = roachpb.SequenceCacheEntry{Key: txnKey, Timestamp: txnTS}
	return engine.MVCCPutProto(e, nil /* ms */, key, roachpb.ZeroTimestamp, nil /* txn */, &sc.scratchEntry)
}
Example 23
// setLease persists the given range lease; a nil lease is a no-op.
func setLease(
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	lease *roachpb.Lease, // TODO(tschottdorf): better if this is never nil
) error {
	if lease == nil {
		return nil
	}
	return engine.MVCCPutProto(
		context.Background(), eng, ms,
		keys.RangeLeaderLeaseKey(rangeID),
		hlc.ZeroTimestamp, nil, lease)
}
Example 24
// InternalGC iterates through the list of keys to garbage collect
// specified in the arguments. MVCCGarbageCollect is invoked on each
// listed key along with the expiration timestamp. The GC metadata
// specified in the args is persisted after GC.
func (r *Range) InternalGC(batch engine.Engine, ms *engine.MVCCStats, args proto.InternalGCRequest) (proto.InternalGCResponse, error) {
	var reply proto.InternalGCResponse

	// Garbage collect the specified keys by expiration timestamps.
	if err := engine.MVCCGarbageCollect(batch, ms, args.Keys, args.Timestamp); err != nil {
		return reply, err
	}

	// Store the GC metadata for this range.
	key := keys.RangeGCMetadataKey(r.Desc().RaftID)
	if err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &args.GCMeta); err != nil {
		return reply, err
	}
	return reply, nil
}
Example 25
// updateBootstrapInfo persists the supplied bootstrap info to every
// store if it is newer than the latest info seen so far.
func (ls *Stores) updateBootstrapInfo(bi *gossip.BootstrapInfo) error {
	if bi.Timestamp.Less(ls.biLatestTS) {
		return nil
	}
	// Update the latest timestamp and set cached version.
	ls.biLatestTS = bi.Timestamp
	ls.latestBI = protoutil.Clone(bi).(*gossip.BootstrapInfo)
	// Update all stores.
	for _, s := range ls.storeMap {
		if err := engine.MVCCPutProto(context.Background(), s.engine, nil, keys.StoreGossipKey(), hlc.ZeroTimestamp, nil, bi); err != nil {
			return err
		}
	}
	return nil
}
Example 26
// PutResponse writes a response to the cache for the specified cmdID.
func (rc *ResponseCache) PutResponse(e engine.Engine, cmdID proto.ClientCmdID, reply proto.Response) error {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return nil
	}
	// Write the response value to the engine.
	var err error
	if rc.shouldCacheResponse(reply) {
		key := keys.ResponseCacheKey(rc.raftID, &cmdID)
		rwResp := &proto.ReadWriteCmdResponse{}
		if !rwResp.SetValue(reply) {
			log.Fatalf("response %T not supported by response cache", reply)
		}
		err = engine.MVCCPutProto(e, nil, key, proto.ZeroTimestamp, nil, rwResp)
	}

	return err
}
Example 27
// PutResponse writes a response and an error associated with it to the
// cache for the specified cmdID.
func (rc *ResponseCache) PutResponse(e engine.Engine, cmdID roachpb.ClientCmdID, replyWithErr roachpb.ResponseWithError) error {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return nil
	}

	// Write the response value to the engine.
	if rc.shouldCacheResponse(replyWithErr) {
		// Write the error into the reply before caching.
		replyWithErr.Reply.SetGoError(replyWithErr.Err)
		// Be sure to clear it when you're done!
		defer func() { replyWithErr.Reply.BatchResponse_Header.Error = nil }()

		key := keys.ResponseCacheKey(rc.rangeID, &cmdID)
		return engine.MVCCPutProto(e, nil, key, roachpb.ZeroTimestamp, nil, replyWithErr.Reply)
	}

	return nil
}
Example 28
// Put writes a sequence number for the specified transaction ID.
func (sc *SequenceCache) Put(
	e engine.Engine,
	ms *engine.MVCCStats,
	txnID *uuid.UUID,
	epoch, seq uint32,
	txnKey roachpb.Key,
	txnTS roachpb.Timestamp,
	pErr *roachpb.Error,
) error {
	if seq <= 0 || txnID == nil {
		return errEmptyTxnID
	}
	if !sc.shouldCacheError(pErr) {
		return nil
	}

	// Write the response value to the engine.
	key := keys.SequenceCacheKey(sc.rangeID, txnID, epoch, seq)
	sc.scratchEntry = roachpb.SequenceCacheEntry{Key: txnKey, Timestamp: txnTS}
	return engine.MVCCPutProto(e, ms, key, roachpb.ZeroTimestamp, nil /* txn */, &sc.scratchEntry)
}
Example 29
// PutResponse writes a response to the cache for the specified cmdID.
// The inflight entry corresponding to cmdID is removed from the
// inflight map. Any requests waiting on the outcome of the inflight
// command are signaled to wake up and read the command response
// from the cache.
func (rc *ResponseCache) PutResponse(cmdID proto.ClientCmdID, reply proto.Response) error {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return nil
	}
	// Write the response value to the engine.
	var err error
	if rc.shouldCacheResponse(reply) {
		key := engine.ResponseCacheKey(rc.raftID, &cmdID)
		rwResp := &proto.ReadWriteCmdResponse{}
		rwResp.SetValue(reply)
		err = engine.MVCCPutProto(rc.engine, nil, key, proto.ZeroTimestamp, nil, rwResp)
	}

	// Take lock after writing response to cache!
	rc.Lock()
	defer rc.Unlock()
	// Even on error, we remove the entry from the inflight map.
	rc.removeInflightLocked(cmdID)

	return err
}
Example 30
// Append implements the multiraft.WriteableGroupStorage interface.
func (r *Replica) Append(entries []raftpb.Entry) error {
	if len(entries) == 0 {
		return nil
	}
	batch := r.rm.Engine().NewBatch()
	defer batch.Close()

	rangeID := r.Desc().RangeID

	for _, ent := range entries {
		err := engine.MVCCPutProto(batch, nil, keys.RaftLogKey(rangeID, ent.Index),
			proto.ZeroTimestamp, nil, &ent)
		if err != nil {
			return err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	prevLastIndex := atomic.LoadUint64(&r.lastIndex)
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(batch, nil,
			keys.RaftLogKey(rangeID, i), proto.ZeroTimestamp, nil)
		if err != nil {
			return err
		}
	}

	// Commit the batch and update the last index.
	if err := setLastIndex(batch, rangeID, lastIndex); err != nil {
		return err
	}
	if err := batch.Commit(); err != nil {
		return err
	}

	atomic.StoreUint64(&r.lastIndex, lastIndex)
	return nil
}