// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	initialTxn := client.NewTxn(*s.DB)
	if err := initialTxn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}

	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		util.SucceedsSoon(t, func() error {
			ok, txn, pErr := getTxn(s.Sender, &initialTxn.Proto)
			if !ok || pErr != nil {
				t.Fatalf("got txn: %t: %s", ok, pErr)
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if txn.LastHeartbeat != nil && heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return nil
			}
			return util.Errorf("expected heartbeat")
		})
	}
}
Example #2
// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
func processSequenceCache(snap engine.Engine, rangeID roachpb.RangeID, now, cutoff roachpb.Timestamp, prevTxns map[uuid.UUID]*roachpb.Transaction, infoMu *lockableGCInfo, pushTxn pushFunc) []roachpb.GCRequest_GCKey {
	txns := make(map[uuid.UUID]*roachpb.Transaction)
	idToKeys := make(map[uuid.UUID][]roachpb.GCRequest_GCKey)
	seqCache := NewSequenceCache(rangeID)
	infoMu.Lock()
	seqCache.Iterate(snap, func(key []byte, txnIDPtr *uuid.UUID, v roachpb.SequenceCacheEntry) {
		txnID := *txnIDPtr
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		infoMu.SequenceSpanTotal++
		if prevTxn, ok := prevTxns[txnID]; ok && prevTxn.Status != roachpb.PENDING {
			txns[txnID] = prevTxn
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			infoMu.SequenceSpanConsidered++
			txns[txnID] = &roachpb.Transaction{
				TxnMeta: roachpb.TxnMeta{ID: txnIDPtr, Key: v.Key},
				Status:  roachpb.PENDING,
			}
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		}
	})
	infoMu.Unlock()

	var wg sync.WaitGroup
	// TODO(tschottdorf): use stopper.LimitedAsyncTask.
	wg.Add(len(txns))
	for _, txn := range txns {
		if txn.Status != roachpb.PENDING {
			wg.Done()
			continue
		}
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go pushTxn(now, txn, roachpb.PUSH_TOUCH, &wg)
	}
	wg.Wait()

	var gcKeys []roachpb.GCRequest_GCKey
	for txnID, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[txnID]...)
			infoMu.SequenceSpanGCNum++
		}
	}
	return gcKeys
}
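// The GC decision at the end of the loop above is compact enough to restate in
// isolation. The following hypothetical standalone sketch (a plain wall-time
// Timestamp stands in for roachpb.Timestamp) shows the rule: forward the
// transaction's timestamp to its last heartbeat, then collect its keys only if
// the result is not newer than the cutoff.
package main

import "fmt"

// Timestamp is a simplified stand-in for roachpb.Timestamp (wall time only).
type Timestamp struct {
	WallTime int64
}

// Less reports whether t is ordered before other.
func (t Timestamp) Less(other Timestamp) bool { return t.WallTime < other.WallTime }

// Forward advances t to other if other is later, mirroring roachpb's Forward.
func (t *Timestamp) Forward(other Timestamp) {
	if t.Less(other) {
		*t = other
	}
}

// shouldGC applies the rule from the loop above: take the transaction's
// timestamp, advance it to the last heartbeat (if any), and GC only if the
// result does not exceed the cutoff.
func shouldGC(txnTS Timestamp, lastHeartbeat *Timestamp, cutoff Timestamp) bool {
	ts := txnTS
	if lastHeartbeat != nil {
		ts.Forward(*lastHeartbeat)
	}
	return !cutoff.Less(ts)
}

func main() {
	cutoff := Timestamp{WallTime: 100}
	hb := Timestamp{WallTime: 150}
	fmt.Println(shouldGC(Timestamp{WallTime: 50}, nil, cutoff)) // true: old enough to collect
	fmt.Println(shouldGC(Timestamp{WallTime: 50}, &hb, cutoff)) // false: heartbeat is newer than the cutoff
}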
Example #3
// UpdateDeadlineMaybe sets the transaction's deadline to the lower of the
// current one (if any) and the passed value.
func (txn *Txn) UpdateDeadlineMaybe(deadline roachpb.Timestamp) bool {
	if txn.deadline == nil || deadline.Less(*txn.deadline) {
		txn.deadline = &deadline
		return true
	}
	return false
}
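// A hypothetical standalone usage sketch of the same ratcheting rule, with a
// simplified Timestamp in place of roachpb.Timestamp: the deadline only ever
// moves earlier, never later.
package main

import "fmt"

// Timestamp is a simplified stand-in for roachpb.Timestamp.
type Timestamp struct{ WallTime int64 }

// Less reports whether t is ordered before other.
func (t Timestamp) Less(other Timestamp) bool { return t.WallTime < other.WallTime }

// Txn carries only the deadline field used by the method above.
type Txn struct{ deadline *Timestamp }

// UpdateDeadlineMaybe lowers the deadline if the passed value is earlier than
// the current one, or installs it if no deadline is set yet.
func (txn *Txn) UpdateDeadlineMaybe(deadline Timestamp) bool {
	if txn.deadline == nil || deadline.Less(*txn.deadline) {
		txn.deadline = &deadline
		return true
	}
	return false
}

func main() {
	var txn Txn
	fmt.Println(txn.UpdateDeadlineMaybe(Timestamp{WallTime: 200})) // true: first deadline installed
	fmt.Println(txn.UpdateDeadlineMaybe(Timestamp{WallTime: 100})) // true: earlier, so it ratchets down
	fmt.Println(txn.UpdateDeadlineMaybe(Timestamp{WallTime: 300})) // false: later, so it is ignored
}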
// Add the specified timestamp to the cache as covering the range of
// keys from start to end. If end is nil, the range covers the start
// key only. txnID is nil for no transaction. readOnly specifies
// whether the command adding this timestamp was read-only or not.
func (tc *TimestampCache) Add(start, end roachpb.Key, timestamp roachpb.Timestamp, txnID []byte, readOnly bool) {
	// This gives us a memory-efficient end key if end is empty.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	if tc.latest.Less(timestamp) {
		tc.latest = timestamp
	}
	// Only add to the cache if the timestamp is more recent than the
	// low water mark.
	if tc.lowWater.Less(timestamp) {
		// Check existing, overlapping entries. Remove superseded
		// entries or return without adding this entry if necessary.
		key := tc.cache.NewKey(start, end)
		for _, o := range tc.cache.GetOverlaps(start, end) {
			ce := o.Value.(cacheEntry)
			if ce.readOnly != readOnly {
				continue
			}
			if o.Key.Contains(key) && !ce.timestamp.Less(timestamp) {
				return // don't add this key; there's already a cache entry with >= timestamp.
			} else if key.Contains(o.Key) && !timestamp.Less(ce.timestamp) {
				tc.cache.Del(o.Key) // delete existing key; this cache entry supersedes.
			}
		}
		ce := cacheEntry{timestamp: timestamp, txnID: txnID, readOnly: readOnly}
		tc.cache.Add(key, ce)
	}
}
Example #5
func TestBatchBuilderStress(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := NewInMem(roachpb.Attributes{}, 1<<20, stopper)

	rng, _ := randutil.NewPseudoRand()

	for i := 0; i < 1000; i++ {
		count := 1 + rng.Intn(1000)

		func() {
			batch := e.NewBatch().(*rocksDBBatch)
			defer batch.Close()

			builder := &rocksDBBatchBuilder{}

			for j := 0; j < count; j++ {
				var ts roachpb.Timestamp
				if rng.Float32() <= 0.9 {
					// Give 90% of keys timestamps.
					ts.WallTime = rng.Int63()
					if rng.Float32() <= 0.1 {
						// Give 10% of timestamps a non-zero logical component.
						ts.Logical = rng.Int31()
					}
				}
				key := MVCCKey{
					Key:       []byte(fmt.Sprintf("%d", rng.Intn(10000))),
					Timestamp: ts,
				}
				// Generate a random mixture of puts, deletes and merges.
				switch rng.Intn(3) {
				case 0:
					if err := dbPut(batch.batch, key, []byte("value")); err != nil {
						t.Fatal(err)
					}
					builder.Put(key, []byte("value"))
				case 1:
					if err := dbClear(batch.batch, key); err != nil {
						t.Fatal(err)
					}
					builder.Clear(key)
				case 2:
					if err := dbMerge(batch.batch, key, appender("bar")); err != nil {
						t.Fatal(err)
					}
					builder.Merge(key, appender("bar"))
				}
			}

			batchRepr := batch.Repr()
			builderRepr := builder.Finish()
			if !bytes.Equal(batchRepr, builderRepr) {
				t.Fatalf("expected [% x], but got [% x]", batchRepr, builderRepr)
			}
		}()
	}
}
Example #6
// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
func (gcq *gcQueue) processSequenceCache(r *Replica, now, cutoff roachpb.Timestamp, prevTxns map[uuid.UUID]*roachpb.Transaction) []roachpb.GCRequest_GCKey {
	snap := r.store.Engine().NewSnapshot()
	defer snap.Close()

	txns := make(map[uuid.UUID]*roachpb.Transaction)
	idToKeys := make(map[uuid.UUID][]roachpb.GCRequest_GCKey)
	r.sequence.Iterate(snap, func(key []byte, txnIDPtr *uuid.UUID, v roachpb.SequenceCacheEntry) {
		txnID := *txnIDPtr
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		if prevTxn, ok := prevTxns[txnID]; ok && prevTxn.Status != roachpb.PENDING {
			txns[txnID] = prevTxn
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			txns[txnID] = &roachpb.Transaction{
				TxnMeta: roachpb.TxnMeta{ID: txnIDPtr, Key: v.Key},
				Status:  roachpb.PENDING,
			}
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		}
	})

	var wg sync.WaitGroup
	wg.Add(len(txns))
	// TODO(tschottdorf): a lot of these transactions will be on our local range,
	// so we should simply read those from a snapshot, and only push those which
	// are PENDING.
	for _, txn := range txns {
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go gcq.pushTxn(r, now, txn, roachpb.PUSH_TOUCH, &wg)
	}
	wg.Wait()

	var gcKeys []roachpb.GCRequest_GCKey
	for txnID, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[txnID]...)
		}
	}
	return gcKeys
}
// shouldQueue determines whether a replica should be queued for GC,
// and if so at what priority. To be considered for possible GC, a
// replica's leader lease must not have been active for longer than
// ReplicaGCQueueInactivityThreshold. Further, the last replica GC
// check must have occurred more than ReplicaGCQueueInactivityThreshold
// in the past.
func (*replicaGCQueue) shouldQueue(now roachpb.Timestamp, rng *Replica, _ config.SystemConfig) (bool, float64) {
	lastCheck, err := rng.getLastReplicaGCTimestamp()
	if err != nil {
		log.Errorf("could not read last replica GC timestamp: %s", err)
		return false, 0
	}
	// Return false immediately if the previous check was less than the
	// check interval in the past.
	if now.Less(lastCheck.Add(ReplicaGCQueueInactivityThreshold.Nanoseconds(), 0)) {
		return false, 0
	}
	// Return whether or not lease activity occurred within the inactivity threshold.
	return rng.getLeaderLease().Expiration.Add(ReplicaGCQueueInactivityThreshold.Nanoseconds(), 0).Less(now), 0
}
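// A hypothetical standalone restatement of the two checks above using plain
// time.Time values; inactivityThreshold merely stands in for
// ReplicaGCQueueInactivityThreshold.
package main

import (
	"fmt"
	"time"
)

const inactivityThreshold = 10 * 24 * time.Hour

// shouldQueueReplicaGC skips replicas that were checked recently and queues
// only those whose lease has been inactive for longer than the threshold.
func shouldQueueReplicaGC(now, lastCheck, leaseExpiration time.Time) bool {
	// Return false immediately if the previous check was less than the
	// check interval in the past.
	if now.Before(lastCheck.Add(inactivityThreshold)) {
		return false
	}
	// Queue only if the lease expired more than the threshold ago.
	return leaseExpiration.Add(inactivityThreshold).Before(now)
}

func main() {
	now := time.Now()
	fmt.Println(shouldQueueReplicaGC(now, now.Add(-time.Hour), now.Add(-30*24*time.Hour)))       // false: checked recently
	fmt.Println(shouldQueueReplicaGC(now, now.Add(-20*24*time.Hour), now.Add(-30*24*time.Hour))) // true: idle long enough
	fmt.Println(shouldQueueReplicaGC(now, now.Add(-20*24*time.Hour), now.Add(-time.Hour)))       // false: lease recently active
}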
Example #8
// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
func processSequenceCache(r *Replica, now, cutoff roachpb.Timestamp, prevTxns map[string]*roachpb.Transaction) []roachpb.GCRequest_GCKey {
	snap := r.store.Engine().NewSnapshot()
	defer snap.Close()

	txns := make(map[string]*roachpb.Transaction)
	idToKeys := make(map[string][]roachpb.GCRequest_GCKey)
	r.sequence.Iterate(snap, func(key, id []byte, v roachpb.SequenceCacheEntry) {
		idStr := string(id)
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		if prevTxn, ok := prevTxns[idStr]; ok && prevTxn.Status != roachpb.PENDING {
			txns[idStr] = prevTxn
			idToKeys[idStr] = append(idToKeys[idStr], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			txns[idStr] = &roachpb.Transaction{ID: id, Key: v.Key, Status: roachpb.PENDING}
			idToKeys[idStr] = append(idToKeys[idStr], roachpb.GCRequest_GCKey{Key: key})
		}
	})

	var wg sync.WaitGroup
	wg.Add(len(txns))
	for _, txn := range txns {
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go pushTxn(r, now, txn, roachpb.CLEANUP_TXN, &wg)
	}
	wg.Wait()

	var gcKeys []roachpb.GCRequest_GCKey
	for idStr, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[idStr]...)
		}
	}
	return gcKeys
}
Example #9
// TestClock performs a complete test of all basic phenomena,
// including backward jumps in local physical time and clock offset.
func TestClock(t *testing.T) {
	m := NewManualClock(0)
	c := NewClock(m.UnixNano)
	c.SetMaxOffset(1000)
	expectedHistory := []struct {
		// The physical time that this event should take place at.
		wallClock int64
		event     Event
		// If this is a receive event, this holds the "input" timestamp.
		input *roachpb.Timestamp
		// The expected timestamp generated from the input.
		expected roachpb.Timestamp
	}{
		// A few valid steps to warm up.
		{5, SEND, nil, roachpb.Timestamp{WallTime: 5, Logical: 0}},
		{6, SEND, nil, roachpb.Timestamp{WallTime: 6, Logical: 0}},
		{10, RECV, &roachpb.Timestamp{WallTime: 10, Logical: 5}, roachpb.Timestamp{WallTime: 10, Logical: 6}},
		// Our clock mysteriously jumps back.
		{7, SEND, nil, roachpb.Timestamp{WallTime: 10, Logical: 7}},
		// Wall clocks coincide, but the local logical clock wins.
		{8, RECV, &roachpb.Timestamp{WallTime: 10, Logical: 4}, roachpb.Timestamp{WallTime: 10, Logical: 8}},
		// Wall clocks coincide, but the remote logical clock wins.
		{10, RECV, &roachpb.Timestamp{WallTime: 10, Logical: 99}, roachpb.Timestamp{WallTime: 10, Logical: 100}},
		// The physical clock has caught up and takes over.
		{11, RECV, &roachpb.Timestamp{WallTime: 10, Logical: 31}, roachpb.Timestamp{WallTime: 11, Logical: 0}},
		{11, SEND, nil, roachpb.Timestamp{WallTime: 11, Logical: 1}},
	}

	var current roachpb.Timestamp
	for i, step := range expectedHistory {
		m.Set(step.wallClock)
		switch step.event {
		case SEND:
			current = c.Now()
		case RECV:
			fallthrough
		default:
			previous := c.Timestamp()
			current = c.Update(*step.input)
			if current.Equal(previous) {
				t.Errorf("%d: clock not updated", i)
			}
		}
		if !current.Equal(step.expected) {
			t.Fatalf("HLC error: %d expected %v, got %v", i, step.expected, current)
		}
	}
	c.Now()
}
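// For readers following the expectedHistory table, the SEND/RECV rules it
// exercises can be sketched standalone. The code below is a hypothetical,
// simplified hybrid logical clock (not the hlc package's implementation);
// the calls in main mirror a few rows of the table.
package main

import "fmt"

// HLCTimestamp is a simplified hybrid logical clock reading.
type HLCTimestamp struct {
	WallTime int64
	Logical  int32
}

// HLC is a minimal hybrid logical clock driven by an injectable physical clock.
type HLC struct {
	physical func() int64
	ts       HLCTimestamp
}

// Now implements the SEND rule: take the physical clock if it moved past the
// current reading, otherwise bump the logical component.
func (c *HLC) Now() HLCTimestamp {
	if pt := c.physical(); pt > c.ts.WallTime {
		c.ts = HLCTimestamp{WallTime: pt}
	} else {
		c.ts.Logical++
	}
	return c.ts
}

// Update implements the RECV rule: advance to the maximum of the local
// reading, the remote timestamp, and the physical clock, bumping the logical
// component when the physical clock does not win outright.
func (c *HLC) Update(remote HLCTimestamp) HLCTimestamp {
	pt := c.physical()
	switch {
	case pt > c.ts.WallTime && pt > remote.WallTime:
		c.ts = HLCTimestamp{WallTime: pt}
	case remote.WallTime > c.ts.WallTime:
		c.ts = HLCTimestamp{WallTime: remote.WallTime, Logical: remote.Logical + 1}
	case c.ts.WallTime > remote.WallTime:
		c.ts.Logical++
	default: // equal wall times: keep the larger logical component, then bump it.
		if remote.Logical > c.ts.Logical {
			c.ts.Logical = remote.Logical
		}
		c.ts.Logical++
	}
	return c.ts
}

func main() {
	wall := int64(5)
	c := &HLC{physical: func() int64 { return wall }}
	fmt.Println(c.Now()) // {5 0}
	wall = 10
	fmt.Println(c.Update(HLCTimestamp{WallTime: 10, Logical: 5})) // {10 6}: remote logical clock wins
	wall = 7
	fmt.Println(c.Now()) // {10 7}: physical clock jumped back, logical advances
	wall = 11
	fmt.Println(c.Update(HLCTimestamp{WallTime: 10, Logical: 31})) // {11 0}: physical clock caught up
}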
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	initialTxn := newTxn(s.Clock, roachpb.Key("a"))
	put, h := createPutRequest(roachpb.Key("a"), []byte("value"), initialTxn)
	if reply, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
		t.Fatal(err)
	} else {
		*initialTxn = *reply.Header().Txn
	}

	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(s.Sender, initialTxn)
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Error("expected initial heartbeat within 50ms")
		}
	}
}
Example #11
// ExampleNewClock shows how to create a new
// hybrid logical clock based on the local machine's
// physical clock. The sanity checks in this example
// will, of course, not fail and the output will be
// the age of the Unix epoch in nanoseconds.
func ExampleNewClock() {
	// Initialize a new clock, using the local
	// physical clock.
	c := NewClock(UnixNano)
	// Update the state of the hybrid clock.
	s := c.Now()
	time.Sleep(50 * time.Nanosecond)
	t := roachpb.Timestamp{WallTime: UnixNano()}
	// The sanity checks below will usually never be triggered.

	if !s.Less(t) || t.Less(s) {
		log.Fatalf("The later timestamp is smaller than the earlier one")
	}

	if s.WallTime-t.WallTime > 0 {
		log.Fatalf("HLC timestamp %d deviates from physical clock %d", s, t)
	}

	if s.Logical > 0 {
		log.Fatalf("Trivial timestamp has logical component")
	}

	fmt.Printf("The Unix Epoch is now approximately %dns old.\n", t.WallTime)
}
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)

	// Set heartbeat interval to 1ms for testing.
	sender.heartbeatInterval = 1 * time.Millisecond

	initialTxn := client.NewTxn(context.Background(), *s.DB)
	if err := initialTxn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}

	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		util.SucceedsSoon(t, func() error {
			txn, pErr := getTxn(sender, &initialTxn.Proto)
			if pErr != nil {
				t.Fatal(pErr)
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			sender.Lock()
			s.Manual.Increment(1)
			sender.Unlock()
			if txn.LastHeartbeat != nil && heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return nil
			}
			return util.Errorf("expected heartbeat")
		})
	}

	// Sneakily send an ABORT right to DistSender (bypassing TxnCoordSender).
	{
		var ba roachpb.BatchRequest
		ba.Add(&roachpb.EndTransactionRequest{
			Commit: false,
			Span:   roachpb.Span{Key: initialTxn.Proto.Key},
		})
		ba.Txn = &initialTxn.Proto
		if _, pErr := sender.wrapped.Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}

	util.SucceedsSoon(t, func() error {
		sender.Lock()
		defer sender.Unlock()
		if txnMeta, ok := sender.txns[*initialTxn.Proto.ID]; !ok {
			t.Fatal("transaction unregistered prematurely")
		} else if txnMeta.txn.Status != roachpb.ABORTED {
			return fmt.Errorf("transaction is not aborted")
		}
		return nil
	})

	// Trying to do something else should give us a TransactionAbortedError.
	_, err := initialTxn.Get("a")
	assertTransactionAbortedError(t, err)
}
// Add the specified timestamp to the cache as covering the range of
// keys from start to end. If end is nil, the range covers the start
// key only. txnID is nil for no transaction. readTSCache specifies
// whether the command adding this timestamp should update the read
// timestamp; false to update the write timestamp cache.
func (tc *TimestampCache) Add(start, end roachpb.Key, timestamp roachpb.Timestamp, txnID *uuid.UUID, readTSCache bool) {
	// This gives us a memory-efficient end key if end is empty.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	if tc.latest.Less(timestamp) {
		tc.latest = timestamp
	}
	// Only add to the cache if the timestamp is more recent than the
	// low water mark.
	if tc.lowWater.Less(timestamp) {
		cache := tc.wCache
		if readTSCache {
			cache = tc.rCache
		}

		addRange := func(r interval.Range) {
			value := cacheValue{timestamp: timestamp, txnID: txnID}
			key := cache.MakeKey(r.Start, r.End)
			entry := makeCacheEntry(key, value)
			cache.AddEntry(entry)
		}
		r := interval.Range{
			Start: interval.Comparable(start),
			End:   interval.Comparable(end),
		}

		// Check existing, overlapping entries and truncate/split/remove if
		// superseded and in the past. If existing entries are in the future,
		// subtract from the range/ranges that need to be added to cache.
		for _, o := range cache.GetOverlaps(r.Start, r.End) {
			cv := o.Value.(*cacheValue)
			sCmp := r.Start.Compare(o.Key.Start)
			eCmp := r.End.Compare(o.Key.End)
			if !timestamp.Less(cv.timestamp) {
				// The existing interval has a timestamp less than or equal to the new interval.
				// Compare interval ranges to determine how to modify existing interval.
				switch {
				case sCmp == 0 && eCmp == 0:
					// New and old are equal; replace old with new and avoid the need to insert new.
					//
					// New: ------------
					// Old: ------------
					//
					// New: ------------
					*cv = cacheValue{timestamp: timestamp, txnID: txnID}
					cache.MoveToEnd(o.Entry)
					return
				case sCmp <= 0 && eCmp >= 0:
					// New contains or is equal to old; delete old.
					//
					// New: ------------      ------------      ------------
					// Old:   --------    or    ----------  or  ----------
					//
					// Old:
					cache.DelEntry(o.Entry)
				case sCmp > 0 && eCmp < 0:
					// Old contains new; split up old into two.
					//
					// New:     ----
					// Old: ------------
					//
					// Old: ----    ----
					oldEnd := o.Key.End
					o.Key.End = r.Start

					key := cache.MakeKey(r.End, oldEnd)
					entry := makeCacheEntry(key, *cv)
					cache.AddEntryAfter(entry, o.Entry)
				case eCmp >= 0:
					// Left partial overlap; truncate old end.
					//
					// New:     --------          --------
					// Old: --------      or  ------------
					//
					// Old: ----              ----
					o.Key.End = r.Start
				case sCmp <= 0:
					// Right partial overlap; truncate old start.
					//
					// New: --------          --------
					// Old:     --------  or  ------------
					//
					// Old:         ----              ----
					o.Key.Start = r.End
				default:
					panic(fmt.Sprintf("no overlap between %v and %v", o.Key.Range, r))
				}
			} else {
				// The existing interval has a timestamp greater than the new interval.
				// Compare interval ranges to determine how to modify new interval before
				// adding it to the timestamp cache.
				switch {
				case sCmp >= 0 && eCmp <= 0:
					// Old contains or is equal to new; no need to add.
					//
					// Old: -----------      -----------      -----------      -----------
					// New:    -----     or  -----------  or  --------     or     --------
					//
					// New:
					return
				case sCmp < 0 && eCmp > 0:
					// New contains old; split up old into two. We can add the left piece
					// immediately because it is guaranteed to be before the rest of the
					// overlaps.
					//
					// Old:    ------
					// New: ------------
					//
					// New: ---      ---
					lr := interval.Range{Start: r.Start, End: o.Key.Start}
					addRange(lr)

					r.Start = o.Key.End
				case eCmp > 0:
					// Left partial overlap; truncate new start.
					//
					// Old: --------          --------
					// New:     --------  or  ------------
					//
					// New:         ----              ----
					r.Start = o.Key.End
				case sCmp < 0:
					// Right partial overlap; truncate new end.
					//
					// Old:     --------          --------
					// New: --------      or  ------------
					//
					// New: ----              ----
					r.End = o.Key.Start
				default:
					panic(fmt.Sprintf("no overlap between %v and %v", o.Key.Range, r))
				}
			}
		}
		addRange(r)
	}
}
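// The five-way case analysis above is the core of Add. This hypothetical
// standalone sketch reproduces only the classification, with integer endpoints
// in place of byte keys, so the switch can be exercised in isolation.
package main

import "fmt"

// Range is a half-open interval over int keys, standing in for the byte-key
// interval.Range used above.
type Range struct{ Start, End int }

func compare(a, b int) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}

// classifyOverlap mirrors the sCmp/eCmp switch from Add for a new range r
// against an existing, overlapping range o.
func classifyOverlap(r, o Range) string {
	sCmp := compare(r.Start, o.Start)
	eCmp := compare(r.End, o.End)
	switch {
	case sCmp == 0 && eCmp == 0:
		return "equal"
	case sCmp <= 0 && eCmp >= 0:
		return "new contains old"
	case sCmp > 0 && eCmp < 0:
		return "old contains new"
	case eCmp >= 0:
		return "left partial overlap (new ends at or after old's end)"
	case sCmp <= 0:
		return "right partial overlap (new starts at or before old's start)"
	default:
		return "no overlap"
	}
}

func main() {
	old := Range{Start: 4, End: 8}
	fmt.Println(classifyOverlap(Range{Start: 4, End: 8}, old))  // equal
	fmt.Println(classifyOverlap(Range{Start: 2, End: 10}, old)) // new contains old
	fmt.Println(classifyOverlap(Range{Start: 5, End: 7}, old))  // old contains new
	fmt.Println(classifyOverlap(Range{Start: 6, End: 10}, old)) // left partial overlap
	fmt.Println(classifyOverlap(Range{Start: 2, End: 6}, old))  // right partial overlap
}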
Example #14
// selectEventTimestamp selects a timestamp for this log message. If the
// transaction this event is being written in has a non-zero timestamp, then that
// timestamp should be used; otherwise, the store's physical clock is used.
// This helps with testing; in normal usage, the logging of an event will never
// be the first action in the transaction, and thus the transaction will have an
// assigned database timestamp. However, in the case of our tests log events
// *are* the first action in a transaction, and we must elect to use the store's
// physical time instead.
func (ev EventLogger) selectEventTimestamp(input roachpb.Timestamp) time.Time {
	if input == roachpb.ZeroTimestamp {
		return ev.LeaseManager.clock.PhysicalTime()
	}
	return input.GoTime()
}
Example #15
// selectEventTimestamp selects a timestamp for this log message. If the
// transaction this event is being written in has a non-zero timestamp, then that
// timestamp should be used; otherwise, the store's physical clock is used.
// This helps with testing; in normal usage, the logging of an event will never
// be the first action in the transaction, and thus the transaction will have an
// assigned database timestamp. However, in the case of our tests log events
// *are* the first action in a transaction, and we must elect to use the store's
// physical time instead.
func selectEventTimestamp(s *Store, input roachpb.Timestamp) time.Time {
	if input == roachpb.ZeroTimestamp {
		return s.Clock().PhysicalTime()
	}
	return input.GoTime()
}
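// A hypothetical standalone restatement of the same fallback with plain
// time.Time: use the transaction's time when it has one, otherwise fall back
// to the supplied physical clock.
package main

import (
	"fmt"
	"time"
)

// selectEventTime prefers a non-zero transaction time and otherwise consults
// the physical clock provided by the caller.
func selectEventTime(txnTime time.Time, physicalNow func() time.Time) time.Time {
	if txnTime.IsZero() {
		return physicalNow()
	}
	return txnTime
}

func main() {
	now := func() time.Time { return time.Unix(100, 0) }
	fmt.Println(selectEventTime(time.Time{}, now))      // zero input: falls back to the physical clock
	fmt.Println(selectEventTime(time.Unix(42, 0), now)) // non-zero input: the transaction time is used
}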
Example #16
// SetStmtTimestamp sets the corresponding timestamp in the EvalContext.
func (ctx *EvalContext) SetStmtTimestamp(ts roachpb.Timestamp) {
	ctx.stmtTimestamp.Time = ts.GoTime()
}