Example #1
// NewReadQueue returns a new read queue.
func NewReadQueue() *ReadQueue {
	rq := &ReadQueue{
		cache: util.NewIntervalCache(util.CacheConfig{Policy: util.CacheNone}),
	}
	rq.cache.OnEvicted = rq.onEvicted
	return rq
}

// NewCommandQueue returns a new command queue.
func NewCommandQueue() *CommandQueue {
	cq := &CommandQueue{
		cache: util.NewIntervalCache(util.CacheConfig{Policy: util.CacheNone}),
	}
	cq.cache.OnEvicted = cq.onEvicted
	return cq
}
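
Both constructors follow the same two-step pattern: build the struct with a CacheNone interval cache (entries are never evicted automatically, only removed explicitly), then wire the cache's OnEvicted hook to a method on the freshly built value. The hook has to be assigned after the composite literal because it closes over the receiver. Below is a minimal, self-contained sketch of that pattern; miniCache and its Del method are invented stand-ins for illustration, not the real util.IntervalCache API.

package main

import "fmt"

// miniCache stands in for util.IntervalCache: a map of entries plus
// an optional eviction callback. (Invented for illustration only.)
type miniCache struct {
	entries   map[string]interface{}
	OnEvicted func(key string, value interface{})
}

func newMiniCache() *miniCache {
	return &miniCache{entries: map[string]interface{}{}}
}

// Del removes an entry and fires the callback; under a CacheNone
// policy, explicit removal is the only way entries leave the cache.
func (c *miniCache) Del(key string) {
	if v, ok := c.entries[key]; ok {
		delete(c.entries, key)
		if c.OnEvicted != nil {
			c.OnEvicted(key, v)
		}
	}
}

// queue mirrors the two-step construction above: build the struct,
// then point OnEvicted at a method on the finished value.
type queue struct {
	cache *miniCache
}

func newQueue() *queue {
	q := &queue{cache: newMiniCache()}
	q.cache.OnEvicted = q.onEvicted // needs q to exist, hence assigned afterwards
	return q
}

func (q *queue) onEvicted(key string, value interface{}) {
	fmt.Printf("evicted %s=%v\n", key, value)
}

func main() {
	q := newQueue()
	q.cache.entries["a"] = 1
	q.cache.Del("a") // prints: evicted a=1
}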
Example #3
// NewTimestampCache returns a new timestamp cache with supplied
// hybrid clock.
func NewTimestampCache(clock *hlc.Clock) *TimestampCache {
	tc := &TimestampCache{
		cache: util.NewIntervalCache(util.CacheConfig{Policy: util.CacheFIFO}),
	}
	tc.Clear(clock)
	tc.cache.CacheConfig.ShouldEvict = tc.shouldEvict
	return tc
}

// NewReadTimestampCache returns a new read timestamp cache with
// supplied hybrid clock.
func NewReadTimestampCache(clock *hlc.HLClock) *ReadTimestampCache {
	rtc := &ReadTimestampCache{
		cache: util.NewIntervalCache(util.CacheConfig{Policy: util.CacheFIFO}),
		clock: clock,
	}
	rtc.Clear()
	rtc.cache.CacheConfig.ShouldEvict = rtc.shouldEvict
	return rtc
}
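
These constructors switch the policy to CacheFIFO and install a ShouldEvict predicate, so the cache consults the predicate for its oldest entries first. A plausible shape for such a predicate in a timestamp cache is a retention window: evict an entry only once it has aged past a low-water mark. The sketch below assumes that shape; retentionWindow, entry, and shouldEvict are illustrative names, not the real TimestampCache internals.

package main

import (
	"fmt"
	"time"
)

// retentionWindow is a hypothetical stand-in for the timestamp
// cache's minimum retention period.
const retentionWindow = 10 * time.Second

// entry carries just the wall time of a cached timestamp, in
// nanoseconds (like proto.Timestamp.WallTime).
type entry struct {
	wallTime int64
}

// shouldEvict sketches a FIFO ShouldEvict predicate: the cache offers
// the oldest entry first, and it is evicted only once it has aged out
// of the retention window. A false result can stop the scan, since
// every later entry under FIFO ordering is at least as new.
func shouldEvict(nowNS int64, e entry) bool {
	return e.wallTime < nowNS-retentionWindow.Nanoseconds()
}

func main() {
	now := time.Now().UnixNano()
	stale := entry{wallTime: now - 20*time.Second.Nanoseconds()}
	fresh := entry{wallTime: now - time.Second.Nanoseconds()}
	fmt.Println(shouldEvict(now, stale)) // true: past the window
	fmt.Println(shouldEvict(now, fresh)) // false: still retained
}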
// sendOne sends a single call via the wrapped sender. If the call is
// part of a transaction, the TxnCoordSender adds the transaction to a
// map of active transactions and begins heartbeating it. Every
// subsequent call for the same transaction updates the lastUpdateTS
// to prevent live transactions from being considered abandoned and
// garbage collected. Read/write mutating requests have their key or
// key range added to the transaction's interval tree of key ranges
// for eventual cleanup via resolved write intents.
//
// On success, and if the call is part of a transaction, the affected
// key range is recorded as live intents for eventual cleanup upon
// transaction commit; on a successful commit, the coordinator
// initiates that cleanup.
func (tc *TxnCoordSender) sendOne(call *client.Call) {
	var startNS int64
	header := call.Args.Header()
	// If this call is part of a transaction...
	if header.Txn != nil {
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if proto.IsReadOnly(call.Method) {
			header.Timestamp = header.Txn.OrigTimestamp
		} else {
			header.Timestamp = header.Txn.Timestamp
		}
		// End transaction must have its key set to the txn ID.
		if call.Method == proto.EndTransaction {
			header.Key = header.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
		}
	}

	// Send the command through wrapped sender.
	tc.wrapped.Send(call)

	if header.Txn != nil {
		// If not already set, copy the request txn.
		if call.Reply.Header().Txn == nil {
			call.Reply.Header().Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
		}
		tc.updateResponseTxn(header, call.Reply.Header())
	}

	// If the call succeeded, is part of a transaction, and the command
	// leaves transactional intents, add the key or key range to the
	// intents map. If the transaction metadata doesn't yet exist, create it.
	if call.Reply.Header().GoError() == nil && header.Txn != nil && proto.IsTransactional(call.Method) {
		tc.Lock()
		var ok bool
		var txnMeta *txnMetadata
		if txnMeta, ok = tc.txns[string(header.Txn.ID)]; !ok {
			txnMeta = &txnMetadata{
				txn:             *header.Txn,
				keys:            util.NewIntervalCache(util.CacheConfig{Policy: util.CacheNone}),
				lastUpdateTS:    tc.clock.Now(),
				timeoutDuration: tc.clientTimeout,
				closer:          make(chan struct{}),
			}
			tc.txns[string(header.Txn.ID)] = txnMeta

			// TODO(jiajia): Reevaluate this logic of creating a goroutine
			// for each active transaction. Spencer suggests a heap
			// containing next heartbeat timeouts which is processed by a
			// single goroutine.
			go tc.heartbeat(header.Txn, txnMeta.closer)
		}
		txnMeta.lastUpdateTS = tc.clock.Now()
		txnMeta.addKeyRange(header.Key, header.EndKey)
		tc.Unlock()
	}

	// Clean up intents and the transaction map if the transaction has ended.
	switch t := call.Reply.Header().GoError().(type) {
	case *proto.TransactionAbortedError:
		// If already aborted, clean up the txn on this TxnCoordSender.
		tc.cleanupTxn(&t.Txn)
	case *proto.OpRequiresTxnError:
		// Run a one-off transaction with that single command.
		log.Infof("%s: auto-wrapping in txn and re-executing", call.Method)
		txnOpts := &client.TransactionOptions{
			Name: "auto-wrap",
		}
		// Must not call Close() on this KV - that would call
		// tc.Close().
		tmpKV := client.NewKV(tc, nil)
		tmpKV.User = call.Args.Header().User
		tmpKV.UserPriority = call.Args.Header().GetUserPriority()
		call.Reply.Reset()
		tmpKV.RunTransaction(txnOpts, func(txn *client.KV) error {
			return txn.Call(call.Method, call.Args, call.Reply)
		})
	case nil:
		var txn *proto.Transaction
		if call.Method == proto.EndTransaction {
			txn = call.Reply.Header().Txn
			// If the -linearizable flag is set, we want to make sure that
			// all the clocks in the system are past the commit timestamp
			// of the transaction. This is guaranteed if either:
			// - the commit timestamp is MaxOffset behind startNS, or
			// - MaxOffset ns have elapsed in this function
			// by the time we return to the client. Below we choose the
			// option that involves less waiting, which is likely the
			// first one unless a transaction commits with an odd timestamp.
			if tsNS := txn.Timestamp.WallTime; startNS > tsNS {
				startNS = tsNS
			}
			sleepNS := tc.clock.MaxOffset() -
				time.Duration(tc.clock.PhysicalNow()-startNS)
			if tc.linearizable && sleepNS > 0 {
				defer func() {
					log.V(1).Infof("%v: waiting %dms on EndTransaction for linearizability", txn.ID, sleepNS/1000000)
					time.Sleep(sleepNS)
				}()
			}
		}
		if txn != nil && txn.Status != proto.PENDING {
			tc.cleanupTxn(txn)
		}
	}
}
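
The doc comment above leans on the per-transaction heartbeat goroutine launched mid-function (go tc.heartbeat(header.Txn, txnMeta.closer)), which the TODO suggests might one day be replaced by a single goroutine draining a timeout heap. Here is a minimal sketch of the ticker-plus-closer shape that launch implies, with the actual heartbeat RPC stubbed out; heartbeatInterval and the printed message are assumptions, not the coordinator's real values.

package main

import (
	"fmt"
	"time"
)

// heartbeatInterval is a hypothetical tuning knob; the real interval
// belongs to the TxnCoordSender.
const heartbeatInterval = time.Second

// heartbeat sketches the per-transaction goroutine: tick until the
// closer channel is closed (e.g. by cleanupTxn after commit or
// abort), refreshing the transaction record on every tick.
func heartbeat(txnID string, closer <-chan struct{}) {
	ticker := time.NewTicker(heartbeatInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// The real coordinator sends a txn heartbeat call through
			// the wrapped sender here; stubbed out for the sketch.
			fmt.Printf("heartbeat for txn %s\n", txnID)
		case <-closer:
			return // transaction finished or abandoned; stop beating
		}
	}
}

func main() {
	closer := make(chan struct{})
	go heartbeat("txn-1", closer)
	time.Sleep(2500 * time.Millisecond) // observe a couple of beats
	close(closer)
	time.Sleep(50 * time.Millisecond) // give the goroutine time to exit
}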