Example #1
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock should not be held
// while calling this method.
func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
	// Load the system config.
	cfg := bq.gossip.GetSystemConfig()
	if cfg == nil {
		bq.eventLog.Infof(log.V(1), "no system config available. skipping")
		return nil
	}

	desc := repl.Desc()
	if !bq.impl.acceptsUnsplitRanges() && cfg.NeedsSplit(desc.StartKey, desc.EndKey) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		bq.eventLog.Infof(log.V(3), "%s: split needed; skipping", repl)
		return nil
	}

	// If the queue requires a replica to have the range leader lease in
	// order to be processed, check whether this replica has the leader
	// lease, renewing or acquiring it if necessary.
	if bq.impl.needsLeaderLease() {
		// Renew or acquire the leader lease via redirectOnOrAcquireLeaderLease.
		if err := repl.redirectOnOrAcquireLeaderLease(tracing.NilSpan()); err != nil {
			bq.eventLog.Infof(log.V(3), "%s: could not acquire leader lease; skipping", repl)
			return nil
		}
	}

	bq.eventLog.Infof(log.V(3), "%s: processing", repl)
	start := time.Now()
	if err := bq.impl.process(clock.Now(), repl, cfg); err != nil {
		return err
	}
	bq.eventLog.Infof(log.V(2), "%s: done: %s", repl, time.Since(start))
	return nil
}
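For orientation, here is a minimal sketch of how a caller might drain the queue through processReplica, borrowing the locked pop pattern from the processOne variants below; the drain loop itself is hypothetical glue, not code from the repository.
func (bq *baseQueue) drain(clock *hlc.Clock) {
	for {
		// Hold bq.mu only around pop; it must not be held across
		// processReplica, per the comment above.
		bq.mu.Lock()
		repl := bq.pop()
		bq.mu.Unlock()
		if repl == nil {
			return
		}
		if err := bq.processReplica(repl, clock); err != nil {
			log.Errorf("failure processing replica %s: %s", repl, err)
		}
	}
}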
Example #2
func runCommandAndExpireLease(t *testing.T, clock *hlc.Clock, sqlDB *gosql.DB, sql string) {
	// Run a transaction that lets its table lease expire.
	txn, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// Use snapshot isolation so that the transaction is pushed without being
	// restarted.
	if _, err := txn.Exec("SET TRANSACTION ISOLATION LEVEL SNAPSHOT"); err != nil {
		t.Fatal(err)
	}
	if _, err := txn.Exec(sql); err != nil {
		t.Fatal(err)
	}

	// Update the clock to expire the table lease.
	_ = clock.Update(clock.Now().Add(int64(2*csql.LeaseDuration), 0))

	// Run another transaction that pushes the above transaction. Close the
	// returned rows so the connection is released.
	rows, err := sqlDB.Query("SELECT * FROM t.kv")
	if err != nil {
		t.Fatal(err)
	}
	rows.Close()

	// Commit and see the aborted txn.
	if err := txn.Commit(); !testutils.IsError(err, "pq: restart transaction: txn aborted") {
		t.Fatalf("%s, err = %s", sql, err)
	}
}
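A hypothetical invocation of this helper; t.kv matches the table queried inside the helper, while the UPDATE statement itself is an assumption for illustration.
runCommandAndExpireLease(t, clock, sqlDB, `UPDATE t.kv SET v = 'new' WHERE k = 'a'`)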
Example #3
File: queue.go Project: nkhuyu/cockroach
func (bq *baseQueue) processOne(clock *hlc.Clock) {
	start := time.Now()
	bq.Lock()
	repl := bq.pop()
	bq.Unlock()
	if repl != nil {
		now := clock.Now()
		if log.V(1) {
			log.Infof("processing replica %s from %s queue...", repl, bq.name)
		}
		// If the queue requires a replica to have the range leader lease in
		// order to be processed, check whether this replica has the leader
		// lease, renewing or acquiring it if necessary.
		if bq.impl.needsLeaderLease() {
			// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
			args := &proto.GetRequest{RequestHeader: proto.RequestHeader{Timestamp: now}}
			if err := repl.redirectOnOrAcquireLeaderLease(nil /* Trace */, args.Header().Timestamp); err != nil {
				if log.V(1) {
					log.Infof("this replica of %s could not acquire leader lease; skipping...", repl)
				}
				return
			}
		}
		if err := bq.impl.process(now, repl); err != nil {
			log.Errorf("failure processing replica %s from %s queue: %s", repl, bq.name, err)
		} else if log.V(2) {
			log.Infof("processed replica %s from %s queue in %s", repl, bq.name, time.Now().Sub(start))
		}
	}
}
Example #4
// Clear clears the cache and resets the low water mark to the
// current time plus the maximum clock offset.
func (tc *TimestampCache) Clear(clock *hlc.Clock) {
	tc.rCache.Clear()
	tc.wCache.Clear()
	tc.lowWater = clock.Now()
	tc.lowWater.WallTime += clock.MaxOffset().Nanoseconds()
	tc.latest = tc.lowWater
}
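The arithmetic is easier to see with a standalone clock. A minimal sketch, assuming the hlc.NewClock/SetMaxOffset constructors of this era; the 250ms offset is an arbitrary example value.
clock := hlc.NewClock(hlc.UnixNano)
clock.SetMaxOffset(250 * time.Millisecond)
lowWater := clock.Now()
// No other node can currently generate a timestamp above now + max offset,
// so this is a safe floor for every entry evicted by the Clear.
lowWater.WallTime += clock.MaxOffset().Nanoseconds()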
Example #5
File: queue.go Project: Gardenya/cockroach
func (bq *baseQueue) processOne(clock *hlc.Clock, stopper *stop.Stopper) {
	stopper.RunTask(func() {
		start := time.Now()
		bq.Lock()
		rng := bq.pop()
		bq.Unlock()
		if rng != nil {
			now := clock.Now()
			if log.V(1) {
				log.Infof("processing range %s from %s queue...", rng, bq.name)
			}
			// If the queue requires the leader lease to process the
			// range, check whether this replica has the leader lease,
			// renewing or acquiring it if necessary.
			if bq.impl.needsLeaderLease() {
				// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
				args := &proto.GetRequest{RequestHeader: proto.RequestHeader{Timestamp: now}}
				if err := rng.redirectOnOrAcquireLeaderLease(nil /* Trace */, args.Header().Timestamp); err != nil {
					if log.V(1) {
						log.Infof("this replica of %s could not acquire leader lease; skipping...", rng)
					}
					return
				}
			}
			if err := bq.impl.process(now, rng); err != nil {
				log.Errorf("failure processing range %s from %s queue: %s", rng, bq.name, err)
			}
			if log.V(1) {
				log.Infof("processed range %s from %s queue in %s", rng, bq.name, time.Now().Sub(start))
			}
		}
	})
}
Example #6
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(start time.Time, clock *hlc.Clock, stopper *stop.Stopper,
	repl *Replica) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof("Wait time interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}

			return !stopper.RunTask(func() {
				// Try adding replica to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(repl, clock.Now())
				}
			})
		case repl := <-rs.removed:
			// Remove replica from all queues as applicable.
			for _, q := range rs.queues {
				q.MaybeRemove(repl)
			}
			if log.V(6) {
				log.Infof("removed replica %s", repl)
			}
		case <-stopper.ShouldStop():
			return true
		}
	}
}
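A hypothetical caller shape: the enclosing scan loop abandons its pass as soon as waitAndProcess reports true. The loop and the replicas slice are illustrative glue, not the repository's actual scan loop.
for _, repl := range replicas { // replicas: assumed iteration source
	if rs.waitAndProcess(start, clock, stopper, repl) {
		return // the stopper fired; shut the scanner down
	}
}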
Example #7
// waitAndProcess waits for the pace interval and processes the range
// if rng is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a range from queues when it
// is signaled via the removed channel.
func (rs *rangeScanner) waitAndProcess(start time.Time, clock *hlc.Clock, stopper *util.Stopper,
	rng *Range) bool {
	waitInterval := rs.paceInterval(start, time.Now())
	nextTime := time.After(waitInterval)
	if log.V(6) {
		log.Infof("Wait time interval set to %s", waitInterval)
	}
	for {
		select {
		case <-nextTime:
			if rng == nil {
				return false
			}
			if !stopper.StartTask() {
				return true
			}
			// Try adding range to all queues.
			for _, q := range rs.queues {
				q.MaybeAdd(rng, clock.Now())
			}
			stopper.FinishTask()
			return false
		case rng := <-rs.removed:
			// Remove range from all queues as applicable.
			for _, q := range rs.queues {
				q.MaybeRemove(rng)
			}
			if log.V(6) {
				log.Infof("removed range %s", rng)
			}
		case <-stopper.ShouldStop():
			return true
		}
	}
}
Example #8
// newTxn begins a transaction. For testing purposes, this comes with an ID
// pre-initialized, but with the Writing flag set to false.
func newTxn(clock *hlc.Clock, baseKey proto.Key) *proto.Transaction {
	f, l, fun := caller.Lookup(1)
	name := fmt.Sprintf("%s:%d %s", f, l, fun)
	txn := proto.NewTransaction("test", baseKey, 1, proto.SERIALIZABLE, clock.Now(), clock.MaxOffset().Nanoseconds())
	txn.Name = name
	return txn
}
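A hypothetical use in a test body; the key is illustrative.
txn := newTxn(clock, proto.Key("a"))
// txn.Name now carries the caller's file:line, so failures involving
// this transaction are easy to trace back to the test that made it.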
Example #9
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
	start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof(context.TODO(), "wait timer interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			if log.V(6) {
				log.Infof(context.TODO(), "wait timer fired")
			}
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}

			return stopper.RunTask(func() {
				// Try adding replica to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(repl, clock.Now())
				}
			}) != nil

		case repl := <-rs.removed:
			rs.removeReplica(repl)

		case <-stopper.ShouldStop():
			return true
		}
	}
}
Example #10
// Clear clears the cache and resets the low water mark to the
// current time plus the maximum clock offset.
func (tc *TimestampCache) Clear(clock *hlc.Clock) {
	tc.rCache.Clear()
	tc.wCache.Clear()
	tc.lowWater = clock.Now()
	// TODO(tschottdorf): It's dangerous to inject timestamps (which will make
	// it into the HLC) like that.
	tc.lowWater.WallTime += clock.MaxOffset().Nanoseconds()
	tc.latest = tc.lowWater
}
Example #11
// scanLoop loops endlessly, scanning through ranges available via
// the range iterator, or until the scanner is stopped. The iteration
// is paced to complete a full scan in approximately the scan interval.
func (rs *rangeScanner) scanLoop(clock *hlc.Clock, stopper *util.Stopper) {
	start := time.Now()
	stats := &storeStats{}

	for {
		elapsed := time.Since(start)
		remainingNanos := rs.interval.Nanoseconds() - elapsed.Nanoseconds()
		if remainingNanos < 0 {
			remainingNanos = 0
		}
		nextIteration := time.Duration(remainingNanos)
		if count := rs.iter.EstimatedCount(); count > 0 {
			nextIteration = time.Duration(remainingNanos / int64(count))
		}
		log.V(6).Infof("next range scan iteration in %s", nextIteration)

		select {
		case <-time.After(nextIteration):
			rng := rs.iter.Next()
			if rng != nil {
				// Try adding range to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(rng, clock.Now())
				}
				stats.RangeCount++
				stats.MVCC.Accumulate(rng.stats.GetMVCC())
			} else {
				// Otherwise, we're done with the iteration. Reset iteration and start time.
				rs.iter.Reset()
				start = time.Now()
				// Increment iteration counter.
				atomic.AddInt64(&rs.count, 1)
				// Store the most recent scan results in the scanner's stats.
				atomic.StorePointer(&rs.stats, unsafe.Pointer(stats))
				stats = &storeStats{}
				log.V(6).Infof("reset range scan iteration")
			}

		case rng := <-rs.removed:
			// Remove range from all queues as applicable.
			for _, q := range rs.queues {
				q.MaybeRemove(rng)
			}
			log.V(6).Infof("removed range %s", rng)

		case <-stopper.ShouldStop():
			// Exit the loop.
			stopper.SetStopped()
			return
		}
	}
}
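To make the pacing concrete, a worked example with illustrative numbers: a 10s scan interval, 4s already elapsed, and 60 ranges estimated to remain gives one iteration every 100ms.
remaining := (10*time.Second - 4*time.Second).Nanoseconds() // 6s left in the interval
nextIteration := time.Duration(remaining / 60)              // 100ms per remaining range
fmt.Println(nextIteration)                                  // prints "100ms"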
Example #12
File: queue.go Project: harryyeh/cockroach
func (bq *baseQueue) processOne(clock *hlc.Clock) {
	start := time.Now()
	bq.Lock()
	repl := bq.pop()
	bq.Unlock()

	if repl == nil {
		return
	}

	now := clock.Now()

	// Load the system config.
	cfg := bq.gossip.GetSystemConfig()
	if cfg == nil {
		log.Infof("no system config available. skipping...")
		return
	}

	desc := repl.Desc()
	if !bq.impl.acceptsUnsplitRanges() && cfg.NeedsSplit(desc.StartKey, desc.EndKey) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		if log.V(3) {
			log.Infof("range %s needs to be split; skipping processing", repl)
		}
		return
	}

	if log.V(3) {
		log.Infof("processing replica %s from %s queue...", repl, bq.name)
	}

	// If the queue requires a replica to have the range leader lease in
	// order to be processed, check whether this replica has the leader
	// lease, renewing or acquiring it if necessary.
	if bq.impl.needsLeaderLease() {
		// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
		args := &proto.GetRequest{RequestHeader: proto.RequestHeader{Timestamp: now}}
		if err := repl.redirectOnOrAcquireLeaderLease(nil /* Trace */, args.Header().Timestamp); err != nil {
			if log.V(3) {
				log.Infof("this replica of %s could not acquire leader lease; skipping...", repl)
			}
			return
		}
	}
	if err := bq.impl.process(now, repl, cfg); err != nil {
		log.Errorf("failure processing replica %s from %s queue: %s", repl, bq.name, err)
	} else if log.V(2) {
		log.Infof("processed replica %s from %s queue in %s", repl, bq.name, time.Now().Sub(start))
	}
}
Example #13
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock should not be held
// while calling this method.
func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
	bq.processMu.Lock()
	defer bq.processMu.Unlock()

	// Load the system config.
	cfg, ok := bq.gossip.GetSystemConfig()
	if !ok {
		log.VEventf(1, bq.ctx, "no system config available. skipping")
		return nil
	}

	if bq.requiresSplit(cfg, repl) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		log.VEventf(3, bq.ctx, "%s: split needed; skipping", repl)
		return nil
	}

	sp := repl.store.Tracer().StartSpan(bq.name)
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	defer sp.Finish()
	log.Tracef(ctx, "processing replica %s", repl)

	// If the queue requires a replica to have the range lease in
	// order to be processed, check whether this replica has the range
	// lease, renewing or acquiring it if necessary.
	if bq.needsLease {
		// Renew or acquire the range lease via redirectOnOrAcquireLease.
		if err := repl.redirectOnOrAcquireLease(ctx); err != nil {
			if _, harmless := err.GetDetail().(*roachpb.NotLeaseHolderError); harmless {
				log.VEventf(3, bq.ctx, "%s: not holding lease; skipping", repl)
				return nil
			}
			return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
		}
		log.Trace(ctx, "got range lease")
	}

	log.VEventf(3, bq.ctx, "%s: processing", repl)
	start := timeutil.Now()
	if err := bq.impl.process(ctx, clock.Now(), repl, cfg); err != nil {
		return err
	}
	log.VEventf(2, bq.ctx, "%s: done: %s", repl, timeutil.Since(start))
	log.Trace(ctx, "done")
	return nil
}
Example #14
File: queue.go Project: CubeLite/cockroach
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock should not be held
// while calling this method.
func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
	// Load the system config.
	cfg, ok := bq.gossip.GetSystemConfig()
	if !ok {
		bq.eventLog.VInfof(log.V(1), "no system config available. skipping")
		return nil
	}

	desc := repl.Desc()
	if !bq.acceptsUnsplitRanges && cfg.NeedsSplit(desc.StartKey, desc.EndKey) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		bq.eventLog.VInfof(log.V(3), "%s: split needed; skipping", repl)
		return nil
	}

	sp := repl.store.Tracer().StartSpan(bq.name)
	ctx := opentracing.ContextWithSpan(repl.context(context.Background()), sp)
	log.Trace(ctx, fmt.Sprintf("queue start for range %d", repl.RangeID))
	defer sp.Finish()

	// If the queue requires a replica to have the range leader lease in
	// order to be processed, check whether this replica has the leader
	// lease, renewing or acquiring it if necessary.
	if bq.needsLeaderLease {
		// Renew or acquire the leader lease via redirectOnOrAcquireLeaderLease.
		if err := repl.redirectOnOrAcquireLeaderLease(ctx); err != nil {
			if _, harmless := err.GetDetail().(*roachpb.NotLeaderError); harmless {
				bq.eventLog.VInfof(log.V(3), "%s: not holding lease; skipping", repl)
				return nil
			}
			return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
		}
		log.Trace(ctx, "got range lease")
	}

	bq.eventLog.VInfof(log.V(3), "%s: processing", repl)
	start := timeutil.Now()
	if err := bq.impl.process(ctx, clock.Now(), repl, cfg); err != nil {
		return err
	}
	bq.eventLog.VInfof(log.V(2), "%s: done: %s", repl, timeutil.Since(start))
	log.Trace(ctx, "done")
	return nil
}
Example #15
// NewTransaction creates a new transaction. The transaction key is
// composed using the specified baseKey (for locality with data
// affected by the transaction) and a random UUID to guarantee
// uniqueness. The specified user-level priority is combined with
// a randomly chosen value to yield a final priority, used to settle
// write conflicts in a way that avoids starvation of long-running
// transactions (see Range.InternalPushTxn).
func NewTransaction(baseKey engine.Key, userPriority int32,
	isolation proto.IsolationType, clock *hlc.Clock) *proto.Transaction {
	// Compute priority by adjusting based on userPriority factor.
	if userPriority < 1 {
		userPriority = 1
	}
	priority := math.MaxInt32 - util.CachedRand.Int31n(math.MaxInt32/userPriority)
	// Compute timestamp and max timestamp.
	now := clock.Now()
	max := now
	max.WallTime += clock.MaxDrift().Nanoseconds()

	return &proto.Transaction{
		ID:           append(baseKey, []byte(uuid.New())...),
		Priority:     priority,
		Isolation:    isolation,
		Timestamp:    now,
		MaxTimestamp: max,
	}
}
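A hypothetical call; the key, user priority, and isolation level are illustrative.
txn := NewTransaction(engine.Key("accounts/alice"), 10, proto.SERIALIZABLE, clock)
// txn.Timestamp is the clock's current reading; txn.MaxTimestamp adds the
// maximum drift, bounding how far ahead another node's clock may read.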
Example #16
// MakeRuntimeStatSampler constructs a new RuntimeStatSampler object.
func MakeRuntimeStatSampler(clock *hlc.Clock) RuntimeStatSampler {
	// Construct the build info metric. It is constant.
	// We first set the labels on the metadata.
	info := build.GetInfo()
	timestamp, err := info.Timestamp()
	if err != nil {
		// We can't panic here, tests don't have a build timestamp.
		log.Warningf(context.TODO(), "Could not parse build timestamp: %v", err)
	}

	metaBuildTimestamp.AddLabel("tag", info.Tag)
	metaBuildTimestamp.AddLabel("go_version", info.GoVersion)

	buildTimestamp := metric.NewGauge(metaBuildTimestamp)
	buildTimestamp.Update(timestamp)

	return RuntimeStatSampler{
		clock:          clock,
		startTimeNanos: clock.PhysicalNow(),
		CgoCalls:       metric.NewGauge(metaCgoCalls),
		Goroutines:     metric.NewGauge(metaGoroutines),
		GoAllocBytes:   metric.NewGauge(metaGoAllocBytes),
		GoTotalBytes:   metric.NewGauge(metaGoTotalBytes),
		CgoAllocBytes:  metric.NewGauge(metaCgoAllocBytes),
		CgoTotalBytes:  metric.NewGauge(metaCgoTotalBytes),
		GcCount:        metric.NewGauge(metaGCCount),
		GcPauseNS:      metric.NewGauge(metaGCPauseNS),
		GcPausePercent: metric.NewGaugeFloat64(metaGCPausePercent),
		CPUUserNS:      metric.NewGauge(metaCPUUserNS),
		CPUUserPercent: metric.NewGaugeFloat64(metaCPUUserPercent),
		CPUSysNS:       metric.NewGauge(metaCPUSysNS),
		CPUSysPercent:  metric.NewGaugeFloat64(metaCPUSysPercent),
		Rss:            metric.NewGauge(metaRSS),
		Uptime:         metric.NewGauge(metaUptime),
		BuildTimestamp: buildTimestamp,
	}
}
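A hypothetical construction, assuming the hlc.NewClock(hlc.UnixNano) constructor from this era of the codebase.
clock := hlc.NewClock(hlc.UnixNano)
sampler := MakeRuntimeStatSampler(clock)
// Construction alone records nothing; the gauges (sampler.Goroutines,
// sampler.Rss, ...) are registered and sampled elsewhere.
_ = sampler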
Example #17
// Clear clears the cache and resets the high water mark to the
// current time plus the maximum clock skew.
func (tc *TimestampCache) Clear(clock *hlc.Clock) {
	tc.cache.Clear()
	tc.highWater = clock.Now()
	tc.highWater.WallTime += clock.MaxDrift().Nanoseconds()
	tc.latest = tc.highWater
}
Example #18
// LeaseExpiration returns the amount, as an int64, by which to increment a
// manual clock to ensure that all active range leases expire.
func (s *Store) LeaseExpiration(clock *hlc.Clock) int64 {
	// Due to lease extensions, the remaining interval can be longer than just
	// the sum of the offset (=length of stasis period) and the active
	// duration, but definitely not by 2x.
	return 2 * int64(s.ctx.rangeLeaseActiveDuration+clock.MaxOffset())
}
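In tests this is typically paired with a manual clock; a minimal sketch assuming the hlc.ManualClock API, with store standing in for an initialized *Store.
manual := hlc.NewManualClock(0)
clock := hlc.NewClock(manual.UnixNano)
// Advancing the manual clock by LeaseExpiration guarantees that every
// range lease active under this clock has expired.
manual.Increment(store.LeaseExpiration(clock))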
Example #19
// newTxn begins a transaction.
func newTxn(clock *hlc.Clock, baseKey proto.Key) *proto.Transaction {
	return proto.NewTransaction("test", baseKey, 1, proto.SERIALIZABLE, clock.Now(), clock.MaxOffset().Nanoseconds())
}
Example #20
// hasSomeLifeLeft returns true if the lease has at least MinLeaseDuration
// left before expiration, and thus can still be used.
func (s *LeaseState) hasSomeLifeLeft(clock *hlc.Clock) bool {
	minDesiredExpiration := clock.Now().GoTime().Add(MinLeaseDuration)
	return s.expiration.After(minDesiredExpiration)
}
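A hypothetical guard built on the helper; acquireFreshLease is an assumed stand-in for the real renewal path, which this snippet does not show.
if !lease.hasSomeLifeLeft(clock) {
	lease = acquireFreshLease() // assumed stand-in for lease renewal
}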