Code Example #1
func getGauge(t *testing.T, s *storage.Store, key string) int64 {
	gauge := s.Registry().GetGauge(key)
	if gauge == nil {
		t.Fatal(util.ErrorfSkipFrames(1, "store did not contain gauge %s", key))
	}
	return gauge.Value()
}
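A typical call site wraps the helper in a threshold assertion. A minimal sketch, assuming a test that already holds a *storage.Store; the "livebytes" gauge name matches the one used by verifyStats in example #18 below, while the function name is illustrative:

func assertLiveBytesNonZero(t *testing.T, s *storage.Store) {
	// Hypothetical assertion built on the getGauge helper above.
	if v := getGauge(t, s, "livebytes"); v == 0 {
		t.Errorf("expected livebytes > 0, got %d", v)
	}
}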
Code Example #2
func verifyRocksDBStats(t *testing.T, s *storage.Store) {
	if err := s.ComputeMetrics(0); err != nil {
		t.Fatal(err)
	}

	m := s.Metrics()
	testcases := []struct {
		gauge *metric.Gauge
		min   int64
	}{
		{m.RdbBlockCacheHits, 10},
		{m.RdbBlockCacheMisses, 0},
		{m.RdbBlockCacheUsage, 0},
		{m.RdbBlockCachePinnedUsage, 0},
		{m.RdbBloomFilterPrefixChecked, 20},
		{m.RdbBloomFilterPrefixUseful, 20},
		{m.RdbMemtableHits, 0},
		{m.RdbMemtableMisses, 0},
		{m.RdbMemtableTotalSize, 5000},
		{m.RdbFlushes, 1},
		{m.RdbCompactions, 0},
		{m.RdbTableReadersMemEstimate, 50},
	}
	for _, tc := range testcases {
		if a := tc.gauge.Value(); a < tc.min {
			t.Errorf("gauge %s = %d < min %d", tc.gauge.GetName(), a, tc.min)
		}
	}
}
Code Example #3
func getCounter(t *testing.T, s *storage.Store, key string) int64 {
	counter := s.Registry().GetCounter(key)
	if counter == nil {
		t.Fatal(util.ErrorfSkipFrames(1, "store did not contain counter %s", key))
	}
	return counter.Count()
}
Code Example #4
File: local_kv.go Project: bigrats/cockroach
// ExecuteCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup of header.Key in the ranges slice.
func (kv *LocalKV) ExecuteCmd(method string, args proto.Request, replyChan interface{}) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, then a little bending over
	// backwards here. We need to find the Store, but all we have is the
	// Key. So find its Range locally, and pull out its Replica which we
	// use to find the Store. This lets us use the same codepath below
	// (store.ExecuteCmd) for both locally and remotely originated
	// commands.
	header := args.Header()
	if header.Replica.NodeID == 0 {
		if repl := kv.lookupReplica(header.Key); repl != nil {
			header.Replica = *repl
		} else {
			err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
		}
	}
	if err == nil {
		store, err = kv.GetStore(&header.Replica)
	}
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(proto.Response)
	if err != nil {
		reply.Header().SetGoError(err)
	} else {
		store.ExecuteCmd(method, args, reply)
	}
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}
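The reply allocation above leans on reflection: replyChan arrives as an interface{} holding a concrete channel such as chan *proto.GetResponse, so TypeOf(replyChan).Elem() yields the channel's element type and the second .Elem() the struct that element points to. A self-contained sketch of the same pattern, with a stand-in response type rather than the real proto messages:

package main

import (
	"fmt"
	"reflect"
)

// GetResponse is a stand-in for a concrete proto.Response implementation.
type GetResponse struct{ Value string }

func main() {
	replyChan := make(chan *GetResponse, 1)
	// chan *GetResponse -> *GetResponse -> GetResponse; New then yields a *GetResponse.
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(*GetResponse)
	reply.Value = "ok"
	// Send the reply through the channel reflectively, as ExecuteCmd does.
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
	fmt.Println((<-replyChan).Value) // prints "ok"
}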
Code Example #5
File: local_db.go Project: hahan/cockroach
// executeCmd runs Store.ExecuteCmd in a goroutine. A channel with
// element type equal to the reply type is created and returned
// immediately. The reply is sent to the channel once the cmd has been
// executed by the store. The store is looked up from the store map
// if specified by header.Replica; otherwise, the command is being
// executed locally, and the replica is determined via lookup of
// header.Key in the ranges slice.
func (db *LocalDB) executeCmd(method string, header *storage.RequestHeader, args, reply interface{}) interface{} {
	chanVal := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, reflect.TypeOf(reply)), 1)
	replyVal := reflect.ValueOf(reply)
	go func() {
		// If the replica isn't specified in the header, look it up.
		var err error

		var store *storage.Store
		// If we aren't given a Replica, then a little bending over backwards here. We need to find the Store, but all
		// we have is the Key. So find its Range locally, and pull out its Replica which we use to find the Store.
		// This lets us use the same codepath below (store.ExecuteCmd) for both locally and remotely originated
		// commands.
		if header.Replica.NodeID == 0 {
			if repl := db.lookupReplica(header.Key); repl != nil {
				header.Replica = *repl
			} else {
				err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
			}
		}
		if err == nil {
			store, err = db.GetStore(&header.Replica)
		}
		if err != nil {
			reflect.Indirect(replyVal).FieldByName("Error").Set(reflect.ValueOf(err))
		} else {
			store.ExecuteCmd(method, header, args, reply)
		}
		chanVal.Send(replyVal)
	}()
	return chanVal.Interface()
}
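Here even the channel is constructed reflectively, so its element type always matches whatever concrete reply type the caller passed. A stand-alone sketch of that piece, again with a placeholder reply type:

package main

import (
	"fmt"
	"reflect"
)

// ScanResponse stands in for whatever concrete reply type the caller passes.
type ScanResponse struct{ Error error }

func main() {
	reply := &ScanResponse{}
	// Build a buffered chan *ScanResponse of capacity 1, as executeCmd does.
	chanVal := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, reflect.TypeOf(reply)), 1)
	chanVal.Send(reflect.ValueOf(reply))
	// Callers can receive on it as an ordinary typed channel.
	ch := chanVal.Interface().(chan *ScanResponse)
	fmt.Println(<-ch == reply) // prints "true"
}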
Code Example #6
File: local_db.go Project: GavinHwa/cockroach
// executeCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup of header.Key in the ranges slice.
func (db *LocalDB) executeCmd(method string, args storage.Request, reply storage.Response) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, then a little bending over
	// backwards here. We need to find the Store, but all we have is the
	// Key. So find its Range locally, and pull out its Replica which we
	// use to find the Store.  This lets us use the same codepath below
	// (store.ExecuteCmd) for both locally and remotely originated
	// commands.
	header := args.Header()
	if header.Replica.NodeID == 0 {
		if repl := db.lookupReplica(header.Key); repl != nil {
			header.Replica = *repl
		} else {
			err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
		}
	}
	if err == nil {
		store, err = db.GetStore(&header.Replica)
	}
	if err != nil {
		reply.Header().Error = err
	} else {
		store.ExecuteCmd(method, args, reply)
	}
}
Code Example #7
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written or the given range has split.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		_, err := client.SendWrappedWith(store, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs)
		// When the split occurs in the background, our writes may start failing.
		// We know we can stop writing when this happens.
		if _, ok := err.(*roachpb.RangeKeyMismatchError); ok {
			return
		} else if err != nil {
			t.Fatal(err)
		}
	}
}
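A call site needs only a store, a target range, and a byte budget large enough to trip the split threshold. A hypothetical invocation (the range ID, prefix, and 64KB budget are illustrative, not values from the original tests):

func fillFirstRange(t *testing.T, store *storage.Store) {
	// Write random data under prefix "a" until roughly 64KB accumulates
	// or the range splits underneath us, whichever comes first.
	fillRange(store, 1, roachpb.Key("a"), 64<<10, t)
}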
Code Example #8
// executeCmd runs Store.ExecuteCmd in a goroutine. A channel with
// element type equal to the reply type is created and returned
// immediately. The reply is sent to the channel once the cmd has been
// executed by the store. The store is looked up from the store map
// if specified by header.Replica; otherwise, the command is being
// executed locally, and the replica is determined via lookup of
// header.Key in the ranges slice.
func (db *LocalDB) executeCmd(method string, header *storage.RequestHeader, args, reply interface{}) interface{} {
	chanVal := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, reflect.TypeOf(reply)), 1)
	replyVal := reflect.ValueOf(reply)
	go func() {
		// If the replica isn't specified in the header, look it up.
		var err error
		var store *storage.Store
		if header.Replica.NodeID == 0 {
			if repl := db.lookupReplica(header.Key); repl != nil {
				header.Replica = *repl
			} else {
				err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
			}
		}
		if err == nil {
			store, err = db.GetStore(&header.Replica)
		}
		if err != nil {
			reflect.Indirect(replyVal).FieldByName("Error").Set(reflect.ValueOf(err))
		} else {
			store.ExecuteCmd(method, header, args, reply)
		}
		chanVal.Send(replyVal)
	}()
	return chanVal.Interface()
}
Code Example #9
func verifyRocksDBStats(t *testing.T, s *storage.Store) {
	if err := s.ComputeMetrics(); err != nil {
		t.Fatal(err)
	}

	testcases := []struct {
		gaugeName string
		min       int64
	}{
		{"rocksdb.block.cache.hits", 10},
		{"rocksdb.block.cache.misses", 0},
		{"rocksdb.block.cache.usage", 0},
		{"rocksdb.block.cache.pinned-usage", 0},
		{"rocksdb.bloom.filter.prefix.checked", 20},
		{"rocksdb.bloom.filter.prefix.useful", 20},
		{"rocksdb.memtable.hits", 0},
		{"rocksdb.memtable.misses", 0},
		{"rocksdb.memtable.total-size", 5000},
		{"rocksdb.flushes", 1},
		{"rocksdb.compactions", 0},
		{"rocksdb.table-readers-mem-estimate", 50},
	}
	for _, tc := range testcases {
		if a := getGauge(t, s, tc.gaugeName); a < tc.min {
			t.Errorf("gauge %s = %d < min %d", tc.gaugeName, a, tc.min)
		}
	}
}
Code Example #10
File: local_kv.go Project: bdotdub/cockroach
// ExecuteCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each of the stores.
func (kv *LocalKV) ExecuteCmd(method string, args proto.Request, replyChan interface{}) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, then a little bending over
	// backwards here. We need to find the Store, but all we have is the
	// Key. So find its Range locally. This lets us use the same
	// codepath below (store.ExecuteCmd) for both locally and remotely
	// originated commands.
	header := args.Header()
	if header.Replica.StoreID == 0 {
		var repl *proto.Replica
		repl, err = kv.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.Replica = *repl
		}
	}
	if err == nil {
		store, err = kv.GetStore(header.Replica.StoreID)
	}
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(proto.Response)
	if err != nil {
		reply.Header().SetGoError(err)
	} else {
		store.ExecuteCmd(method, args, reply)
		if err := reply.Verify(args); err != nil {
			reply.Header().SetGoError(err)
		}
	}
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}
Code Example #11
// compareStoreStatus ensures that the actual store status for the passed in
// store is updated correctly. It checks that the Desc.StoreID, Desc.Attrs,
// Desc.Node, Desc.Capacity.Capacity, NodeID, RangeCount, ReplicatedRangeCount
// are exactly correct and that the bytes and counts for Live, Key and Val are
// at least the expected value.
// The latest actual stats are returned.
func compareStoreStatus(t *testing.T, store *storage.Store, expectedStoreStatus *storage.StoreStatus, testNumber int) *storage.StoreStatus {
	storeStatusKey := keys.StoreStatusKey(int32(store.Ident.StoreID))
	gArgs, gReply := getArgs(storeStatusKey, 1, store.Ident.StoreID)
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: gArgs, Reply: gReply}); err != nil {
		t.Fatalf("%v: failure getting store status: %s", testNumber, err)
	}
	if gReply.Value == nil {
		t.Errorf("%v: could not find store status at: %s", testNumber, storeStatusKey)
	}
	storeStatus := &storage.StoreStatus{}
	if err := gogoproto.Unmarshal(gReply.Value.GetBytes(), storeStatus); err != nil {
		t.Fatalf("%v: could not unmarshal store status: %+v", testNumber, gReply)
	}

	// Values must match exactly.
	if expectedStoreStatus.Desc.StoreID != storeStatus.Desc.StoreID {
		t.Errorf("%v: actual Desc.StoreID does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if !reflect.DeepEqual(expectedStoreStatus.Desc.Attrs, storeStatus.Desc.Attrs) {
		t.Errorf("%v: actual Desc.Attrs does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if !reflect.DeepEqual(expectedStoreStatus.Desc.Node, storeStatus.Desc.Node) {
		t.Errorf("%v: actual Desc.Attrs does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Desc.Capacity.Capacity != expectedStoreStatus.Desc.Capacity.Capacity {
		t.Errorf("%v: actual Desc.Capacity.Capacity does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if expectedStoreStatus.NodeID != storeStatus.NodeID {
		t.Errorf("%v: actual node ID does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if expectedStoreStatus.RangeCount != storeStatus.RangeCount {
		t.Errorf("%v: actual RangeCount does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if expectedStoreStatus.ReplicatedRangeCount != storeStatus.ReplicatedRangeCount {
		t.Errorf("%v: actual ReplicatedRangeCount does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}

	// Values should be >= to expected values.
	if storeStatus.Stats.LiveBytes < expectedStoreStatus.Stats.LiveBytes {
		t.Errorf("%v: actual Live Bytes is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.KeyBytes < expectedStoreStatus.Stats.KeyBytes {
		t.Errorf("%v: actual Key Bytes is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.ValBytes < expectedStoreStatus.Stats.ValBytes {
		t.Errorf("%v: actual Val Bytes is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.LiveCount < expectedStoreStatus.Stats.LiveCount {
		t.Errorf("%v: actual Live Count is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.KeyCount < expectedStoreStatus.Stats.KeyCount {
		t.Errorf("%v: actual Key Count is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.ValCount < expectedStoreStatus.Stats.ValCount {
		t.Errorf("%v: actual Val Count is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	return storeStatus
}
Code Example #12
File: local_sender.go Project: harryyeh/cockroach
// SendBatch implements batch.Sender.
func (ls *LocalSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error

	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *proto.Replica
		var rangeID proto.RangeID
		rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}

	ctx = log.Add(ctx,
		log.Method, ba.Method(), // TODO(tschottdorf): Method() always `Batch`.
		log.Key, ba.Key,
		log.RangeID, ba.RangeID)

	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}

	var br *proto.BatchResponse
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			ba.Txn.MaxTimestamp = ba.Txn.Timestamp
		}
		{
			var tmpR proto.Response
			// TODO(tschottdorf): &ba -> ba
			tmpR, err = store.ExecuteCmd(ctx, &ba)
			// TODO(tschottdorf): remove this dance once BatchResponse is returned.
			if tmpR != nil {
				br = tmpR.(*proto.BatchResponse)
				if br.Error != nil {
					panic(proto.ErrorUnexpectedlySet)
				}
			}
		}
	}
	// TODO(tschottdorf): Later error needs to be associated to an index
	// and ideally individual requests don't even have an error in their
	// header. See #1891.
	return br, err
}
Code Example #13
File: local_db.go Project: GavinHwa/cockroach
// AddStore adds the specified store to the store map.
func (db *LocalDB) AddStore(s *storage.Store) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if _, ok := db.storeMap[s.Ident.StoreID]; ok {
		panic(fmt.Sprintf("cannot add store twice to local db: %+v", s.Ident))
	}
	db.storeMap[s.Ident.StoreID] = s

	// Maintain a slice of ranges ordered by StartKey.
	db.ranges = append(db.ranges, s.GetRanges()...)
	sort.Sort(db.ranges)
}
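The sort.Sort call requires db.ranges to implement sort.Interface, ordered by each range's start key. The concrete slice type is not part of this excerpt; a plausible sketch, assuming a RangeSlice named type and the Desc().StartKey accessor seen in the split examples further down (both are assumptions, not the original definition):

// RangeSlice is a hypothetical sort.Interface over ranges, keyed on StartKey.
type RangeSlice []*storage.Range

func (rs RangeSlice) Len() int      { return len(rs) }
func (rs RangeSlice) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
func (rs RangeSlice) Less(i, j int) bool {
	return bytes.Compare(rs[i].Desc().StartKey, rs[j].Desc().StartKey) < 0
}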
Code Example #14
File: local_sender.go Project: routhcr/cockroach
// Send implements the client.Sender interface. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each store's LookupRange method.
func (ls *LocalSender) Send(ctx context.Context, call proto.Call) {
	var err error
	var store *storage.Store

	trace := tracer.FromCtx(ctx)

	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	header := call.Args.Header()
	if header.RaftID == 0 || header.Replica.StoreID == 0 {
		var repl *proto.Replica
		var raftID proto.RaftID
		raftID, repl, err = ls.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.RaftID = raftID
			header.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, call.Method(),
		log.Key, header.Key,
		log.RaftID, header.RaftID)

	if err == nil {
		store, err = ls.GetStore(header.Replica.StoreID)
	}
	var reply proto.Response
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if header.Txn != nil && header.Txn.CertainNodes.Contains(header.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			header.Txn.MaxTimestamp = header.Txn.Timestamp
		}
		reply, err = store.ExecuteCmd(ctx, call.Args)
	}
	if reply != nil {
		gogoproto.Merge(call.Reply, reply)
	}
	if call.Reply.Header().Error != nil {
		panic(proto.ErrorUnexpectedlySet)
	}
	if err != nil {
		call.Reply.Header().SetGoError(err)
	}
}
Code Example #15
func createSplitRanges(store *storage.Store) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
	args := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		return nil, nil, err
	}

	rangeADesc := store.LookupReplica([]byte("a"), nil).Desc()
	rangeBDesc := store.LookupReplica([]byte("c"), nil).Desc()

	if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) {
		log.Errorf("split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeBDesc.StartKey)
	}

	return rangeADesc, rangeBDesc, nil
}
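A typical test sets up the split once and then exercises both sides. A hypothetical wrapper that only adds the error handling:

func mustCreateSplitRanges(t *testing.T, store *storage.Store) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor) {
	// Hypothetical: fail the test outright if the admin split fails.
	aDesc, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}
	return aDesc, bDesc
}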
Code Example #16
File: local_sender.go Project: nporsche/cockroach
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *LocalSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error

	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		key, endKey := keys.Range(ba)
		rangeID, repl, err = ls.lookupReplica(key, endKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}

	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)

	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}

	var br *roachpb.BatchResponse
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		trace.Event("read has no clock uncertainty")
		ba.Txn.MaxTimestamp = ba.Txn.Timestamp
	}
	br, pErr := store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Code Example #17
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val, rangeID, store.StoreID())
		if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
			t.Fatal(err)
		}
	}
}
Code Example #18
func verifyStats(t *testing.T, s *storage.Store) {
	// Compute real total MVCC statistics from store.
	realStats, err := s.ComputeMVCCStatsTest()
	if err != nil {
		t.Fatal(err)
	}

	// Sanity regression check for bug #4624: ensure intent count is zero.
	if a := realStats.IntentCount; a != 0 {
		t.Fatalf("Expected intent count to be zero, was %d", a)
	}

	// Sanity check: LiveBytes is not zero (ensures we don't have
	// zeroed-out structures).
	if liveBytes := getGauge(t, s, "livebytes"); liveBytes == 0 {
		t.Fatal("Expected livebytes to be non-zero, was zero")
	}
	}

	// Ensure that real MVCC stats match computed stats.
	checkGauge(t, s, "livebytes", realStats.LiveBytes)
	checkGauge(t, s, "keybytes", realStats.KeyBytes)
	checkGauge(t, s, "valbytes", realStats.ValBytes)
	checkGauge(t, s, "intentbytes", realStats.IntentBytes)
	checkGauge(t, s, "livecount", realStats.LiveCount)
	checkGauge(t, s, "keycount", realStats.KeyCount)
	checkGauge(t, s, "valcount", realStats.ValCount)
	checkGauge(t, s, "intentcount", realStats.IntentCount)
	// "Ages" will be different depending on how much time has passed. Even with
	// a manual clock, this can be an issue in tests. Therefore, we do not
	// verify them in this test.

	if t.Failed() {
		t.Log(util.ErrorfSkipFrames(1, "verifyStats failed, aborting test."))
		t.FailNow()
	}
}
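verifyStats leans on a checkGauge helper that this listing does not include. A minimal reconstruction built on the getGauge helper from example #1; this is an assumption about its behavior, not the original implementation:

func checkGauge(t *testing.T, s *storage.Store, key string, exp int64) {
	// Hypothetical: compare the registered gauge against the value
	// recomputed from real MVCC stats, using Errorf so verifyStats can
	// aggregate failures before aborting via t.Failed().
	if a := getGauge(t, s, key); a != exp {
		t.Errorf("gauge %s = %d, expected %d", key, a, exp)
	}
}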
Code Example #19
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written.
func fillRange(store *storage.Store, rangeID proto.RangeID, prefix proto.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val, rangeID, store.StoreID())
		pArgs.Timestamp = store.Clock().Now()
		if _, err := store.ExecuteCmd(context.Background(), &pArgs); err != nil {
			t.Fatal(err)
		}
	}
}
Code Example #20
func createSplitRanges(store *storage.Store) (*proto.RangeDescriptor, *proto.RangeDescriptor, error) {
	args := adminSplitArgs(proto.KeyMin, []byte("b"), 1, store.StoreID())
	if _, err := store.ExecuteCmd(context.Background(), &args); err != nil {
		return nil, nil, err
	}

	rangeA := store.LookupReplica([]byte("a"), nil)
	rangeB := store.LookupReplica([]byte("c"), nil)

	if bytes.Equal(rangeA.Desc().StartKey, rangeB.Desc().StartKey) {
		log.Errorf("split ranges keys are equal %q!=%q", rangeA.Desc().StartKey, rangeB.Desc().StartKey)
	}

	return rangeA.Desc(), rangeB.Desc(), nil
}
Code Example #21
func createSplitRanges(store *storage.Store) (*proto.RangeDescriptor, *proto.RangeDescriptor, error) {
	args, reply := adminSplitArgs(engine.KeyMin, []byte("b"), 1, store.StoreID())
	if err := store.ExecuteCmd(proto.AdminSplit, args, reply); err != nil {
		return nil, nil, err
	}

	rangeA := store.LookupRange([]byte("a"), nil)
	rangeB := store.LookupRange([]byte("c"), nil)

	if bytes.Equal(rangeA.Desc().StartKey, rangeB.Desc().StartKey) {
		log.Errorf("split ranges keys are equal %q!=%q", rangeA.Desc().StartKey, rangeB.Desc().StartKey)
	}

	return rangeA.Desc(), rangeB.Desc(), nil
}
Code Example #22
func splitTestRange(store *storage.Store, key, splitKey proto.Key, t *testing.T) *storage.Range {
	rng := store.LookupRange(key, key)
	if rng == nil {
		t.Fatalf("couldn't lookup range for key %q", key)
	}
	desc, err := store.NewRangeDescriptor(splitKey, rng.Desc().EndKey, rng.Desc().Replicas)
	if err != nil {
		t.Fatal(err)
	}
	newRng, err := storage.NewRange(desc, store)
	if err != nil {
		t.Fatal(err)
	}
	if err := store.SplitRange(rng, newRng); err != nil {
		t.Fatal(err)
	}
	return newRng
}
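In a test, the helper splits the range containing key at splitKey and hands back the new right-hand range. A hypothetical call, with illustrative keys:

func splitAtM(t *testing.T, store *storage.Store) *storage.Range {
	// Hypothetical: split the range containing "a" at "m"; the returned
	// range covers keys from "m" up to the original range's end key.
	return splitTestRange(store, proto.Key("a"), proto.Key("m"), t)
}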