Example No. 1
// String prints out the current status of the cluster.
func (c *Cluster) String() string {
	storesRangeCounts := make(map[proto.StoreID]int)
	for _, r := range c.ranges {
		for _, storeID := range r.getStoreIDs() {
			storesRangeCounts[storeID]++
		}
	}

	var nodeIDs []int
	for nodeID := range c.nodes {
		nodeIDs = append(nodeIDs, int(nodeID))
	}
	sort.Ints(nodeIDs)

	var buf bytes.Buffer
	buf.WriteString("Node Info:\n")
	for _, nodeID := range nodeIDs {
		n := c.nodes[proto.NodeID(nodeID)]
		buf.WriteString(n.String())
		buf.WriteString("\n")
	}

	var storeIDs []int
	for storeID := range c.stores {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Ints(storeIDs)

	buf.WriteString("Store Info:\n")
	for _, storeID := range storeIDs {
		s := c.stores[proto.StoreID(storeID)]
		buf.WriteString(s.String(storesRangeCounts[proto.StoreID(storeID)]))
		buf.WriteString("\n")
	}

	var rangeIDs []int
	for rangeID := range c.ranges {
		rangeIDs = append(rangeIDs, int(rangeID))
	}
	sort.Ints(rangeIDs)

	buf.WriteString("Range Info:\n")
	for _, rangeID := range rangeIDs {
		r := c.ranges[proto.RangeID(rangeID)]
		buf.WriteString(r.String())
		buf.WriteString("\n")
	}

	return buf.String()
}
Example No. 2
func TestStorePoolGetStoreDetails(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper, g, sp := createTestStorePool(TestTimeUntilStoreDeadOff)
	defer stopper.Stop()
	sg := gossiputil.NewStoreGossiper(g)
	sg.GossipStores(uniqueStore, t)

	if detail := sp.getStoreDetail(proto.StoreID(1)); detail.dead {
		t.Errorf("Present storeDetail came back as dead, expected it to be alive. %+v", detail)
	}

	if detail := sp.getStoreDetail(proto.StoreID(2)); detail.dead {
		t.Errorf("Absent storeDetail came back as dead, expected it to be alive. %+v", detail)
	}
}
Example No. 3
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int) *Cluster {
	rand, seed := randutil.NewPseudoRand()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := storage.NewStorePool(g, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:       stopper,
		clock:         clock,
		rpc:           rpcContext,
		gossip:        g,
		storePool:     storePool,
		allocator:     storage.MakeAllocator(storePool, storage.RebalancingOptions{}),
		storeGossiper: gossiputil.NewStoreGossiper(g),
		nodes:         make(map[proto.NodeID]*Node),
		stores:        make(map[proto.StoreID]*Store),
		ranges:        make(map[proto.RangeID]*Range),
		rand:          rand,
		seed:          seed,
	}

	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}

	// Add a single range and attach it to the first node's first store.
	firstRange := c.addRange()
	firstRange.attachRangeToStore(c.stores[proto.StoreID(0)])
	return c
}
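A short usage sketch (not part of the original source): assuming the surrounding simulation package, a caller could combine createCluster with the Cluster.String method from Example No. 1 roughly as follows.

// runSmallCluster is a hypothetical helper, for illustration only.
func runSmallCluster() {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	// Build a three-node cluster (one store per node) and print its state.
	c := createCluster(stopper, 3)
	fmt.Println(c.String())
}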
Example No. 4
// TestSendRPCRetry verifies that when sendRPC fails on the first address but
// succeeds on the second, the reply from the second address is returned.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill RangeDescriptor with 2 replicas
	var descriptor = proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &proto.NodeDescriptor{
			NodeID: proto.NodeID(i),
			Address: proto.Addr{
				Network: addr.Network(),
				Address: addr.String(),
			},
		}
		if err := g.AddInfo(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}

		descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
			NodeID:  proto.NodeID(i),
			StoreID: proto.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) interface{}, getReply func() interface{}, _ *rpc.Context) ([]interface{}, error) {
		if method == "Node.Scan" {
			// The reply from the first address is discarded, simulating a failure.
			_ = getReply()
			// The reply from the second address succeeds.
			reply := getReply()
			reply.(*proto.ScanResponse).Rows = append([]proto.KeyValue{}, proto.KeyValue{Key: proto.Key("b"), Value: proto.Value{}})
			return []interface{}{reply}, nil
		}
		return nil, util.Errorf("Not expected method %v", method)
	}
	ctx := &DistSenderContext{
		rpcSend: testFn,
		rangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 1)
	sr := call.Reply.(*proto.ScanResponse)
	ds.Send(context.Background(), call)
	if err := sr.GoError(); err != nil {
		t.Fatal(err)
	}
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
Example No. 5
// updateCountString describes the update counts that were recorded by
// storeEventReader. The formatting is appropriate to paste into this test
// as a new expected value.
func (ser *storeEventReader) updateCountString() string {
	var buffer bytes.Buffer
	w := tabwriter.NewWriter(&buffer, 2, 1, 2, ' ', 0)

	var storeIDs sort.IntSlice
	for storeID := range ser.perStoreUpdateCount {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Sort(storeIDs)

	for _, storeID := range storeIDs {
		if countset, ok := ser.perStoreUpdateCount[proto.StoreID(storeID)]; ok {
			fmt.Fprintf(w, "proto.StoreID(%d): {\n", storeID)

			var methodIDs sort.IntSlice
			for methodID := range countset {
				methodIDs = append(methodIDs, int(methodID))
			}
			sort.Sort(methodIDs)

			for _, methodID := range methodIDs {
				method := proto.Method(methodID)
				if count, okCount := countset[method]; okCount {
					fmt.Fprintf(w, "\tproto.%s:\t%d,\n", method, count)
				} else {
					panic("unreachable!")
				}
			}
		} else {
			panic("unreachable!")
		}
		fmt.Fprintf(w, "},\n")
	}
	return buffer.String()
}
Example No. 6
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(nodeID proto.NodeID, inc int64, db *client.DB) (proto.StoreID, error) {
	r, err := db.Inc(keys.StoreIDGenerator, inc)
	if err != nil {
		return 0, util.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
	}
	return proto.StoreID(r.ValueInt() - inc + 1), nil
}
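A hypothetical caller sketch (an assumption, not from the original source): since the comment above says the first ID of a contiguous block is returned, a caller consuming "inc" IDs could expand the block like this.

// assignNewStoreIDs is a hypothetical helper showing how the contiguous block
// returned by allocateStoreIDs might be expanded into individual store IDs.
func assignNewStoreIDs(nodeID proto.NodeID, inc int64, db *client.DB) ([]proto.StoreID, error) {
	first, err := allocateStoreIDs(nodeID, inc, db)
	if err != nil {
		return nil, err
	}
	ids := make([]proto.StoreID, 0, inc)
	for i := int64(0); i < inc; i++ {
		ids = append(ids, first+proto.StoreID(i))
	}
	return ids, nil
}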
Example No. 7
// BootstrapCluster bootstraps multiple stores using the provided engines and
// cluster ID. The first bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unit-test purposes. The caller should close the
// returned client.
func BootstrapCluster(clusterID string, engines []engine.Engine, stopper *stop.Stopper) (*client.DB, error) {
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, ctx.Clock, false, nil, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := proto.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   proto.StoreID(i + 1),
		}

		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})

		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}

		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return nil, err
		}
		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data.  Only do this if this is the
		// first store.
		if i == 0 {
			// TODO(marc): this is better than having storage/ import sql, but still
			// not great. Find a better place to keep those.
			initialValues := sql.GetInitialSystemValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return nil, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return nil, err
		}

		lSender.AddStore(s)

		// Initialize node and store ids.  Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return nil, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return nil, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return ctx.DB, nil
}
Example No. 8
func TestLocalSenderVisitStores(t *testing.T) {
	defer leaktest.AfterTest(t)
	ls := NewLocalSender()
	numStores := 10
	for i := 0; i < numStores; i++ {
		ls.AddStore(&storage.Store{Ident: proto.StoreIdent{StoreID: proto.StoreID(i)}})
	}

	visit := make([]bool, numStores)
	err := ls.VisitStores(func(s *storage.Store) error { visit[s.Ident.StoreID] = true; return nil })
	if err != nil {
		t.Errorf("unexpected error on visit: %s", err.Error())
	}

	for i, visited := range visit {
		if !visited {
			t.Errorf("store %d was not visited", i)
		}
	}

	err = ls.VisitStores(func(s *storage.Store) error { return errors.New("") })
	if err == nil {
		t.Errorf("expected visit error")
	}
}
Example No. 9
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore() {
	idx := len(m.stores)
	var clock *hlc.Clock
	if len(m.clocks) > idx {
		clock = m.clocks[idx]
	} else {
		clock = m.clock
		m.clocks = append(m.clocks, clock)
	}
	var eng engine.Engine
	var needBootstrap bool
	if len(m.engines) > idx {
		eng = m.engines[idx]
	} else {
		eng = engine.NewInMem(proto.Attributes{}, 1<<20)
		m.engines = append(m.engines, eng)
		needBootstrap = true
		// Add an extra refcount to the engine so the underlying rocksdb instances
		// aren't closed when stopping and restarting the stores.
		// These refcounts are removed in Stop().
		if err := eng.Open(); err != nil {
			m.t.Fatal(err)
		}
	}

	stopper := stop.NewStopper()
	ctx := m.makeContext(idx)
	store := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: proto.NodeID(idx + 1)})
	if needBootstrap {
		err := store.Bootstrap(proto.StoreIdent{
			NodeID:  proto.NodeID(idx + 1),
			StoreID: proto.StoreID(idx + 1),
		}, stopper)
		if err != nil {
			m.t.Fatal(err)
		}

		// Bootstrap the initial range on the first store
		if idx == 0 {
			if err := store.BootstrapRange(nil); err != nil {
				m.t.Fatal(err)
			}
		}
	}
	if err := store.Start(stopper); err != nil {
		m.t.Fatal(err)
	}
	store.WaitForInit()
	m.stores = append(m.stores, store)
	if len(m.senders) == idx {
		m.senders = append(m.senders, kv.NewLocalSender())
	}
	m.senders[idx].AddStore(store)
	// Save the store identities for later so we can use them in
	// replication operations even while the store is stopped.
	m.idents = append(m.idents, store.Ident)
	m.stoppers = append(m.stoppers, stopper)
}
Example No. 10
// TestAllocatorRelaxConstraints verifies that attribute constraints
// will be relaxed in order to match nodes lacking required attributes,
// if necessary to find an allocation target.
func TestAllocatorRelaxConstraints(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, _, stopper := createTestStore(t)
	defer stopper.Stop()
	newStoreGossiper(s.Gossip()).gossipStores(multiDCStores, t)

	testCases := []struct {
		required         []string // attribute strings
		existing         []int    // existing store/node ID
		relaxConstraints bool     // allow constraints to be relaxed?
		expID            int      // expected store/node ID on allocate
		expErr           bool
	}{
		// The two stores in the system have attributes:
		//  storeID=1 {"a", "ssd"}
		//  storeID=2 {"b", "ssd"}
		{[]string{"a", "ssd"}, []int{}, true, 1, false},
		{[]string{"a", "ssd"}, []int{1}, true, 2, false},
		{[]string{"a", "ssd"}, []int{1}, false, 0, true},
		{[]string{"a", "ssd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "ssd"}, []int{}, true, 2, false},
		{[]string{"b", "ssd"}, []int{1}, true, 2, false},
		{[]string{"b", "ssd"}, []int{2}, false, 0, true},
		{[]string{"b", "ssd"}, []int{2}, true, 1, false},
		{[]string{"b", "ssd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "hdd"}, []int{}, true, 2, false},
		{[]string{"b", "hdd"}, []int{2}, true, 1, false},
		{[]string{"b", "hdd"}, []int{2}, false, 0, true},
		{[]string{"b", "hdd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "ssd", "gpu"}, []int{}, true, 2, false},
		{[]string{"b", "hdd", "gpu"}, []int{}, true, 2, false},
	}
	for i, test := range testCases {
		var existing []proto.Replica
		for _, id := range test.existing {
			existing = append(existing, proto.Replica{NodeID: proto.NodeID(id), StoreID: proto.StoreID(id)})
		}
		result, err := s.allocator().AllocateTarget(proto.Attributes{Attrs: test.required}, existing, test.relaxConstraints)
		if haveErr := (err != nil); haveErr != test.expErr {
			t.Errorf("%d: expected error %t; got %t: %s", i, test.expErr, haveErr, err)
		} else if err == nil && proto.StoreID(test.expID) != result.StoreID {
			t.Errorf("%d: expected result to have store %d; got %+v", i, test.expID, result)
		}
	}
}
Example No. 11
// OutputEpoch writes the current free capacity for all stores to the
// epochWriter.
func (c *Cluster) OutputEpoch() {
	fmt.Fprintf(c.epochWriter, "%d:\t", c.epoch)

	for _, storeID := range c.storeIDs {
		store := c.stores[proto.StoreID(storeID)]
		capacity := store.getCapacity(len(c.rangeIDsByStore[storeID]))
		fmt.Fprintf(c.epochWriter, "%d/%.0f%%\t", len(c.rangeIDsByStore[storeID]), float64(capacity.Available)/float64(capacity.Capacity)*100)
	}
	fmt.Fprintf(c.epochWriter, "\n")
}
Example No. 12
// OutputEpoch writes the current free capacity for all stores to the
// epochWriter.
func (c *Cluster) OutputEpoch() {
	fmt.Fprintf(c.epochWriter, "%d:\t", c.epoch)

	// TODO(bram): Consider saving this map in the cluster instead of
	// recalculating it each time.
	storesRangeCounts := make(map[proto.StoreID]int)
	for _, r := range c.ranges {
		for _, storeID := range r.getStoreIDs() {
			storesRangeCounts[storeID]++
		}
	}

	for _, storeID := range c.storeIDs {
		store := c.stores[proto.StoreID(storeID)]
		capacity := store.getCapacity(storesRangeCounts[proto.StoreID(storeID)])
		fmt.Fprintf(c.epochWriter, "%.0f%%\t", float64(capacity.Available)/float64(capacity.Capacity)*100)
	}
	fmt.Fprintf(c.epochWriter, "\n")
}
Example No. 13
// StringEpoch creates a string with the current free capacity for all stores.
func (c *Cluster) StringEpoch() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%d:\t", c.epoch)

	// TODO(bram): Consider saving this map in the cluster instead of
	// recalculating it each time.
	storesRangeCounts := make(map[proto.StoreID]int)
	for _, r := range c.ranges {
		for _, storeID := range r.getStoreIDs() {
			storesRangeCounts[storeID]++
		}
	}

	for _, storeID := range c.storeIDs {
		store := c.stores[proto.StoreID(storeID)]
		capacity := store.getCapacity(storesRangeCounts[proto.StoreID(storeID)])
		fmt.Fprintf(&buf, "%.0f%%\t", float64(capacity.Available)/float64(capacity.Capacity)*100)
	}
	return buf.String()
}
Example No. 14
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(nodeID proto.NodeID, inc int64, db *client.KV) (proto.StoreID, error) {
	iReply := &proto.IncrementResponse{}
	if err := db.Call(proto.Increment, &proto.IncrementRequest{
		RequestHeader: proto.RequestHeader{
			Key:  engine.MakeKey(engine.KeyStoreIDGeneratorPrefix, []byte(strconv.Itoa(int(nodeID)))),
			User: storage.UserRoot,
		},
		Increment: inc,
	}, iReply); err != nil {
		return 0, util.Errorf("unable to allocate %d store IDs for node %d: %v", inc, nodeID, err)
	}
	return proto.StoreID(iReply.NewValue - inc + 1), nil
}
Example No. 15
func TestLocalSenderRemoveStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	ls := NewLocalSender()

	storeID := proto.StoreID(89)

	ls.AddStore(&storage.Store{Ident: proto.StoreIdent{StoreID: storeID}})

	ls.RemoveStore(&storage.Store{Ident: proto.StoreIdent{StoreID: storeID}})

	if ls.HasStore(storeID) {
		t.Errorf("expted local sender to remove storeID=%d", storeID)
	}
}
Example No. 16
func TestLocalSenderGetStoreCount(t *testing.T) {
	ls := NewLocalSender()
	if ls.GetStoreCount() != 0 {
		t.Errorf("expected 0 stores in new local sender")
	}

	expectedCount := 10
	for i := 0; i < expectedCount; i++ {
		ls.AddStore(&storage.Store{Ident: proto.StoreIdent{StoreID: proto.StoreID(i)}})
	}
	if count := ls.GetStoreCount(); count != expectedCount {
		t.Errorf("expected store count to be %d but was %d", expectedCount, count)
	}
}
Example No. 17
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success. The call
// will retry indefinitely on retryable errors.
func allocateStoreIDs(nodeID proto.NodeID, inc int64, db *client.DB) (proto.StoreID, error) {
	var id proto.StoreID
	err := retry.WithBackoff(allocRetryOptions, func() (retry.Status, error) {
		r, err := db.Inc(keys.StoreIDGenerator, inc)
		if err != nil {
			status := retry.Break
			if _, ok := err.(util.Retryable); ok {
				status = retry.Continue
			}
			return status, util.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
		}
		id = proto.StoreID(r.ValueInt() - inc + 1)
		return retry.Break, nil
	})
	return id, err
}
Example No. 18
func TestReplicaSetMoveToFront(t *testing.T) {
	defer leaktest.AfterTest(t)
	rs := replicaSlice(nil)
	for i := 0; i < 5; i++ {
		rs = append(rs, replicaInfo{Replica: proto.Replica{StoreID: proto.StoreID(i + 1)}})
	}
	rs.MoveToFront(0)
	exp := []proto.StoreID{1, 2, 3, 4, 5}
	if stores := getStores(rs); !reflect.DeepEqual(stores, exp) {
		t.Errorf("expected order %s, got %s", exp, stores)
	}
	rs.MoveToFront(2)
	exp = []proto.StoreID{3, 1, 2, 4, 5}
	if stores := getStores(rs); !reflect.DeepEqual(stores, exp) {
		t.Errorf("expected order %s, got %s", exp, stores)
	}
	rs.MoveToFront(4)
	exp = []proto.StoreID{5, 3, 1, 2, 4}
	if stores := getStores(rs); !reflect.DeepEqual(stores, exp) {
		t.Errorf("expected order %s, got %s", exp, stores)
	}
}
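For reference, a minimal sketch of the MoveToFront semantics exercised by this test, assuming replicaSlice is a slice of replicaInfo; the real MoveToFront lives alongside replicaSlice in the kv package and may be implemented differently.

// moveToFrontSketch moves the element at index i to the front of the slice,
// shifting the elements before it one position right and preserving their
// relative order. Illustration only.
func (rs replicaSlice) moveToFrontSketch(i int) {
	front := rs[i]
	copy(rs[1:i+1], rs[0:i])
	rs[0] = front
}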
Example No. 19
// eventFeedString describes the event information that was recorded by
// storeEventReader. The formatting is appropriate to paste into this test as
// a new expected value.
func (ser *storeEventReader) eventFeedString() string {
	var buffer bytes.Buffer
	w := tabwriter.NewWriter(&buffer, 2, 1, 2, ' ', 0)

	var storeIDs sort.IntSlice
	for storeID := range ser.perStoreFeeds {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Sort(storeIDs)

	for _, storeID := range storeIDs {
		if feed, ok := ser.perStoreFeeds[proto.StoreID(storeID)]; ok {
			fmt.Fprintf(w, "proto.StoreID(%d): {\n", storeID)
			for _, evt := range feed {
				fmt.Fprintf(w, "\t\"%s\",\n", evt)
			}
			fmt.Fprintf(w, "},\n")
		} else {
			panic("unreachable!")
		}
	}
	return buffer.String()
}
Example No. 20
// getStoreList returns a storeList that contains all active stores that
// contain the required attributes and their associated stats.
// TODO(embark, spencer): consider using a reverse index map from
// Attr->stores, for efficiency. Ensure that entries in this map still
// have an opportunity to be garbage collected.
func (sp *StorePool) getStoreList(required proto.Attributes, deterministic bool) *StoreList {
	sp.mu.RLock()
	defer sp.mu.RUnlock()

	var storeIDs proto.StoreIDSlice
	for storeID := range sp.stores {
		storeIDs = append(storeIDs, storeID)
	}
	// Sort the stores by key if deterministic is requested. This is only for
	// unit testing.
	if deterministic {
		sort.Sort(storeIDs)
	}
	sl := new(StoreList)
	for _, storeID := range storeIDs {
		detail := sp.stores[proto.StoreID(storeID)]
		if !detail.dead && required.IsSubset(*detail.desc.CombinedAttrs()) {
			desc := detail.desc
			sl.add(&desc)
		}
	}
	return sl
}
Example No. 21
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore(t *testing.T) {
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := storage.NewStore(m.clock, eng, m.db, m.gossip, m.transport)
	err := store.Bootstrap(proto.StoreIdent{
		NodeID:  proto.NodeID(len(m.stores) + 1),
		StoreID: proto.StoreID(len(m.stores) + 1),
	})
	if err != nil {
		t.Fatal(err)
	}

	if len(m.stores) == 0 {
		// Bootstrap the initial range on the first store
		if err := store.BootstrapRange(); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(); err != nil {
		t.Fatal(err)
	}
	m.engines = append(m.engines, eng)
	m.stores = append(m.stores, store)
	m.sender.AddStore(store)
}
Example No. 22
// getStoreList returns a storeList that contains all active stores that
// contain the required attributes and their associated stats.
// TODO(embark, spencer): consider using a reverse index map from
// Attr->stores, for efficiency. Ensure that entries in this map still
// have an opportunity to be garbage collected.
func (sp *StorePool) getStoreList(required proto.Attributes, deterministic bool) *StoreList {
	sp.mu.RLock()
	defer sp.mu.RUnlock()

	// TODO(bram): Consider adding the sort interface to proto.StoreID.
	var storeIDs []int
	for storeID := range sp.stores {
		storeIDs = append(storeIDs, int(storeID))
	}
	// Sort the stores by key if deterministic is requested. This is only for
	// unit testing.
	if deterministic {
		sort.Ints(storeIDs)
	}
	sl := new(StoreList)
	for _, storeID := range storeIDs {
		detail := sp.stores[proto.StoreID(storeID)]
		if !detail.dead && required.IsSubset(*detail.desc.CombinedAttrs()) {
			desc := detail.desc
			sl.add(&desc)
		}
	}
	return sl
}
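The TODO above suggests adding the sort interface to proto.StoreID; the newer getStoreList variant in Example No. 20 already sorts a proto.StoreIDSlice. A minimal sketch of what such a type might look like (an assumption, not the actual proto package code):

// StoreIDSlice implements sort.Interface for a slice of StoreIDs.
type StoreIDSlice []StoreID

func (s StoreIDSlice) Len() int           { return len(s) }
func (s StoreIDSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s StoreIDSlice) Less(i, j int) bool { return s[i] < s[j] }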
Example No. 23
func (m *NodeStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStatus
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Desc.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StoreIDs", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.StoreID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.StoreID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.StoreIDs = append(m.StoreIDs, v)
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RangeCount", wireType)
			}
			m.RangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.RangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
			}
			m.StartedAt = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.StartedAt |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
			}
			m.UpdatedAt = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.UpdatedAt |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStatus
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Stats.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeaderRangeCount", wireType)
			}
			m.LeaderRangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.LeaderRangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicatedRangeCount", wireType)
			}
			m.ReplicatedRangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ReplicatedRangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field AvailableRangeCount", wireType)
			}
			m.AvailableRangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.AvailableRangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			var sizeOfWire int
			for {
				sizeOfWire++
				wire >>= 7
				if wire == 0 {
					break
				}
			}
			iNdEx -= sizeOfWire
			skippy, err := skipStatus(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStatus
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	return nil
}
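The generated Unmarshal method above repeats one pattern for every field: base-128 varint decoding, where each byte contributes its low 7 bits and the high bit marks continuation. A standalone sketch of that loop, for illustration only:

// decodeUvarint reads a protobuf varint starting at data[i] and returns the
// decoded value together with the index of the next unread byte.
func decodeUvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if i >= len(data) {
			return 0, i, io.ErrUnexpectedEOF
		}
		b := data[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}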
Example No. 24
// TestStatusSummaries verifies that status summaries are written correctly for
// both the Node and stores within the node.
func TestStatusSummaries(t *testing.T) {
	defer leaktest.AfterTest(t)
	ts := &TestServer{}
	ts.Ctx = NewTestContext()
	ts.StoresPerNode = 3
	if err := ts.Start(); err != nil {
		t.Fatal(err)
	}
	defer ts.Stop()

	// Retrieve the first store from the Node.
	s, err := ts.node.lSender.GetStore(proto.StoreID(1))
	if err != nil {
		t.Fatal(err)
	}

	s.WaitForInit()

	content := "junk"
	leftKey := "a"

	// Write to the range to ensure that the raft machinery is running.
	if err := ts.db.Put(leftKey, content); err != nil {
		t.Fatal(err)
	}

	storeDesc, err := s.Descriptor()
	if err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus := &status.NodeStatus{
		RangeCount:           1,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            0,
		UpdatedAt:            0,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: 1,
			KeyCount:  1,
			ValCount:  1,
		},
	}
	expectedStoreStatus := &storage.StoreStatus{
		Desc:                 *storeDesc,
		NodeID:               1,
		RangeCount:           1,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: 1,
			KeyCount:  1,
			ValCount:  1,
		},
	}

	// Function to force summaries to be written synchronously, including all
	// data currently in the event pipeline. Only one of the stores has
	// replicas, so there are no concerns related to quorum writes; if there
	// were multiple replicas, more care would need to be taken in the initial
	// syncFeed().
	forceWriteStatus := func() {
		if err := ts.node.publishStoreStatuses(); err != nil {
			t.Fatalf("error publishing store statuses: %s", err)
		}

		// Ensure that the event feed has been fully flushed.
		ts.EventFeed().Flush()

		if err := ts.writeSummaries(); err != nil {
			t.Fatalf("error writing summaries: %s", err)
		}
	}

	forceWriteStatus()
	oldNodeStats := compareNodeStatus(t, ts, expectedNodeStatus, 0)
	oldStoreStats := compareStoreStatus(t, ts, s, expectedStoreStatus, 0)

	splitKey := "b"
	rightKey := "c"

	// Write some values left and right of the proposed split key. No
	// particular reason.
	if err := ts.db.Put(leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(rightKey, content); err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           1,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount + 1,
			KeyCount:  oldNodeStats.Stats.KeyCount + 1,
			ValCount:  oldNodeStats.Stats.ValCount + 1,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           1,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount + 1,
			KeyCount:  oldStoreStats.Stats.KeyCount + 1,
			ValCount:  oldStoreStats.Stats.ValCount + 1,
		},
	}

	forceWriteStatus()
	oldNodeStats = compareNodeStatus(t, ts, expectedNodeStatus, 1)
	oldStoreStats = compareStoreStatus(t, ts, s, expectedStoreStatus, 1)

	// Split the range.
	if err := ts.db.AdminSplit(splitKey); err != nil {
		t.Fatal(err)
	}

	// Write on both sides of the split to ensure that the raft machinery
	// is running.
	if err := ts.db.Put(leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(rightKey, content); err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           2,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 2,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount,
			KeyCount:  oldNodeStats.Stats.KeyCount,
			ValCount:  oldNodeStats.Stats.ValCount,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           2,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 2,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount,
			KeyCount:  oldStoreStats.Stats.KeyCount,
			ValCount:  oldStoreStats.Stats.ValCount,
		},
	}
	forceWriteStatus()
	compareNodeStatus(t, ts, expectedNodeStatus, 3)
	compareStoreStatus(t, ts, s, expectedStoreStatus, 3)
}
Example No. 25
func TestNodeStatusMonitor(t *testing.T) {
	defer leaktest.AfterTest(t)
	desc1 := &proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("b"),
	}
	desc2 := &proto.RangeDescriptor{
		RaftID:   2,
		StartKey: proto.Key("b"),
		EndKey:   proto.Key("c"),
	}
	stats := engine.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        2,
		IntentBytes:     1,
		LiveCount:       1,
		KeyCount:        1,
		ValCount:        1,
		IntentCount:     1,
		IntentAge:       1,
		GCBytesAge:      1,
		LastUpdateNanos: 1 * 1E9,
	}

	monitorStopper := stop.NewStopper()
	storeStopper := stop.NewStopper()
	feed := &util.Feed{}
	monitor := NewNodeStatusMonitor()
	sub := feed.Subscribe()
	monitorStopper.RunWorker(func() {
		for event := range sub.Events() {
			storage.ProcessStoreEvent(monitor, event)
			ProcessNodeEvent(monitor, event)
		}
	})

	for i := 0; i < 3; i++ {
		id := proto.StoreID(i + 1)
		eventList := []interface{}{
			&storage.StartStoreEvent{
				StoreID: id,
			},
			&storage.BeginScanRangesEvent{ // Begin scan phase.
				StoreID: id,
			},
			&storage.UpdateRangeEvent{ // Update during scan, expect it to be ignored.
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.RegisterRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Scan:    false, // should lead to this being ignored
			},
			&storage.RegisterRangeEvent{
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Scan:    true, // not ignored
			},
			&storage.UpdateRangeEvent{ // Update during scan after register, should be picked up
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.EndScanRangesEvent{ // End Scan.
				StoreID: id,
			},
			&storage.RegisterRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Scan:    true, // ignored, not in ScanRanges mode
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.SplitRangeEvent{
				StoreID: id,
				Original: storage.UpdateRangeEvent{
					StoreID: id,
					Desc:    desc1,
					Stats:   stats,
					Delta:   stats,
				},
				New: storage.RegisterRangeEvent{
					StoreID: id,
					Desc:    desc2,
					Stats:   stats,
					Scan:    false,
				},
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Delta:   stats,
			},
			&CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
			&CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
			&CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Scan,
			},
		}
		storeStopper.RunWorker(func() {
			for _, event := range eventList {
				feed.Publish(event)
			}
		})
	}

	storeStopper.Stop()
	feed.Close()
	monitorStopper.Stop()

	expectedStats := engine.MVCCStats{
		LiveBytes:       6,
		KeyBytes:        12,
		ValBytes:        12,
		IntentBytes:     6,
		LiveCount:       6,
		KeyCount:        6,
		ValCount:        6,
		IntentCount:     6,
		IntentAge:       6,
		GCBytesAge:      6,
		LastUpdateNanos: 1 * 1E9,
	}

	if a, e := len(monitor.stores), 3; a != e {
		t.Fatalf("unexpected number of stores recorded by monitor; expected %d, got %d", e, a)
	}
	for id, store := range monitor.stores {
		if a, e := store.stats, expectedStats; !reflect.DeepEqual(a, e) {
			t.Errorf("monitored stats for store %d did not match expectation: %v != %v", id, a, e)
		}
		if a, e := store.rangeCount, int64(2); a != e {
			t.Errorf("monitored range count for store %d did not match expectation: %d != %d", id, a, e)
		}
	}

	if a, e := monitor.callCount, int64(6); a != e {
		t.Errorf("monitored stats for node recorded wrong number of ops %d, expected %d", a, e)
	}
	if a, e := monitor.callErrors, int64(3); a != e {
		t.Errorf("monitored stats for node recorded wrong number of errors %d, expected %d", a, e)
	}
}
Example No. 26
// ExampleAllocatorRebalancing models a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
func ExampleAllocatorRebalancing() {
	g := gossip.New(nil, 0, nil)
	alloc := newAllocator(g)
	alloc.randGen = rand.New(rand.NewSource(0))
	alloc.deterministic = true

	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyCapacityPrefix), func(_ string, _ bool) { wg.Done() })

	const generations = 100
	const nodes = 20

	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = proto.StoreID(i)
		testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
		testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].Add(alloc.randGen.Int63n(1 << 20))

	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if there's already one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].Add(alloc.randGen.Int63n(1 << 20))
			}
			key := gossip.MakeCapacityKey(proto.NodeID(j), proto.StoreID(j))
			if err := g.AddInfo(key, testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()

		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
				target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}

		// Output store capacities as three-digit values (000-999) scaled to the fullest store.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}

	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)

	// Output:
	// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 204 000 000 375 000 000 107 000 000 000 000 000 000 000 000 536
	// 942 000 000 463 140 000 000 646 000 288 288 000 442 000 058 647 000 000 316 999
	// 880 000 412 630 365 745 445 565 122 407 380 570 276 000 271 709 000 718 299 999
	// 925 000 667 600 555 975 704 552 272 491 773 890 584 000 407 974 000 930 476 999
	// 990 967 793 579 493 999 698 453 616 608 777 755 709 425 455 984 483 698 267 931
	// 965 999 869 606 635 908 630 585 567 577 818 870 740 621 550 868 805 790 411 913
	// 953 995 990 624 617 947 562 609 670 658 909 952 835 851 641 958 924 999 526 987
	// 999 923 901 571 687 915 636 636 674 685 831 881 847 820 702 905 897 983 509 981
	// 999 884 809 585 691 826 640 572 748 641 754 887 758 848 643 927 865 897 541 956
	// 999 856 891 594 691 745 602 615 766 663 814 834 719 886 733 925 882 911 593 926
	// 999 890 900 653 707 759 642 697 771 732 851 858 748 869 842 953 903 928 655 923
	// 999 924 909 696 748 797 693 689 806 766 841 902 705 897 874 914 913 916 730 892
	// 999 948 892 704 740 821 685 656 859 772 893 911 690 878 824 935 928 941 741 860
	// 999 948 931 697 770 782 697 666 893 761 944 869 658 902 816 925 923 983 742 831
	// 999 878 901 736 750 737 677 647 869 731 930 825 631 880 775 947 949 930 687 810
	// 999 890 910 764 778 757 709 663 849 777 964 837 672 891 814 978 944 946 721 868
	// 985 895 968 806 791 791 720 694 883 819 999 847 652 888 790 995 950 947 692 843
	// 960 903 956 794 815 779 746 706 891 824 958 830 665 886 757 999 931 969 701 861
	// 999 928 954 805 807 822 764 734 910 829 952 827 678 927 785 980 936 962 677 836
	// 999 903 924 800 769 822 776 730 886 815 935 781 668 890 805 948 929 965 676 837
	// 999 926 935 836 782 836 809 756 897 835 937 781 690 894 804 979 951 978 667 832
	// 999 937 936 875 843 872 854 793 908 873 950 808 714 901 860 981 975 962 693 866
	// 988 957 938 898 922 912 916 886 905 912 964 867 764 915 911 992 999 985 776 896
	// 945 959 922 910 937 913 938 944 957 921 993 916 898 957 928 999 976 997 855 957
	// 980 986 944 956 963 920 966 967 999 966 991 956 981 973 955 998 990 954 994 981
	// 956 985 942 945 950 900 933 949 981 969 946 935 963 951 931 999 936 941 972 963
	// 940 999 964 949 941 974 967 937 970 975 965 951 976 968 949 993 944 949 977 964
	// 926 999 973 932 944 952 933 944 963 965 927 940 964 960 938 995 932 935 968 951
	// 907 999 919 957 941 958 934 935 930 941 940 926 966 933 920 973 937 923 938 946
	// 924 999 914 963 976 945 911 936 929 951 930 930 972 935 941 977 932 960 939 958
	// 942 999 950 961 987 942 928 945 938 941 939 936 985 937 969 985 952 958 957 948
	// 956 999 950 947 943 939 949 934 929 935 940 942 943 957 988 974 933 936 938 951
	// 967 990 950 949 964 952 951 922 943 940 954 956 962 946 982 999 945 949 940 954
	// 970 999 952 959 970 955 957 974 937 965 968 947 950 958 947 993 953 938 958 950
	// 945 964 954 963 965 959 967 961 925 978 954 944 968 937 960 999 947 947 961 960
	// 930 957 938 974 956 944 968 930 944 972 930 946 958 974 940 999 961 945 953 947
	// 966 980 954 989 979 960 969 995 961 986 954 980 980 971 968 999 968 977 979 972
	// 963 953 958 986 990 947 973 955 955 983 974 981 961 964 977 999 984 982 966 964
	// 964 968 975 993 999 955 965 958 972 995 978 981 956 966 981 987 978 976 985 966
	// 967 957 954 999 963 940 968 966 941 966 971 969 957 961 949 940 968 963 988 947
	// 951 939 952 980 937 948 964 970 941 965 979 966 941 940 952 938 973 955 999 934
	// 939 958 941 998 942 951 962 942 962 951 972 978 946 935 958 935 950 947 999 953
	// 959 952 938 999 936 957 961 950 937 954 975 971 958 930 938 930 944 939 978 950
	// 957 943 963 999 947 965 953 937 966 953 978 972 963 937 933 945 944 937 979 952
	// 945 951 956 999 926 948 958 923 947 934 951 961 955 941 949 936 945 929 960 947
	// 956 960 975 999 945 977 956 934 954 943 961 956 956 954 960 954 958 929 969 938
	// 947 966 993 999 944 963 942 939 963 935 952 957 968 947 962 946 962 947 959 942
	// 940 961 999 992 935 946 938 932 968 939 957 938 970 949 964 934 948 957 952 939
	// 944 955 999 978 940 932 937 944 957 936 957 945 958 955 947 933 956 948 947 942
	// Total bytes=1003302292, ranges=1899
}
Example No. 27
func (m *LogEntry) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Severity", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Severity |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Time |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ThreadID", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ThreadID |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field File", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + int(stringLen)
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.File = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Line |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + int(stringLen)
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Format = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Args = append(m.Args, LogEntry_Arg{})
			if err := m.Args[len(m.Args)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.NodeID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.NodeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NodeID = &v
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StoreID", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.StoreID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.StoreID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.StoreID = &v
		case 10:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RangeID", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.RangeID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.RangeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RangeID = &v
		case 11:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.Method
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.Method(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Method = &v
		case 12:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append([]byte{}, data[iNdEx:postIndex]...)
			iNdEx = postIndex
		case 13:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stacks", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Stacks = append([]byte{}, data[iNdEx:postIndex]...)
			iNdEx = postIndex
		default:
			var sizeOfWire int
			for {
				sizeOfWire++
				wire >>= 7
				if wire == 0 {
					break
				}
			}
			iNdEx -= sizeOfWire
			skippy, err := skipLog(data[iNdEx:])
			if err != nil {
				return err
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	return nil
}
Example No. 28
func Example_rebalancing() {
	// Model a set of stores in a cluster,
	// randomly adding / removing stores and adding bytes.
	g := gossip.New(nil, 0, nil)
	alloc := newAllocator(g)
	alloc.randGen = rand.New(rand.NewSource(0))
	alloc.deterministic = true

	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })

	const generations = 100
	const nodes = 20

	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = proto.StoreID(i)
		testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
		testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].Add(alloc.randGen.Int63n(1 << 20))

	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if there's already one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].Add(alloc.randGen.Int63n(1 << 20))
			}
			key := gossip.MakeStoreKey(proto.StoreID(j))
			if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()

		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
				target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}

		// Output store capacities as three-digit values (000-999) scaled to the fullest store.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}

	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)

	// Output:
	// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 107 000 000 204 000 000 375 000 000 000 000 000 000 000 000 000 000 536 000
	// 999 310 000 262 872 000 000 208 000 705 000 526 000 000 439 000 000 607 933 000
	// 812 258 000 220 999 673 402 480 000 430 516 374 000 431 318 000 551 714 917 000
	// 582 625 185 334 720 589 647 619 000 300 483 352 279 502 208 665 816 684 999 374
	// 751 617 771 542 738 676 665 525 309 435 612 449 457 616 306 837 993 754 999 445
	// 759 659 828 478 693 622 594 591 349 458 630 538 526 613 462 827 879 787 999 550
	// 861 658 828 559 801 660 681 560 487 529 652 686 642 716 575 999 989 875 989 581
	// 775 647 724 557 779 662 670 494 535 502 681 676 624 695 561 961 999 772 888 592
	// 856 712 753 661 767 658 717 606 529 615 755 699 672 700 576 955 999 755 861 671
	// 882 735 776 685 844 643 740 578 610 688 787 741 661 767 587 999 955 809 803 731
	// 958 716 789 719 861 689 821 608 634 724 800 782 694 799 619 994 999 851 812 818
	// 949 726 788 664 873 633 749 599 680 714 790 728 663 842 628 999 978 816 823 791
	// 923 698 792 712 816 605 774 651 661 728 802 718 670 819 714 999 966 801 829 791
	// 962 779 847 737 900 675 811 691 745 778 835 812 680 894 790 999 989 872 923 799
	// 967 812 826 772 891 685 828 683 761 808 864 820 643 873 783 969 999 873 910 781
	// 923 813 837 739 867 672 792 664 773 772 879 803 610 845 740 957 999 867 912 732
	// 952 803 866 759 881 655 765 668 803 772 929 762 601 844 751 973 999 892 864 731
	// 970 777 867 800 859 639 774 662 787 760 906 751 595 854 732 989 999 853 859 762
	// 943 776 872 787 861 686 780 663 789 793 926 784 612 832 733 999 968 868 827 767
	// 914 801 912 802 878 704 800 685 818 808 939 759 627 844 717 999 976 872 828 757
	// 935 806 911 797 887 710 798 711 826 824 938 775 614 870 716 999 986 886 803 767
	// 991 851 898 856 872 795 828 782 826 852 963 797 710 868 775 994 999 923 896 794
	// 999 924 866 877 884 883 886 836 846 869 953 851 762 887 858 985 949 900 917 836
	// 999 910 887 878 897 890 906 868 906 903 983 947 801 895 913 976 924 890 904 898
	// 955 884 888 916 886 879 901 872 898 883 999 874 829 888 892 937 918 889 891 862
	// 974 952 957 990 950 976 945 946 980 961 999 975 942 926 957 994 965 946 960 960
	// 949 929 952 999 929 961 943 946 993 918 984 961 952 919 953 950 952 941 949 934
	// 907 999 916 935 903 903 909 907 960 939 973 912 901 885 916 910 941 911 906 913
	// 939 999 948 948 945 962 951 954 952 964 996 942 975 962 962 956 971 969 975 969
	// 940 974 964 947 971 975 949 954 953 970 992 971 981 973 948 962 999 969 978 975
	// 950 971 953 938 962 967 930 964 953 978 999 945 974 972 951 950 998 951 949 962
	// 934 946 943 936 942 949 929 956 928 970 989 944 945 923 987 927 999 942 931 944
	// 939 957 942 958 951 970 937 946 930 950 940 959 963 937 973 943 999 931 949 940
	// 933 935 945 929 933 960 937 935 919 918 930 931 950 924 969 935 999 943 949 926
	// 959 941 948 952 948 957 936 937 943 930 955 962 953 949 980 948 999 934 980 942
	// 950 973 954 962 949 964 935 949 925 936 951 962 979 962 999 942 990 948 969 959
	// 937 993 958 949 960 960 942 954 969 950 951 952 974 970 999 927 979 964 975 944
	// 981 986 971 968 964 984 954 959 985 979 966 963 994 963 999 970 991 971 988 965
	// 967 997 961 957 959 985 956 940 955 955 957 955 970 952 979 964 999 951 960 968
	// 937 969 931 950 945 954 932 925 954 946 944 926 955 938 957 949 999 934 947 938
	// 958 967 954 955 971 973 946 934 979 947 944 958 954 954 960 948 999 936 960 951
	// 950 948 940 958 937 955 928 927 953 923 935 939 934 921 934 934 999 922 940 938
	// 960 960 929 962 955 955 926 935 957 928 939 941 938 926 941 924 999 923 957 942
	// 979 958 947 987 980 972 945 943 984 939 951 943 944 946 942 942 999 928 970 943
	// 981 941 931 961 969 962 927 935 985 925 964 945 946 939 946 938 999 933 964 928
	// 980 944 929 970 973 955 942 937 977 920 955 929 937 946 935 933 999 947 956 926
	// 980 948 926 981 938 939 936 936 963 949 965 935 943 946 933 933 999 947 955 943
	// 968 959 945 941 929 926 924 941 970 951 959 941 924 952 931 943 999 941 951 950
	// 961 946 930 923 933 932 953 937 954 940 964 944 931 952 939 935 999 936 945 948
	// Total bytes=996294324, ranges=1897
}
Example No. 29
// TestMultiStoreEventFeed verifies that events on multiple stores are properly
// received by a single event reader.
func TestMultiStoreEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)
	t.Skip("disabled until #1531 is fixed")

	// Create a multiTestContext which publishes all store events to the given
	// feed.
	feed := &util.Feed{}
	mtc := &multiTestContext{
		feed: feed,
	}

	// Start reading events from the feed before starting the stores.
	ser := &storeEventReader{
		recordUpdateDetail: false,
	}
	readStopper := stop.NewStopper()
	sub := feed.Subscribe()
	readStopper.RunWorker(func() {
		ser.readEvents(sub)
	})

	mtc.Start(t, 3)
	defer mtc.Stop()

	// Replicate the default range.
	raftID := proto.RaftID(1)
	mtc.replicateRange(raftID, 0, 1, 2)

	// Add some data in a transaction
	err := mtc.db.Txn(func(txn *client.Txn) error {
		b := &client.Batch{}
		b.Put("a", "asdf")
		b.Put("c", "jkl;")
		return txn.Commit(b)
	})
	if err != nil {
		t.Fatalf("error putting data to db: %s", err)
	}

	// AdminSplit in between the two ranges.
	if err := mtc.db.AdminSplit("b"); err != nil {
		t.Fatalf("error splitting initial: %s", err)
	}

	// AdminSplit an empty range at the end of the second range.
	if err := mtc.db.AdminSplit("z"); err != nil {
		t.Fatalf("error splitting second range: %s", err)
	}

	// AdminMerge the empty range back into the second range.
	if err := mtc.db.AdminMerge("c"); err != nil {
		t.Fatalf("error merging final range: %s", err)
	}

	// Add an additional put through the system and wait for all
	// replicas to receive it.
	if _, err := mtc.db.Inc("aa", 5); err != nil {
		t.Fatalf("error putting data to db: %s", err)
	}
	util.SucceedsWithin(t, time.Second, func() error {
		for _, eng := range mtc.engines {
			val, _, err := engine.MVCCGet(eng, proto.Key("aa"), mtc.clock.Now(), true, nil)
			if err != nil {
				return err
			}
			if a, e := mustGetInteger(val), int64(5); a != e {
				return util.Errorf("expected aa = %d, got %d", e, a)
			}
		}
		return nil
	})

	// Close feed and wait for reader to receive all events.
	feed.Close()
	readStopper.Stop()

	// Compare events to expected values.
	expected := map[proto.StoreID][]string{
		proto.StoreID(1): {
			"StartStore",
			"BeginScanRanges",
			"RegisterRange scan=true, rid=1, live=.*",
			"EndScanRanges",
			"SplitRange origId=1, newId=2, origKey=336, newKey=15",
			"SplitRange origId=2, newId=3, origKey=15, newKey=0",
			"MergeRange rid=2, subId=3, key=15, subKey=0",
		},
		proto.StoreID(2): {
			"StartStore",
			"BeginScanRanges",
			"EndScanRanges",
			"RegisterRange scan=false, rid=1, live=.*",
			"SplitRange origId=1, newId=2, origKey=336, newKey=15",
			"SplitRange origId=2, newId=3, origKey=15, newKey=0",
			"MergeRange rid=2, subId=3, key=15, subKey=0",
		},
		proto.StoreID(3): {
			"StartStore",
			"BeginScanRanges",
			"EndScanRanges",
			"RegisterRange scan=false, rid=1, live=.*",
			"SplitRange origId=1, newId=2, origKey=336, newKey=15",
			"SplitRange origId=2, newId=3, origKey=15, newKey=0",
			"MergeRange rid=2, subId=3, key=15, subKey=0",
		},
	}
	if a, e := ser.perStoreFeeds, expected; !checkMatch(e, a) {
		t.Errorf("event feed did not match expected value. Actual values have been printed to compare with above expectation.\n")
		t.Logf("Event feed information:\n%s", ser.eventFeedString())
	}

	// Expected count of update events on a per-method basis.
	expectedUpdateCount := map[proto.StoreID]map[proto.Method]int{
		proto.StoreID(1): {
			proto.Put:                 18,
			proto.ConditionalPut:      7,
			proto.Increment:           2,
			proto.Delete:              2,
			proto.EndTransaction:      6,
			proto.InternalLeaderLease: 3,
		},
		proto.StoreID(2): {
			proto.Put:                 16,
			proto.ConditionalPut:      6,
			proto.Increment:           2,
			proto.Delete:              2,
			proto.EndTransaction:      5,
			proto.InternalLeaderLease: 2,
		},
		proto.StoreID(3): {
			proto.Put:                 14,
			proto.ConditionalPut:      5,
			proto.Increment:           2,
			proto.Delete:              2,
			proto.EndTransaction:      4,
			proto.InternalLeaderLease: 2,
		},
	}
	if a, e := ser.perStoreUpdateCount, expectedUpdateCount; !reflect.DeepEqual(a, e) {
		t.Errorf("update counts did not match expected value. Actual values have been printed to compare with above expectation.\n")
		t.Logf("Update count information:\n%s", ser.updateCountString())
	}
}
Example No. 30
func testContext() context.Context {
	ctx := context.Background()
	return Add(ctx, NodeID, proto.NodeID(1), StoreID, proto.StoreID(2), RangeID, proto.RangeID(3), Method, proto.Get, Key, proto.Key("key"))
}