Example #1
0
func getLatestConfig(s *server.TestServer, expected int) (cfg *config.SystemConfig, err error) {
	err = util.IsTrueWithin(func() bool {
		cfg = s.Gossip().GetSystemConfig()
		return cfg != nil && len(cfg.Values) == expected
	}, 500*time.Millisecond)
	return
}
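
A minimal usage sketch for the helper above, assuming a started *server.TestServer s and a testing.T t in scope; the expected value count of 4 is an illustrative assumption, not part of the original example.

	// Hypothetical caller: block until gossip has propagated the expected
	// number of system-config values, then inspect the result.
	cfg, err := getLatestConfig(s, 4)
	if err != nil {
		t.Fatalf("system config never reached 4 values: %s", err)
	}
	t.Logf("got system config with %d values", len(cfg.Values))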
Example #2
0
func getLatestConfig(s *server.TestServer, expected int) (cfg *config.SystemConfig, err error) {
	err = util.IsTrueWithin(func() bool {
		var err2 error
		cfg, err2 = s.Gossip().GetSystemConfig()
		if err2 != nil {
			return false
		}
		return len(cfg.Values) == expected
	}, 500*time.Millisecond)
	return
}
Example #3
0
func waitForConfigChange(t *testing.T, s *server.TestServer) (*config.SystemConfig, error) {
	var foundDesc sql.DatabaseDescriptor
	var cfg *config.SystemConfig
	// Capture the error first; returning cfg in the same statement as the
	// IsTrueWithin call risks reading cfg before the closure assigns it.
	err := util.IsTrueWithin(func() bool {
		if cfg = s.Gossip().GetSystemConfig(); cfg != nil {
			if val := cfg.GetValue(configDescKey); val != nil {
				if err := val.GetProto(&foundDesc); err != nil {
					t.Fatal(err)
				}
				return foundDesc.ID == configID
			}
		}

		return false
	}, 10*time.Second)
	return cfg, err
}
Example #4
0
func waitForConfigChange(t *testing.T, s *server.TestServer) (cfg *config.SystemConfig, err error) {
	var foundDesc sql.DatabaseDescriptor
	err = util.IsTrueWithin(func() bool {
		cfg = s.Gossip().GetSystemConfig()
		if cfg == nil {
			return false
		}
		raw, ok := cfg.GetValue(configDescKey)
		if !ok {
			return false
		}
		if err2 := proto.Unmarshal(raw, &foundDesc); err2 != nil {
			t.Fatalf("could not unmarshal raw value: %s", err2)
			return false
		}
		return foundDesc.ID == configID
	}, 10*time.Second)
	return
}
Example #5
0
func waitForConfigChange(t *testing.T, s *server.TestServer) config.SystemConfig {
	var foundDesc sqlbase.Descriptor
	var cfg config.SystemConfig
	util.SucceedsSoon(t, func() error {
		var ok bool
		if cfg, ok = s.Gossip().GetSystemConfig(); ok {
			if val := cfg.GetValue(configDescKey); val != nil {
				if err := val.GetProto(&foundDesc); err != nil {
					t.Fatal(err)
				}
				if id := foundDesc.GetDatabase().GetID(); id != configID {
					return errors.Errorf("expected database id %d; got %d", configID, id)
				}
				return nil
			}
		}
		return errors.Errorf("got nil system config")
	})
	return cfg
}
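
A brief usage sketch for the final variant above, assuming the same test fixtures; the follow-up assertion is an illustrative assumption. Note the design shift from IsTrueWithin to SucceedsSoon: the closure now returns a descriptive error instead of a bare bool, so a timeout reports why the condition never held.

	// Hypothetical caller: wait for the descriptor to appear in the gossiped
	// system config, then read it back.
	cfg := waitForConfigChange(t, s)
	if val := cfg.GetValue(configDescKey); val == nil {
		t.Fatal("descriptor missing from system config after wait")
	}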
Example #6
0
// checkPGWireMetrics returns the server's pgwire bytesIn/bytesOut and an error if the
// bytesIn/bytesOut don't satisfy the given minimums and maximums.
func checkPGWireMetrics(s *server.TestServer, minBytesIn, minBytesOut, maxBytesIn, maxBytesOut int64) (int64, int64, error) {
	nid := s.Gossip().GetNodeID().String()
	if err := s.WriteSummaries(); err != nil {
		return -1, -1, err
	}

	bytesIn := s.MustGetCounter("cr.node.pgwire.bytesin." + nid)
	bytesOut := s.MustGetCounter("cr.node.pgwire.bytesout." + nid)
	if a, min := bytesIn, minBytesIn; a < min {
		return bytesIn, bytesOut, util.Errorf("bytesin %d < expected min %d", a, min)
	}
	if a, min := bytesOut, minBytesOut; a < min {
		return bytesIn, bytesOut, util.Errorf("bytesout %d < expected min %d", a, min)
	}
	if a, max := bytesIn, maxBytesIn; a > max {
		return bytesIn, bytesOut, util.Errorf("bytesin %d > expected max %d", a, max)
	}
	if a, max := bytesOut, maxBytesOut; a > max {
		return bytesIn, bytesOut, util.Errorf("bytesout %d > expected max %d", a, max)
	}
	return bytesIn, bytesOut, nil
}
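
A hedged usage sketch for checkPGWireMetrics; the minimum of 1 byte and the 64 KiB ceilings are illustrative assumptions, not values from the original test.

	// Hypothetical caller: require at least one byte in each direction and
	// cap both directions at a generous upper bound.
	if bytesIn, bytesOut, err := checkPGWireMetrics(s, 1, 1, 64<<10, 64<<10); err != nil {
		t.Errorf("pgwire metrics out of bounds (in=%d, out=%d): %s", bytesIn, bytesOut, err)
	}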
Example #7
0
// TestRequestToUninitializedRange tests the behavior when a request
// is sent to a node which should be a replica of the correct range
// but has not yet received its initial snapshot. This would
// previously panic due to a malformed error response from the server,
// as seen in https://github.com/cockroachdb/cockroach/issues/6027.
//
// Prior to the other changes in the commit that introduced it, this
// test would reliably trigger the panic from #6027. However, it
// relies on some hacky tricks to both trigger the panic and shut down
// cleanly. If this test needs a lot of maintenance in the future we
// should be willing to get rid of it.
func TestRequestToUninitializedRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.TestServer{StoresPerNode: 2}
	if err := s.Start(); err != nil {
		t.Fatalf("Could not start server: %v", err)
	}
	defer s.Stop()

	// Choose a range ID that is much larger than any that would be
	// created by initial splits.
	const rangeID = roachpb.RangeID(1000)

	// Set up a range with replicas on two stores of the same node. This
	// ensures that the DistSender will consider both replicas healthy
	// and will try to talk to both (so we can get a non-retryable error
	// from the second store).
	replica1 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   1,
		ReplicaID: 1,
	}
	replica2 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   2,
		ReplicaID: 2,
	}

	// HACK: remove the second store from the node to generate a
	// non-retryable error when we try to talk to it.
	store2, err := s.Stores().GetStore(2)
	if err != nil {
		t.Fatal(err)
	}
	s.Stores().RemoveStore(store2)

	// Create the uninitialized range by sending an isolated raft
	// message to the first store.
	conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}
	raftClient := storage.NewMultiRaftClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := raftClient.RaftMessage(ctx)
	if err != nil {
		t.Fatal(err)
	}
	msg := storage.RaftMessageRequest{
		GroupID:     rangeID,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			To:   1,
		},
	}
	if err := stream.Send(&msg); err != nil {
		t.Fatal(err)
	}

	// Make sure the replica was created.
	store1, err := s.Stores().GetStore(1)
	if err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		if replica, err := store1.GetReplica(rangeID); err != nil {
			return util.Errorf("failed to look up replica: %s", err)
		} else if replica.IsInitialized() {
			return util.Errorf("expected replica to be uninitialized")
		}
		return nil
	})

	// Create our own DistSender so we can force some requests to the
	// bogus range. The DistSender needs to be in scope for its own
	// MockRangeDescriptorDB closure.
	var sender *kv.DistSender
	sender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:      s.Clock(),
		RPCContext: s.RPCContext(),
		RangeDescriptorDB: kv.MockRangeDescriptorDB(
			func(key roachpb.RKey, considerIntents, useReverseScan bool,
			) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
				if key.Equal(roachpb.RKeyMin) {
					// Pass through requests for the first range to the real sender.
					desc, err := sender.FirstRange()
					if err != nil {
						return nil, nil, roachpb.NewError(err)
					}
					return []roachpb.RangeDescriptor{*desc}, nil, nil
				}
				return []roachpb.RangeDescriptor{{
					RangeID:  rangeID,
					StartKey: roachpb.RKey(keys.Meta2Prefix),
					EndKey:   roachpb.RKeyMax,
					Replicas: []roachpb.ReplicaDescriptor{replica1, replica2},
				}}, nil, nil
			}),
	}, s.Gossip())
	// Only inconsistent reads triggered the panic in #6027.
	hdr := roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}
	req := roachpb.NewGet(roachpb.Key("asdf"))
	// Repeat the test a few times: due to the randomization between the
	// two replicas, each attempt only had a 50% chance of triggering
	// the panic.
	for i := 0; i < 5; i++ {
		_, pErr := client.SendWrappedWith(sender, context.Background(), hdr, req)
		// Each attempt fails with "store 2 not found" because that is the
		// non-retryable error.
		if !testutils.IsPError(pErr, "store 2 not found") {
			t.Fatal(pErr)
		}
	}
}