Example #1
// checkTreeNode compares the node cached in the treeContext against its
// expected value. It also makes sure that the node is marked as dirty in the
// cache. If an actual value is passed in, it is compared with the value in
// the cache as well.
func checkTreeNode(t *testing.T, tc *treeContext, testNumber int, name string, key roachpb.RKey, expected, actual *roachpb.RangeTreeNode) {
	if expected != nil {
		// Is the value correct?
		cached, err := tc.getNode(expected.Key)
		if err != nil {
			t.Fatal(util.ErrorfSkipFrames(1, "%d: Could not get node %s: %s", testNumber, expected.Key, err))
		}
		if !reflect.DeepEqual(cached, expected) {
			t.Error(util.ErrorfSkipFrames(1, "%d: Cached %s node does not match the expected node.\nExpected: %+v\nCached: %+v", testNumber, name, expected, cached))
		}

		// Is there a returned value to match against the cached one?
		if actual != nil {
			if !reflect.DeepEqual(actual, cached) {
				t.Error(util.ErrorfSkipFrames(1, "%d: Cached %s node is not the same as the actual.\nExpected: %+v\nActual: %+v", testNumber, name, cached, actual))
			}
		}

		// Is the node marked as dirty?
		if !tc.nodes[string(expected.Key)].dirty {
			t.Error(util.ErrorfSkipFrames(1, "%d: Expected %s node to be dirty", testNumber, name))
		}
	} else {
		if cached := tc.nodes[string(key)].node; cached != nil {
			t.Error(util.ErrorfSkipFrames(1, "%d: Expected nil for %s node, got a cached value of: %+v", testNumber, name, cached))
		}
	}
}
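Every example in this listing relies on util.ErrorfSkipFrames from CockroachDB's util package, which formats an error and prefixes it with the file and line of a caller a given number of frames up the stack, so a failure inside a test helper is reported at the call site of the helper rather than inside it. The original implementation is not reproduced here; the following is only a minimal sketch of such a function, assuming it is built on runtime.Caller (the name errorfSkipFramesSketch and the exact message format are illustrative, not CockroachDB's):

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// errorfSkipFramesSketch is a hypothetical stand-in for util.ErrorfSkipFrames.
// It formats an error prefixed with the file:line found skip+1 frames above
// this function, so a helper that passes skip=1 reports its caller's location.
func errorfSkipFramesSketch(skip int, format string, args ...interface{}) error {
	file, line := "???", 0
	if _, f, l, ok := runtime.Caller(skip + 1); ok {
		file, line = filepath.Base(f), l
	}
	return fmt.Errorf("%s:%d: %s", file, line, fmt.Sprintf(format, args...))
}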
Example #2
// verifyBookie ensures that the number of reservations, the number of reserved
// bytes, and the length of the expirationQueue are all correct.
func verifyBookie(t *testing.T, b *bookie, reservations, queueLen int, reservedBytes int64) {
	if e, a := reservedBytes, b.ReservedBytes(); e != a {
		t.Error(util.ErrorfSkipFrames(1, "expected total bytes reserved to be %d, got %d", e, a))
	}
	if e, a := reservations, b.Outstanding(); e != a {
		t.Error(util.ErrorfSkipFrames(1, "expected total reservations to be %d, got %d", e, a))
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if e, a := queueLen, len(b.mu.queue); e != a {
		t.Error(util.ErrorfSkipFrames(1, "expected total queue length to be %d, got %d", e, a))
	}
}
Example #3
// checkNode checks all the endpoints of the status server hosted by node and
// requests info for the node with otherNodeID. That node can be another node,
// the node itself, or "local".
func checkNode(t *testing.T, c cluster.Cluster, i int, nodeID, otherNodeID, expectedNodeID string) {
	var detail details
	if err := getJSON(c.URL(i), "/_status/details/"+otherNodeID, &detail); err != nil {
		t.Fatal(util.ErrorfSkipFrames(1, "unable to parse details - %s", err))
	}
	if actualNodeID := detail.NodeID.String(); actualNodeID != expectedNodeID {
		t.Fatal(util.ErrorfSkipFrames(1, "%s calling %s: node ids don't match - expected %s, actual %s", nodeID, otherNodeID, expectedNodeID, actualNodeID))
	}

	get(t, c.URL(i), fmt.Sprintf("/_status/gossip/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/logfiles/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/logs/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/stacks/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/nodes/%s", otherNodeID))
}
Example #4
// checkNode checks all the endpoints of the status server hosted by node and
// requests info for the node with otherNodeID. That node can be another node,
// the node itself, or "local".
func checkNode(t *testing.T, client *http.Client, node *localcluster.Container, nodeID, otherNodeID, expectedNodeID string) {
	body := get(t, client, node, "/_status/details/"+otherNodeID)
	var detail details
	if err := json.Unmarshal(body, &detail); err != nil {
		t.Fatal(util.ErrorfSkipFrames(1, "unable to parse details - %s", err))
	}
	if actualNodeID := detail.NodeID.String(); actualNodeID != expectedNodeID {
		t.Fatal(util.ErrorfSkipFrames(1, "%s calling %s: node ids don't match - expected %s, actual %s", nodeID, otherNodeID, expectedNodeID, actualNodeID))
	}

	get(t, client, node, fmt.Sprintf("/_status/gossip/%s", otherNodeID))
	get(t, client, node, fmt.Sprintf("/_status/logfiles/%s", otherNodeID))
	get(t, client, node, fmt.Sprintf("/_status/logs/%s", otherNodeID))
	get(t, client, node, fmt.Sprintf("/_status/stacks/%s", otherNodeID))
	get(t, client, node, fmt.Sprintf("/_status/nodes/%s", otherNodeID))
}
Example #5
func TestFreeze(t *testing.T) {
	defer leaktest.AfterTest(t)()

	c := newCLITest()
	defer c.stop()

	assertOutput := func(msg string) {
		if !strings.HasSuffix(strings.TrimSpace(msg), "ok") {
			t.Fatal(util.ErrorfSkipFrames(1, "expected trailing 'ok':\n%s", msg))
		}
	}

	{
		out, err := c.RunWithCapture("freeze-cluster")
		if err != nil {
			t.Fatal(err)
		}
		assertOutput(out)
	}
	{
		out, err := c.RunWithCapture("freeze-cluster --undo")
		if err != nil {
			t.Fatal(err)
		}
		assertOutput(out)
	}
}
Example #6
func getGauge(t *testing.T, s *storage.Store, key string) int64 {
	gauge := s.Registry().GetGauge(key)
	if gauge == nil {
		t.Fatal(util.ErrorfSkipFrames(1, "store did not contain gauge %s", key))
	}
	return gauge.Value()
}
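Several examples below (the verifyStats helpers) call a checkGauge helper that is not included in this listing. Assuming it mirrors the checkCounter helper shown later and wraps getGauge above, a plausible sketch would be:

// checkGauge is a hypothetical companion to getGauge, modeled on the
// checkCounter helper from this listing; it is a sketch, not necessarily the
// original implementation.
func checkGauge(t *testing.T, s *storage.Store, key string, e int64) {
	if a := getGauge(t, s, key); a != e {
		t.Error(util.ErrorfSkipFrames(1, "%s for store: actual %d != expected %d", key, a, e))
	}
}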
Example #7
func getCounter(t *testing.T, s *storage.Store, key string) int64 {
	counter := s.Registry().GetCounter(key)
	if counter == nil {
		t.Fatal(util.ErrorfSkipFrames(1, "store did not contain counter %s", key))
	}
	return counter.Count()
}
Example #8
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil (no error) for every
// node, retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/gossip/local", &infoStatus); err != nil {
				return err
			}
			if err := f(infoStatus.Infos); err != nil {
				return util.Errorf("node %d: %s", i, err)
			}
		}

		return nil
	})
	if err != nil {
		t.Fatal(util.ErrorfSkipFrames(1, "condition failed to evaluate within %s: %s", d, err))
	}
}
Example #9
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil (no error) for every
// node, retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		for i := 0; i < c.NumNodes(); i++ {
			var m map[string]interface{}
			if err := getJSON(c.URL(i), "/_status/gossip/local", &m); err != nil {
				return err
			}
			infos, ok := m["infos"].(map[string]interface{})
			if !ok {
				return errors.New("no infos yet")
			}
			if err := f(infos); err != nil {
				return util.Errorf("node %d: %s", i, err)
			}
		}

		return nil
	})
	if err != nil {
		t.Fatal(util.ErrorfSkipFrames(1, "condition failed to evaluate within %s: %s", d, err))
	}
}
Example #10
// checkNode checks all the endpoints of the status server hosted by node and
// requests info for the node with otherNodeID. That node can be another node,
// the node itself, or "local".
func checkNode(t *testing.T, c cluster.Cluster, i int, nodeID, otherNodeID, expectedNodeID roachpb.NodeID) {
	urlIDs := []string{otherNodeID.String()}
	if nodeID == otherNodeID {
		urlIDs = append(urlIDs, "local")
	}
	var details server.DetailsResponse
	for _, urlID := range urlIDs {
		if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/"+urlID, &details); err != nil {
			t.Fatal(util.ErrorfSkipFrames(1, "unable to parse details - %s", err))
		}
		if details.NodeID != expectedNodeID {
			t.Fatal(util.ErrorfSkipFrames(1, "%d calling %s: node ids don't match - expected %d, actual %d", nodeID, urlID, expectedNodeID, details.NodeID))
		}

		get(t, c.URL(i), fmt.Sprintf("/_status/gossip/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/nodes/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/logfiles/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/logs/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/stacks/%s", urlID))
	}
}
Example #11
func verifyStats(t *testing.T, mtc *multiTestContext, storeIdx int) {
	// Get the current store at storeIdx.
	s := mtc.stores[storeIdx]
	// Stop the store at the given index, while keeping the reference to the
	// store object. Method ComputeMVCCStatsTest() still works on a stopped
	// store (it needs only the engine, which is still open), and the most
	// recent stats are still available on the stopped store object; however, no
	// further information can be committed to the store while it is stopped,
	// preventing any races during verification.
	mtc.stopStore(storeIdx)

	// Compute real total MVCC statistics from store.
	realStats, err := s.ComputeMVCCStatsTest()
	if err != nil {
		t.Fatal(err)
	}

	// Sanity regression check for bug #4624: ensure intent count is zero.
	if a := realStats.IntentCount; a != 0 {
		t.Fatalf("Expected intent count to be zero, was %d", a)
	}

	// Sanity check: LiveBytes is not zero (ensures we don't have
	// zeroed out structures.)
	if liveBytes := getGauge(t, s, "livebytes"); liveBytes == 0 {
		t.Fatal("Expected livebytes to be non-zero, was zero")
	}

	// Ensure that real MVCC stats match computed stats.
	checkGauge(t, s, "livebytes", realStats.LiveBytes)
	checkGauge(t, s, "keybytes", realStats.KeyBytes)
	checkGauge(t, s, "valbytes", realStats.ValBytes)
	checkGauge(t, s, "intentbytes", realStats.IntentBytes)
	checkGauge(t, s, "livecount", realStats.LiveCount)
	checkGauge(t, s, "keycount", realStats.KeyCount)
	checkGauge(t, s, "valcount", realStats.ValCount)
	checkGauge(t, s, "intentcount", realStats.IntentCount)
	checkGauge(t, s, "sysbytes", realStats.SysBytes)
	checkGauge(t, s, "syscount", realStats.SysCount)
	// "Ages" will be different depending on how much time has passed. Even with
	// a manual clock, this can be an issue in tests. Therefore, we do not
	// verify them in this test.

	if t.Failed() {
		t.Log(util.ErrorfSkipFrames(1, "verifyStats failed, aborting test."))
		t.FailNow()
	}

	// Restart the store at the provided index.
	mtc.restartStore(storeIdx)
}
Example #12
func verifyStats(t *testing.T, s *storage.Store) {
	// Compute real total MVCC statistics from store.
	realStats, err := s.ComputeMVCCStatsTest()
	if err != nil {
		t.Fatal(err)
	}

	// Sanity regression check for bug #4624: ensure intent count is zero.
	if a := realStats.IntentCount; a != 0 {
		t.Fatalf("Expected intent count to be zero, was %d", a)
	}

	// Sanity check: LiveBytes is not zero (ensures we don't have
	// zeroed out structures.)
	if liveBytes := getGauge(t, s, "livebytes"); liveBytes == 0 {
		t.Fatal("Expected livebytes to be non-nero, was zero")
	}

	// Ensure that real MVCC stats match computed stats.
	checkGauge(t, s, "livebytes", realStats.LiveBytes)
	checkGauge(t, s, "keybytes", realStats.KeyBytes)
	checkGauge(t, s, "valbytes", realStats.ValBytes)
	checkGauge(t, s, "intentbytes", realStats.IntentBytes)
	checkGauge(t, s, "livecount", realStats.LiveCount)
	checkGauge(t, s, "keycount", realStats.KeyCount)
	checkGauge(t, s, "valcount", realStats.ValCount)
	checkGauge(t, s, "intentcount", realStats.IntentCount)
	// "Ages" will be different depending on how much time has passed. Even with
	// a manual clock, this can be an issue in tests. Therefore, we do not
	// verify them in this test.

	if t.Failed() {
		t.Log(util.ErrorfSkipFrames(1, "verifyStats failed, aborting test."))
		t.FailNow()
	}
}
Example #13
func checkCounter(t *testing.T, s *storage.Store, key string, e int64) {
	if a := getCounter(t, s, key); a != e {
		t.Error(util.ErrorfSkipFrames(1, "%s for store: actual %d != expected %d", key, a, e))
	}
}
Example #14
// ScanIndex scans the given column index of the given row into dst.
func (rs *resultScanner) ScanIndex(row sql.ResultRow, index int, dst interface{}) error {
	src := row.Values[index]

	switch d := dst.(type) {
	case *string:
		if d == nil {
			return util.ErrorfSkipFrames(1, "nil destination pointer passed in")
		}
		s, ok := src.(parser.DString)
		if !ok {
			return util.ErrorfSkipFrames(1, "source type assertion failed")
		}
		*d = string(s)

	case *bool:
		if d == nil {
			return util.ErrorfSkipFrames(1, "nil destination pointer passed in")
		}
		s, ok := src.(parser.DBool)
		if !ok {
			return util.ErrorfSkipFrames(1, "source type assertion failed")
		}
		*d = bool(s)

	case *int64:
		if d == nil {
			return util.ErrorfSkipFrames(1, "nil destination pointer passed in")
		}
		s, ok := src.(parser.DInt)
		if !ok {
			return util.ErrorfSkipFrames(1, "source type assertion failed")
		}
		*d = int64(s)

	case *time.Time:
		if d == nil {
			return util.ErrorfSkipFrames(1, "nil destination pointer passed in")
		}
		s, ok := src.(parser.DTimestamp)
		if !ok {
			return util.ErrorfSkipFrames(1, "source type assertion failed")
		}
		*d = time.Time(s.Time)

	case *[]byte:
		if d == nil {
			return util.ErrorfSkipFrames(1, "nil destination pointer passed in")
		}
		s, ok := src.(parser.DBytes)
		if !ok {
			return util.ErrorfSkipFrames(1, "source type assertion failed")
		}
		// Yes, this copies, but this probably isn't in the critical path.
		*d = []byte(s)

	default:
		return util.ErrorfSkipFrames(1, "unimplemented type for scanCol: %T", dst)
	}

	return nil
}
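For illustration only (not part of the original source), a caller in the same package might use ScanIndex along these lines; the helper name and the assumed column layout (a string column followed by an int64 column) are hypothetical:

// scanNameAndCount is a hypothetical example of using ScanIndex to extract
// two typed columns from a result row.
func scanNameAndCount(rs *resultScanner, row sql.ResultRow) (string, int64, error) {
	var name string
	var count int64
	if err := rs.ScanIndex(row, 0, &name); err != nil {
		return "", 0, err
	}
	if err := rs.ScanIndex(row, 1, &count); err != nil {
		return "", 0, err
	}
	return name, count, nil
}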
Example #15
// UglyPrint is a partial right inverse to PrettyPrint: it takes a key
// formatted for human consumption and attempts to translate it into a
// roachpb.Key. Not all key types are supported and no optimization has been
// performed. This is intended for use in debugging only.
func UglyPrint(input string) (_ roachpb.Key, rErr error) {
	defer func() {
		if r := recover(); r != nil {
			if err, ok := r.(error); ok {
				rErr = err
				return
			}
			rErr = fmt.Errorf("%v", r)
		}
	}()

	origInput := input
	var output roachpb.Key

	mkErr := func(err error) (roachpb.Key, error) {
		if err == nil {
			err = errIllegalInput
		}
		return nil, util.ErrorfSkipFrames(1, `can't parse "%s" after reading %s: %s`, input, origInput[:len(origInput)-len(input)], err)
	}

	var entries []dictEntry // nil if not pinned to a subrange
outer:
	for len(input) > 0 {
		if entries != nil {
			for _, v := range entries {
				if strings.HasPrefix(input, v.name) {
					input = input[len(v.name):]
					if v.psFunc == nil {
						return mkErr(nil)
					}
					remainder, key := v.psFunc(input)
					input = remainder
					output = append(output, key...)
					entries = nil
					continue outer
				}
			}
			return nil, &errUglifyUnsupported{errors.New("known key, but unsupported subtype")}
		}
		for _, v := range constKeyDict {
			if strings.HasPrefix(input, v.name) {
				output = append(output, v.value...)
				input = input[len(v.name):]
				continue outer
			}
		}
		for _, v := range keyDict {
			if strings.HasPrefix(input, v.name) {
				// No appending to output yet, the dictionary will take care of
				// it.
				input = input[len(v.name):]
				entries = v.entries
				continue outer
			}
		}
		return mkErr(errors.New("can't handle key"))
	}
	if out := PrettyPrint(output); out != origInput {
		return nil, fmt.Errorf("constructed key deviates from original: %s vs %s", out, origInput)
	}
	return output, nil
}
Example #16
func checkCounterGE(t *testing.T, s *testServer, key string, e int64) {
	if a := s.MustGetSQLCounter(key); !(a >= e) {
		t.Error(util.ErrorfSkipFrames(1, "stat %s: expected: actual %d >= %d", key, a, e))
	}
}
Example #17
func checkCounterEQ(t *testing.T, s *testServer, key string, e int64) {
	if a := s.MustGetSQLCounter(key); a != e {
		t.Error(util.ErrorfSkipFrames(1, "stat %s: actual %d != expected %d", key, a, e))
	}
}
Example #18
func testRaftUpdateInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	minAffected := int64(server.ExpectedInitialRangeCount())

	const long = time.Minute
	const short = 10 * time.Second

	mustPost := func(freeze bool) serverpb.ClusterFreezeResponse {
		reply, err := postFreeze(c, freeze, long)
		if err != nil {
			t.Fatal(util.ErrorfSkipFrames(1, "%v", err))
		}
		return reply
	}

	if reply := mustPost(false); reply.RangesAffected != 0 {
		t.Fatalf("expected initial unfreeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d frozen ranges, got %d", minAffected, reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected != 0 {
		t.Fatalf("expected second freeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(false); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d thawed ranges, got %d", minAffected, reply.RangesAffected)
	}

	num := c.NumNodes()
	if num < 3 {
		t.Skip("skipping remainder of test; needs at least 3 nodes")
	}

	// Kill the last node.
	if err := c.Kill(num - 1); err != nil {
		t.Fatal(err)
	}

	// Attempt to freeze should get stuck (since it does not get confirmation
	// of the last node receiving the freeze command).
	// Note that this is the freeze trigger stalling on the Replica, not the
	// Store-polling mechanism.
	acceptErrs := strings.Join([]string{
		"timed out waiting for Range",
		"Timeout exceeded while",
		"connection is closing",
		"deadline",
		// error returned via JSON when the server-side gRPC stream times out (due to
		// lack of new input). Unmarshaling that JSON fails with a message referencing
		// unknown fields, unfortunately in map order.
		"unknown field .*",
	}, "|")
	if reply, err := postFreeze(c, true, short); !testutils.IsError(err, acceptErrs) {
		t.Fatalf("expected timeout, got %v: %v", err, reply)
	}

	// Shut down the remaining nodes and restart them.
	for i := 0; i < num-1; i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}
	for i := 0; i < num; i++ {
		if err := c.Restart(i); err != nil {
			t.Fatal(err)
		}
	}

	// The cluster should now be fully operational (at least after waiting
	// a little bit) since each node tries to unfreeze everything when it
	// starts.
	//
	// TODO(tschottdorf): we unfreeze again in the loop since Raft reproposals
	// can re-freeze Ranges unexpectedly. This should be re-evaluated after
	// #6287 removes that problem.
	if err := util.RetryForDuration(time.Minute, func() error {
		if _, err := postFreeze(c, false, short); err != nil {
			return err
		}

		// TODO(tschottdorf): moving the client creation outside of the retry
		// loop will break the test with the following message:
		//
		//   client/rpc_sender.go:61: roachpb.Batch RPC failed as client
		//   connection was closed
		//
		// Perhaps the cluster updates the address too late after restarting
		// the node.
		db, dbStopper := c.NewClient(t, 0)
		defer dbStopper.Stop()

		_, err := db.Scan(keys.LocalMax, roachpb.KeyMax, 0)
		if err != nil {
			log.Info(err)
		}
		return err
	}); err != nil {
		t.Fatal(err)
	}

	// Unfreezing again should be a no-op.
	if reply, err := postFreeze(c, false, long); err != nil {
		t.Fatal(err)
	} else if reply.RangesAffected > 0 {
		t.Fatalf("still %d frozen ranges", reply.RangesAffected)
	}
}
Example #19
// assertQuery generates a query result from the local test model and compares
// it against the query result returned from the server.
func (tm *testModel) assertQuery(name string, sources []string,
	downsample, agg *TimeSeriesQueryAggregator, derivative *TimeSeriesQueryDerivative,
	r Resolution, start, end int64, expectedDatapointCount int, expectedSourceCount int) {
	// Query the actual server.
	q := Query{
		Name:             name,
		Downsampler:      downsample,
		SourceAggregator: agg,
		Derivative:       derivative,
		Sources:          sources,
	}
	actualDatapoints, actualSources, err := tm.DB.Query(q, r, start, end)
	if err != nil {
		tm.t.Fatal(err)
	}
	if a, e := len(actualDatapoints), expectedDatapointCount; a != e {
		tm.t.Logf("actual datapoints: %v", actualDatapoints)
		tm.t.Fatal(util.ErrorfSkipFrames(1, "query expected %d datapoints, got %d", e, a))
	}
	if a, e := len(actualSources), expectedSourceCount; a != e {
		tm.t.Fatal(util.ErrorfSkipFrames(1, "query expected %d sources, got %d", e, a))
	}

	// Construct an expected result for comparison.
	var expectedDatapoints []*TimeSeriesDatapoint
	expectedSources := make([]string, 0, 0)
	dataSpans := make(map[string]*dataSpan)

	// If no specific sources were provided, look for data from every source
	// encountered by the test model.
	var sourcesToCheck map[string]struct{}
	if len(sources) == 0 {
		sourcesToCheck = tm.seenSources
	} else {
		sourcesToCheck = make(map[string]struct{})
		for _, s := range sources {
			sourcesToCheck[s] = struct{}{}
		}
	}

	// Iterate over all possible sources which may have data for this query.
	for sourceName := range sourcesToCheck {
		// Iterate over all possible key times at which query data may be present.
		for keyTime := start - (start % r.KeyDuration()); keyTime < end; keyTime += r.KeyDuration() {
			// Construct a key for this source/time and retrieve it from the model.
			key := MakeDataKey(name, sourceName, r, keyTime)
			value, ok := tm.modelData[string(key)]
			if !ok {
				continue
			}

			// Add data from the key to the correct dataSpan.
			data, err := value.GetTimeseries()
			if err != nil {
				tm.t.Fatal(err)
			}
			ds, ok := dataSpans[sourceName]
			if !ok {
				ds = &dataSpan{
					startNanos:  start - (start % r.SampleDuration()),
					sampleNanos: r.SampleDuration(),
				}
				dataSpans[sourceName] = ds
				expectedSources = append(expectedSources, sourceName)
			}
			if err := ds.addData(&data); err != nil {
				tm.t.Fatal(err)
			}
		}
	}

	// Iterate over data in all dataSpans and construct expected datapoints.
	var startOffset int32
	isDerivative := q.GetDerivative() != TimeSeriesQueryDerivative_NONE
	if isDerivative {
		startOffset = -1
	}
	downsampleFn, err := getDownsampleFunction(q.GetDownsampler())
	if err != nil {
		tm.t.Fatal(err)
	}
	var iters unionIterator
	for _, ds := range dataSpans {
		iters = append(iters, ds.newIterator(startOffset, downsampleFn))
	}

	iters.init()
	currentVal := func() TimeSeriesDatapoint {
		var value float64
		switch q.GetSourceAggregator() {
		case TimeSeriesQueryAggregator_SUM:
			value = iters.sum()
		case TimeSeriesQueryAggregator_AVG:
			value = iters.avg()
		case TimeSeriesQueryAggregator_MAX:
			value = iters.max()
		case TimeSeriesQueryAggregator_MIN:
			value = iters.min()
		default:
			tm.t.Fatalf("unknown query aggregator %s", q.GetSourceAggregator())
		}
		return TimeSeriesDatapoint{
			TimestampNanos: iters.timestamp(),
			Value:          value,
		}
	}

	var last TimeSeriesDatapoint
	if isDerivative {
		last = currentVal()
		if iters.offset() < 0 {
			iters.advance()
		}
	}
	for iters.isValid() && iters.timestamp() <= end {
		current := currentVal()
		result := &TimeSeriesDatapoint{}
		*result = current
		if isDerivative {
			dTime := (current.TimestampNanos - last.TimestampNanos) / r.SampleDuration()
			if dTime == 0 {
				result.Value = 0
			} else {
				result.Value = (current.Value - last.Value) / float64(dTime)
			}
			if result.Value < 0 &&
				q.GetDerivative() == TimeSeriesQueryDerivative_NON_NEGATIVE_DERIVATIVE {
				result.Value = 0
			}
		}
		expectedDatapoints = append(expectedDatapoints, result)
		last = current
		iters.advance()
	}

	sort.Strings(expectedSources)
	sort.Strings(actualSources)
	if !reflect.DeepEqual(actualSources, expectedSources) {
		tm.t.Error(util.ErrorfSkipFrames(1, "actual source list: %v, expected: %v", actualSources, expectedSources))
	}
	if !reflect.DeepEqual(actualDatapoints, expectedDatapoints) {
		tm.t.Error(util.ErrorfSkipFrames(1, "actual datapoints: %v, expected: %v", actualDatapoints, expectedDatapoints))
	}
}
Example #20
func emptyKeyError() error {
	return util.ErrorfSkipFrames(1, "attempted access to empty key")
}
Example #21
func (s *adminServer) internalServerErrorf(w http.ResponseWriter, format string, args ...interface{}) {
	err := util.ErrorfSkipFrames(1, format, args...)
	log.Error(err)
	http.Error(w, err.Error(), http.StatusInternalServerError)
}
Example #22
func testRaftUpdateInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	minAffected := int64(server.ExpectedInitialRangeCount())

	mustPost := func(freeze bool) server.ClusterFreezeResponse {
		reply, err := postFreeze(c, freeze)
		if err != nil {
			t.Fatal(util.ErrorfSkipFrames(1, "%v", err))
		}
		return reply
	}

	if reply := mustPost(false); reply.RangesAffected != 0 {
		t.Fatalf("expected initial unfreeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d frozen ranges, got %d", minAffected, reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected != 0 {
		t.Fatalf("expected second freeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(false); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d thawed ranges, got %d", minAffected, reply.RangesAffected)
	}

	num := c.NumNodes()
	if num < 3 {
		t.Skip("skipping remainder of test; needs at least 3 nodes")
	}

	// Kill the last node.
	if err := c.Kill(num - 1); err != nil {
		t.Fatal(err)
	}

	// Attempt to freeze should get stuck (since it does not get confirmation
	// of the last node receiving the freeze command).
	if reply, err := postFreeze(c, true); !testutils.IsError(err, "timed out waiting for Range|Timeout exceeded while") {
		t.Fatalf("expected timeout, got %v: %v", err, reply)
	}

	// Shut down the remaining nodes and restart them.
	for i := 0; i < num-1; i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}
	for i := 0; i < num; i++ {
		if err := c.Restart(i); err != nil {
			t.Fatal(err)
		}
	}

	// The cluster should now be fully operational (at least after waiting
	// a little bit) since each node tries to unfreeze everything when it
	// starts.
	if err := util.RetryForDuration(time.Minute, func() error {
		// TODO(tschottdorf): moving the client creation outside of the retry
		// loop will break the test with the following message:
		//
		//   client/rpc_sender.go:61: roachpb.Batch RPC failed as client
		//   connection was closed
		//
		// Perhaps the cluster updates the address too late after restarting
		// the node.
		db, dbStopper := c.NewClient(t, 0)
		defer dbStopper.Stop()

		_, err := db.Scan(keys.LocalMax, roachpb.KeyMax, 0)
		if err != nil {
			log.Info(err)
		}
		return err
	}); err != nil {
		t.Fatal(err)
	}

	// Unfreezing again should be a no-op.
	if reply, err := postFreeze(c, false); err != nil {
		t.Fatal(err)
	} else if reply.RangesAffected > 0 {
		t.Fatalf("still %d frozen ranges", reply.RangesAffected)
	}
}