Example #1
// CheckQueryResults checks that the rows returned by a query match the expected
// response.
func (sr *SQLRunner) CheckQueryResults(query string, expected [][]string) {
	res := sr.QueryStr(query)
	if !reflect.DeepEqual(res, expected) {
		file, line, _ := caller.Lookup(1)
		sr.Errorf("%s:%d query '%s': expected:\n%v\ngot:%v\n", file, line, query, expected, res)
	}
}
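caller.Lookup here is CockroachDB's util/caller helper; passing depth 1 makes the failure message point at the line in the test that called CheckQueryResults rather than at the helper itself. Below is a minimal standalone sketch of that idea using only the standard library's runtime.Caller; the names are illustrative, not the real util/caller API.

package main

import (
	"fmt"
	"runtime"
)

// lookup resolves the file, line, and function name `depth` frames above
// its caller, which is what a caller.Lookup-style helper is assumed to do.
func lookup(depth int) (file string, line int, fun string) {
	pc, file, line, ok := runtime.Caller(depth + 1) // +1 skips lookup itself
	if !ok {
		return "unknown", 0, "unknown"
	}
	if f := runtime.FuncForPC(pc); f != nil {
		fun = f.Name()
	}
	return file, line, fun
}

// failHere reports its caller's location, the way CheckQueryResults above
// attributes a mismatch to the test that invoked it.
func failHere(msg string) {
	file, line, _ := lookup(1)
	fmt.Printf("%s:%d: %s\n", file, line, msg)
}

func main() {
	failHere("expected rows did not match")
}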
Example #2
File: txn.go Project: veteranlu/cockroach
// SetDebugName sets the debug name associated with the transaction which will
// appear in log files and the web UI. Each transaction starts out with an
// automatically assigned debug name composed of the file and line number where
// the transaction was created.
func (txn *Txn) SetDebugName(name string, depth int) {
	file, line, fun := caller.Lookup(depth + 1)
	if name == "" {
		name = fun
	}
	txn.Proto.Name = file + ":" + strconv.Itoa(line) + " " + name
}
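The depth parameter follows the usual stack-depth convention: each wrapper that adds a frame forwards depth + 1, so a caller passing 0 still gets tagged with its own file and line. A rough standalone sketch of that convention, with made-up helper names:

package main

import (
	"fmt"
	"runtime"
	"strconv"
)

// location formats the call site `depth` frames above its caller.
func location(depth int) string {
	_, file, line, _ := runtime.Caller(depth + 1)
	return file + ":" + strconv.Itoa(line)
}

// debugName adds one stack frame, so it forwards depth+1 to keep the
// report anchored at its own caller, as SetDebugName does above.
func debugName(name string, depth int) string {
	return location(depth+1) + " " + name
}

func main() {
	// depth 0: the name is tagged with this line in main.
	fmt.Println(debugName("my-txn", 0))
}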
Example #3
func TestLogBacktraceAt(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	// The peculiar style of this code simplifies line counting and maintenance of the
	// tracing block below.
	var infoLine string
	setTraceLocation := func(file string, line int, delta int) {
		_, file = filepath.Split(file)
		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
		err := logging.traceLocation.Set(infoLine)
		if err != nil {
			t.Fatal("error setting log_backtrace_at: ", err)
		}
	}
	{
		// Start of tracing block. These lines know about each other's relative position.
		file, line, _ := caller.Lookup(0)
		setTraceLocation(file, line, +2) // Two lines between Caller and Info calls.
		Info(context.Background(), "we want a stack trace here")
		if err := logging.traceLocation.Set(""); err != nil {
			t.Fatal(err)
		}
	}
	numAppearances := strings.Count(contents(Severity_INFO), infoLine)
	if numAppearances < 2 {
		// Need 2 appearances, one in the log header and one in the trace:
		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
		//   ...
		//   github.com/clog/glog_test.go:280 (0x41ba91)
		//   ...
		// We could be more precise but that would require knowing the details
		// of the traceback format, which may not be dependable.
		t.Fatal("got no trace back; log is ", contents(Severity_INFO))
	}
}
Example #4
// Stop signals all live workers to stop and then waits for each to
// confirm it has stopped.
func (s *Stopper) Stop() {
	defer s.Recover()
	defer unregister(s)

	file, line, _ := caller.Lookup(1)
	log.Infof(context.TODO(),
		"stop has been called from %s:%d, stopping or quiescing all running tasks", file, line)
	// Don't bother doing stuff cleanly if we're panicking, that would likely
	// block. Instead, best effort only. This cleans up the stack traces,
	// avoids stalls and helps some tests in `./cli` finish cleanly (where
	// panics happen on purpose).
	if r := recover(); r != nil {
		go s.Quiesce()
		close(s.stopper)
		close(s.stopped)
		s.mu.Lock()
		for _, c := range s.mu.closers {
			go c.Close()
		}
		s.mu.Unlock()
		panic(r)
	}

	s.Quiesce()
	close(s.stopper)
	s.stop.Wait()
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, c := range s.mu.closers {
		c.Close()
	}
	close(s.stopped)
}
Example #5
// EnsureContext checks whether the given context.Context contains a Span. If
// not, it creates one using the provided Tracer and wraps it in the returned
// Span. The returned closure must be called after the request has been fully
// processed.
func EnsureContext(ctx context.Context, tracer opentracing.Tracer) (context.Context, func()) {
	_, _, funcName := caller.Lookup(1)
	if opentracing.SpanFromContext(ctx) == nil {
		sp := tracer.StartSpan(funcName)
		return opentracing.ContextWithSpan(ctx, sp), sp.Finish
	}
	return ctx, func() {}
}
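Since the returned closure is either sp.Finish or a no-op, callers can defer it unconditionally. A hypothetical handler using the function above might look like this (a fragment, assuming the same imports as the example):

// Hypothetical usage of EnsureContext: the cleanup is deferred no matter
// which branch supplied it.
func handleRequest(ctx context.Context, tracer opentracing.Tracer) {
	ctx, finish := EnsureContext(ctx, tracer)
	defer finish()
	// ... process the request using ctx ...
	_ = ctx
}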
Example #6
File: structured.go Project: knz/cockroach
// addStructured creates a structured log entry to be written to the
// specified facility of the logger.
func addStructured(ctx context.Context, s Severity, depth int, format string, args []interface{}) {
	file, line, _ := caller.Lookup(depth + 1)
	msg := makeMessage(ctx, format, args)
	// makeMessage already added the tags when forming msg; we don't want
	// eventInternal to prepend them again.
	eventInternal(ctx, (s >= Severity_ERROR), false /*withTags*/, "%s:%d %s", file, line, msg)
	logging.outputLogEntry(s, file, line, msg)
}
Example #7
func defTestCase(expected, expectedReverse int, desired ...sqlbase.ColumnOrderInfo) desiredCase {
	// The line number is used to identify testcases in error messages.
	_, line, _ := caller.Lookup(1)
	return desiredCase{
		line:            line,
		desired:         desired,
		expected:        expected,
		expectedReverse: expectedReverse,
	}
}
Example #8
File: error.go Project: knz/cockroach
// Caller returns filename and line number info for the specified stack
// depths. The info is formatted as <file>:<line> and each entry is separated
// by a space.
func Caller(depth ...int) string {
	var sep string
	var buf bytes.Buffer
	for _, d := range depth {
		file, line, _ := caller.Lookup(d + 1)
		fmt.Fprintf(&buf, "%s%s:%d", sep, file, line)
		sep = " "
	}
	return buf.String()
}
Example #9
File: stopper.go Project: knz/cockroach
// RunTaskWithErr adds one to the count of tasks left to quiesce in the system.
// Any worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include goroutines launched
// to do periodic work and the kv/db.go gateway which accepts external client
// requests.
//
// If the system is currently quiescing, f is not called and an error
// indicating this condition is returned. Otherwise, returns whatever f returns.
func (s *Stopper) RunTaskWithErr(f func() error) error {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}
	if !s.runPrelude(key) {
		return errUnavailable
	}
	// Call f.
	defer s.Recover()
	defer s.runPostlude(key)
	return f()
}
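The file:line of the call site becomes a taskKey, which the Stopper presumably uses to account for tasks per call site. A small self-contained sketch of that bookkeeping idea, with illustrative type and field names:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// taskKey identifies a call site, mirroring the per-call-site task
// accounting in the Stopper example above.
type taskKey struct {
	file string
	line int
}

type registry struct {
	mu    sync.Mutex
	tasks map[taskKey]int
}

// run records which call site started the task and decrements on return.
func (r *registry) run(f func()) {
	_, file, line, _ := runtime.Caller(1)
	key := taskKey{file, line}
	r.mu.Lock()
	r.tasks[key]++
	r.mu.Unlock()
	defer func() {
		r.mu.Lock()
		r.tasks[key]--
		r.mu.Unlock()
	}()
	f()
}

func main() {
	r := &registry{tasks: map[taskKey]int{}}
	r.run(func() { fmt.Println("task ran") })
}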
Example #10
// checkRestarts checks that there are no errors left to inject.
func checkRestarts(t *testing.T, magicVals *filterVals) {
	magicVals.Lock()
	defer magicVals.Unlock()
	for key, count := range magicVals.restartCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be retried %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.abortCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	if t.Failed() {
		t.Fatalf("checking error injection failed")
	}
}
Example #11
File: stopper.go Project: knz/cockroach
// RunLimitedAsyncTask runs function f in a goroutine, using the given
// channel as a semaphore to limit the number of tasks that are run
// concurrently to the channel's capacity. If wait is true, blocks
// until the semaphore is available in order to push back on callers
// that may be trying to create many tasks. If wait is false, returns
// immediately with an error if the semaphore is not
// available. Returns an error if the Stopper is quiescing, in which
// case the function is not executed.
func (s *Stopper) RunLimitedAsyncTask(
	ctx context.Context, sem chan struct{}, wait bool, f func(context.Context),
) error {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}

	// Wait for permission to run from the semaphore.
	select {
	case sem <- struct{}{}:
	case <-ctx.Done():
		return ctx.Err()
	case <-s.ShouldQuiesce():
		return errUnavailable
	default:
		if !wait {
			return ErrThrottled
		}
		log.Infof(context.TODO(), "stopper throttling task from %s:%d due to semaphore", file, line)
		// Retry the select without the default.
		select {
		case sem <- struct{}{}:
		case <-ctx.Done():
			return ctx.Err()
		case <-s.ShouldQuiesce():
			return errUnavailable
		}
	}

	// Check for canceled context: it's possible to get the semaphore even
	// if the context is canceled.
	select {
	case <-ctx.Done():
		<-sem
		return ctx.Err()
	default:
	}

	if !s.runPrelude(key) {
		<-sem
		return errUnavailable
	}

	ctx, span := tracing.ForkCtxSpan(ctx, fmt.Sprintf("%s:%d", file, line))

	go func() {
		defer s.Recover()
		defer s.runPostlude(key)
		defer func() { <-sem }()
		defer tracing.FinishSpan(span)
		f(ctx)
	}()
	return nil
}
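The buffered channel acts as a counting semaphore: sending acquires a slot, receiving releases it, and the first select's default branch is the non-blocking attempt taken when wait is false. A minimal standalone sketch of that pattern:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errThrottled = errors.New("throttled")

// tryRun starts f only if a semaphore slot is free; otherwise it returns
// errThrottled immediately, like the wait=false path above.
func tryRun(sem chan struct{}, wg *sync.WaitGroup, f func()) error {
	select {
	case sem <- struct{}{}: // acquire a slot
	default:
		return errThrottled
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer func() { <-sem }() // release the slot
		f()
	}()
	return nil
}

func main() {
	sem := make(chan struct{}, 2) // at most 2 concurrent tasks
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		if err := tryRun(sem, &wg, func() { fmt.Println("working") }); err != nil {
			fmt.Println("skipped:", err)
		}
	}
	wg.Wait()
}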
Example #12
File: dir.go Project: knz/cockroach
// TempDir creates a directory and a function to clean it up at the end of the
// test. If called directly from a test function, pass 0 for depth (which puts
// the test name in the directory). Otherwise, offset depth appropriately.
func TempDir(t testing.TB, depth int) (string, func()) {
	_, _, name := caller.Lookup(depth + 1)
	dir, err := ioutil.TempDir("", name)
	if err != nil {
		t.Fatal(err)
	}
	cleanup := func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Error(err)
		}
	}
	return dir, cleanup
}
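A hypothetical test using the helper above would pass depth 0 and defer the cleanup (fragment, assuming the testing import):

// Hypothetical test: depth 0 puts the test's own name into the
// temporary directory name.
func TestWithTempDir(t *testing.T) {
	dir, cleanup := TempDir(t, 0)
	defer cleanup()
	// ... create fixtures under dir ...
	_ = dir
}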
Example #13
// CheckQueryResults checks that the rows returned by a query match the expected
// response.
func (sr *SQLRunner) CheckQueryResults(query string, expected [][]string) {
	file, line, _ := caller.Lookup(1)
	info := fmt.Sprintf("%s:%d query '%s'", file, line, query)

	rows := sr.Query(query)
	cols, err := rows.Columns()
	if err != nil {
		sr.Error(err)
		return
	}
	if len(expected) > 0 && len(cols) != len(expected[0]) {
		sr.Errorf("%s: wrong number of columns %d", info, len(cols))
		return
	}
	vals := make([]interface{}, len(cols))
	for i := range vals {
		vals[i] = new(interface{})
	}
	i := 0
	for ; rows.Next(); i++ {
		if i >= len(expected) {
			sr.Errorf("%s: expected %d rows, got more", info, len(expected))
			return
		}
		if err := rows.Scan(vals...); err != nil {
			sr.Error(err)
			return
		}
		for j, v := range vals {
			if val := *v.(*interface{}); val != nil {
				var s string
				switch t := val.(type) {
				case []byte:
					s = string(t)
				default:
					s = fmt.Sprint(val)
				}
				if expected[i][j] != s {
					sr.Errorf("%s: expected %v, found %v", info, expected[i][j], s)
				}
			} else if expected[i][j] != "NULL" {
				sr.Errorf("%s: expected %v, found %v", info, expected[i][j], "NULL")
			}
		}
	}
	if i != len(expected) {
		sr.Errorf("%s: found %d rows, expected %d", info, i, len(expected))
	}
}
Example #14
File: db_test.go Project: knz/cockroach
func TestDebugName(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup(t)
	defer s.Stopper().Stop()

	file, _, _ := caller.Lookup(0)
	if err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		if !strings.HasPrefix(txn.DebugName(), file+":") {
			t.Fatalf("expected \"%s\" to have the prefix \"%s:\"", txn.DebugName(), file)
		}
		return nil
	}); err != nil {
		t.Errorf("txn failed: %s", err)
	}
}
Example #15
// logScope creates a testLogScope which corresponds to the
// lifetime of a logging directory. The logging directory is named
// after the test function that calls logScope.
func logScope(t *testing.T) testLogScope {
	testName := "logUnknown"
	if _, _, f := caller.Lookup(1); f != "" {
		parts := strings.Split(f, ".")
		testName = "log" + parts[len(parts)-1]
	}
	tempDir, err := ioutil.TempDir("", testName)
	if err != nil {
		t.Fatal(err)
	}
	if err := dirTestOverride(tempDir); err != nil {
		t.Fatal(err)
	}
	return testLogScope(tempDir)
}
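The function name returned by caller.Lookup is fully qualified (package path plus function), so the test name is recovered by taking the last dot-separated segment. A standalone sketch of that extraction built on runtime.Caller:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// shortFuncName returns the bare name of the caller's function, stripping
// the package path from the fully qualified name (e.g.
// "github.com/x/pkg.TestFoo" becomes "TestFoo").
func shortFuncName() string {
	pc, _, _, ok := runtime.Caller(1)
	if !ok {
		return "unknown"
	}
	full := runtime.FuncForPC(pc).Name()
	parts := strings.Split(full, ".")
	return parts[len(parts)-1]
}

func main() {
	fmt.Println("log" + shortFuncName()) // prints "logmain" here
}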
Example #16
File: stopper.go Project: knz/cockroach
// RunAsyncTask runs function f in a goroutine. It returns an error when the
// Stopper is quiescing, in which case the function is not executed.
func (s *Stopper) RunAsyncTask(ctx context.Context, f func(context.Context)) error {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}
	if !s.runPrelude(key) {
		return errUnavailable
	}

	ctx, span := tracing.ForkCtxSpan(ctx, fmt.Sprintf("%s:%d", file, line))

	// Call f.
	go func() {
		defer s.Recover()
		defer s.runPostlude(key)
		defer tracing.FinishSpan(span)
		f(ctx)
	}()
	return nil
}
Example #17
File: clog.go Project: hvaara/cockroach
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		if err := sb.Flush(); err != nil {
			return err
		}
		if err := sb.file.Close(); err != nil {
			return err
		}
	}
	var err error
	sb.file, _, err = create(sb.sev, now)
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	f, l, _ := caller.Lookup(1)
	for _, msg := range []string{
		fmt.Sprintf("[config] file created at: %s\n", now.Format("2006/01/02 15:04:05")),
		fmt.Sprintf("[config] running on machine: %s\n", host),
		fmt.Sprintf("[config] binary: %s\n", build.GetInfo().Short()),
		fmt.Sprintf("[config] arguments: %s\n", os.Args),
		fmt.Sprintf("line format: [IWEF]yymmdd hh:mm:ss.uuuuuu goid file:line msg\n"),
	} {
		buf := formatLogEntry(Entry{
			Severity:  sb.sev,
			Time:      now.UnixNano(),
			Goroutine: goid.Get(),
			File:      f,
			Line:      int64(l),
			Message:   msg,
		}, nil, nil)
		var n int
		n, err = sb.file.Write(buf.Bytes())
		sb.nbytes += uint64(n)
		if err != nil {
			return err
		}
		logging.putBuffer(buf)
	}
	return nil
}
Example #18
File: util_test.go Project: knz/cockroach
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
		checkRangeReplication(t, l, 20*time.Second)
		completed = true
		return l
	}
	f := farmer(t, "")
	c = f
	if err := f.Resize(*flagNodes); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		if destroyErr := f.Destroy(t); destroyErr != nil {
			t.Fatalf("could not destroy cluster after error %v: %v", err, destroyErr)
		}
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	completed = true
	return f
}
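The inner closure walks up the stack one frame at a time until it finds a function whose name matches Test(.*), and names the log directory after that test. A self-contained sketch of that stack walk; the regexp here is an assumption about what testFuncRE matches:

package main

import (
	"fmt"
	"regexp"
	"runtime"
	"strings"
)

// Assumption: a pattern that matches functions named Test*, standing in
// for the testFuncRE used in the example above.
var testFuncRE = regexp.MustCompile(`\.Test[^.]*$`)

// findTestCaller walks up the stack until it finds a Test* function and
// returns its bare name, as the log-directory closure above does.
func findTestCaller() (string, bool) {
	for i := 1; i < 100; i++ {
		pc, _, _, ok := runtime.Caller(i)
		if !ok {
			break
		}
		fun := runtime.FuncForPC(pc).Name()
		if testFuncRE.MatchString(fun) {
			parts := strings.Split(fun, ".")
			return parts[len(parts)-1], true
		}
	}
	return "", false
}

func TestLikeCaller() {
	if name, ok := findTestCaller(); ok {
		fmt.Println("found test:", name)
	} else {
		fmt.Println("no Test(.*) caller in stack")
	}
}

func main() {
	TestLikeCaller()
}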
Example #19
// MakeSrcCtx creates a SrcCtx value with contextual information about the
// caller at the requested depth.
func MakeSrcCtx(depth int) SrcCtx {
	f, l, fun := caller.Lookup(depth + 1)
	return SrcCtx{File: f, Line: l, Function: fun}
}
Example #20
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if *flagRemote {
		f := farmer(t, "")
		c = f
		if err := f.Resize(*flagNodes); err != nil {
			t.Fatal(err)
		}
		if err := f.WaitReady(5 * time.Minute); err != nil {
			if destroyErr := f.Destroy(t); destroyErr != nil {
				t.Fatalf("could not destroy cluster after error %s: %s", err, destroyErr)
			}
			t.Fatalf("cluster not ready in time: %s", err)
		}
	} else {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
	}
	wantedReplicas := 3
	if numNodes := c.NumNodes(); numNodes < wantedReplicas {
		wantedReplicas = numNodes
	}

	// Looks silly, but we actually start zero-node clusters in the
	// reference tests.
	if wantedReplicas > 0 {
		ctx := context.TODO()

		log.Infof(ctx, "waiting for first range to have %d replicas", wantedReplicas)

		util.SucceedsSoon(t, func() error {
			select {
			case <-stopper:
				t.Fatal("interrupted")
			case <-time.After(time.Second):
			}

			// Reconnect on every iteration; gRPC will eagerly tank the connection
			// on transport errors. Always talk to node 0 because it's guaranteed
			// to exist.
			client, dbStopper := c.NewClient(t, 0)
			defer dbStopper.Stop()

			ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()

			var desc roachpb.RangeDescriptor
			if err := client.GetProto(ctxWithTimeout, keys.RangeDescriptorKey(roachpb.RKeyMin), &desc); err != nil {
				return err
			}
			foundReplicas := len(desc.Replicas)

			if log.V(1) {
				log.Infof(ctxWithTimeout, "found %d replicas", foundReplicas)
			}

			if foundReplicas < wantedReplicas {
				return errors.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
			}
			return nil
		})
	}

	completed = true
	return c
}
Example #21
// SucceedsSoonDepth is like SucceedsSoon() but with an additional
// stack depth offset.
func SucceedsSoonDepth(depth int, t testing.TB, fn func() error) {
	if err := util.RetryForDuration(DefaultSucceedsSoonDuration, fn); err != nil {
		file, line, _ := caller.Lookup(depth + 1)
		t.Fatalf("%s:%d, condition failed to evaluate within %s: %s", file, line, DefaultSucceedsSoonDuration, err)
	}
}
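Presumably a depth-free SucceedsSoon wraps this function with depth 1 so failures point at its own caller; a sketch of that wrapper (hypothetical, not confirmed by the source):

// Hypothetical wrapper: it adds one stack frame, so it passes 1 as the
// depth to keep the reported file:line at its own caller.
func SucceedsSoon(t testing.TB, fn func() error) {
	SucceedsSoonDepth(1, t, fn)
}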
Example #22
func TestTransportMoveToFront(t *testing.T) {
	defer leaktest.AfterTest(t)()
	rd1 := roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 1, ReplicaID: 1}
	rd2 := roachpb.ReplicaDescriptor{NodeID: 2, StoreID: 2, ReplicaID: 2}
	rd3 := roachpb.ReplicaDescriptor{NodeID: 3, StoreID: 3, ReplicaID: 3}
	clients := []batchClient{
		{args: roachpb.BatchRequest{Header: roachpb.Header{Replica: rd1}}},
		{args: roachpb.BatchRequest{Header: roachpb.Header{Replica: rd2}}},
		{args: roachpb.BatchRequest{Header: roachpb.Header{Replica: rd3}}},
	}
	gt := grpcTransport{orderedClients: clients}

	verifyOrder := func(replicas []roachpb.ReplicaDescriptor) {
		file, line, _ := caller.Lookup(1)
		for i, bc := range gt.orderedClients {
			if bc.args.Replica != replicas[i] {
				t.Fatalf("%s:%d: expected order %+v; got mismatch at index %d: %+v",
					file, line, replicas, i, bc.args.Replica)
			}
		}
	}

	verifyOrder([]roachpb.ReplicaDescriptor{rd1, rd2, rd3})

	// Move replica 2 to the front.
	gt.MoveToFront(rd2)
	verifyOrder([]roachpb.ReplicaDescriptor{rd2, rd1, rd3})

	// Now replica 3.
	gt.MoveToFront(rd3)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd1, rd2})

	// Mark replica 2 pending. Shouldn't be able to move it.
	clients[2].pending = true
	gt.MoveToFront(rd2)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd1, rd2})

	// Advance the client index and move replica 3 back to front.
	gt.clientIndex++
	gt.MoveToFront(rd3)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd1, rd2})
	if gt.clientIndex != 0 {
		t.Fatalf("expected cient index 0; got %d", gt.clientIndex)
	}

	// Advance the client index again and verify replica 3 cannot
	// be moved to front for a second retry.
	gt.clientIndex++
	gt.MoveToFront(rd3)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd1, rd2})
	if gt.clientIndex != 1 {
		t.Fatalf("expected cient index 1; got %d", gt.clientIndex)
	}

	// Mark replica 2 no longer pending. Should be able to move it.
	clients[2].pending = false
	gt.MoveToFront(rd2)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd2, rd1})

	// Advance the client index and move rd1 to the front; should be no change.
	gt.clientIndex++
	gt.MoveToFront(rd1)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd2, rd1})

	// Advance the client index again and move rd1 to front. Should move
	// client index back for a retry.
	gt.clientIndex++
	gt.MoveToFront(rd1)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd2, rd1})
	if gt.clientIndex != 2 {
		t.Fatalf("expected cient index 2; got %d", gt.clientIndex)
	}

	// Advance client index once more; verify no second retry.
	gt.clientIndex++
	gt.MoveToFront(rd1)
	verifyOrder([]roachpb.ReplicaDescriptor{rd3, rd2, rd1})
	if gt.clientIndex != 3 {
		t.Fatalf("expected cient index 3; got %d", gt.clientIndex)
	}
}
Example #23
File: errors.go Project: maxlang/cockroach
// NewErrorf creates an Error from the given error message. It is a
// passthrough to fmt.Errorf, with an additional prefix containing the
// filename and line number.
func NewErrorf(format string, a ...interface{}) *Error {
	// Cannot use errors.Errorf here due to cyclic dependency.
	file, line, _ := caller.Lookup(1)
	s := fmt.Sprintf("%s:%d: ", file, line)
	return NewError(fmt.Errorf(s+format, a...))
}
Example #24
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(ctx context.Context, t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(ctx, t)
		}
	}()
	if *flagRemote {
		f := MakeFarmer(t, "", stopper)
		c = f
		if err := f.Resize(*flagNodes); err != nil {
			t.Fatal(err)
		}
		if err := f.WaitReady(5 * time.Minute); err != nil {
			if destroyErr := f.Destroy(t); destroyErr != nil {
				t.Fatalf("could not destroy cluster after error %s: %s", err, destroyErr)
			}
			t.Fatalf("cluster not ready in time: %s", err)
		}
	} else {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(ctx, cfg, logDir, *flagPrivileged, stopper)
		l.Start(ctx)
		c = l
	}
	wantedReplicas := 3
	if numNodes := c.NumNodes(); numNodes < wantedReplicas {
		wantedReplicas = numNodes
	}

	// Looks silly, but we actually start zero-node clusters in the
	// reference tests.
	if wantedReplicas > 0 {

		log.Infof(ctx, "waiting for first range to have %d replicas", wantedReplicas)

		testutils.SucceedsSoon(t, func() error {
			select {
			case <-stopper.ShouldStop():
				t.Fatal("interrupted")
			case <-time.After(time.Second):
			}

			// Reconnect on every iteration; gRPC will eagerly tank the connection
			// on transport errors. Always talk to node 0 because it's guaranteed
			// to exist.
			client, err := c.NewClient(ctx, 0)
			if err != nil {
				t.Fatal(err)
			}

			var desc roachpb.RangeDescriptor
			if err := client.GetProto(ctx, keys.RangeDescriptorKey(roachpb.RKeyMin), &desc); err != nil {
				return err
			}
			foundReplicas := len(desc.Replicas)

			if log.V(1) {
				log.Infof(ctx, "found %d replicas", foundReplicas)
			}

			if foundReplicas < wantedReplicas {
				return errors.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
			}
			return nil
		})
	}

	// Ensure that all nodes are serving SQL by making sure a simple
	// read-only query succeeds.
	for i := 0; i < c.NumNodes(); i++ {
		testutils.SucceedsSoon(t, func() error {
			db, err := gosql.Open("postgres", c.PGUrl(ctx, i))
			if err != nil {
				return err
			}
			if _, err := db.Exec("SHOW DATABASES;"); err != nil {
				return err
			}
			return nil
		})
	}

	completed = true
	return c
}
Example #25
func (b *Bank) must(err error) {
	if err != nil {
		f, l, _ := caller.Lookup(1)
		b.Fatal(errors.Wrapf(err, "%s:%d", f, l))
	}
}