Example 1
// checkRestarts checks that there are no errors left to inject.
func checkRestarts(t *testing.T, magicVals *filterVals) {
	magicVals.Lock()
	defer magicVals.Unlock()
	for key, count := range magicVals.restartCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be retried %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.abortCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.endTxnRestartCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: txn writing \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	if len(magicVals.txnsToFail) > 0 {
		file, line, _ := caller.Lookup(1)
		t.Errorf("%s:%d: txns still to be failed: %v", file, line, magicVals.txnsToFail)
	}
	if t.Failed() {
		t.Fatalf("checking error injection failed")
	}
}
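checkRestarts uses caller.Lookup(1) so each failure is reported at the line of the test that invoked the helper, not inside the helper itself. The same effect can be had with the standard library; a minimal sketch, with reportf as a hypothetical helper standing in for the t.Errorf calls above:

package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// reportf prefixes its message with the caller's file:line, the way
// checkRestarts attributes failures to the invoking test. runtime.Caller(1)
// plays the role of caller.Lookup(1): skip one frame, land on the caller.
func reportf(format string, args ...interface{}) {
	_, file, line, ok := runtime.Caller(1)
	if !ok {
		file, line = "unknown", 0
	}
	fmt.Printf("%s:%d: "+format+"\n",
		append([]interface{}{filepath.Base(file), line}, args...)...)
}

func main() {
	reportf("INSERT for %q still has to be retried %d times", "a", 2)
}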
// newTxn begins a transaction. For testing purposes, this comes with an ID
// pre-initialized, but with the Writing flag set to false.
func newTxn(clock *hlc.Clock, baseKey proto.Key) *proto.Transaction {
	f, l, fun := caller.Lookup(1)
	name := fmt.Sprintf("%s:%d %s", f, l, fun)
	txn := proto.NewTransaction("test", baseKey, 1, proto.SERIALIZABLE, clock.Now(), clock.MaxOffset().Nanoseconds())
	txn.Name = name
	return txn
}
Example 3
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T) cluster.Cluster {
	if *numLocal > 0 {
		if *numRemote > 0 {
			t.Fatal("cannot both specify -num-local and -num-remote")
		}
		logDir := *logDir
		if logDir != "" {
			if _, _, fun := caller.Lookup(1); fun != "" {
				logDir = filepath.Join(logDir, fun)
			}
		}
		l := cluster.CreateLocal(*numLocal, *numStores, logDir, stopper)
		l.Start()
		checkRangeReplication(t, l, 20*time.Second)
		return l
	}
	if *numRemote == 0 {
		t.Fatal("need to either specify -num-local or -num-remote")
	}
	f := farmer(t)
	if err := f.Resize(*numRemote, 0); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		_ = f.Destroy()
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	return f
}
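StartCluster uses the caller's function name to give each test its own log directory. The pattern in miniature, as a stdlib sketch: caller.Lookup's third return value corresponds to runtime.FuncForPC(pc).Name(), modulo package-path trimming, and testLogDir is a hypothetical name.

package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// testLogDir appends the calling function's name to base, mirroring how
// StartCluster derives a per-test logDir.
func testLogDir(base string) string {
	pc, _, _, ok := runtime.Caller(1) // 1 = our caller
	if !ok {
		return base
	}
	fn := runtime.FuncForPC(pc)
	if fn == nil || fn.Name() == "" {
		return base
	}
	return filepath.Join(base, fn.Name())
}

func main() {
	fmt.Println(testLogDir("/tmp/acceptance-logs")) // e.g. /tmp/acceptance-logs/main.main
}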
Example 4
func TestLogBacktraceAt(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	// The peculiar style of this code simplifies line counting and maintenance of the
	// tracing block below.
	var infoLine string
	setTraceLocation := func(file string, line int, delta int) {
		_, file = filepath.Split(file)
		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
		err := logging.traceLocation.Set(infoLine)
		if err != nil {
			t.Fatal("error setting log_backtrace_at: ", err)
		}
	}
	{
		// Start of tracing block. These lines know about each other's relative position.
		file, line, _ := caller.Lookup(0)
		setTraceLocation(file, line, +2) // Two lines between Caller and Info calls.
		Info(context.Background(), "we want a stack trace here")
		if err := logging.traceLocation.Set(""); err != nil {
			t.Fatal(err)
		}
	}
	numAppearances := strings.Count(contents(InfoLog), infoLine)
	if numAppearances < 2 {
		// Need 2 appearances, one in the log header and one in the trace:
		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
		//   ...
		//   github.com/clog/glog_test.go:280 (0x41ba91)
		//   ...
		// We could be more precise but that would require knowing the details
		// of the traceback format, which may not be dependable.
		t.Fatal("got no trace back; log is ", contents(InfoLog))
	}
}
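The fragile part of this test is the +2 delta: caller.Lookup(0) reports the line it is called from, so adding an offset names a line further down, and the offset must be kept in sync with the code layout by hand. The same trick with the standard library, as a runnable sketch:

package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

func main() {
	// runtime.Caller(0) reports this very line; line+2 therefore names
	// the fmt.Println call exactly two source lines below. Move either
	// line and the offset silently goes stale.
	_, file, line, _ := runtime.Caller(0)
	target := fmt.Sprintf("%s:%d", filepath.Base(file), line+2)
	fmt.Println("trace target:", target) // this is the line named by target
}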
Example 5
// RunLimitedAsyncTask runs function f in a goroutine, using the given channel
// as a semaphore to limit the number of tasks that are run concurrently to
// the channel's capacity. Blocks until the semaphore is available in order to
// push back on callers that may be trying to create many tasks. Returns an
// error if the Stopper is draining, in which case the function is not
// executed.
func (s *Stopper) RunLimitedAsyncTask(sem chan struct{}, f func()) error {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}

	// Wait for permission to run from the semaphore.
	select {
	case sem <- struct{}{}:
	case <-s.ShouldDrain():
		return errUnavailable
	default:
		log.Printf("stopper throttling task from %s:%d due to semaphore", file, line)
		// Retry the select without the default.
		select {
		case sem <- struct{}{}:
		case <-s.ShouldDrain():
			return errUnavailable
		}
	}

	if !s.runPrelude(key) {
		<-sem
		return errUnavailable
	}
	go func() {
		defer s.runPostlude(key)
		defer func() { <-sem }()
		f()
	}()
	return nil
}
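The semaphore is nothing more than a buffered channel whose capacity is the concurrency limit: a send acquires a slot, a receive releases it. The core pattern in isolation, as a self-contained stdlib sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	sem := make(chan struct{}, 2) // capacity 2 = at most 2 tasks at once
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		sem <- struct{}{} // blocks while two tasks hold slots
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			fmt.Println("running task", i)
		}(i)
	}
	wg.Wait()
}

RunLimitedAsyncTask adds two things on top of this: the ShouldDrain case, so a draining Stopper wins over a full semaphore, and the logged default branch, so throttled call sites show up in the logs.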
Example 6
// SetDebugName sets the debug name associated with the transaction which will
// appear in log files and the web UI. Each transaction starts out with an
// automatically assigned debug name composed of the file and line number where
// the transaction was created.
func (txn *Txn) SetDebugName(name string, depth int) {
	file, line, fun := caller.Lookup(depth + 1)
	if name == "" {
		name = fun
	}
	txn.Proto.Name = fmt.Sprintf("%s:%d %s", file, line, name)
}
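A hypothetical call site: depth 0 attributes the name to the line calling SetDebugName directly, while a helper wrapping the call would pass 1 to skip its own frame.

txn.SetDebugName("update-schema", 0) // recorded as "file.go:42 update-schema" (illustrative values)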
Example 7
// SetDebugName sets the debug name associated with the transaction which will
// appear in log files and the web UI. Each transaction starts out with an
// automatically assigned debug name composed of the file and line number where
// the transaction was created.
func (txn *Txn) SetDebugName(name string, depth int) {
	file, line, fun := caller.Lookup(depth + 1)
	if name == "" {
		name = fun
	}
	txn.Proto.Name = file + ":" + strconv.Itoa(line) + " " + name
}
Example 8
// TestErrorf verifies the pass through to fmt.Errorf as well as
// file/line prefix.
func TestErrorf(t *testing.T) {
	err := Errorf("foo: %d %f", 1, 3.14)
	file, line, _ := caller.Lookup(0)
	expected := fmt.Sprintf("%sfoo: 1 3.140000", fmt.Sprintf(errorPrefixFormat, file, line-1))
	if expected != err.Error() {
		t.Errorf("expected %s, got %s", expected, err.Error())
	}
}
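The test works because Errorf captures the frame of the line creating the error, which sits one line above the caller.Lookup(0) call, hence the line-1 in the expected string. What Errorf plausibly does, as a stdlib sketch (the exact errorPrefixFormat layout is an assumption):

package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// errorfAt formats like fmt.Errorf but prefixes the caller's file:line,
// in the spirit of the Errorf under test.
func errorfAt(format string, args ...interface{}) error {
	_, file, line, _ := runtime.Caller(1)
	prefix := fmt.Sprintf("%s:%d: ", filepath.Base(file), line)
	return fmt.Errorf(prefix+format, args...)
}

func main() {
	fmt.Println(errorfAt("foo: %d %f", 1, 3.14))
}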
Example 9
// EnsureContext checks whether the given context.Context contains a Span. If
// not, it creates one using the provided Tracer and wraps it in the returned
// Span. The returned closure must be called after the request has been fully
// processed.
func EnsureContext(ctx context.Context, tracer opentracing.Tracer) (context.Context, func()) {
	_, _, funcName := caller.Lookup(1)
	if opentracing.SpanFromContext(ctx) == nil {
		sp := tracer.StartSpan(funcName)
		return opentracing.ContextWithSpan(ctx, sp), sp.Finish
	}
	return ctx, func() {}
}
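A hypothetical handler showing the intended call pattern (handleRequest and the request body are assumptions; EnsureContext is the function above):

// handleRequest wraps the context once and defers the returned closure;
// the closure finishes the Span only if EnsureContext had to create one.
func handleRequest(ctx context.Context, tracer opentracing.Tracer) {
	ctx, cleanup := EnsureContext(ctx, tracer)
	defer cleanup()
	// ... process the request using ctx ...
}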
Example 10
// addStructured creates a structured log entry to be written to the
// specified facility of the logger.
func addStructured(ctx context.Context, s Severity, depth int, format string, args []interface{}) {
	if ctx == nil {
		panic("nil context")
	}
	file, line, _ := caller.Lookup(depth + 1)
	msg := makeMessage(ctx, format, args)
	Trace(ctx, msg)
	logging.outputLogEntry(s, file, line, msg)
}
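The depth parameter exists so public wrappers can attribute entries to their callers: each wrapper adds one stack frame, and addStructured accounts for its own frame via depth + 1. The convention in a self-contained stdlib sketch, with logAt and infof as hypothetical names:

package main

import (
	"fmt"
	"runtime"
)

// logAt mirrors addStructured: depth 0 points at logAt's direct caller.
func logAt(depth int, format string, args ...interface{}) {
	_, file, line, _ := runtime.Caller(depth + 1)
	fmt.Printf("%s:%d: %s\n", file, line, fmt.Sprintf(format, args...))
}

// infof is a hypothetical public wrapper; depth 1 skips its own frame so
// the entry is attributed to infof's caller.
func infof(format string, args ...interface{}) {
	logAt(1, format, args...)
}

func main() {
	infof("hello %s", "world") // logged with this line's file:line
}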
Example 11
// RunTask adds one to the count of tasks left to drain in the system. Any
// worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include
// goroutines launched to do periodic work and the kv/db.go gateway which
// accepts external client requests.
//
// Returns false to indicate that the system is currently draining and
// function f was not called.
func (s *Stopper) RunTask(f func()) bool {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}
	if !s.runPrelude(key) {
		return false
	}
	// Call f.
	defer s.runPostlude(key)
	f()
	return true
}
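A hypothetical first mover checks the return value so no work is attempted once draining has begun (stopper and the task body are assumptions):

if !stopper.RunTask(func() {
	// ... one unit of foreground work ...
}) {
	return // draining: f was not called
}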
Example 12
// TestErrorSkipFrames verifies ErrorSkipFrames with an additional
// stack frame.
func TestErrorSkipFrames(t *testing.T) {
	var err error
	func() {
		err = ErrorSkipFrames(1, "foo ", 1, 3.14)
	}()
	file, line, _ := caller.Lookup(0)
	expected := fmt.Sprintf("%sfoo 1 3.14", fmt.Sprintf(errorPrefixFormat, file, line-1))
	if expected != err.Error() {
		t.Errorf("expected %s, got %s", expected, err.Error())
	}
}
Example 13
func newTxn(db DB, depth int) *Txn {
	txn := &Txn{
		db:      db,
		wrapped: db.Sender,
	}
	txn.db.Sender = (*txnSender)(txn)

	file, line, fun := caller.Lookup(depth + 1)
	txn.txn.Name = fmt.Sprintf("%s:%d %s", file, line, fun)
	return txn
}
Example 14
// RunTask adds one to the count of tasks left to drain in the system. Any
// worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include
// goroutines launched to do periodic work and the kv/db.go gateway which
// accepts external client requests.
//
// Returns false to indicate that the system is currently draining and
// function f was not called.
func (s *Stopper) RunTask(f func()) bool {
	file, line, _ := caller.Lookup(1)
	taskKey := fmt.Sprintf("%s:%d", file, line)
	if !s.runPrelude(taskKey) {
		return false
	}
	// Call f.
	defer s.runPostlude(taskKey)
	f()
	return true
}
Example 15
// addStructured creates a structured log entry to be written to the
// specified facility of the logger.
func addStructured(ctx context.Context, s Severity, depth int, format string, args []interface{}) {
	if ctx == nil {
		panic("nil context")
	}
	file, line, _ := caller.Lookup(depth + 1)
	msg := makeMessage(ctx, format, args)
	// makeMessage already added the tags when forming msg; we don't want
	// eventInternal to prepend them again.
	eventInternal(ctx, (s >= ErrorLog), false /*withTags*/, "%s", msg)
	logging.outputLogEntry(s, file, line, msg)
}
Example 16
// RunTask adds one to the count of tasks left to drain in the system. Any
// worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include
// goroutines launched to do periodic work and the kv/db.go gateway which
// accepts external client requests.
//
// Returns an error to indicate that the system is currently draining and
// function f was not called.
func (s *Stopper) RunTask(f func()) error {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}
	if !s.runPrelude(key) {
		return errUnavailable
	}
	// Call f.
	defer s.runPostlude(key)
	f()
	return nil
}
Example 17
func TestDebugName(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setup()
	defer s.Stop()

	file, _, _ := caller.Lookup(0)
	_ = db.Txn(func(txn *client.Txn) error {
		if !strings.HasPrefix(txn.DebugName(), file+":") {
			t.Fatalf("expected \"%s\" to have the prefix \"%s:\"", txn.DebugName(), file)
		}
		return nil
	})
}
Example 18
// RunAsyncTask runs function f in a goroutine. It returns an error when the
// Stopper is quiescing, in which case the function is not executed.
func (s *Stopper) RunAsyncTask(f func()) error {
	file, line, _ := caller.Lookup(1)
	key := taskKey{file, line}
	if !s.runPrelude(key) {
		return errUnavailable
	}
	// Call f.
	go func() {
		defer s.maybeHandlePanic()
		defer s.runPostlude(key)
		f()
	}()
	return nil
}
Example 19
func (t *Trace) add(name string) int {
	// Must be called with two callers to the client.
	// (Client->Event|Epoch->epoch->add)
	file, line, fun := caller.Lookup(3)
	t.Content = append(t.Content, TraceItem{
		depth:     t.depth,
		Origin:    t.tracer.origin,
		File:      file,
		Line:      line,
		Func:      fun,
		Timestamp: t.tracer.now(),
		Name:      name,
	})
	return len(t.Content) - 1
}
Example 20
func TestDebugName(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setup()
	defer s.Stop()

	file, _, _ := caller.Lookup(0)
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		if !strings.HasPrefix(txn.DebugName(), file+":") {
			t.Fatalf("expected \"%s\" to have the prefix \"%s:\"", txn.DebugName(), file)
		}
		return nil
	}); pErr != nil {
		t.Errorf("txn failed: %s", pErr)
	}
}
Example 21
// TestCommandQueueWriteWaitForNonAdjacentRead tests that the command queue
// lets a writer wait for a read which is separated from it through another
// read. Since reads don't wait for reads, there was a bug in which the writer
// would wind up waiting only for one of the two readers under it.
func TestCommandQueueWriteWaitForNonAdjacentRead(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cq := NewCommandQueue()
	key := roachpb.Key("a")
	// Add a read-only command.
	wk1 := add(cq, key, nil, true)
	// Add another one on top.
	wk2 := add(cq, key, nil, true)

	// A write should have to wait for **both** reads, not only the second
	// one.
	wg := sync.WaitGroup{}
	getWait(cq, key, nil, false /* !readOnly */, &wg)

	assert := func(blocked bool) {
		cmdDone := waitForCmd(&wg)
		d := time.Millisecond
		if !blocked {
			d *= 5
		}
		f, l, _ := caller.Lookup(1)
		if testCmdDone(cmdDone, d) {
			if blocked {
				t.Fatalf("%s:%d: command should not finish with command outstanding", f, l)
			}
		} else if !blocked {
			t.Fatalf("%s:%d: command should not have been blocked", f, l)
		}
	}

	// Certainly blocks now.
	assert(true)

	// The second read returns, but the first one remains.
	cq.remove(wk2)

	// Should still block. This being broken is why this test exists.
	assert(true)

	// First read returns.
	cq.remove(wk1)

	// Now it goes through.
	assert(false)
}
Example 22
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		if err := sb.Flush(); err != nil {
			return err
		}
		if err := sb.file.Close(); err != nil {
			return err
		}
	}
	var err error
	sb.file, _, err = create(sb.sev, now)
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	f, l, _ := caller.Lookup(1)
	for _, msg := range []string{
		fmt.Sprintf("[config] file created at: %s\n", now.Format("2006/01/02 15:04:05")),
		fmt.Sprintf("[config] running on machine: %s\n", host),
		fmt.Sprintf("[config] binary: %s\n", build.GetInfo().Short()),
		fmt.Sprintf("[config] arguments: %s\n", os.Args),
		fmt.Sprintf("line format: [IWEF]yymmdd hh:mm:ss.uuuuuu goid file:line msg\n"),
	} {
		buf := formatLogEntry(Entry{
			Severity:  sb.sev,
			Time:      now.UnixNano(),
			Goroutine: goid.Get(),
			File:      f,
			Line:      int64(l),
			Message:   msg,
		}, nil, nil)
		var n int
		n, err = sb.file.Write(buf.Bytes())
		sb.nbytes += uint64(n)
		if err != nil {
			return err
		}
		logging.putBuffer(buf)
	}
	return nil
}
Example 23
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
		checkRangeReplication(t, l, 20*time.Second)
		completed = true
		return l
	}
	f := farmer(t, "")
	c = f
	if err := f.Resize(*flagNodes); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		if destroyErr := f.Destroy(t); destroyErr != nil {
			t.Fatalf("could not destroy cluster after error %v: %v", err, destroyErr)
		}
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	completed = true
	return f
}
Example 24
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		if err := sb.Flush(); err != nil {
			return err
		}
		if err := sb.file.Close(); err != nil {
			return err
		}
	}
	var err error
	sb.file, _, err = create(sb.sev, now)
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	// Write header.
	file, line, _ := caller.Lookup(0)
	for _, format := range []string{
		fmt.Sprintf("Running on machine: %s", host),
		fmt.Sprintf("Binary: Built with %s %s for %s/%s",
			runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH),
	} {
		entry := LogEntry{
			Time:   time.Now().UnixNano(),
			File:   file,
			Line:   int32(line),
			Format: format,
		}
		n, err := sb.file.Write(encodeLogEntry(&entry))
		if err != nil {
			panic(err)
		}
		sb.nbytes += uint64(n)
	}
	return err
}
Example 25
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			_, _, fun := caller.Lookup(3)
			if !testFuncRE.MatchString(fun) {
				t.Fatalf("invalid caller %s; want TestX -> runTestOnConfigs -> func()", fun)
			}
			logDir = filepath.Join(logDir, fun)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
		checkRangeReplication(t, l, 20*time.Second)
		completed = true
		return l
	}
	f := farmer(t)
	c = f
	if err := f.Resize(*flagNodes, 0); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		_ = f.Destroy()
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	completed = true
	return f
}
Example 26
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T) cluster.Cluster {
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			if _, _, fun := caller.Lookup(1); fun != "" {
				logDir = filepath.Join(logDir, fun)
			}
		}
		l := cluster.CreateLocal(*flagNodes, *flagStores, logDir, stopper)
		l.Start()
		checkRangeReplication(t, l, 20*time.Second)
		return l
	}
	f := farmer(t)
	if err := f.Resize(*flagNodes, 0); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		_ = f.Destroy()
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	return f
}
Example 27
// AddStructured creates a structured log entry to be written to the
// specified facility of the logger.
func AddStructured(ctx context.Context, s Severity, depth int, format string, args []interface{}) {
	file, line, _ := caller.Lookup(depth + 1)
	entry := &LogEntry{}
	setLogEntry(ctx, format, args, entry)
	logging.outputLogEntry(s, file, line, false, entry)
}
Example 28
// SetDebugName sets the debug name associated with the transaction which will
// appear in log files and the web UI. Each transaction starts out with an
// automatically assigned debug name composed of the file and line number where
// the transaction was created.
func (txn *Txn) SetDebugName(name string) {
	file, line, _ := caller.Lookup(1)
	txn.txn.Name = fmt.Sprintf("%s:%d %s", file, line, name)
}
Example 29
func makeSrcCtx(depth int) SrcCtx {
	f, l, fun := caller.Lookup(depth + 1)
	return SrcCtx{File: f, Line: l, Function: fun}
}
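makeSrcCtx is the building block for errors that record where they were constructed. A self-contained stdlib analogue, with newError as a hypothetical consumer:

package main

import (
	"fmt"
	"runtime"
)

type SrcCtx struct {
	File     string
	Line     int
	Function string
}

// makeSrcCtx mirrors the function above with runtime in place of the
// caller package; depth+1 skips makeSrcCtx's own frame.
func makeSrcCtx(depth int) SrcCtx {
	pc, file, line, _ := runtime.Caller(depth + 1)
	fun := "unknown"
	if f := runtime.FuncForPC(pc); f != nil {
		fun = f.Name()
	}
	return SrcCtx{File: file, Line: line, Function: fun}
}

// newError records its caller as the error's origin, hence depth 1.
func newError(msg string) error {
	src := makeSrcCtx(1)
	return fmt.Errorf("%s:%d (%s): %s", src.File, src.Line, src.Function, msg)
}

func main() {
	fmt.Println(newError("disk full"))
}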
Example 30
func (l *loggingT) print(s Severity, args ...interface{}) {
	file, line, _ := caller.Lookup(1)
	entry := LogEntry{}
	setLogEntry(nil, "", args, &entry)
	l.outputLogEntry(s, file, line, false, &entry)
}