Example 1
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Create a transaction with intent at "a".
	key := roachpb.Key("a")
	txn1 := client.NewTxn(*s.DB)
	txn1.InternalSetPriority(1)
	if pErr := txn1.Put(key, []byte("value")); pErr != nil {
		t.Fatal(pErr)
	}

	// Push the transaction (by writing key "a" with higher priority) to abort it.
	txn2 := client.NewTxn(*s.DB)
	txn2.InternalSetPriority(2)
	if pErr := txn2.Put(key, []byte("value2")); pErr != nil {
		t.Fatal(pErr)
	}

	// Now end the transaction and verify we've cleaned up, even though
	// the EndTransaction failed.
	pErr := txn1.Commit()
	switch pErr.GoError().(type) {
	case *roachpb.TransactionAbortedError:
		// Expected
	default:
		t.Fatalf("expected transaction aborted error; got %s", pErr)
	}
	if pErr := txn2.Commit(); pErr != nil {
		t.Fatal(pErr)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()

	// Create a transaction with intent at "a".
	key := roachpb.Key("a")
	txn1 := client.NewTxn(context.Background(), *s.DB)
	txn1.InternalSetPriority(1)
	if err := txn1.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}

	// Push the transaction (by writing key "a" with higher priority) to abort it.
	txn2 := client.NewTxn(context.Background(), *s.DB)
	txn2.InternalSetPriority(2)
	if err := txn2.Put(key, []byte("value2")); err != nil {
		t.Fatal(err)
	}

	// Now end the transaction and verify we've cleaned up, even though
	// the EndTransaction failed.
	err := txn1.CommitOrCleanup()
	assertTransactionAbortedError(t, err)
	if err := txn2.CommitOrCleanup(); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, sender, s.Eng, t)
}
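
The assertTransactionAbortedError helper used above is not shown in these examples. Below is a minimal sketch of what it checks, assuming the aborted error is surfaced directly, matching the type assertions used elsewhere in these tests; the real helper may differ.

// assertTransactionAbortedError is a sketch of the helper referenced above
// (assumption: the error is a bare *roachpb.TransactionAbortedError).
func assertTransactionAbortedError(t *testing.T, err error) {
	if _, ok := err.(*roachpb.TransactionAbortedError); !ok {
		t.Fatalf("expected TransactionAbortedError but got %T: %s", err, err)
	}
}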
Example 3
// TestTxnCoordSenderBeginTransaction verifies that a command sent with a
// non-nil Txn with an empty ID gets a new transaction initialized.
func TestTxnCoordSenderBeginTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := client.NewTxn(*s.DB)

	// Put request will create a new transaction.
	key := roachpb.Key("key")
	txn.InternalSetPriority(10)
	txn.Proto.Isolation = roachpb.SNAPSHOT
	txn.Proto.Name = "test txn"
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}
	if txn.Proto.Name != "test txn" {
		t.Errorf("expected txn name to be %q; got %q", "test txn", txn.Proto.Name)
	}
	if txn.Proto.Priority != 10 {
		t.Errorf("expected txn priority 10; got %d", txn.Proto.Priority)
	}
	if !bytes.Equal(txn.Proto.Key, key) {
		t.Errorf("expected txn Key to match %q != %q", key, txn.Proto.Key)
	}
	if txn.Proto.Isolation != roachpb.SNAPSHOT {
		t.Errorf("expected txn isolation to be SNAPSHOT; got %s", txn.Proto.Isolation)
	}
}
// TestTxnCoordSenderGCTimeout verifies that the coordinator cleans up extant
// transactions and intents after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGCTimeout(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()

	// Set heartbeat interval to 1ms for testing.
	sender.heartbeatInterval = 1 * time.Millisecond

	txn := client.NewTxn(context.Background(), *s.DB)
	key := roachpb.Key("a")
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}

	// Now, advance clock past the default client timeout.
	// Locking the TxnCoordSender to prevent a data race.
	sender.Lock()
	s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
	sender.Unlock()

	txnID := *txn.Proto.ID

	util.SucceedsSoon(t, func() error {
		// Locking the TxnCoordSender to prevent a data race.
		sender.Lock()
		_, ok := sender.txns[txnID]
		sender.Unlock()
		if ok {
			return util.Errorf("expected garbage collection")
		}
		return nil
	})

	verifyCleanup(key, sender, s.Eng, t)
}
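
verifyCleanup appears throughout these tests but is never shown. The sketch below approximates what its call sites imply it asserts: the coordinator tracks no transactions, and no write intent survives at the key. The engine read is an assumption about the MVCC API of this era, not the helper's actual implementation.

// verifyCleanupSketch approximates the verifyCleanup helper; the MVCCGet
// signature and roachpb.MaxTimestamp are assumptions for illustration.
func verifyCleanupSketch(key roachpb.Key, coord *TxnCoordSender, eng engine.Engine, t *testing.T) {
	util.SucceedsSoon(t, func() error {
		// The coordinator should have dropped all transaction state.
		coord.Lock()
		n := len(coord.txns)
		coord.Unlock()
		if n != 0 {
			return util.Errorf("expected empty txns map; got %d entries", n)
		}
		// A consistent read fails while a write intent is still present.
		if _, _, err := engine.MVCCGet(eng, key, roachpb.MaxTimestamp, true, nil); err != nil {
			return util.Errorf("key %q not cleaned up: %s", key, err)
		}
		return nil
	})
}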
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	initialTxn := client.NewTxn(*s.DB)
	if err := initialTxn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}

	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		util.SucceedsSoon(t, func() error {
			ok, txn, pErr := getTxn(s.Sender, &initialTxn.Proto)
			if !ok || pErr != nil {
				t.Fatalf("got txn: %t: %s", ok, pErr)
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return nil
			}
			return util.Errorf("expected heartbeat")
		})
	}
}
Example 6
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	txn := client.NewTxn(*s.DB)
	if pErr := txn.Put(roachpb.Key("a"), []byte("value")); pErr != nil {
		t.Fatal(pErr)
	}

	// Now, advance clock past the default client timeout.
	// Locking the TxnCoordSender to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
	s.Sender.Unlock()

	if err := util.IsTrueWithin(func() bool {
		// Locking the TxnCoordSender to prevent a data race.
		s.Sender.Lock()
		_, ok := s.Sender.txns[string(txn.Proto.ID)]
		s.Sender.Unlock()
		return !ok
	}, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
func TestTxnRestartCount(t *testing.T) {
	defer leaktest.AfterTest(t)()
	_, sender, cleanupFn := setupMetricsTest(t)
	defer cleanupFn()

	key := []byte("key-restart")
	value := []byte("value")
	db := client.NewDB(sender)

	// Start a transaction and do a GET. This forces a timestamp to be chosen for the transaction.
	txn := client.NewTxn(*db)
	if _, err := txn.Get(key); err != nil {
		t.Fatal(err)
	}

	// Outside of the transaction, read the same key as was read within the transaction. This
	// means that future attempts to write will increase the timestamp.
	if _, err := db.Get(key); err != nil {
		t.Fatal(err)
	}

	// This put increases the candidate timestamp, which causes an immediate restart.
	if err := txn.Put(key, value); err == nil {
		t.Fatalf("unexpected success")
	}
	teardownHeartbeats(sender)
	checkTxnMetrics(t, sender, "restart txn", 0, 1, 0, 1)
}
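
For contrast, a caller that wants such restarts absorbed automatically would use the client's closure-based retry loop instead of driving the Txn by hand. A sketch, reusing key, value, and db from the test above and assuming the db.Txn API of this era:

// db.Txn re-invokes the closure on retryable errors, so a restart like the
// one provoked above would simply trigger another attempt.
if err := db.Txn(func(txn *client.Txn) error {
	if _, err := txn.Get(key); err != nil {
		return err
	}
	return txn.Put(key, value)
}); err != nil {
	t.Fatal(err)
}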
Example 8
// ExecuteStatements executes the given statement(s) and returns a response.
func (e *Executor) ExecuteStatements(
	user string, session *Session, stmts string,
	params []parser.Datum) StatementResults {

	planMaker := plannerPool.Get().(*planner)
	defer releasePlanner(planMaker)

	cfg, cache := e.getSystemConfig()
	*planMaker = planner{
		user: user,
		evalCtx: parser.EvalContext{
			NodeID:      e.nodeID,
			ReCache:     e.reCache,
			GetLocation: session.getLocation,
		},
		leaseMgr:      e.ctx.LeaseManager,
		systemConfig:  cfg,
		databaseCache: cache,
		session:       session,
	}

	curTxnState := txnState{
		txn:     nil,
		aborted: session.Txn.TxnAborted,
	}
	if txnProto := session.Txn.Txn; txnProto != nil {
		// A pending transaction is already present, resume it.
		curTxnState.txn = client.NewTxn(*e.ctx.DB)
		curTxnState.txn.Proto = *txnProto
		curTxnState.txn.UserPriority = session.Txn.UserPriority
		if session.Txn.MutatesSystemConfig {
			curTxnState.txn.SetSystemConfigTrigger()
		}
		planMaker.setTxn(curTxnState.txn)
	}

	session.Txn = Session_Transaction{}

	// Send the Request for SQL execution and set the application-level error
	// for each result in the reply.
	planMaker.params = parameters(params)
	res := e.execRequest(&curTxnState, stmts, planMaker)

	// Send back the session state even if there were application-level errors.
	// Add transaction to session state.
	session.Txn.TxnAborted = curTxnState.aborted
	if curTxnState.txn != nil {
		// TODO(pmattis): Need to associate the leases used by a transaction with
		// the session state.
		planMaker.releaseLeases()
		session.Txn.Txn = &curTxnState.txn.Proto
		session.Txn.UserPriority = curTxnState.txn.UserPriority
		session.Txn.MutatesSystemConfig = curTxnState.txn.SystemConfigTrigger()
	} else {
		session.Txn.Txn = nil
		session.Txn.MutatesSystemConfig = false
	}

	return res
}
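
Because the transaction proto is round-tripped through the Session, a SQL transaction can span multiple ExecuteStatements calls. A hedged usage sketch, reusing the names from the function above; the statements are illustrative only, and whether BEGIN leaves the txn open here is an assumption about execRequest:

// After the first call, session.Txn.Txn is non-nil because BEGIN left a
// transaction open...
res := e.ExecuteStatements(user, session, "BEGIN; UPDATE t SET v = 1 WHERE k = 1", nil)
_ = res
// ...so the next call reconstructs the same client.Txn from the saved proto
// before executing COMMIT.
res = e.ExecuteStatements(user, session, "COMMIT", nil)
_ = res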
Example 9
// TestTxnCoordSenderAddIntentOnError verifies that an intent is tracked by
// the coordinator even when the request that wrote it returns an error, as
// long as the transaction itself is being tracked.
func TestTxnCoordSenderAddIntentOnError(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Create a transaction with intent at "x".
	key := roachpb.Key("x")
	txn := client.NewTxn(*s.DB)
	// Write so that the coordinator begins tracking this txn.
	if err := txn.Put("x", "y"); err != nil {
		t.Fatal(err)
	}
	err, ok := txn.CPut(key, []byte("x"), []byte("born to fail")).GoError().(*roachpb.ConditionFailedError)
	if !ok {
		t.Fatal(err)
	}
	s.Sender.Lock()
	intentSpans := s.Sender.txns[string(txn.Proto.ID)].intentSpans()
	expSpans := []roachpb.Span{{Key: key, EndKey: []byte("")}}
	equal := reflect.DeepEqual(intentSpans, expSpans)
	s.Sender.Unlock()
	if pErr := txn.Rollback(); pErr != nil {
		t.Fatal(pErr)
	}
	if !equal {
		t.Fatalf("expected stored intents %v, got %v", expSpans, intentSpans)
	}
}
Example 10
// TestTxnInitialTimestamp verifies that the timestamp requested
// before the Txn is created is honored.
func TestTxnInitialTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)

	txn := client.NewTxn(context.Background(), *s.DB)

	// Request a specific timestamp.
	refTimestamp := roachpb.Timestamp{WallTime: 42, Logical: 69}
	txn.Proto.OrigTimestamp = refTimestamp

	// Put request will create a new transaction.
	key := roachpb.Key("key")
	txn.InternalSetPriority(10)
	txn.Proto.Isolation = roachpb.SNAPSHOT
	txn.Proto.Name = "test txn"
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}
	if txn.Proto.OrigTimestamp != refTimestamp {
		t.Errorf("expected txn orig ts to be %s; got %s", refTimestamp, txn.Proto.OrigTimestamp)
	}
	if txn.Proto.Timestamp != refTimestamp {
		t.Errorf("expected txn ts to be %s; got %s", refTimestamp, txn.Proto.Timestamp)
	}
}
Example 11
// Prepare returns the result types of the given statement. Args may be a
// partially populated val args map. Prepare will populate the missing val
// args. The column result types are returned (or nil if there are no results).
func (e *Executor) Prepare(query string, session *Session, args parser.MapArgs) (
	[]ResultColumn, *roachpb.Error) {
	stmt, err := parser.ParseOne(query, parser.Syntax(session.Syntax))
	if err != nil {
		return nil, roachpb.NewError(err)
	}

	session.planner.resetForBatch(e)
	session.planner.evalCtx.Args = args
	session.planner.evalCtx.PrepareOnly = true

	// TODO(andrei): does the prepare phase really need a Txn?
	txn := client.NewTxn(*e.ctx.DB)
	txn.Proto.Isolation = session.DefaultIsolationLevel
	session.planner.setTxn(txn)
	defer session.planner.setTxn(nil)

	plan, pErr := session.planner.prepare(stmt)
	if pErr != nil {
		return nil, pErr
	}
	if plan == nil {
		return nil, nil
	}
	cols := plan.Columns()
	for _, c := range cols {
		if err := checkResultDatum(c.Typ); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	return cols, nil
}
Example 12
// TestTxnCoordIdempotentCleanup verifies that cleanupTxnLocked is idempotent.
func TestTxnCoordIdempotentCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)

	txn := client.NewTxn(context.Background(), *s.DB)
	ba := txn.NewBatch()
	ba.Put(roachpb.Key("a"), []byte("value"))
	if err := txn.Run(ba); err != nil {
		t.Fatal(err)
	}

	sender.Lock()
	// Clean up twice successively.
	sender.cleanupTxnLocked(context.Background(), txn.Proto)
	sender.cleanupTxnLocked(context.Background(), txn.Proto)
	sender.Unlock()

	// For good measure, try to commit (which cleans up once more if it
	// succeeds, which it may not if the previous cleanup has already
	// terminated the heartbeat goroutine)
	ba = txn.NewBatch()
	ba.AddRawRequest(&roachpb.EndTransactionRequest{})
	err := txn.Run(ba)
	if err != nil && !testutils.IsError(err, errNoState.Error()) {
		t.Fatal(err)
	}
}
Example 13
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// 4 cases: no deadline, past deadline, equal deadline, future deadline.
	for i := 0; i < 4; i++ {
		key := roachpb.Key("key: " + strconv.Itoa(i))
		txn := client.NewTxn(*s.DB)
		// Initialize the transaction
		if err := txn.Put(key, []byte("value")); err != nil {
			t.Fatal(err)
		}

		var deadline *roachpb.Timestamp
		switch i {
		case 0:
			// No deadline.
		case 1:
			// Past deadline.
			ts := txn.Proto.Timestamp.Prev()
			deadline = &ts
		case 2:
			// Equal deadline.
			deadline = &txn.Proto.Timestamp
		case 3:
			// Future deadline.
			ts := txn.Proto.Timestamp.Next()
			deadline = &ts
		}

		{
			err := txn.CommitBy(deadline)
			switch i {
			case 0:
				// No deadline.
				if err != nil {
					t.Error(err)
				}
			case 1:
				// Past deadline.
				if _, ok := err.(*roachpb.TransactionAbortedError); !ok {
					t.Errorf("expected TransactionAbortedError but got %T: %s", err, err)
				}
			case 2:
				// Equal deadline.
				if err != nil {
					t.Error(err)
				}
			case 3:
				// Future deadline.
				if err != nil {
					t.Error(err)
				}
			}
		}
		verifyCleanup(key, s.Sender, s.Eng, t)
	}
}
Example 14
// reset creates a new Txn and initializes it using the session defaults.
func (ts *txnState) reset(ctx context.Context, e *Executor, s *Session) {
	*ts = txnState{}
	ts.txn = client.NewTxn(ctx, *e.ctx.DB)
	ts.txn.Proto.Isolation = s.DefaultIsolationLevel
	ts.tr = s.Trace
	// Discard the old schemaChangers, if any.
	ts.schemaChangers = schemaChangerCollection{}
}
Example 15
// ExecuteStatements executes the given statement(s) and returns a response.
// On error, the returned integer is an HTTP error code.
func (e *Executor) ExecuteStatements(user string, session Session, stmts string, params []parser.Datum) (Response, int, error) {
	planMaker := plannerPool.Get().(*planner)
	defer plannerPool.Put(planMaker)

	*planMaker = planner{
		user: user,
		evalCtx: parser.EvalContext{
			NodeID:  e.nodeID,
			ReCache: e.reCache,
			// Copy existing GetLocation closure. See plannerPool.New() for the
			// initial setting.
			GetLocation: planMaker.evalCtx.GetLocation,
		},
		leaseMgr:     e.leaseMgr,
		systemConfig: e.getSystemConfig(),
		session:      session,
	}

	// Resume a pending transaction if present.
	if planMaker.session.Txn != nil {
		txn := client.NewTxn(e.db)
		txn.Proto = planMaker.session.Txn.Txn
		txn.UserPriority = planMaker.session.Txn.UserPriority
		if planMaker.session.MutatesSystemConfig {
			txn.SetSystemConfigTrigger()
		}
		planMaker.setTxn(txn, planMaker.session.Txn.Timestamp.GoTime())
	}

	// Send the Request for SQL execution and set the application-level error
	// for each result in the reply.
	planMaker.params = parameters(params)
	reply := e.execStmts(stmts, planMaker)

	// Send back the session state even if there were application-level errors.
	// Add transaction to session state.
	if planMaker.txn != nil {
		// TODO(pmattis): Need to record the leases used by a transaction within
		// the transaction state and restore it when the transaction is restored.
		planMaker.releaseLeases(e.db)
		planMaker.session.Txn = &Session_Transaction{
			Txn:          planMaker.txn.Proto,
			Timestamp:    driver.Timestamp(planMaker.evalCtx.TxnTimestamp.Time),
			UserPriority: planMaker.txn.UserPriority,
		}
		planMaker.session.MutatesSystemConfig = planMaker.txn.SystemConfigTrigger()
	} else {
		planMaker.session.Txn = nil
		planMaker.session.MutatesSystemConfig = false
	}
	bytes, err := proto.Marshal(&planMaker.session)
	if err != nil {
		return Response{}, http.StatusInternalServerError, err
	}
	reply.Session = bytes

	return reply, 0, nil
}
Example 16
func (ds *ServerImpl) setupTxn(
	ctx context.Context,
	txnProto *roachpb.Transaction,
) *client.Txn {
	txn := client.NewTxn(ctx, *ds.ctx.DB)
	// TODO(radu): we should sanity check some of these fields
	txn.Proto = *txnProto
	return txn
}
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()

	// 4 cases: no deadline, past deadline, equal deadline, future deadline.
	for i := 0; i < 4; i++ {
		key := roachpb.Key("key: " + strconv.Itoa(i))
		txn := client.NewTxn(context.Background(), *s.DB)
		// Initialize the transaction
		if pErr := txn.Put(key, []byte("value")); pErr != nil {
			t.Fatal(pErr)
		}

		{
			var pErr *roachpb.Error
			switch i {
			case 0:
				// No deadline.
				pErr = txn.CommitOrCleanup()
			case 1:
				// Past deadline.
				pErr = txn.CommitBy(txn.Proto.Timestamp.Prev())
			case 2:
				// Equal deadline.
				pErr = txn.CommitBy(txn.Proto.Timestamp)
			case 3:
				// Future deadline.
				pErr = txn.CommitBy(txn.Proto.Timestamp.Next())
			}

			switch i {
			case 0:
				// No deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			case 1:
				// Past deadline.
				if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
					t.Errorf("expected TransactionAbortedError but got %T: %s", pErr, pErr)
				}
			case 2:
				// Equal deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			case 3:
				// Future deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			}
		}
		verifyCleanup(key, s.Sender, s.Eng, t)
	}
}
Example 18
// TestTxnDrainingNode tests that a pending transaction's intents are
// resolved if it commits while the node is draining, and that a
// NodeUnavailableError is received when attempting to run a new
// transaction on a draining node.
func TestTxnDrainingNode(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)

	done := make(chan struct{})
	// Dummy task that keeps the node in draining state.
	if !s.Stopper.RunAsyncTask(func() {
		<-done
	}) {
		t.Fatal("stopper draining prematurely")
	}

	key := roachpb.Key("a")
	txn := client.NewTxn(*s.DB)
	// Begin before draining.
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}
	go func() {
		s.Stopper.Stop()
	}()
	util.SucceedsWithin(t, time.Second, func() error {
		if s.Stopper.RunTask(func() {}) {
			return errors.New("stopper not yet draining")
		}
		return nil
	})
	// Commit after draining.
	if err := txn.Commit(); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t) // make sure intent gets resolved

	// Attempt to start another transaction, but it should be too late.
	txn2 := client.NewTxn(*s.DB)
	err := txn2.Put(key, []byte("value"))
	if _, ok := err.(*roachpb.NodeUnavailableError); !ok {
		teardownHeartbeats(s.Sender)
		t.Fatal(err)
	}
	close(done)
	<-s.Stopper.IsStopped()
}
Example 19
// TestTxnCoordSenderMultipleTxns verifies correct operation with
// multiple outstanding transactions.
func TestTxnCoordSenderMultipleTxns(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn1 := client.NewTxn(*s.DB)
	txn2 := client.NewTxn(*s.DB)

	if err := txn1.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	if err := txn2.Put(roachpb.Key("b"), []byte("value")); err != nil {
		t.Fatal(err)
	}

	if len(s.Sender.txns) != 2 {
		t.Errorf("expected length of transactions map to be 2; got %d", len(s.Sender.txns))
	}
}
Example 20
// Execute the statement(s) in the given request and return a response.
// On error, the returned integer is an HTTP error code.
func (e *Executor) Execute(args driver.Request) (driver.Response, int, error) {
	planMaker := plannerPool.Get().(*planner)
	defer plannerPool.Put(planMaker)

	*planMaker = planner{
		user: args.GetUser(),
		evalCtx: parser.EvalContext{
			NodeID:  e.nodeID,
			ReCache: e.reCache,
			// Copy existing GetLocation closure. See plannerPool.New() for the
			// initial setting.
			GetLocation: planMaker.evalCtx.GetLocation,
		},
		leaseMgr:     e.leaseMgr,
		systemConfig: e.getSystemConfig(),
	}

	// Pick up current session state.
	if err := proto.Unmarshal(args.Session, &planMaker.session); err != nil {
		return args.CreateReply(), http.StatusBadRequest, err
	}
	// Resume a pending transaction if present.
	if planMaker.session.Txn != nil {
		txn := client.NewTxn(e.db)
		txn.Proto = planMaker.session.Txn.Txn
		if planMaker.session.MutatesSystemDB {
			txn.SetSystemDBTrigger()
		}
		planMaker.setTxn(txn, planMaker.session.Txn.Timestamp.GoTime())
	}

	// Send the Request for SQL execution and set the application-level error
	// for each result in the reply.
	planMaker.params = parameters(args.Params)
	reply := e.execStmts(args.Sql, planMaker)

	// Send back the session state even if there were application-level errors.
	// Add transaction to session state.
	if planMaker.txn != nil {
		planMaker.session.Txn = &Session_Transaction{Txn: planMaker.txn.Proto, Timestamp: driver.Timestamp(planMaker.evalCtx.TxnTimestamp.Time)}
		planMaker.session.MutatesSystemDB = planMaker.txn.SystemDBTrigger()
	} else {
		planMaker.session.Txn = nil
		planMaker.session.MutatesSystemDB = false
	}
	bytes, err := proto.Marshal(&planMaker.session)
	if err != nil {
		return args.CreateReply(), http.StatusInternalServerError, err
	}
	reply.Session = bytes

	return reply, 0, nil
}
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	key := roachpb.Key("a")
	txn := client.NewTxn(*s.DB)
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}
	if err := txn.Commit(); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
Example 22
// Prepare returns the result types of the given statement. Args may be a
// partially populated val args map. Prepare will populate the missing val
// args. The column result types are returned (or nil if there are no results).
func (e *Executor) Prepare(user string, query string, session *Session, args parser.MapArgs) (
	[]ResultColumn, *roachpb.Error) {
	stmt, err := parser.ParseOne(query, parser.Syntax(session.Syntax))
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	planMaker := plannerPool.Get().(*planner)
	defer releasePlanner(planMaker)

	cfg, cache := e.getSystemConfig()
	*planMaker = planner{
		user: user,
		evalCtx: parser.EvalContext{
			NodeID:      e.nodeID,
			ReCache:     e.reCache,
			GetLocation: session.getLocation,
			Args:        args,
			PrepareOnly: true,
		},
		leaseMgr:      e.ctx.LeaseManager,
		systemConfig:  cfg,
		databaseCache: cache,
		session:       session,
		execCtx:       &e.ctx,
	}

	txn := client.NewTxn(*e.ctx.DB)
	txn.Proto.Isolation = session.DefaultIsolationLevel

	planMaker.setTxn(txn)
	plan, pErr := planMaker.prepare(stmt)
	if pErr != nil {
		return nil, pErr
	}
	if plan == nil {
		return nil, nil
	}
	cols := plan.Columns()
	for _, c := range cols {
		if err := checkResultDatum(c.Typ); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	return cols, nil
}
Example 23
// TestTxnCoordSenderReleaseTxnMeta verifies that TxnCoordSender releases the
// txnMetadata after the txn has committed successfully.
func TestTxnCoordSenderReleaseTxnMeta(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := client.NewTxn(*s.DB)
	ba := txn.NewBatch()
	ba.Put(roachpb.Key("a"), []byte("value"))
	ba.Put(roachpb.Key("b"), []byte("value"))
	if err := txn.CommitInBatch(ba); err != nil {
		t.Fatal(err)
	}

	if _, ok := s.Sender.txns[string(txn.Proto.ID)]; ok {
		t.Fatal("expected TxnCoordSender has released the txn")
	}
}
Example 24
// TestTxnCoordSenderKeyRanges verifies that multiple requests to the same or
// overlapping key ranges cause the coordinator to keep track of only the
// minimum number of ranges.
func TestTxnCoordSenderKeyRanges(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ranges := []struct {
		start, end roachpb.Key
	}{
		{roachpb.Key("a"), roachpb.Key(nil)},
		{roachpb.Key("a"), roachpb.Key(nil)},
		{roachpb.Key("aa"), roachpb.Key(nil)},
		{roachpb.Key("b"), roachpb.Key(nil)},
		{roachpb.Key("aa"), roachpb.Key("c")},
		{roachpb.Key("b"), roachpb.Key("c")},
	}

	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)

	txn := client.NewTxn(context.Background(), *s.DB)
	for _, rng := range ranges {
		if rng.end != nil {
			if err := txn.DelRange(rng.start, rng.end); err != nil {
				t.Fatal(err)
			}
		} else {
			if err := txn.Put(rng.start, []byte("value")); err != nil {
				t.Fatal(err)
			}
		}
	}

	txnID := *txn.Proto.ID

	// Verify that the transaction metadata contains only two entries
	// in its "keys" range group: the point "a" and the range "aa"-"c".
	txnMeta, ok := sender.txns[txnID]
	if !ok {
		t.Fatalf("expected a transaction to be created on coordinator")
	}
	roachpb.MergeSpans(&txnMeta.keys)
	keys := txnMeta.keys
	if len(keys) != 2 {
		t.Errorf("expected 2 entries in keys range group; got %v", keys)
	}
}
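
The merging the test relies on, in isolation: the same roachpb.MergeSpans call used above collapses the six written spans down to a point and a single range.

// MergeSpans rewrites the slice through the pointer. The duplicate "a"
// writes collapse to one point span, while "aa", "b", and both ranges all
// fall within ["aa","c"), leaving exactly the two entries the test expects.
spans := []roachpb.Span{
	{Key: roachpb.Key("a")},
	{Key: roachpb.Key("a")},
	{Key: roachpb.Key("aa")},
	{Key: roachpb.Key("b")},
	{Key: roachpb.Key("aa"), EndKey: roachpb.Key("c")},
	{Key: roachpb.Key("b"), EndKey: roachpb.Key("c")},
}
roachpb.MergeSpans(&spans)
// spans is now [{"a"}, {"aa"-"c"}].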
Example 25
// Prepare returns the result types of the given statement. Args may be a
// partially populated val args map. Prepare will populate the missing val
// args. The column result types are returned (or nil if there are no results).
func (e *Executor) Prepare(user string, query string, args parser.MapArgs) ([]ResultColumn, *roachpb.Error) {
	stmt, err := parser.ParseOneTraditional(query)
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	planMaker := plannerPool.Get().(*planner)
	defer plannerPool.Put(planMaker)

	cfg, cache := e.getSystemConfig()
	*planMaker = planner{
		user: user,
		evalCtx: parser.EvalContext{
			NodeID:  e.nodeID,
			ReCache: e.reCache,
			// Copy existing GetLocation closure. See plannerPool.New() for the
			// initial setting.
			GetLocation: planMaker.evalCtx.GetLocation,
			Args:        args,
		},
		leaseMgr:      e.leaseMgr,
		systemConfig:  cfg,
		databaseCache: cache,
	}

	timestamp := time.Now()
	txn := client.NewTxn(e.db)
	planMaker.setTxn(txn, timestamp)
	planMaker.evalCtx.StmtTimestamp = parser.DTimestamp{Time: timestamp}
	plan, pErr := planMaker.prepare(stmt)
	if pErr != nil {
		return nil, pErr
	}
	if plan == nil {
		return nil, nil
	}
	cols := plan.Columns()
	for _, c := range cols {
		if err := checkResultDatum(c.Typ); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	return cols, nil
}
Example 26
// Execute the statement(s) in the given request and return a response.
// On error, the returned integer is an HTTP error code.
func (e *Executor) Execute(args driver.Request) (driver.Response, int, error) {
	planMaker := planner{
		user: args.GetUser(),
		evalCtx: parser.EvalContext{
			NodeID: e.nodeID,
		},
		systemConfig: e.getSystemConfig(),
	}
	// Pick up current session state.
	if err := gogoproto.Unmarshal(args.Session, &planMaker.session); err != nil {
		return args.CreateReply(), http.StatusBadRequest, err
	}
	// Open a pending transaction if needed.
	if planMaker.session.Txn != nil {
		txn := client.NewTxn(e.db)
		txn.Proto = *planMaker.session.Txn
		if planMaker.session.MutatesSystemDB {
			txn.SetSystemDBTrigger()
		}
		planMaker.txn = txn
	}

	// Send the Request for SQL execution and set the application-level error
	// for each result in the reply.
	reply := e.execStmts(args.Sql, parameters(args.Params), &planMaker)

	// Send back the session state even if there were application-level errors.
	// Add transaction to session state.
	if planMaker.txn != nil {
		planMaker.session.Txn = &planMaker.txn.Proto
		planMaker.session.MutatesSystemDB = planMaker.txn.SystemDBTrigger()
	} else {
		planMaker.session.Txn = nil
		planMaker.session.MutatesSystemDB = false
	}
	bytes, err := gogoproto.Marshal(&planMaker.session)
	if err != nil {
		return args.CreateReply(), http.StatusInternalServerError, err
	}
	reply.Session = bytes

	return reply, 0, nil
}
// TestTxnCoordSenderKeyRanges verifies that multiple requests to the same or
// overlapping key ranges cause the coordinator to keep track of only the
// minimum number of ranges.
func TestTxnCoordSenderKeyRanges(t *testing.T) {
	defer leaktest.AfterTest(t)
	ranges := []struct {
		start, end roachpb.Key
	}{
		{roachpb.Key("a"), roachpb.Key(nil)},
		{roachpb.Key("a"), roachpb.Key(nil)},
		{roachpb.Key("aa"), roachpb.Key(nil)},
		{roachpb.Key("b"), roachpb.Key(nil)},
		{roachpb.Key("aa"), roachpb.Key("c")},
		{roachpb.Key("b"), roachpb.Key("c")},
	}

	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := client.NewTxn(*s.DB)
	for _, rng := range ranges {
		if rng.end != nil {
			if err := txn.DelRange(rng.start, rng.end); err != nil {
				t.Fatal(err)
			}
		} else {
			if err := txn.Put(rng.start, []byte("value")); err != nil {
				t.Fatal(err)
			}
		}
	}

	txnID := *txn.Proto.ID
	txnIDStr := txnID.String()

	// Verify that the transaction metadata contains only two entries
	// in its "keys" interval cache: the point "a" and the range "aa"-"c".
	txnMeta, ok := s.Sender.txns[txnIDStr]
	if !ok {
		t.Fatalf("expected a transaction to be created on coordinator")
	}
	if txnMeta.keys.Len() != 2 {
		t.Errorf("expected 2 entries in keys interval cache; got %v", txnMeta.keys)
	}
}
Example 28
// TestTxnCoordSenderBeginTransactionMinPriority verifies that when starting
// a new transaction, a non-zero priority is treated as a minimum value.
func TestTxnCoordSenderBeginTransactionMinPriority(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := client.NewTxn(*s.DB)

	// Put request will create a new transaction.
	key := roachpb.Key("key")
	txn.InternalSetPriority(10)
	txn.Proto.Isolation = roachpb.SNAPSHOT
	txn.Proto.Priority = 11
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}
	if prio := txn.Proto.Priority; prio != 11 {
		t.Errorf("expected txn priority 11; got %d", prio)
	}
}
Example 29
// Prepare returns the result types of the given statement. pinfo may
// contain partial type information for placeholders. Prepare will
// populate the missing types. The column result types are returned (or
// nil if there are no results).
func (e *Executor) Prepare(
	ctx context.Context,
	query string,
	session *Session,
	pinfo parser.PlaceholderTypes,
) ([]ResultColumn, error) {
	stmt, err := parser.ParseOne(query, parser.Syntax(session.Syntax))
	if err != nil {
		return nil, err
	}
	if err = pinfo.ProcessPlaceholderAnnotations(stmt); err != nil {
		return nil, err
	}

	session.planner.resetForBatch(e)
	session.planner.semaCtx.Placeholders.SetTypes(pinfo)
	session.planner.evalCtx.PrepareOnly = true

	// Prepare needs a transaction because it needs to retrieve db/table
	// descriptors for type checking.
	txn := client.NewTxn(ctx, *e.ctx.DB)
	txn.Proto.Isolation = session.DefaultIsolationLevel
	session.planner.setTxn(txn)
	defer session.planner.setTxn(nil)

	plan, err := session.planner.prepare(stmt)
	if err != nil {
		return nil, err
	}
	if plan == nil {
		return nil, nil
	}
	cols := plan.Columns()
	for _, c := range cols {
		if err := checkResultDatum(c.Typ); err != nil {
			return nil, err
		}
	}
	return cols, nil
}
// TestTxnCoordSenderAddRequest verifies that adding a request creates
// transaction metadata, and that adding multiple requests with the same
// transaction ID updates the last update timestamp.
func TestTxnCoordSenderAddRequest(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := client.NewTxn(*s.DB)

	// Put request will create a new transaction.
	if err := txn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	txnID := *txn.Proto.ID
	txnIDStr := txnID.String()
	txnMeta, ok := s.Sender.txns[txnIDStr]
	if !ok {
		t.Fatal("expected a transaction to be created on coordinator")
	}
	if !txn.Proto.Writing {
		t.Fatal("txn is not marked as writing")
	}
	ts := atomic.LoadInt64(&txnMeta.lastUpdateNanos)

	// Advance time and send another put request. Lock the coordinator
	// to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(1)
	s.Sender.Unlock()
	if err := txn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	if len(s.Sender.txns) != 1 {
		t.Errorf("expected length of transactions map to be 1; got %d", len(s.Sender.txns))
	}
	txnMeta = s.Sender.txns[txnIDStr]
	if lu := atomic.LoadInt64(&txnMeta.lastUpdateNanos); ts >= lu || lu != s.Manual.UnixNano() {
		t.Errorf("expected last update time to advance; got %d", lu)
	}
}