Code example #1
File: conn.go Project: netroby/tidb
func (cc *clientConn) Run() {
	defer func() {
		r := recover()
		if r != nil {
			const size = 4096
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Errorf("lastCmd %s, %v, %s", cc.lastCmd, r, buf)
		}
		cc.Close()
	}()

	for {
		cc.alloc.Reset()
		data, err := cc.readPacket()
		if err != nil {
			if terror.ErrorNotEqual(err, io.EOF) {
				log.Error(err)
			}
			return
		}

		if err := cc.dispatch(data); err != nil {
			if terror.ErrorEqual(err, io.EOF) {
				return
			}
			log.Errorf("dispatch error %s, %s", errors.ErrorStack(err), cc)
			log.Errorf("cmd: %s", string(data[1:]))
			cc.writeError(err)
		}

		cc.pkg.sequence = 0
	}
}
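Every example on this page uses the same comparison idiom: errors are checked against a sentinel with terror.ErrorEqual / terror.ErrorNotEqual instead of ==, so that errors wrapped by errors.Trace or errors.Annotate still match. The following is a minimal, self-contained sketch of that idiom; readPacket is a hypothetical stand-in for cc.readPacket, and the import path is an assumption based on the TiDB revisions shown here.

package main

import (
	"fmt"
	"io"

	// Import path assumed from the TiDB revisions in these examples.
	"github.com/pingcap/tidb/terror"
)

// readPacket is a hypothetical helper standing in for cc.readPacket above;
// here it simply reports end-of-stream.
func readPacket() ([]byte, error) {
	return nil, io.EOF
}

func main() {
	data, err := readPacket()
	if err != nil {
		// terror.ErrorEqual compares the underlying errors rather than the
		// error values themselves, so a traced or annotated io.EOF still
		// matches io.EOF here.
		if terror.ErrorEqual(err, io.EOF) {
			fmt.Println("connection closed cleanly")
			return
		}
		fmt.Println("unexpected read error:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}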
Code example #2
File: conn.go Project: jmptrader/tidb
// handleLoadData does the additional work after processing the 'load data' query.
// It sends the client a file path, then reads the file content from the client and inserts the data into the database.
func (cc *clientConn) handleLoadData(loadDataInfo *executor.LoadDataInfo) error {
	// For the server to handle the load data request, the client must have set the ClientLocalFiles capability.
	if cc.capability&mysql.ClientLocalFiles == 0 {
		return errNotAllowedCommand
	}

	var err error
	defer func() {
		cc.ctx.SetValue(executor.LoadDataVarKey, nil)
		if err == nil {
			err = cc.ctx.CommitTxn()
		}
		if err == nil {
			return
		}
		err1 := cc.ctx.RollbackTxn()
		if err1 != nil {
			log.Errorf("load data rollback failed: %v", err1)
		}
	}()

	if loadDataInfo == nil {
		err = errors.New("load data info is empty")
		return errors.Trace(err)
	}
	err = cc.writeReq(loadDataInfo.Path)
	if err != nil {
		return errors.Trace(err)
	}

	var prevData []byte
	var curData []byte
	var shouldBreak bool
	for {
		curData, err = cc.readPacket()
		if err != nil {
			if terror.ErrorNotEqual(err, io.EOF) {
				log.Error(errors.ErrorStack(err))
				return errors.Trace(err)
			}
		}
		if len(curData) == 0 {
			shouldBreak = true
			if len(prevData) == 0 {
				break
			}
		}
		prevData, err = loadDataInfo.InsertData(prevData, curData)
		if err != nil {
			return errors.Trace(err)
		}
		if shouldBreak {
			break
		}
	}

	return nil
}
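The defer at the top of handleLoadData implements a commit-or-rollback pattern: the closure reads the final value of err, commits only if everything (including the commit itself) succeeded, and otherwise rolls back. Below is a compact sketch of that pattern, using a named return value and hypothetical commitTxn/rollbackTxn helpers in place of cc.ctx.CommitTxn and cc.ctx.RollbackTxn.

package main

import (
	"errors"
	"fmt"
)

// commitTxn and rollbackTxn are hypothetical stand-ins for
// cc.ctx.CommitTxn and cc.ctx.RollbackTxn above.
func commitTxn() error   { fmt.Println("commit"); return nil }
func rollbackTxn() error { fmt.Println("rollback"); return nil }

func doWork(fail bool) (err error) {
	defer func() {
		// The closure sees the final value of err: commit only if the
		// body (and then the commit itself) succeeded, otherwise roll back.
		if err == nil {
			err = commitTxn()
		}
		if err == nil {
			return
		}
		if err1 := rollbackTxn(); err1 != nil {
			fmt.Println("rollback failed:", err1)
		}
	}()

	if fail {
		err = errors.New("load failed")
		return err
	}
	return nil
}

func main() {
	fmt.Println(doWork(false)) // commits, prints <nil>
	fmt.Println(doWork(true))  // rolls back, prints "load failed"
}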
Code example #3
File: txn_committer.go Project: anywhy/tidb
func (c *txnCommitter) prewriteSingleRegion(batch batchKeys) error {
	mutations := make([]*pb.Mutation, len(batch.keys))
	for i, k := range batch.keys {
		mutations[i] = c.mutations[string(k)]
	}
	req := &pb.Request{
		Type: pb.MessageType_CmdPrewrite.Enum(),
		CmdPrewriteReq: &pb.CmdPrewriteRequest{
			Mutations:    mutations,
			PrimaryLock:  c.primary(),
			StartVersion: proto.Uint64(c.startTS),
		},
	}

	var backoffErr error
	for backoff := txnLockBackoff(); backoffErr == nil; backoffErr = backoff() {
		resp, err := c.store.SendKVReq(req, batch.region)
		if err != nil {
			return errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			// Re-split the keys and prewrite again.
			// TODO: The recursion may not be able to exit if TiKV &
			// PD are implemented incorrectly. A possible fix is
			// introducing a 'max backoff time'.
			err = c.prewriteKeys(batch.keys)
			return errors.Trace(err)
		}
		prewriteResp := resp.GetCmdPrewriteResp()
		if prewriteResp == nil {
			return errors.Trace(errBodyMissing)
		}
		keyErrs := prewriteResp.GetErrors()
		if len(keyErrs) == 0 {
			// We need to clean up all written keys if the transaction aborts.
			c.mu.Lock()
			defer c.mu.Unlock()
			c.writtenKeys = append(c.writtenKeys, batch.keys...)
			return nil
		}
		for _, keyErr := range keyErrs {
			lockInfo, err := extractLockInfoFromKeyErr(keyErr)
			if err != nil {
				// It could be `Retryable` or `Abort`.
				return errors.Trace(err)
			}
			lock := newLock(c.store, lockInfo.GetPrimaryLock(), lockInfo.GetLockVersion(), lockInfo.GetKey(), c.startTS)
			_, err = lock.cleanup()
			if err != nil && terror.ErrorNotEqual(err, errInnerRetryable) {
				return errors.Trace(err)
			}
		}
	}
	return errors.Annotate(backoffErr, txnRetryableMark)
}
Code example #4
File: snapshot.go Project: yubobo/tidb
func (s *tikvSnapshot) batchGetSingleRegion(batch batchKeys, collectF func(k, v []byte)) error {
	pending := batch.keys
	var backoffErr error
	for backoff := txnLockBackoff(); backoffErr == nil; backoffErr = backoff() {
		req := &pb.Request{
			Type: pb.MessageType_CmdBatchGet.Enum(),
			CmdBatchGetReq: &pb.CmdBatchGetRequest{
				Keys:    pending,
				Version: proto.Uint64(s.version.Ver),
			},
		}
		resp, err := s.store.SendKVReq(req, batch.region)
		if err != nil {
			return errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			err = s.batchGetKeysByRegions(pending, collectF)
			return errors.Trace(err)
		}
		batchGetResp := resp.GetCmdBatchGetResp()
		if batchGetResp == nil {
			return errors.Trace(errBodyMissing)
		}
		var lockedKeys [][]byte
		for _, pair := range batchGetResp.Pairs {
			keyErr := pair.GetError()
			if keyErr == nil {
				collectF(pair.GetKey(), pair.GetValue())
				continue
			}
			// This could be slow if we meet many expired locks.
			// TODO: Find a way to do quick unlock.
			val, err := s.handleKeyError(keyErr)
			if err != nil {
				if terror.ErrorNotEqual(err, errInnerRetryable) {
					return errors.Trace(err)
				}
				lockedKeys = append(lockedKeys, pair.GetKey())
				continue
			}
			collectF(pair.GetKey(), val)
		}
		if len(lockedKeys) > 0 {
			pending = lockedKeys
			continue
		}
		return nil
	}
	return errors.Annotate(backoffErr, txnRetryableMark)
}
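Examples #3 and #4 share the same retry loop header: for backoff := txnLockBackoff(); backoffErr == nil; backoffErr = backoff(). The loop body runs first; only when the body decides to retry does the post statement call backoff(), which sleeps and eventually returns a non-nil error that terminates the loop. A standalone sketch of that idiom follows, with a hypothetical makeBackoff closure (and invented sleep intervals) in place of txnLockBackoff.

package main

import (
	"errors"
	"fmt"
	"time"
)

// makeBackoff is a hypothetical stand-in for txnLockBackoff: it returns a
// closure that sleeps a little longer on each call and reports an error once
// the attempts are exhausted.
func makeBackoff(maxAttempts int, base time.Duration) func() error {
	attempt := 0
	return func() error {
		attempt++
		if attempt >= maxAttempts {
			return errors.New("backoff attempts exhausted")
		}
		time.Sleep(base * time.Duration(attempt))
		return nil
	}
}

// tryOnce is a hypothetical operation that always fails, so the loop retries.
func tryOnce() bool {
	return false
}

func main() {
	var backoffErr error
	// Same shape as the loops above: the body runs, and only on a retry does
	// the post statement call backoff(), eventually setting backoffErr and
	// ending the loop.
	for backoff := makeBackoff(5, 10*time.Millisecond); backoffErr == nil; backoffErr = backoff() {
		if tryOnce() {
			fmt.Println("succeeded")
			return
		}
	}
	fmt.Println("gave up:", backoffErr)
}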
Code example #5
File: tidb_test.go Project: zebozhuang/tidb
// See: http://dev.mysql.com/doc/refman/5.7/en/commit.html
func (s *testSessionSuite) TestRowLock(c *C) {
	store := newStore(c, s.dbName)
	se := newSession(c, store, s.dbName)
	se1 := newSession(c, store, s.dbName)
	se2 := newSession(c, store, s.dbName)

	mustExecSQL(c, se, "drop table if exists t")
	c.Assert(se.(*session).txn, IsNil)
	mustExecSQL(c, se, "create table t (c1 int, c2 int, c3 int)")
	mustExecSQL(c, se, "insert t values (11, 2, 3)")
	mustExecSQL(c, se, "insert t values (12, 2, 3)")
	mustExecSQL(c, se, "insert t values (13, 2, 3)")

	mustExecSQL(c, se1, "begin")
	mustExecSQL(c, se1, "update t set c2=21 where c1=11")

	mustExecSQL(c, se2, "begin")
	mustExecSQL(c, se2, "update t set c2=211 where c1=11")
	mustExecSQL(c, se2, "commit")

	_, err := exec(c, se1, "commit")
	// Row lock conflict, but the transaction can still succeed after a retry.
	if terror.ErrorNotEqual(err, kv.ErrConditionNotMatch) {
		c.Fail()
	}
	// Retry should succeed.
	err = se.Retry()
	c.Assert(err, IsNil)

	mustExecSQL(c, se1, "begin")
	mustExecSQL(c, se1, "update t set c2=21 where c1=11")

	mustExecSQL(c, se2, "begin")
	mustExecSQL(c, se2, "update t set c2=22 where c1=12")
	mustExecSQL(c, se2, "commit")

	mustExecSQL(c, se1, "commit")

	mustExecSQL(c, se, s.dropDBSQL)
}
Code example #6
File: index.go Project: huozhe3136/tidb
// pointLookup does not seek the index but calls the Exist method to get a handle, which is cheaper.
func (r *indexPlan) pointLookup(ctx context.Context, val interface{}) (*plan.Row, error) {
	txn, err := ctx.GetTxn(false)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var exist bool
	var h int64
	// We expect a kv.ErrKeyExists error because we pass -1 as the handle, which never equals the existing handle.
	exist, h, err = r.idx.Exist(txn, []interface{}{val}, -1)
	if !exist {
		return nil, errors.Trace(err)
	}
	if terror.ErrorNotEqual(kv.ErrKeyExists, err) {
		return nil, errors.Trace(err)
	}
	var row *plan.Row
	row, err = r.lookupRow(ctx, h)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return row, nil
}
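pointLookup relies on an inverted error convention: kv.ErrKeyExists is the expected outcome, because the caller deliberately passes -1 as the handle so it can never match the stored one, and the resulting error carries the real handle back. A minimal sketch of that contract follows, with a hypothetical existKey function mimicking the behaviour the comment above describes for r.idx.Exist; the import paths are assumptions based on these TiDB revisions.

package main

import (
	"fmt"

	// Import paths assumed from the TiDB revisions in these examples.
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/terror"
)

// existKey is a hypothetical stand-in for the index Exist method: it reports
// whether the key exists, the handle it is stored under, and kv.ErrKeyExists
// when the key exists under a handle different from the one passed in.
func existKey(passedHandle int64) (bool, int64, error) {
	const storedHandle int64 = 42
	if passedHandle != storedHandle {
		return true, storedHandle, kv.ErrKeyExists
	}
	return true, storedHandle, nil
}

func main() {
	// Pass -1 so the handles never match; kv.ErrKeyExists then signals
	// "found" and carries the real handle, exactly as pointLookup expects.
	exist, h, err := existKey(-1)
	if !exist {
		fmt.Println("key not found:", err)
		return
	}
	if terror.ErrorNotEqual(kv.ErrKeyExists, err) {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println("found handle", h)
}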