Example #1
File: txn.go Project: netroby/tidb
// RunInNewTxn will run the f in a new transaction environment.
func RunInNewTxn(store Storage, retryable bool, f func(txn Transaction) error) error {
	for {
		txn, err := store.Begin()
		if err != nil {
			log.Errorf("RunInNewTxn error - %v", err)
			return errors.Trace(err)
		}

		err = f(txn)
		if retryable && IsRetryableError(err) {
			log.Warnf("Retry txn %v", txn)
			txn.Rollback()
			continue
		}
		if err != nil {
			return errors.Trace(err)
		}

		err = txn.Commit()
		if retryable && IsRetryableError(err) {
			log.Warnf("Retry txn %v", txn)
			txn.Rollback()
			continue
		}
		if err != nil {
			return errors.Trace(err)
		}

		break
	}

	return nil
}
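A hedged usage sketch of the helper above (the key and values are illustrative, and Transaction is assumed to expose Get and Set as in TiDB's kv package). Because a retryable failure re-runs f from scratch in a fresh transaction, the closure must be idempotent and keep its side effects inside the txn:

err := RunInNewTxn(store, true, func(txn Transaction) error {
	v, err := txn.Get([]byte("k"))
	if err != nil {
		return errors.Trace(err)
	}
	// Derive the new value from v; this whole closure may run again
	// if the commit fails with a retryable error.
	return errors.Trace(txn.Set([]byte("k"), append(v, '!')))
})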
Example #2
// doBatches applies f to the batches in parallel.
func (c *txnCommitter) doBatches(bo *Backoffer, batches []batchKeys, f func(*Backoffer, batchKeys) error) error {
	if len(batches) == 0 {
		return nil
	}
	if len(batches) == 1 {
		e := f(bo, batches[0])
		if e != nil {
			log.Warnf("txnCommitter doBatches failed: %v, tid: %d", e, c.startTS)
		}
		return errors.Trace(e)
	}

	// TODO: For prewrite, stop sending other requests after receiving the first error.
	ch := make(chan error)
	for _, batch := range batches {
		go func(batch batchKeys) {
			ch <- f(bo.Fork(), batch)
		}(batch)
	}
	var err error
	for i := 0; i < len(batches); i++ {
		if e := <-ch; e != nil {
			log.Warnf("txnCommitter doBatches failed: %v, tid: %d", e, c.startTS)
			err = e
		}
	}
	return errors.Trace(err)
}
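The fan-out above relies on two invariants: every goroutine sends exactly one value, and the receive loop drains exactly len(batches) values, so no goroutine leaks even though the channel is unbuffered; only the last non-nil error is returned. A self-contained sketch of the same pattern (all names here are illustrative, not TiDB code):

package main

import (
	"errors"
	"fmt"
)

// fanOut runs f on every item concurrently and drains every result,
// returning the last non-nil error.
func fanOut(items []int, f func(int) error) error {
	ch := make(chan error) // unbuffered: each send pairs with one receive below
	for _, it := range items {
		go func(it int) { ch <- f(it) }(it)
	}
	var err error
	for range items {
		if e := <-ch; e != nil {
			err = e // remember the failure, but keep draining
		}
	}
	return err
}

func main() {
	err := fanOut([]int{1, 2, 3}, func(i int) error {
		if i == 2 {
			return errors.New("batch 2 failed")
		}
		return nil
	})
	fmt.Println(err) // batch 2 failed
}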
Example #3
func (w *GCWorker) start() {
	log.Infof("[gc worker] %s start.", w.uuid)
	ticker := time.NewTicker(gcWorkerTickInterval)
	for {
		select {
		case <-ticker.C:
			isLeader, err := w.checkLeader()
			if err != nil {
				log.Warnf("[gc worker] check leader err: %v", err)
				break
			}
			if isLeader {
				err = w.leaderTick()
				if err != nil {
					log.Warnf("[gc worker] leader tick err: %v", err)
				}
			}
		case err := <-w.done:
			w.gcIsRunning = false
			w.lastFinish = time.Now()
			if err != nil {
				log.Errorf("[gc worker] runGCJob error: %v", err)
				break
			}
		case <-w.quit:
			log.Infof("[gc worker] (%s) quit.", w.uuid)
			return
		}
	}
}
Example #4
// If forceNew is true, GetTxn() must return a new transaction.
// In this case, if the current transaction is still in progress,
// it is implicitly committed and a new transaction is created.
func (s *session) GetTxn(forceNew bool) (kv.Transaction, error) {
	var err error
	if s.txn == nil {
		s.resetHistory()
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, err
		}

		log.Warnf("New txn:%s in session:%d", s.txn, s.sid)
		return s.txn, nil
	}
	if forceNew {
		err = s.txn.Commit()
		if err != nil {
			return nil, err
		}
		s.resetHistory()
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, err
		}
		log.Warnf("Force new txn:%s in session:%d", s.txn, s.sid)
	}
	return s.txn, nil
}
Example #5
func (s *session) Execute(sql string) ([]ast.RecordSet, error) {
	if err := s.checkSchemaValidOrRollback(); err != nil {
		return nil, errors.Trace(err)
	}
	charset, collation := getCtxCharsetInfo(s)
	rawStmts, err := s.ParseSQL(sql, charset, collation)
	if err != nil {
		log.Warnf("compiling %s, error: %v", sql, err)
		return nil, errors.Trace(err)
	}

	var rs []ast.RecordSet
	ph := sessionctx.GetDomain(s).PerfSchema()
	for i, rst := range rawStmts {
		st, err1 := Compile(s, rst)
		if err1 != nil {
			log.Errorf("Syntax error: %s", sql)
			log.Errorf("Error occurs at %s.", err1)
			return nil, errors.Trace(err1)
		}
		id := variable.GetSessionVars(s).ConnectionID
		s.stmtState = ph.StartStatement(sql, id, perfschema.CallerNameSessionExecute, rawStmts[i])
		r, err := runStmt(s, st)
		ph.EndStatement(s.stmtState)
		if err != nil {
			log.Warnf("session:%v, err:%v", s, err)
			return nil, errors.Trace(err)
		}
		if r != nil {
			rs = append(rs, r)
		}
	}
	return rs, nil
}
Example #6
// loadRegion loads a region from the pd client, and picks the first peer as the leader.
func (c *RegionCache) loadRegion(key []byte) (*Region, error) {
	var region *Region
	var backoffErr error
	for backoff := pdBackoff(); backoffErr == nil; backoffErr = backoff() {
		meta, err := c.pdClient.GetRegion(key)
		if err != nil {
			log.Warnf("loadRegion from PD failed, key: %q, err: %v", key, err)
			continue
		}
		if meta == nil {
			log.Warnf("region not found for key %q", key)
			continue
		}
		if len(meta.Peers) == 0 {
			return nil, errors.New("receive Region with no peer")
		}
		peer := meta.Peers[0]
		store, err := c.pdClient.GetStore(peer.GetStoreId())
		if err != nil {
			log.Warnf("loadStore from PD failed, key %q, storeID: %d, err: %v", key, peer.GetStoreId(), err)
			continue
		}
		region = &Region{
			meta:       meta,
			peer:       peer,
			addr:       store.GetAddress(),
			curPeerIdx: 0,
		}
		break
	}
	if backoffErr != nil {
		return nil, errors.Annotate(backoffErr, txnRetryableMark)
	}
	return region, nil
}
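The loop header above is a closure-based backoff idiom: pdBackoff (not shown in this listing) presumably returns a function that sleeps for a growing interval on each call and returns a non-nil error once the retry budget is spent, which is what finally terminates the loop. A sketch under those assumptions, with made-up constants:

package main

import (
	"errors"
	"time"
)

// backoffSketch mimics the assumed contract of pdBackoff: each call
// sleeps longer than the last and fails once the budget is exhausted.
func backoffSketch() func() error {
	attempt := 0
	return func() error {
		attempt++
		if attempt > 10 {
			return errors.New("backoff: retry budget exhausted")
		}
		time.Sleep(time.Duration(attempt) * 50 * time.Millisecond)
		return nil
	}
}

func main() {
	var backoffErr error
	for backoff := backoffSketch(); backoffErr == nil; backoffErr = backoff() {
		// Try the operation here: continue sleeps and retries,
		// break exits on success.
		break
	}
}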
Example #7
func (s *Server) onConn(c net.Conn) {
	session := s.newSession(c)

	defer func() {
		if !debug {
			if err := recover(); err != nil {
				const size = 4096
				buf := make([]byte, size)
				buf = buf[:runtime.Stack(buf, false)]
				log.Fatal("onConn panic %v: %v\n%s", c.RemoteAddr().String(), err, buf)
			}
		}

		session.Close()
	}()

	// On a handshake error, we do not need to close the conn here.
	if err := session.Handshake(); err != nil {
		log.Warnf("handshake error: %s", err)
		return
	}

	if err := session.Run(); err != nil {
		// TODO
		// session.WriteError(NewDefaultError(err))
		if err == errSessionQuit {
			return
		}

		log.Warnf("session run error: %s", err.Error())
	}
}
Example #8
func (c *txnCommitter) Commit() error {
	err := c.prewriteKeys(c.keys)
	if err != nil {
		log.Warnf("txn commit failed on prewrite: %v, tid: %d", err, c.startTS)
		go func() {
			c.cleanupKeys(c.writtenKeys)
		}()
		return errors.Trace(err)
	}

	commitTS, err := c.store.getTimestampWithRetry()
	if err != nil {
		return errors.Trace(err)
	}
	c.commitTS = commitTS

	err = c.commitKeys(c.keys)
	if err != nil {
		if !c.committed {
			go func() {
				c.cleanupKeys(c.writtenKeys)
			}()
			return errors.Trace(err)
		}
		log.Warnf("txn commit succeed with error: %v, tid: %d", err, c.startTS)
	}
	return nil
}
Example #9
// Run reads client queries and writes query results to the client in a loop.
// If a panic occurs during query handling, it is recovered and the panic error is logged.
// This function returns, and the connection is closed, when there is an IO error or a panic.
func (cc *clientConn) Run() {
	defer func() {
		r := recover()
		if r != nil {
			const size = 4096
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Errorf("lastCmd %s, %v, %s", cc.lastCmd, r, buf)
		}
		cc.Close()
	}()

	for {
		cc.alloc.Reset()
		data, err := cc.readPacket()
		if err != nil {
			if terror.ErrorNotEqual(err, io.EOF) {
				log.Error(errors.ErrorStack(err))
			}
			return
		}

		if err := cc.dispatch(data); err != nil {
			if terror.ErrorEqual(err, io.EOF) {
				return
			}
			log.Warnf("dispatch error %s, %s", errors.ErrorStack(err), cc)
			log.Warnf("cmd: %s", string(data[1:]))
			cc.writeError(err)
		}

		cc.pkt.sequence = 0
	}
}
Example #10
func (s *session) Retry() error {
	s.retrying = true
	nh := s.history.clone()
	// Debug info.
	if len(nh.history) == 0 {
		s.debugInfos[retryEmptyHistoryList] = true
	} else {
		s.debugInfos[retryEmptyHistoryList] = false
	}
	defer func() {
		s.history.history = nh.history
		s.retrying = false
	}()

	if forUpdate := s.Value(forupdate.ForUpdateKey); forUpdate != nil {
		return errors.Errorf("can not retry select for update statement")
	}
	var err error
	retryCnt := 0
	for {
		s.resetHistory()
		s.FinishTxn(true)
		success := true
		for _, sr := range nh.history {
			st := sr.st
			// Skip prepare statement
			if !needRetry(st) {
				continue
			}
			log.Warnf("Retry %s", st.OriginText())
			switch st.(type) {
			case *stmts.ExecuteStmt:
				_, err = runStmt(s, st, sr.params...)
			default:
				_, err = runStmt(s, st)
			}
			if err != nil {
				if kv.IsRetryableError(err) {
					success = false
					break
				}
				log.Warnf("session:%v, err:%v", s, err)
				return errors.Trace(err)
			}
		}
		if success {
			err = s.FinishTxn(false)
			if !kv.IsRetryableError(err) {
				break
			}
		}
		retryCnt++
		if (s.maxRetryCnt != unlimitedRetryCnt) && (retryCnt >= s.maxRetryCnt) {
			return errors.Trace(err)
		}
		kv.BackOff(retryCnt)
	}
	return err
}
Example #11
func (d *ddl) startJob(ctx context.Context, job *model.Job) error {
	// For every DDL, we must commit the current transaction first.
	if err := ctx.FinishTxn(false); err != nil {
		return errors.Trace(err)
	}

	// Create a new job and queue it.
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		job.ID, err = t.GenGlobalID()
		if err != nil {
			return errors.Trace(err)
		}

		err = t.EnQueueDDLJob(job)
		return errors.Trace(err)
	})

	if err != nil {
		return errors.Trace(err)
	}

	// Notify the worker that we pushed a new job, then wait for the job to finish.
	asyncNotify(d.jobCh)

	log.Warnf("start DDL job %v", job)

	jobID := job.ID

	var historyJob *model.Job

	// From start to end, a job's state goes none -> delete only -> write only -> reorganization -> public.
	// Every state change waits up to 2 * lease time, so the ticker here checks every 10 * lease.
	ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second))
	defer ticker.Stop()
	for {
		select {
		case <-d.jobDoneCh:
		case <-ticker.C:
		}

		historyJob, err = d.getHistoryJob(jobID)
		if err != nil {
			log.Errorf("get history job err %v, check again", err)
			continue
		} else if historyJob == nil {
			log.Warnf("job %d is not in history, maybe not run", jobID)
			continue
		}

		// If a job is in the history table, its state must be JobDone or JobCancel.
		if historyJob.State == model.JobDone {
			return nil
		}

		return errors.New(historyJob.Error)
	}
}
Example #12
func (s *session) Retry() error {
	variable.GetSessionVars(s).RetryInfo.Retrying = true
	nh := s.history.clone()
	// Debug info.
	if len(nh.history) == 0 {
		s.debugInfos[retryEmptyHistoryList] = true
	} else {
		s.debugInfos[retryEmptyHistoryList] = false
	}
	defer func() {
		s.history.history = nh.history
		variable.GetSessionVars(s).RetryInfo.Retrying = false
	}()

	if forUpdate := s.Value(forupdate.ForUpdateKey); forUpdate != nil {
		return errors.Errorf("can not retry select for update statement")
	}
	var err error
	retryCnt := 0
	for {
		variable.GetSessionVars(s).RetryInfo.Attempts = retryCnt + 1
		s.resetHistory()
		log.Info("RollbackTxn for retry txn.")
		err = s.RollbackTxn()
		if err != nil {
			// TODO: handle this error.
			log.Errorf("rollback txn failed, err:%v", errors.ErrorStack(err))
		}
		success := true
		variable.GetSessionVars(s).RetryInfo.ResetOffset()
		for _, sr := range nh.history {
			st := sr.st
			log.Warnf("Retry %s", st.OriginText())
			_, err = runStmt(s, st)
			if err != nil {
				if kv.IsRetryableError(err) {
					success = false
					break
				}
				log.Warnf("session:%v, err:%v", s, err)
				return errors.Trace(err)
			}
		}
		if success {
			err = s.CommitTxn()
			if !kv.IsRetryableError(err) {
				break
			}
		}
		retryCnt++
		if (s.maxRetryCnt != unlimitedRetryCnt) && (retryCnt >= s.maxRetryCnt) {
			return errors.Trace(err)
		}
		kv.BackOff(retryCnt)
	}
	return err
}
Example #13
File: 2pc.go Project: pingcap/tidb
// execute executes the two-phase commit protocol.
func (c *twoPhaseCommitter) execute() error {
	ctx := context.Background()
	defer func() {
		// Always clean up all written keys if the txn does not commit.
		c.mu.RLock()
		writtenKeys := c.mu.writtenKeys
		committed := c.mu.committed
		c.mu.RUnlock()
		if !committed {
			go func() {
				err := c.cleanupKeys(NewBackoffer(cleanupMaxBackoff, ctx), writtenKeys)
				if err != nil {
					log.Infof("2PC cleanup err: %v, tid: %d", err, c.startTS)
				} else {
					log.Infof("2PC clean up done, tid: %d", c.startTS)
				}
			}()
		}
	}()

	binlogChan := c.prewriteBinlog()
	err := c.prewriteKeys(NewBackoffer(prewriteMaxBackoff, ctx), c.keys)
	if binlogChan != nil {
		binlogErr := <-binlogChan
		if binlogErr != nil {
			return errors.Trace(binlogErr)
		}
	}
	if err != nil {
		log.Warnf("2PC failed on prewrite: %v, tid: %d", err, c.startTS)
		return errors.Trace(err)
	}

	commitTS, err := c.store.getTimestampWithRetry(NewBackoffer(tsoMaxBackoff, ctx))
	if err != nil {
		log.Warnf("2PC get commitTS failed: %v, tid: %d", err, c.startTS)
		return errors.Trace(err)
	}
	c.commitTS = commitTS

	if c.store.oracle.IsExpired(c.startTS, maxTxnTimeUse) {
		err = errors.Errorf("txn takes too much time, start: %d, commit: %d", c.startTS, c.commitTS)
		return errors.Annotate(err, txnRetryableMark)
	}

	err = c.commitKeys(NewBackoffer(commitMaxBackoff, ctx), c.keys)
	if err != nil {
		if !c.mu.committed {
			log.Warnf("2PC failed on commit: %v, tid: %d", err, c.startTS)
			return errors.Trace(err)
		}
		log.Warnf("2PC succeed with error: %v, tid: %d", err, c.startTS)
	}
	return nil
}
Example #14
// Handle single copTask.
func (it *copIterator) handleTask(task *copTask) (*coprocessor.Response, error) {
	var backoffErr error
	for backoff := rpcBackoff(); backoffErr == nil; backoffErr = backoff() {
		client, err := it.store.getClient(task.region.GetAddress())
		if err != nil {
			return nil, errors.Trace(err)
		}
		req := &coprocessor.Request{
			Context: task.region.GetContext(),
			Tp:      proto.Int64(it.req.Tp),
			Data:    it.req.Data,
			Ranges:  task.pbRanges(),
		}
		resp, err := client.SendCopReq(req)
		if err != nil {
			it.store.regionCache.NextPeer(task.region.VerID())
			err1 := it.rebuildCurrentTask(task)
			if err1 != nil {
				return nil, errors.Trace(err1)
			}
			log.Warnf("send coprocessor request error: %v, try next peer later", err)
			continue
		}
		if e := resp.GetRegionError(); e != nil {
			if notLeader := e.GetNotLeader(); notLeader != nil {
				it.store.regionCache.UpdateLeader(task.region.VerID(), notLeader.GetLeader().GetId())
			} else {
				it.store.regionCache.DropRegion(task.region.VerID())
			}
			err = it.rebuildCurrentTask(task)
			if err != nil {
				return nil, errors.Trace(err)
			}
			log.Warnf("coprocessor region error: %v, retry later", e)
			continue
		}
		if e := resp.GetLocked(); e != nil {
			lock := newLock(it.store, e.GetPrimaryLock(), e.GetLockVersion(), e.GetKey(), e.GetLockVersion())
			_, lockErr := lock.cleanup()
			if lockErr == nil || terror.ErrorEqual(lockErr, errInnerRetryable) {
				continue
			}
			log.Warnf("cleanup lock error: %v", lockErr)
			return nil, errors.Trace(lockErr)
		}
		if e := resp.GetOtherError(); e != "" {
			err = errors.Errorf("other error: %s", e)
			log.Warnf("coprocessor err: %v", err)
			return nil, errors.Trace(err)
		}
		return resp, nil
	}
	return nil, errors.Trace(backoffErr)
}
Example #15
func (e *Error) getMySQLErrorCode() uint16 {
	codeMap, ok := ErrClassToMySQLCodes[e.class]
	if !ok {
		log.Warnf("Unknown error class: %v", e.class)
		return defaultMySQLErrorCode
	}
	code, ok := codeMap[e.code]
	if !ok {
		log.Warnf("Unknown error class: %v code: %v", e.class, e.code)
		return defaultMySQLErrorCode
	}
	return code
}
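The lookup above is a two-level translation table with a shared fallback at both levels. A self-contained sketch of the shape (the classes, codes, and the 1105 "unknown error" default below are illustrative, not TiDB's actual tables):

package main

import "fmt"

type errClass int
type errCode int

var classToCodes = map[errClass]map[errCode]uint16{
	1: {100: 1062, 101: 1146},
}

const defaultCode uint16 = 1105 // MySQL's generic "unknown error"

// toMySQLCode falls back to defaultCode when either the class or the
// code is missing from the table, mirroring getMySQLErrorCode above.
func toMySQLCode(class errClass, code errCode) uint16 {
	codes, ok := classToCodes[class]
	if !ok {
		return defaultCode
	}
	c, ok := codes[code]
	if !ok {
		return defaultCode
	}
	return c
}

func main() {
	fmt.Println(toMySQLCode(1, 100)) // 1062
	fmt.Println(toMySQLCode(2, 0))   // 1105 (fallback)
}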
Example #16
func (s *session) Execute(sql string) ([]ast.RecordSet, error) {
	if err := s.checkSchemaValidOrRollback(); err != nil {
		return nil, errors.Trace(err)
	}
	startTS := time.Now()
	charset, collation := s.sessionVars.GetCharsetInfo()
	connID := s.sessionVars.ConnectionID
	rawStmts, err := s.ParseSQL(sql, charset, collation)
	if err != nil {
		log.Warnf("[%d] parse error:\n%v\n%s", connID, err, sql)
		return nil, errors.Trace(err)
	}
	sessionExecuteParseDuration.Observe(time.Since(startTS).Seconds())

	var rs []ast.RecordSet
	ph := sessionctx.GetDomain(s).PerfSchema()
	for i, rst := range rawStmts {
		startTS := time.Now()
		// Some execution happens during the compile stage, so reset the statement context before compiling.
		resetStmtCtx(s, rawStmts[i])
		st, err1 := Compile(s, rst)
		if err1 != nil {
			log.Warnf("[%d] compile error:\n%v\n%s", connID, err1, sql)
			return nil, errors.Trace(err1)
		}
		sessionExecuteCompileDuration.Observe(time.Since(startTS).Seconds())

		s.stmtState = ph.StartStatement(sql, connID, perfschema.CallerNameSessionExecute, rawStmts[i])
		s.SetValue(context.QueryString, st.OriginText())

		startTS = time.Now()
		r, err := runStmt(s, st)
		ph.EndStatement(s.stmtState)
		if err != nil {
			log.Warnf("[%d] session error:\n%v\n%s", connID, err, s)
			return nil, errors.Trace(err)
		}
		sessionExecuteRunDuration.Observe(time.Since(startTS).Seconds())
		if r != nil {
			rs = append(rs, r)
		}
	}

	if s.sessionVars.ClientCapability&mysql.ClientMultiResults == 0 && len(rs) > 1 {
		// Return only the first record set if the client doesn't support ClientMultiResults.
		rs = rs[:1]
	}
	return rs, nil
}
Example #17
File: 2pc.go Project: pingcap/tidb
// doActionOnBatches applies the action to the batches in parallel.
func (c *twoPhaseCommitter) doActionOnBatches(bo *Backoffer, action twoPhaseCommitAction, batches []batchKeys) error {
	if len(batches) == 0 {
		return nil
	}
	var singleBatchActionFunc func(bo *Backoffer, batch batchKeys) error
	switch action {
	case actionPrewrite:
		singleBatchActionFunc = c.prewriteSingleBatch
	case actionCommit:
		singleBatchActionFunc = c.commitSingleBatch
	case actionCleanup:
		singleBatchActionFunc = c.cleanupSingleBatch
	}
	if len(batches) == 1 {
		e := singleBatchActionFunc(bo, batches[0])
		if e != nil {
			log.Warnf("2PC doActionOnBatches %s failed: %v, tid: %d", action, e, c.startTS)
		}
		return errors.Trace(e)
	}

	// For prewrite, stop sending other requests after receiving the first error.
	var cancel context.CancelFunc
	if action == actionPrewrite {
		cancel = bo.WithCancel()
	}

	// Concurrently do the work for each batch.
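	// Note: the buffer lets every remaining goroutine complete its send
	// even if we return early after cancel() below, so none of them leak.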
	ch := make(chan error, len(batches))
	for _, batch := range batches {
		go func(batch batchKeys) {
			ch <- singleBatchActionFunc(bo.Fork(), batch)
		}(batch)
	}
	var err error
	for i := 0; i < len(batches); i++ {
		if e := <-ch; e != nil {
			log.Warnf("2PC doActionOnBatches %s failed: %v, tid: %d", action, e, c.startTS)
			if cancel != nil {
				// Cancel other requests and return the first error.
				cancel()
				return errors.Trace(e)
			}
			err = e
		}
	}
	return errors.Trace(err)
}
Example #18
File: where.go Project: no2key/tidb
// Plan gets NullPlan/FilterDefaultPlan.
func (r *WhereRset) Plan(ctx context.Context) (plan.Plan, error) {
	expr, err := r.Expr.Clone()
	if err != nil {
		return nil, err
	}

	if expr.IsStatic() {
		// IsStatic means the where condition is a constant value, so we don't need any index.
		return r.planStatic(ctx, expr)
	}

	switch x := expr.(type) {
	case *expressions.BinaryOperation:
		return r.planBinOp(ctx, x)
	case *expressions.Ident:
		return r.planIdent(ctx, x)
	case *expressions.IsNull:
		return r.planIsNull(ctx, x)
	case *expressions.PatternIn:
		// TODO: optimize
		// TODO: show plan
	case *expressions.PatternLike:
		// TODO: optimize
	case *expressions.PatternRegexp:
		// TODO: optimize
	case *expressions.UnaryOperation:
		return r.planUnaryOp(ctx, x)
	default:
		log.Warnf("%v not supported in where rset now", r.Expr)
	}

	return &plans.FilterDefaultPlan{Plan: r.Src, Expr: expr}, nil
}
Example #19
func (s *session) FinishTxn(rollback bool) error {
	// The transaction has already been committed or rolled back.
	if s.txn == nil {
		return nil
	}
	defer func() {
		s.txn = nil
		variable.GetSessionVars(s).SetStatusFlag(mysql.ServerStatusInTrans, false)
	}()

	if rollback {
		s.resetHistory()
		return s.txn.Rollback()
	}

	err := s.txn.Commit()
	if err != nil {
		if !s.retrying && kv.IsRetryableError(err) {
			err = s.Retry()
		}
		if err != nil {
			log.Warnf("txn:%s, %v", s.txn, err)
			return errors.Trace(err)
		}
	}

	s.resetHistory()
	return nil
}
Example #20
// UpdateLeader updates the cached region with newer leader info.
func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderID uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()

	r, ok := c.mu.regions[regionID]
	if !ok {
		log.Debugf("regionCache: cannot find region when updating leader %d,%d", regionID, leaderID)
		return
	}

	var found bool
	for i, p := range r.meta.Peers {
		if p.GetId() == leaderID {
			r.curPeerIdx, r.peer = i, p
			found = true
			break
		}
	}
	if !found {
		log.Debugf("regionCache: cannot find peer when updating leader %d,%d", regionID, leaderID)
		c.dropRegionFromCache(r.VerID())
		return
	}

	store, err := c.pdClient.GetStore(r.peer.GetStoreId())
	if err != nil {
		log.Warnf("regionCache: failed load store %d", r.peer.GetStoreId())
		c.dropRegionFromCache(r.VerID())
		return
	}

	r.addr = store.GetAddress()
}
Example #21
File: txn.go Project: yzl11/vessel
func (txn *dbTxn) Rollback() error {
	if !txn.valid {
		return errors.Trace(ErrInvalidTxn)
	}
	log.Warnf("Rollback txn %d", txn.tid)
	return txn.close()
}
Example #22
File: ddl.go Project: botvs/tidb
func (d *ddl) CreateSchema(ctx context.Context, schema model.CIStr) (err error) {
	is := d.GetInformationSchema()
	_, ok := is.SchemaByName(schema)
	if ok {
		return errors.Trace(ErrExists)
	}
	info := &model.DBInfo{Name: schema}
	info.ID, err = d.genGlobalID()
	if err != nil {
		return errors.Trace(err)
	}

	err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		err := d.verifySchemaMetaVersion(t, is.SchemaMetaVersion())
		if err != nil {
			return errors.Trace(err)
		}

		err = t.CreateDatabase(info)

		log.Warnf("save schema %s", info)
		return errors.Trace(err)
	})
	if d.onDDLChange != nil {
		err = d.onDDLChange(err)
	}
	return errors.Trace(err)
}
Example #23
func (s *session) Execute(sql string) ([]rset.Recordset, error) {
	statements, err := Compile(s, sql)
	if err != nil {
		log.Errorf("Syntax error: %s", sql)
		log.Errorf("Error occurs at %s.", err)
		return nil, errors.Trace(err)
	}

	var rs []rset.Recordset

	for _, st := range statements {
		r, err := runStmt(s, st)
		if err != nil {
			log.Warnf("session:%v, err:%v", s, err)
			return nil, errors.Trace(err)
		}

		// Record executed query
		if isPreparedStmt(st) {
			ps := st.(*stmts.PreparedStmt)
			s.history.add(ps.ID, st)
		} else {
			s.history.add(0, st)
		}

		if r != nil {
			rs = append(rs, r)
		}
	}

	return rs, nil
}
Example #24
File: txn.go Project: astaxie/tidb
// If the commit fails, Themis rolls back automatically.
func (txn *hbaseTxn) Rollback() error {
	if !txn.valid {
		return kv.ErrInvalidTxn
	}
	log.Warnf("[kv] Rollback txn %d", txn.tid)
	return txn.close()
}
Example #25
func (s *tikvSnapshot) batchGetKeysByRegions(keys [][]byte, collectF func(k, v []byte)) error {
	groups, _, err := s.store.regionCache.GroupKeysByRegion(keys)
	if err != nil {
		return errors.Trace(err)
	}

	var batches []batchKeys
	for id, g := range groups {
		batches = appendBatchBySize(batches, id, g, func([]byte) int { return 1 }, batchGetSize)
	}

	if len(batches) == 0 {
		return nil
	}
	if len(batches) == 1 {
		return errors.Trace(s.batchGetSingleRegion(batches[0], collectF))
	}
	ch := make(chan error)
	for _, batch := range batches {
		go func(batch batchKeys) {
			ch <- s.batchGetSingleRegion(batch, collectF)
		}(batch)
	}
	for i := 0; i < len(batches); i++ {
		if e := <-ch; e != nil {
			log.Warnf("snapshot batchGet failed: %v, tid: %d", e, s.version.Ver)
			err = e
		}
	}
	return errors.Trace(err)
}
Example #26
File: txn.go Project: ninefive/tidb
func (txn *dbTxn) Rollback() error {
	if !txn.valid {
		return ErrInvalidTxn
	}
	log.Warnf("Rollback txn %d", txn.tID)
	return txn.close()
}
Example #27
// If forceNew is true, GetTxn() must return a new transaction.
// In this case, if the current transaction is still in progress,
// it is implicitly committed and a new transaction is created.
func (s *session) GetTxn(forceNew bool) (kv.Transaction, error) {
	var err error
	if s.txn == nil {
		s.resetHistory()
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !s.isAutocommit(s) {
			variable.GetSessionVars(s).SetStatusFlag(mysql.ServerStatusInTrans, true)
		}
		log.Infof("New txn:%s in session:%d", s.txn, s.sid)
		return s.txn, nil
	}
	if forceNew {
		err = s.FinishTxn(false)
		if err != nil {
			return nil, errors.Trace(err)
		}
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !s.isAutocommit(s) {
			variable.GetSessionVars(s).SetStatusFlag(mysql.ServerStatusInTrans, true)
		}
		log.Warnf("Force new txn:%s in session:%d", s.txn, s.sid)
	}
	return s.txn, nil
}
Example #28
func (r *rowMutation) mutationList(withValue bool) []*columnMutation {
	var ret []*columnMutation
	var keys []string
	for k := range r.mutations {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		v := &mutationValuePair{
			typ: r.mutations[k].typ,
		}
		if withValue {
			v.value = r.mutations[k].value
		}
		c := &hbase.Column{}
		// TODO: handle the error properly; for now just log and continue.
		if err := c.ParseFromString(k); err != nil {
			log.Warnf("parse from string error, column: %s, mutation: %s, error: %v", c, k, err)
		}
		ret = append(ret, &columnMutation{
			Column:            c,
			mutationValuePair: v,
		})
	}
	return ret
}
Example #29
func (s *session) Auth(user string, auth []byte, salt []byte) bool {
	strs := strings.Split(user, "@")
	if len(strs) != 2 {
		log.Warnf("Invalid format for user: %s", user)
		return false
	}
	// Get user password.
	name := strs[0]
	host := strs[1]
	pwd, err := s.getPassword(name, host)
	if err != nil {
		if terror.ExecResultIsEmpty.Equal(err) {
			log.Errorf("User [%s] not exist %v", name, err)
		} else {
			log.Errorf("Get User [%s] password from SystemDB error %v", name, err)
		}
		return false
	}
	if len(pwd) != 0 && len(pwd) != 40 {
		log.Errorf("User [%s] password from SystemDB not like a sha1sum", name)
		return false
	}
	hpwd, err := util.DecodePassword(pwd)
	if err != nil {
		log.Errorf("Decode password string error %v", err)
		return false
	}
	checkAuth := util.CalcPassword(salt, hpwd)
	if !bytes.Equal(auth, checkAuth) {
		return false
	}
	variable.GetSessionVars(s).SetCurrentUser(user)
	return true
}
Example #30
// Both lock and unlock are used to simulate the scenario in the Percolator paper.
func (s *dbStore) tryConditionLockKey(tid uint64, key string, snapshotVal []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if _, ok := s.keysLocked[key]; ok {
		return errors.Trace(kv.ErrLockConflict)
	}

	metaKey := codec.EncodeBytes(nil, []byte(key))
	currValue, err := s.db.Get(metaKey)
	if errors2.ErrorEqual(err, kv.ErrNotExist) || currValue == nil {
		// If it's a new key, we won't need to check its version
		return nil
	}
	if err != nil {
		return errors.Trace(err)
	}
	_, ver, err := codec.DecodeUint(currValue)
	if err != nil {
		return errors.Trace(err)
	}

	// If there's a newer version of this key, return an error.
	if ver > tid {
		log.Warnf("txn:%d, tryLockKey condition not match for key %s, currValue:%q, snapshotVal:%q", tid, key, currValue, snapshotVal)
		return errors.Trace(kv.ErrConditionNotMatch)
	}

	s.keysLocked[key] = tid

	return nil
}