Code example #1
File: txn.go Project: anywhy/tidb
func (txn *tikvTxn) Commit() error {
	if !txn.valid {
		return kv.ErrInvalidTxn
	}

	log.Debugf("[kv] start to commit txn %d", txn.StartTS())
	if err := txn.us.CheckLazyConditionPairs(); err != nil {
		txn.close()
		return errors.Trace(err)
	}

	committer, err := newTxnCommitter(txn)
	if err != nil {
		txn.close()
		return errors.Trace(err)
	}
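	// A nil committer with a nil error means the transaction has no writes to commit.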
	if committer == nil {
		txn.close()
		return nil
	}
	err = committer.Commit()
	if err != nil {
		return errors.Trace(err)
	}
	txn.commitTS = committer.commitTS
	log.Debugf("[kv] finish commit txn %d", txn.StartTS())
	return nil
}
Code example #2
File: worker.go Project: yzl11/vessel
func (d *ddl) checkOwner(t *meta.Meta) (*model.Owner, error) {
	owner, err := t.GetDDLOwner()
	if err != nil {
		return nil, errors.Trace(err)
	}

	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee that other servers have updated the schema.
	// The owner renews its status every 2 * lease time, so we use 4 * lease to check
	// whether it has timed out.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		if err = t.SetDDLOwner(owner); err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("become owner %s", owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("not owner, owner is %s", owner.OwnerID)
		return nil, errors.Trace(ErrNotOwner)
	}

	return owner, nil
}
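
The ownership test above is plain nanosecond arithmetic on a lease. Below is a minimal standalone sketch of the same timeout rule, using only the standard library; the owner struct here is a stand-in for model.Owner, not the real meta type.

package main

import (
	"fmt"
	"time"
)

// owner is a stand-in for model.Owner: who holds ownership and when it last renewed.
type owner struct {
	OwnerID      string
	LastUpdateTS int64 // Unix nanoseconds
}

// expired reports whether the owner missed its renewal window. The real code
// renews every 2 * lease and treats 4 * lease of silence as a timeout.
func expired(o owner, lease time.Duration) bool {
	maxTimeout := int64(4 * lease)
	return time.Now().UnixNano()-o.LastUpdateTS > maxTimeout
}

func main() {
	o := owner{OwnerID: "uuid-1", LastUpdateTS: time.Now().Add(-5 * time.Second).UnixNano()}
	fmt.Println(expired(o, time.Second)) // true: 5s of silence > 4 * 1s
}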
Code example #3
File: slots.go Project: CowLeo/qdb
// SLOTSRESTORE key ttlms value [key ttlms value ...]
func (s *Store) SlotsRestore(db uint32, args [][]byte) error {
	if len(args) == 0 || len(args)%3 != 0 {
		return errArguments("len(args) = %d, expect != 0 && mod 3 = 0", len(args))
	}

	objs := make([]*rdb.ObjEntry, len(args)/3)
	for i := 0; i < len(objs); i++ {
		key := args[i*3]
		ttlms, err := ParseInt(args[i*3+1])
		if err != nil {
			return errArguments("parse args failed - %s", err)
		}
		value := args[i*3+2]

		expireat := int64(0)
		if ttlms != 0 {
			if v, ok := TTLmsToExpireAt(ttlms); ok && v > 0 {
				expireat = v
			} else {
				return errArguments("parse args[%d] ttlms = %d", i*3+1, ttlms)
			}
		}

		obj, err := rdb.DecodeDump(value)
		if err != nil {
			return errArguments("decode args[%d] failed, %s", i*3+2, err)
		}

		objs[i] = &rdb.ObjEntry{
			DB:       db,
			Key:      key,
			ExpireAt: uint64(expireat),
			Value:    obj,
		}
	}

	if err := s.acquire(); err != nil {
		return errors.Trace(err)
	}
	defer s.release()

	ms := &markSet{}
	bt := engine.NewBatch()
	for i := len(objs) - 1; i >= 0; i-- {
		e := objs[i]
		if ms.Has(e.Key) {
			log.Debugf("[%d] restore batch, db = %d, key = %v, ignore", i, e.DB, e.Key)
			continue
		} else {
			log.Debugf("[%d] restore batch, db = %d, key = %v", i, e.DB, e.Key)
		}
		if err := s.restore(bt, e.DB, e.Key, int64(e.ExpireAt), e.Value); err != nil {
			log.Warningf("restore object failed, db = %d, key = %v, err = %s", e.DB, e.Key, err)
			return errors.Trace(err)
		}
		ms.Set(e.Key)
	}
	fw := &Forward{DB: db, Op: "SlotsRestore", Args: args}
	return s.commit(bt, fw)
}
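
SlotsRestore walks the decoded entries backwards and skips keys it has already seen, so when a key appears more than once only its last triple is restored. Here is that last-write-wins dedup in isolation, with a plain map standing in for markSet (a sketch, not the qdb type):

package main

import "fmt"

func main() {
	keys := []string{"a", "b", "a", "c"} // "a" appears twice; index 2 must win
	seen := make(map[string]bool)
	// Iterate from the end: the first occurrence met is the last write.
	for i := len(keys) - 1; i >= 0; i-- {
		if seen[keys[i]] {
			continue // stale earlier write for this key, ignore
		}
		seen[keys[i]] = true
		fmt.Printf("restore index %d, key %s\n", i, keys[i])
	}
}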
Code example #4
File: region_cache.go Project: jmptrader/tidb
// UpdateLeader updates the region cache with newer leader info.
func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderID uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()

	r, ok := c.mu.regions[regionID]
	if !ok {
		log.Debugf("regionCache: cannot find region when updating leader %d,%d", regionID, leaderID)
		return
	}

	var found bool
	for i, p := range r.meta.Peers {
		if p.GetId() == leaderID {
			r.curPeerIdx, r.peer = i, p
			found = true
			break
		}
	}
	if !found {
		log.Debugf("regionCache: cannot find peer when updating leader %d,%d", regionID, leaderID)
		c.dropRegionFromCache(r.VerID())
		return
	}

	store, err := c.pdClient.GetStore(r.peer.GetStoreId())
	if err != nil {
		log.Warnf("regionCache: failed load store %d", r.peer.GetStoreId())
		c.dropRegionFromCache(r.VerID())
		return
	}

	r.addr = store.GetAddress()
}
Code example #5
File: gc_worker.go Project: pingcap/tidb
func (w *GCWorker) checkLeader() (bool, error) {
	gcWorkerCounter.WithLabelValues("check_leader").Inc()

	_, err := w.session.Execute("BEGIN")
	if err != nil {
		return false, errors.Trace(err)
	}
	leader, err := w.loadValueFromSysTable(gcLeaderUUIDKey)
	if err != nil {
		w.session.Execute("ROLLBACK")
		return false, errors.Trace(err)
	}
	log.Debugf("[gc worker] got leader: %s", leader)
	if leader == w.uuid {
		err = w.saveTime(gcLeaderLeaseKey, time.Now().Add(gcWorkerLease))
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		_, err = w.session.Execute("COMMIT")
		if err != nil {
			return false, errors.Trace(err)
		}
		return true, nil
	}
	lease, err := w.loadTime(gcLeaderLeaseKey)
	if err != nil {
		return false, errors.Trace(err)
	}
	if lease == nil || lease.Before(time.Now()) {
		log.Debugf("[gc worker] register %s as leader", w.uuid)
		gcWorkerCounter.WithLabelValues("register_leader").Inc()

		err = w.saveValueToSysTable(gcLeaderUUIDKey, w.uuid)
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		err = w.saveValueToSysTable(gcLeaderDescKey, w.desc)
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		err = w.saveTime(gcLeaderLeaseKey, time.Now().Add(gcWorkerLease))
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		_, err = w.session.Execute("COMMIT")
		if err != nil {
			return false, errors.Trace(err)
		}
		return true, nil
	}
	w.session.Execute("ROLLBACK")
	return false, nil
}
Code example #6
File: server.go Project: pingcap/tso
func (t *tsoTask) Run() error {
	if t.Interrupted() {
		return errors.New("task has been interrupted already")
	}

	// Reaching here means this node is the TSO leader.
	tso := t.tso

	log.Debugf("tso leader %s run task", tso)

	if err := tso.syncTimestamp(); err != nil {
		// interrupt here
		log.Errorf("sync timestmap err %v, interrupt this task", err)

		atomic.StoreInt64(&t.interrupted, 1)
		return errors.Trace(err)
	}

	atomic.StoreInt64(&tso.isLeader, 1)

	tsTicker := time.NewTicker(time.Duration(updateTimestampStep) * time.Millisecond)

	defer func() {
		atomic.StoreInt64(&tso.isLeader, 0)

		tsTicker.Stop()

		// we should close all connections.
		t.closeAllConns()
		log.Debugf("tso leader %s task end", tso)
	}()

	for {
		select {
		case err := <-t.interruptCh:
			log.Debugf("tso leader %s is interrupted, err %v", tso, err)

			// we will interrupt this task, and we can't run this task again.
			atomic.StoreInt64(&t.interrupted, 1)
			return errors.Trace(err)
		case <-t.stopCh:
			log.Debugf("tso leader %s is stopped", tso)

			// we will stop this task, maybe run again later.
			return errors.New("task is stopped")
		case conn := <-t.connCh:
			// handle connection below
			tso.handleConn(conn)
		case <-tsTicker.C:
			if err := tso.updateTimestamp(); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
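
The heart of Run is a select loop that multiplexes interruption, shutdown, incoming connections, and timer ticks. A stripped-down version of that loop shape follows; the channel names are illustrative, not the real tsoTask fields.

package main

import (
	"errors"
	"fmt"
	"time"
)

func run(stopCh <-chan struct{}, connCh <-chan string) error {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			// Stopped; the caller may run the task again later.
			return errors.New("task is stopped")
		case c := <-connCh:
			fmt.Println("handle connection", c)
		case <-ticker.C:
			// Periodic work, e.g. updateTimestamp in the original.
		}
	}
}

func main() {
	stop := make(chan struct{})
	conns := make(chan string, 1)
	conns <- "conn-1"
	go func() { time.Sleep(50 * time.Millisecond); close(stop) }()
	fmt.Println(run(stop, conns))
}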
Code example #7
File: themis_txn.go Project: pingcap/go-themis
func (txn *themisTxn) prewritePrimary() error {
	// hook for test
	if bypass, _, err := txn.hooks.beforePrewritePrimary(txn, nil); !bypass {
		return err
	}
	err := txn.prewriteRowWithLockClean(txn.primary.Table, txn.primaryRow, true)
	if err != nil {
		log.Debugf("prewrite primary %v %q failed: %v", txn.startTs, txn.primaryRow.row, err.Error())
		return errors.Trace(err)
	}
	log.Debugf("prewrite primary %v %q successfully", txn.startTs, txn.primaryRow.row)
	return nil
}
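
The hook call at the top is a test seam: unless the hook reports bypass, the function returns early with whatever error the hook injected. The control flow in isolation (names are illustrative, not the go-themis API):

package main

import "fmt"

// hook mimics beforePrewritePrimary: bypass == false short-circuits the
// caller, propagating whatever error the hook chose to inject.
type hook func() (bypass bool, err error)

func prewritePrimary(h hook) error {
	if bypass, err := h(); !bypass {
		return err // intercepted by the hook (err may be nil)
	}
	fmt.Println("doing the real prewrite")
	return nil
}

func main() {
	_ = prewritePrimary(func() (bool, error) { return true, nil })                     // runs
	_ = prewritePrimary(func() (bool, error) { return false, fmt.Errorf("injected") }) // intercepted
}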
Code example #8
File: ddl_worker.go Project: XuHuaiyu/tidb
func (d *ddl) checkOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
	var owner *model.Owner
	var err error

	switch flag {
	case ddlJobFlag:
		owner, err = t.GetDDLJobOwner()
	case bgJobFlag:
		owner, err = t.GetBgJobOwner()
	default:
		err = errInvalidJobFlag
	}
	if err != nil {
		return nil, errors.Trace(err)
	}

	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee that other servers have updated the schema.
	// The owner renews its status every 2 * lease time, so we use 4 * lease to check
	// whether it has timed out.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		switch flag {
		case ddlJobFlag:
			err = t.SetDDLJobOwner(owner)
		case bgJobFlag:
			err = t.SetBgJobOwner(owner)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[ddl] become %s job owner %s", flag, owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("[ddl] not %s job owner, owner is %s", flag, owner.OwnerID)
		return nil, errors.Trace(errNotOwner)
	}

	return owner, nil
}
Code example #9
File: executor_xapi.go Project: duzhanyuan/tidb
// Next implements Executor Next interface.
func (e *XSelectIndexExec) Next() (*Row, error) {
	if e.tasks == nil {
		startTs := time.Now()
		handles, err := e.fetchHandles()
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[TIME_INDEX_SCAN] time: %v handles: %d", time.Now().Sub(startTs), len(handles))
		e.buildTableTasks(handles)
		limitCount := int64(-1)
		if e.indexPlan.LimitCount != nil {
			limitCount = *e.indexPlan.LimitCount
		}
		concurrency := 2
		if e.indexPlan.NoLimit {
			concurrency = len(e.tasks)
		} else if limitCount > int64(BaseLookupTableTaskSize) {
			concurrency = int(limitCount/int64(BaseLookupTableTaskSize) + 1)
		}
		log.Debugf("[TIME_INDEX_TABLE_CONCURRENT_SCAN] start %d workers", concurrency)
		e.runTableTasks(concurrency)
	}
	for {
		if e.taskCursor >= len(e.tasks) {
			return nil, nil
		}
		task := e.tasks[e.taskCursor]
		if !task.done {
			startTs := time.Now()
			err := <-task.doneCh
			if err != nil {
				return nil, errors.Trace(err)
			}
			task.done = true
			log.Debugf("[TIME_INDEX_TABLE_SCAN] time: %v handles: %d", time.Now().Sub(startTs), len(task.handles))
		}
		if task.cursor < len(task.rows) {
			row := task.rows[task.cursor]
			if !e.aggregate {
				for i, field := range e.indexPlan.Fields() {
					field.Expr.SetDatum(row.Data[i])
				}
			}
			task.cursor++
			return row, nil
		}
		e.taskCursor++
	}
}
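
The worker count in Next is sized from the limit. Restated as a small standalone function, with a batchSize parameter standing in for BaseLookupTableTaskSize:

package main

import "fmt"

// workerCount mirrors the sizing rule above: 2 workers by default, one per
// task when there is no limit, otherwise enough workers to cover limitCount
// rows in batches of batchSize.
func workerCount(noLimit bool, numTasks int, limitCount, batchSize int64) int {
	c := 2
	if noLimit {
		c = numTasks
	} else if limitCount > batchSize {
		c = int(limitCount/batchSize + 1)
	}
	return c
}

func main() {
	fmt.Println(workerCount(false, 10, 5000, 1024)) // 5 = 5000/1024 + 1
	fmt.Println(workerCount(true, 10, -1, 1024))    // 10: one worker per task
}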
Code example #10
File: region_cache.go Project: pingcap/tidb
// UpdateLeader updates the region cache with newer leader info.
func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderStoreID uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()

	r, ok := c.mu.regions[regionID]
	if !ok {
		log.Debugf("regionCache: cannot find region when updating leader %d,%d", regionID, leaderStoreID)
		return
	}

	if !r.SwitchPeer(leaderStoreID) {
		log.Debugf("regionCache: cannot find peer when updating leader %d,%d", regionID, leaderStoreID)
		c.dropRegionFromCache(r.VerID())
	}
}
Code example #11
File: txn.go Project: ninefive/tidb
func (txn *dbTxn) Inc(k []byte, step int64) (int64, error) {
	log.Debugf("Inc %s, step %d txn:%d", k, step, txn.tID)
	k = kv.EncodeKey(k)

	if err := txn.markOrigin(k); err != nil {
		return 0, err
	}
	val, err := txn.UnionStore.Get(k)
	if kv.IsErrNotFound(err) {
		err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, err
		}

		return step, nil
	}

	if err != nil {
		return 0, err
	}

	intVal, err := strconv.ParseInt(string(val), 10, 0)
	if err != nil {
		return intVal, err
	}

	intVal += step
	err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, err
	}

	return intVal, nil
}
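
Inc is a read-modify-write on a string-encoded counter: a missing key starts the counter at step, otherwise the value is parsed, incremented, and written back. The same round trip against an in-memory map (a stand-in for the union store, with absence playing the role of ErrNotFound):

package main

import (
	"fmt"
	"strconv"
)

func inc(store map[string][]byte, k string, step int64) (int64, error) {
	val, ok := store[k]
	if !ok {
		store[k] = []byte(strconv.FormatInt(step, 10))
		return step, nil
	}
	n, err := strconv.ParseInt(string(val), 10, 64)
	if err != nil {
		return n, err
	}
	n += step
	store[k] = []byte(strconv.FormatInt(n, 10))
	return n, nil
}

func main() {
	s := map[string][]byte{}
	fmt.Println(inc(s, "k", 5)) // 5 <nil>
	fmt.Println(inc(s, "k", 3)) // 8 <nil>
}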
Code example #12
File: client.go Project: pingcap/tso
func (c *Client) do() error {
	session, err := NewConnection(c.addr, time.Duration(1*time.Second))
	if err != nil {
		return errors.Trace(err)
	}

	log.Debugf("connect tso server %s ok", c.addr)

	defer session.Close()
	for {
		select {
		case req := <-c.requests:
			c.pending.PushBack(req)
			length := len(c.requests)
			for i := 0; i < length; i++ {
				req = <-c.requests
				c.pending.PushBack(req)
			}

			err = c.writeRequests(session)
			if err != nil {
				return errors.Trace(err)
			}
			err = c.handleResponse(session)
			if err != nil {
				return errors.Trace(err)
			}
		case addr := <-c.leaderCh:
			oldAddr := c.addr
			c.addr = addr
			return errors.Errorf("leader change %s -> %s", oldAddr, addr)
		}
	}
}
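
The request loop batches writes: it blocks for one request, then drains whatever else is already queued before a single writeRequests call. That drain idiom on its own:

package main

import "fmt"

func main() {
	requests := make(chan int, 8)
	for i := 1; i <= 5; i++ {
		requests <- i
	}

	batch := []int{<-requests} // block for the first request
	// Drain only what is queued right now; len is a snapshot, so the
	// loop cannot block even if producers keep sending.
	for n := len(requests); n > 0; n-- {
		batch = append(batch, <-requests)
	}
	fmt.Println(batch) // [1 2 3 4 5]
}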
Code example #13
File: worker.go Project: yzl11/vessel
// onWorker handles async online schema change: it first tries to become the owner,
// then waits on or polls the job queue to handle schema change jobs.
func (d *ddl) onWorker() {
	defer d.wait.Done()

	// We use 4 * lease time to check the owner's timeout, so we update the owner's
	// status every 2 * lease time; if lease is 0, we fall back to a default of 10s.
	checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)

	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			log.Debugf("wait %s to check DDL status again", checkTime)
		case <-d.jobCh:
		case <-d.quitCh:
			return
		}

		err := d.handleJobQueue()
		if err != nil {
			log.Errorf("handle job err %v", errors.ErrorStack(err))
		}
	}
}
Code example #14
File: keys.go Project: cuiwm/reborn
func (s *Store) restore(bt *engine.Batch, db uint32, key []byte, expireat int64, obj interface{}) error {
	_, err := s.deleteIfExists(bt, db, key)
	if err != nil {
		return err
	}

	if !IsExpired(expireat) {
		var o storeRow
		switch obj.(type) {
		default:
			return errors.Trace(ErrObjectValue)
		case rdb.String:
			o = newStringRow(db, key)
		case rdb.Hash:
			o = newHashRow(db, key)
		case rdb.List:
			o = newListRow(db, key)
		case rdb.ZSet:
			o = newZSetRow(db, key)
		case rdb.Set:
			o = newSetRow(db, key)
		}
		return o.storeObject(s, bt, expireat, obj)
	}

	log.Debugf("restore an expired object, db = %d, key = %v, expireat = %d", db, key, expireat)
	return nil
}
Code example #15
File: kv.go Project: stumaxim28/tidb
// Begin transaction
func (s *dbStore) Begin() (kv.Transaction, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	beginVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		return nil, err
	}
	txn := &dbTxn{
		tid:          beginVer.Ver,
		valid:        true,
		store:        s,
		version:      kv.MinVersion,
		snapshotVals: make(map[string][]byte),
	}
	log.Debugf("Begin txn:%d", txn.tid)
	txn.UnionStore, err = kv.NewUnionStore(&dbSnapshot{
		db:      s.db,
		version: beginVer,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return txn, nil
}
Code example #16
File: txn.go Project: henrylee2cn/tidb
func (txn *hbaseTxn) Inc(k kv.Key, step int64) (int64, error) {
	log.Debugf("Inc %q, step %d txn:%d", k, step, txn.tid)
	val, err := txn.UnionStore.Get(k)
	if kv.IsErrNotFound(err) {
		err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, errors.Trace(err)
		}

		return step, nil
	}

	if err != nil {
		return 0, errors.Trace(err)
	}

	intVal, err := strconv.ParseInt(string(val), 10, 64)
	if err != nil {
		return intVal, errors.Trace(err)
	}

	intVal += step
	err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, errors.Trace(err)
	}
	return intVal, nil
}
Code example #17
File: kv.go Project: yzl11/vessel
// Begin transaction
func (s *dbStore) Begin() (kv.Transaction, error) {
	providerMu.RLock()
	beginVer, err := globalVersionProvider.CurrentVersion()
	providerMu.RUnlock()
	if err != nil {
		return nil, errors.Trace(err)
	}

	s.mu.RLock()
	closed := s.closed
	s.mu.RUnlock()
	if closed {
		return nil, errors.Trace(ErrDBClosed)
	}

	txn := &dbTxn{
		tid:          beginVer.Ver,
		valid:        true,
		store:        s,
		version:      kv.MinVersion,
		snapshotVals: make(map[string]struct{}),
		opts:         make(map[kv.Option]interface{}),
	}
	log.Debugf("Begin txn:%d", txn.tid)
	txn.UnionStore = kv.NewUnionStore(newSnapshot(s, s.db, beginVer), options(txn.opts))
	return txn, nil
}
Code example #18
File: join.go Project: maserg/tidb
func (r *JoinPlan) nextRightJoin(ctx context.Context) (row *plan.Row, err error) {
	for {
		if r.cursor < len(r.matchedRows) {
			row = r.matchedRows[r.cursor]
			r.cursor++
			return
		}
		var rightRow *plan.Row
		rightRow, err = r.Right.Next(ctx)
		if rightRow == nil || err != nil {
			return nil, errors.Trace(err)
		}

		tempExpr := r.On.Clone()
		visitor := NewIdentEvalVisitor(r.Right.GetFields(), rightRow.Data)
		_, err = tempExpr.Accept(visitor)
		if err != nil {
			return nil, errors.Trace(err)
		}
		var filtered bool
		var p plan.Plan
		p, filtered, err = r.Left.Filter(ctx, tempExpr)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if filtered {
			log.Debugf("right join use index")
		}
		err = r.findMatchedRows(ctx, rightRow, p, true)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
}
Code example #19
File: txn.go Project: yzl11/vessel
func (txn *dbTxn) Inc(k kv.Key, step int64) (int64, error) {
	log.Debugf("Inc %q, step %d txn:%d", k, step, txn.tid)
	k = kv.EncodeKey(k)

	txn.markOrigin(k)
	val, err := txn.UnionStore.Get(k)
	if kv.IsErrNotFound(err) {
		err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, errors.Trace(err)
		}

		return step, nil
	}
	if err != nil {
		return 0, errors.Trace(err)
	}

	intVal, err := strconv.ParseInt(string(val), 10, 0)
	if err != nil {
		return intVal, errors.Trace(err)
	}

	intVal += step
	err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, errors.Trace(err)
	}
	txn.store.compactor.OnSet(k)
	return intVal, nil
}
Code example #20
File: slots.go Project: CowLeo/qdb
// SLOTSMGRTTAGONE host port timeout key
func (s *Store) SlotsMgrtTagOne(db uint32, args [][]byte) (int64, error) {
	if len(args) != 4 {
		return 0, errArguments("len(args) = %d, expect = 4", len(args))
	}

	host := string(args[0])
	port, err := ParseInt(args[1])
	if err != nil {
		return 0, errArguments("parse args failed - %s", err)
	}
	ttlms, err := ParseInt(args[2])
	if err != nil {
		return 0, errArguments("parse args failed - %s", err)
	}
	key := args[3]

	var timeout = time.Duration(ttlms) * time.Millisecond
	if timeout == 0 {
		timeout = time.Second
	}
	addr := fmt.Sprintf("%s:%d", host, port)

	if err := s.acquire(); err != nil {
		return 0, errors.Trace(err)
	}
	defer s.release()

	log.Debugf("migrate one with tag, addr = %s, timeout = %d, db = %d, key = %v", addr, timeout, db, key)

	if tag := HashTag(key); len(tag) == len(key) {
		return s.migrateOne(addr, timeout, db, key)
	} else {
		return s.migrateTag(addr, timeout, db, tag)
	}
}
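
The snippet calls HashTag but does not show it. By the Redis/Codis convention, a hash tag is the substring between the first '{' and the following '}', with the whole key used as a fallback, which is why len(tag) == len(key) means "no tag". A plausible implementation under that assumption:

package main

import (
	"bytes"
	"fmt"
)

// hashTag returns the non-empty {...} portion of key if present,
// otherwise the whole key.
func hashTag(key []byte) []byte {
	if beg := bytes.IndexByte(key, '{'); beg >= 0 {
		if end := bytes.IndexByte(key[beg+1:], '}'); end > 0 {
			return key[beg+1 : beg+1+end]
		}
	}
	return key
}

func main() {
	fmt.Printf("%s\n", hashTag([]byte("{user}:42"))) // user
	fmt.Printf("%s\n", hashTag([]byte("plainkey")))  // plainkey
}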
Code example #21
File: conn.go Project: yangxuanjia/tidb
func (cc *clientConn) handleQuery(sql string) (err error) {
	startTs := time.Now()
	rs, err := cc.ctx.Execute(sql)
	if err != nil {
		return errors.Trace(err)
	}
	if rs != nil {
		if len(rs) == 1 {
			err = cc.writeResultset(rs[0], false, false)
		} else {
			err = cc.writeMultiResultset(rs, false)
		}
	} else {
		loadDataInfo := cc.ctx.Value(executor.LoadDataVarKey)
		if loadDataInfo != nil {
			if err = cc.handleLoadData(loadDataInfo.(*executor.LoadDataInfo)); err != nil {
				return errors.Trace(err)
			}
		}
		err = cc.writeOK()
	}
	costTime := time.Since(startTs)
	if len(sql) > 1024 {
		sql = sql[:1024]
	}
	if costTime < time.Second {
		log.Debugf("[TIME_QUERY] %v %s", costTime, sql)
	} else {
		log.Warnf("[TIME_QUERY] %v %s", costTime, sql)
	}
	metrics.Query(costTime)
	return errors.Trace(err)
}
Code example #22
File: new_executor.go Project: anywhy/tidb
// Next implements Executor interface.
func (e *NewTableScanExec) Next() (*Row, error) {
	if e.result == nil {
		err := e.doRequest()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	for {
		if e.subResult == nil {
			var err error
			startTs := time.Now()
			e.subResult, err = e.result.Next()
			if err != nil {
				return nil, errors.Trace(err)
			}
			if e.subResult == nil {
				return nil, nil
			}
			log.Debugf("[TIME_TABLE_SCAN] %v", time.Now().Sub(startTs))
		}
		h, rowData, err := e.subResult.Next()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if rowData == nil {
			e.subResult = nil
			continue
		}
		return resultRowToRow(e.table, h, rowData, e.asName), nil
	}
}
Code example #23
File: bench.go Project: disksing/tso
func main() {
	go http.ListenAndServe(":6666", nil)
	var wg sync.WaitGroup
	start := time.Now()
	for x := 0; x < clientCnt; x++ {
		c := client.NewClient(&client.Conf{ServerAddr: *serverAddress})
		wg.Add(1)
		go func() {
			defer wg.Done()
			cnt := total / clientCnt
			prs := make([]*client.PipelineRequest, cnt)
			for i := 0; i < cnt; i++ {
				pr := c.GoGetTimestamp()
				prs[i] = pr
			}

			for i := 0; i < cnt; i++ {
				prs[i].GetTS()
			}
		}()
	}
	wg.Wait()

	log.Debugf("Total %d, use %v/s", total, time.Since(start).Seconds())
}
Code example #24
File: statement.go Project: XuHuaiyu/tidb
func (ps *perfSchema) EndStatement(state *StatementState) {
	if !enablePerfSchema {
		return
	}
	if state == nil {
		return
	}

	switch state.timerName {
	case timerNameNanosec:
		state.timerEnd = time.Now().UnixNano()
	case timerNameMicrosec:
		state.timerEnd = time.Now().UnixNano() / int64(time.Microsecond)
	case timerNameMillisec:
		state.timerEnd = time.Now().UnixNano() / int64(time.Millisecond)
	default:
		return
	}

	log.Debugf("EndStatement: sql %s, connection id %d, type %s", state.sqlText, state.connID, state.stmtType)

	record := state2Record(state)
	err := ps.updateEventsStmtsCurrent(state.connID, record)
	if err != nil {
		log.Error("Unable to update events_statements_current table")
	}
	err = ps.appendEventsStmtsHistory(record)
	if err != nil {
		log.Errorf("Unable to append to events_statements_history table %v", errors.ErrorStack(err))
	}
}
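
The timer values above are all derived from UnixNano by integer division with the matching time.Duration constant, e.g.:

package main

import (
	"fmt"
	"time"
)

func main() {
	ns := time.Now().UnixNano()
	fmt.Println("nanoseconds: ", ns)
	fmt.Println("microseconds:", ns/int64(time.Microsecond)) // ns / 1e3
	fmt.Println("milliseconds:", ns/int64(time.Millisecond)) // ns / 1e6
}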
Code example #25
File: conn_select.go Project: wangjild/dbatman
func (session *Session) handleQuery(stmt parser.IStatement, sqlstmt string) error {

	if err := session.checkDB(stmt); err != nil {
		log.Debugf("check db error: %s", err.Error())
		return err
	}

	isread := false

	if s, ok := stmt.(parser.ISelect); ok {
		isread = !s.IsLocked()
	} else if _, sok := stmt.(parser.IShow); sok {
		isread = true
	}

	rs, err := session.Executor(isread).Query(sqlstmt)
	// TODO: handle the error here.
	if err != nil {
		return session.handleMySQLError(err)
	}

	defer rs.Close()

	return session.writeRows(rs)
}
Code example #26
File: domain.go Project: stumaxim28/tidb
func (do *Domain) loadInfoSchema(m *meta.TMeta) (err error) {
	schemaMetaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	info := do.infoHandle.Get()
	if info != nil && schemaMetaVersion > 0 && schemaMetaVersion == info.SchemaMetaVersion() {
		log.Debugf("schema version is still %d, no need reload", schemaMetaVersion)
		return nil
	}

	schemas, err := m.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}

	for _, di := range schemas {
		tables, err := m.ListTables(di.ID)
		if err != nil {
			return errors.Trace(err)
		}

		di.Tables = tables
	}

	log.Infof("loadInfoSchema %d", schemaMetaVersion)
	do.infoHandle.Set(schemas, schemaMetaVersion)
	return
}
Code example #27
File: session.go Project: pingcap/tidb
// ExecRestrictedSQL implements RestrictedSQLExecutor interface.
// This is used for executing some restricted sql statements, usually executed during a normal statement execution.
// Unlike normal Exec, it doesn't reset statement status, doesn't commit or rollback the current transaction
// and doesn't write binlog.
func (s *session) ExecRestrictedSQL(ctx context.Context, sql string) (ast.RecordSet, error) {
	if err := s.checkSchemaValidOrRollback(); err != nil {
		return nil, errors.Trace(err)
	}
	charset, collation := s.sessionVars.GetCharsetInfo()
	rawStmts, err := s.ParseSQL(sql, charset, collation)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(rawStmts) != 1 {
		log.Errorf("ExecRestrictedSQL only executes one statement. Too many/few statement in %s", sql)
		return nil, errors.New("wrong number of statement")
	}
	// Some execution is done in compile stage, so we reset it before compile.
	st, err := Compile(s, rawStmts[0])
	if err != nil {
		log.Errorf("Compile %s with error: %v", sql, err)
		return nil, errors.Trace(err)
	}
	// Check the statement against some restrictions.
	// For example, only DML on system meta tables is supported.
	// TODO: Add more restrictions.
	log.Debugf("Executing %s [%s]", st.OriginText(), sql)
	s.sessionVars.InRestrictedSQL = true
	rs, err := st.Exec(ctx)
	s.sessionVars.InRestrictedSQL = false
	return rs, errors.Trace(err)
}
Code example #28
File: domain.go Project: jmptrader/tidb
// loadInfoSchema loads infoschema at startTS into handle, usedSchemaVersion is the currently used
// infoschema version, if it is the same as the schema version at startTS, we don't need to reload again.
func (do *Domain) loadInfoSchema(handle *infoschema.Handle, usedSchemaVersion int64, startTS uint64) error {
	snapshot, err := do.store.GetSnapshot(kv.NewVersion(startTS))
	if err != nil {
		return errors.Trace(err)
	}
	m := meta.NewSnapshotMeta(snapshot)
	latestSchemaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	if usedSchemaVersion != 0 && usedSchemaVersion == latestSchemaVersion {
		log.Debugf("[ddl] schema version is still %d, no need reload", usedSchemaVersion)
		return nil
	}
	ok, err := do.tryLoadSchemaDiffs(m, usedSchemaVersion, latestSchemaVersion)
	if err != nil {
		// We can fall back to a full load, so there is no need to return the error.
		log.Errorf("[ddl] failed to load schema diff %v", err)
	}
	if ok {
		log.Infof("[ddl] diff load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
		return nil
	}
	schemas, err := do.getAllSchemasWithTablesFromMeta(m)
	if err != nil {
		return errors.Trace(err)
	}

	newISBuilder, err := infoschema.NewBuilder(handle).InitWithDBInfos(schemas, latestSchemaVersion)
	if err != nil {
		return errors.Trace(err)
	}
	log.Infof("[ddl] full load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
	return newISBuilder.Build()
}
Code example #29
File: session.go Project: losas/tidb
// ExecRestrictedSQL implements SQLHelper interface.
// This is used for executing some restricted sql statements.
func (s *session) ExecRestrictedSQL(ctx context.Context, sql string) (rset.Recordset, error) {
	if ctx.Value(&sqlexec.RestrictedSQLExecutorKeyType{}) != nil {
		// We do not support running this function concurrently.
		// TODO: Maybe we should remove this restriction later.
		return nil, errors.New("Should not call ExecRestrictedSQL concurrently.")
	}
	statements, err := Compile(ctx, sql)
	if err != nil {
		log.Errorf("Compile %s with error: %v", sql, err)
		return nil, errors.Trace(err)
	}
	if len(statements) != 1 {
		log.Errorf("ExecRestrictedSQL only executes one statement. Too many/few statement in %s", sql)
		return nil, errors.New("Wrong number of statement.")
	}
	st := statements[0]
	// Check the statement against some restrictions.
	// For example, only DML on system meta tables is supported.
	// TODO: Add more restrictions.
	log.Debugf("Executing %s [%s]", st.OriginText(), sql)
	ctx.SetValue(&sqlexec.RestrictedSQLExecutorKeyType{}, true)
	defer ctx.ClearValue(&sqlexec.RestrictedSQLExecutorKeyType{})
	rs, err := st.Exec(ctx)
	return rs, errors.Trace(err)
}
Code example #30
File: executor_distsql.go Project: jmptrader/tidb
func (e *XSelectIndexExec) nextForDoubleRead() (*Row, error) {
	var startTs time.Time
	if e.taskChan == nil {
		startTs = time.Now()
		idxResult, err := e.doIndexRequest()
		if err != nil {
			return nil, errors.Trace(err)
		}
		idxResult.IgnoreData()
		idxResult.Fetch()

		// Use a background goroutine to fetch index and put the result in e.taskChan.
		// e.taskChan serves as a pipeline, so fetching index and getting table data can
		// run concurrently.
		e.taskChan = make(chan *lookupTableTask, 50)
		go e.fetchHandles(idxResult, e.taskChan)
	}

	for {
		if e.taskCurr == nil {
			taskCurr, ok := <-e.taskChan
			if !ok {
				log.Debugf("[TIME_INDEX_TABLE_SCAN] time: %v", time.Since(startTs))
				return nil, e.tasksErr
			}
			e.taskCurr = taskCurr
		}

		row, err := e.taskCurr.getRow()
		if err != nil || row != nil {
			return row, errors.Trace(err)
		}
		e.taskCurr = nil
	}
}
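
taskChan is a classic bounded pipeline: a producer goroutine pushes lookup tasks as index results arrive, Next consumes them, and closing the channel signals that no more work is coming. The shape of that pipeline, reduced to the standard library:

package main

import "fmt"

type task struct{ id int }

func main() {
	taskCh := make(chan *task, 4) // bounded, like the 50-slot channel above

	// Producer: in the original, fetchHandles turns index results into tasks.
	go func() {
		defer close(taskCh) // closing tells the consumer there is no more work
		for i := 0; i < 3; i++ {
			taskCh <- &task{id: i}
		}
	}()

	// Consumer: a Next-style loop that drains tasks until the channel closes.
	for t := range taskCh {
		fmt.Println("process task", t.id)
	}
}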