Example #1
File: txn.go Project: anywhy/tidb
func (txn *tikvTxn) Commit() error {
	if !txn.valid {
		return kv.ErrInvalidTxn
	}

	log.Debugf("[kv] start to commit txn %d", txn.StartTS())
	if err := txn.us.CheckLazyConditionPairs(); err != nil {
		txn.close()
		return errors.Trace(err)
	}

	committer, err := newTxnCommitter(txn)
	if err != nil {
		txn.close()
		return errors.Trace(err)
	}
	if committer == nil {
		txn.close()
		return nil
	}
	err = committer.Commit()
	if err != nil {
		return errors.Trace(err)
	}
	txn.commitTS = committer.commitTS
	log.Debugf("[kv] finish commit txn %d", txn.StartTS())
	return nil
}
Example #2
func (d *ddl) checkOwner(t *meta.Meta) (*model.Owner, error) {
	owner, err := t.GetDDLOwner()
	if err != nil {
		return nil, errors.Trace(err)
	}

	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee that other servers have updated the
	// schema. The owner refreshes its status every 2 * lease time, so we use
	// 4 * lease to detect a timeout.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		if err = t.SetDDLOwner(owner); err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("become owner %s", owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("not owner, owner is %s", owner.OwnerID)
		return nil, errors.Trace(ErrNotOwner)
	}

	return owner, nil
}
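
The timeout rule above is easy to test in isolation: owners refresh LastUpdateTS every 2 * lease, and staleness is declared only after 4 * lease without an update. A minimal sketch of that check, assuming nanosecond timestamps as in checkOwner (ownerExpired is a hypothetical helper, not part of the project):

func ownerExpired(lastUpdateTS int64, lease time.Duration) bool {
	// Owners refresh every 2*lease; treat 4*lease without an update as a timeout.
	maxTimeout := int64(4 * lease)
	return time.Now().UnixNano()-lastUpdateTS > maxTimeout
}
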
Example #3
File: slots.go Project: CowLeo/qdb
// SLOTSRESTORE key ttlms value [key ttlms value ...]
func (s *Store) SlotsRestore(db uint32, args [][]byte) error {
	if len(args) == 0 || len(args)%3 != 0 {
		return errArguments("len(args) = %d, expect != 0 && mod 3 = 0", len(args))
	}

	objs := make([]*rdb.ObjEntry, len(args)/3)
	for i := 0; i < len(objs); i++ {
		key := args[i*3]
		ttlms, err := ParseInt(args[i*3+1])
		if err != nil {
			return errArguments("parse args failed - %s", err)
		}
		value := args[i*3+2]

		expireat := int64(0)
		if ttlms != 0 {
			if v, ok := TTLmsToExpireAt(ttlms); ok && v > 0 {
				expireat = v
			} else {
				return errArguments("parse args[%d] ttlms = %d", i*3+1, ttlms)
			}
		}

		obj, err := rdb.DecodeDump(value)
		if err != nil {
			return errArguments("decode args[%d] failed, %s", i*3+2, err)
		}

		objs[i] = &rdb.ObjEntry{
			DB:       db,
			Key:      key,
			ExpireAt: uint64(expireat),
			Value:    obj,
		}
	}

	if err := s.acquire(); err != nil {
		return errors.Trace(err)
	}
	defer s.release()

	ms := &markSet{}
	bt := engine.NewBatch()
	for i := len(objs) - 1; i >= 0; i-- {
		e := objs[i]
		if ms.Has(e.Key) {
			log.Debugf("[%d] restore batch, db = %d, key = %v, ignore", i, e.DB, e.Key)
			continue
		} else {
			log.Debugf("[%d] restore batch, db = %d, key = %v", i, e.DB, e.Key)
		}
		if err := s.restore(bt, e.DB, e.Key, int64(e.ExpireAt), e.Value); err != nil {
			log.Warningf("restore object failed, db = %d, key = %v, err = %s", e.DB, e.Key, err)
			return errors.Trace(err)
		}
		ms.Set(e.Key)
	}
	fw := &Forward{DB: db, Op: "SlotsRestore", Args: args}
	return s.commit(bt, fw)
}
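
SlotsRestore expects args as flat [key, ttlms, value] triples, which is what the len(args)%3 check above enforces. A small sketch of assembling such a slice on the client side (restoreEntry and buildRestoreArgs are hypothetical, for illustration only):

type restoreEntry struct {
	Key, Value []byte
	TTLms      int64
}

func buildRestoreArgs(entries []restoreEntry) [][]byte {
	args := make([][]byte, 0, len(entries)*3)
	for _, e := range entries {
		// Layout matches the parser above: key at i*3, ttlms at i*3+1, value at i*3+2.
		args = append(args, e.Key, []byte(strconv.FormatInt(e.TTLms, 10)), e.Value)
	}
	return args
}
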
Example #4
// UpdateLeader updates the region cache with newer leader info.
func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderID uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()

	r, ok := c.mu.regions[regionID]
	if !ok {
		log.Debugf("regionCache: cannot find region when updating leader %d,%d", regionID, leaderID)
		return
	}

	var found bool
	for i, p := range r.meta.Peers {
		if p.GetId() == leaderID {
			r.curPeerIdx, r.peer = i, p
			found = true
			break
		}
	}
	if !found {
		log.Debugf("regionCache: cannot find peer when updating leader %d,%d", regionID, leaderID)
		c.dropRegionFromCache(r.VerID())
		return
	}

	store, err := c.pdClient.GetStore(r.peer.GetStoreId())
	if err != nil {
		log.Warnf("regionCache: failed load store %d", r.peer.GetStoreId())
		c.dropRegionFromCache(r.VerID())
		return
	}

	r.addr = store.GetAddress()
}
Example #5
func (w *GCWorker) checkLeader() (bool, error) {
	gcWorkerCounter.WithLabelValues("check_leader").Inc()

	_, err := w.session.Execute("BEGIN")
	if err != nil {
		return false, errors.Trace(err)
	}
	leader, err := w.loadValueFromSysTable(gcLeaderUUIDKey)
	if err != nil {
		w.session.Execute("ROLLBACK")
		return false, errors.Trace(err)
	}
	log.Debugf("[gc worker] got leader: %s", leader)
	if leader == w.uuid {
		err = w.saveTime(gcLeaderLeaseKey, time.Now().Add(gcWorkerLease))
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		_, err = w.session.Execute("COMMIT")
		if err != nil {
			return false, errors.Trace(err)
		}
		return true, nil
	}
	lease, err := w.loadTime(gcLeaderLeaseKey)
	if err != nil {
		return false, errors.Trace(err)
	}
	if lease == nil || lease.Before(time.Now()) {
		log.Debugf("[gc worker] register %s as leader", w.uuid)
		gcWorkerCounter.WithLabelValues("register_leader").Inc()

		err = w.saveValueToSysTable(gcLeaderUUIDKey, w.uuid)
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		err = w.saveValueToSysTable(gcLeaderDescKey, w.desc)
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		err = w.saveTime(gcLeaderLeaseKey, time.Now().Add(gcWorkerLease))
		if err != nil {
			w.session.Execute("ROLLBACK")
			return false, errors.Trace(err)
		}
		_, err = w.session.Execute("COMMIT")
		if err != nil {
			return false, errors.Trace(err)
		}
		return true, nil
	}
	w.session.Execute("ROLLBACK")
	return false, nil
}
Example #6
func (t *tsoTask) Run() error {
	if t.Interrupted() {
		return errors.New("task has been interrupted already")
	}

	// Reaching here means this node is the leader TSO.
	tso := t.tso

	log.Debugf("tso leader %s run task", tso)

	if err := tso.syncTimestamp(); err != nil {
		// interrupt here
		log.Errorf("sync timestmap err %v, interrupt this task", err)

		atomic.StoreInt64(&t.interrupted, 1)
		return errors.Trace(err)
	}

	atomic.StoreInt64(&tso.isLeader, 1)

	tsTicker := time.NewTicker(time.Duration(updateTimestampStep) * time.Millisecond)

	defer func() {
		atomic.StoreInt64(&tso.isLeader, 0)

		tsTicker.Stop()

		// we should close all connections.
		t.closeAllConns()
		log.Debugf("tso leader %s task end", tso)
	}()

	for {
		select {
		case err := <-t.interruptCh:
			log.Debugf("tso leader %s is interrupted, err %v", tso, err)

			// we will interrupt this task, and we can't run this task again.
			atomic.StoreInt64(&t.interrupted, 1)
			return errors.Trace(err)
		case <-t.stopCh:
			log.Debugf("tso leader %s is stopped", tso)

			// we will stop this task, maybe run again later.
			return errors.New("task is stopped")
		case conn := <-t.connCh:
			// handle connection below
			tso.handleConn(conn)
		case <-tsTicker.C:
			if err := tso.updateTimestamp(); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
Example #7
func (txn *themisTxn) prewritePrimary() error {
	// hook for test
	if bypass, _, err := txn.hooks.beforePrewritePrimary(txn, nil); !bypass {
		return err
	}
	err := txn.prewriteRowWithLockClean(txn.primary.Table, txn.primaryRow, true)
	if err != nil {
		log.Debugf("prewrite primary %v %q failed: %v", txn.startTs, txn.primaryRow.row, err.Error())
		return errors.Trace(err)
	}
	log.Debugf("prewrite primary %v %q successfully", txn.startTs, txn.primaryRow.row)
	return nil
}
Example #8
func (d *ddl) checkOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
	var owner *model.Owner
	var err error

	switch flag {
	case ddlJobFlag:
		owner, err = t.GetDDLJobOwner()
	case bgJobFlag:
		owner, err = t.GetBgJobOwner()
	default:
		err = errInvalidJobFlag
	}
	if err != nil {
		return nil, errors.Trace(err)
	}

	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee that other servers have updated the
	// schema. The owner refreshes its status every 2 * lease time, so we use
	// 4 * lease to detect a timeout.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		switch flag {
		case ddlJobFlag:
			err = t.SetDDLJobOwner(owner)
		case bgJobFlag:
			err = t.SetBgJobOwner(owner)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[ddl] become %s job owner %s", flag, owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("[ddl] not %s job owner, owner is %s", flag, owner.OwnerID)
		return nil, errors.Trace(errNotOwner)
	}

	return owner, nil
}
Example #9
// Next implements the Executor Next interface.
func (e *XSelectIndexExec) Next() (*Row, error) {
	if e.tasks == nil {
		startTs := time.Now()
		handles, err := e.fetchHandles()
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[TIME_INDEX_SCAN] time: %v handles: %d", time.Now().Sub(startTs), len(handles))
		e.buildTableTasks(handles)
		limitCount := int64(-1)
		if e.indexPlan.LimitCount != nil {
			limitCount = *e.indexPlan.LimitCount
		}
		concurrency := 2
		if e.indexPlan.NoLimit {
			concurrency = len(e.tasks)
		} else if limitCount > int64(BaseLookupTableTaskSize) {
			concurrency = int(limitCount/int64(BaseLookupTableTaskSize) + 1)
		}
		log.Debugf("[TIME_INDEX_TABLE_CONCURRENT_SCAN] start %d workers", concurrency)
		e.runTableTasks(concurrency)
	}
	for {
		if e.taskCursor >= len(e.tasks) {
			return nil, nil
		}
		task := e.tasks[e.taskCursor]
		if !task.done {
			startTs := time.Now()
			err := <-task.doneCh
			if err != nil {
				return nil, errors.Trace(err)
			}
			task.done = true
			log.Debugf("[TIME_INDEX_TABLE_SCAN] time: %v handles: %d", time.Now().Sub(startTs), len(task.handles))
		}
		if task.cursor < len(task.rows) {
			row := task.rows[task.cursor]
			if !e.aggregate {
				for i, field := range e.indexPlan.Fields() {
					field.Expr.SetDatum(row.Data[i])
				}
			}
			task.cursor++
			return row, nil
		}
		e.taskCursor++
	}
}
Example #10
// UpdateLeader updates the region cache with newer leader info.
func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderStoreID uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()

	r, ok := c.mu.regions[regionID]
	if !ok {
		log.Debugf("regionCache: cannot find region when updating leader %d,%d", regionID, leaderStoreID)
		return
	}

	if !r.SwitchPeer(leaderStoreID) {
		log.Debugf("regionCache: cannot find peer when updating leader %d,%d", regionID, leaderStoreID)
		c.dropRegionFromCache(r.VerID())
	}
}
Example #11
File: txn.go Project: ninefive/tidb
func (txn *dbTxn) Inc(k []byte, step int64) (int64, error) {
	log.Debugf("Inc %s, step %d txn:%d", k, step, txn.tID)
	k = kv.EncodeKey(k)

	if err := txn.markOrigin(k); err != nil {
		return 0, err
	}
	val, err := txn.UnionStore.Get(k)
	if kv.IsErrNotFound(err) {
		err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, err
		}

		return step, nil
	}

	if err != nil {
		return 0, err
	}

	intVal, err := strconv.ParseInt(string(val), 10, 0)
	if err != nil {
		return intVal, err
	}

	intVal += step
	err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, err
	}

	return intVal, nil
}
Example #12
func (c *Client) do() error {
	session, err := NewConnection(c.addr, time.Duration(1*time.Second))
	if err != nil {
		return errors.Trace(err)
	}

	log.Debugf("connect tso server %s ok", c.addr)

	defer session.Close()
	for {
		select {
		case req := <-c.requests:
			c.pending.PushBack(req)
			length := len(c.requests)
			for i := 0; i < length; i++ {
				req = <-c.requests
				c.pending.PushBack(req)
			}

			err = c.writeRequests(session)
			if err != nil {
				return errors.Trace(err)
			}
			err = c.handleResponse(session)
			if err != nil {
				return errors.Trace(err)
			}
		case addr := <-c.leaderCh:
			oldAddr := c.addr
			c.addr = addr
			return errors.Errorf("leader change %s -> %s", oldAddr, addr)
		}
	}
}
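
The request loop above batches writes: after blocking on one request it drains whatever is already buffered in c.requests, then performs a single write/response round trip. A generic sketch of that drain step (drainBatch is hypothetical):

func drainBatch(first int, reqs chan int) []int {
	batch := []int{first}
	// len(reqs) is a snapshot: take only what is already queued, never block.
	for n := len(reqs); n > 0; n-- {
		batch = append(batch, <-reqs)
	}
	return batch
}
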
Example #13
// onWorker handles async online schema changes: it first tries to become the
// owner, then waits on or pulls from the job queue to handle schema change jobs.
func (d *ddl) onWorker() {
	defer d.wait.Done()

	// We use 4 * lease time to check the owner's timeout, so the owner status is
	// updated every 2 * lease time; if lease is 0, we use a default of 10s.
	checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)

	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			log.Debugf("wait %s to check DDL status again", checkTime)
		case <-d.jobCh:
		case <-d.quitCh:
			return
		}

		err := d.handleJobQueue()
		if err != nil {
			log.Errorf("handle job err %v", errors.ErrorStack(err))
		}
	}
}
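
chooseLeaseTime is not shown here; consistent with the comment above (update every 2 * lease, defaulting to 10s when the lease is 0), a plausible implementation would be:

func chooseLeaseTime(t, def time.Duration) time.Duration {
	// Fall back to the default when no lease is configured.
	if t <= 0 {
		return def
	}
	return t
}
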
Example #14
File: keys.go Project: cuiwm/reborn
func (s *Store) restore(bt *engine.Batch, db uint32, key []byte, expireat int64, obj interface{}) error {
	_, err := s.deleteIfExists(bt, db, key)
	if err != nil {
		return err
	}

	if !IsExpired(expireat) {
		var o storeRow
		switch obj.(type) {
		default:
			return errors.Trace(ErrObjectValue)
		case rdb.String:
			o = newStringRow(db, key)
		case rdb.Hash:
			o = newHashRow(db, key)
		case rdb.List:
			o = newListRow(db, key)
		case rdb.ZSet:
			o = newZSetRow(db, key)
		case rdb.Set:
			o = newSetRow(db, key)
		}
		return o.storeObject(s, bt, expireat, obj)
	}

	log.Debugf("restore an expired object, db = %d, key = %v, expireat = %d", db, key, expireat)
	return nil
}
Example #15
// Begin transaction
func (s *dbStore) Begin() (kv.Transaction, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	beginVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		return nil, err
	}
	txn := &dbTxn{
		tid:          beginVer.Ver,
		valid:        true,
		store:        s,
		version:      kv.MinVersion,
		snapshotVals: make(map[string][]byte),
	}
	log.Debugf("Begin txn:%d", txn.tid)
	txn.UnionStore, err = kv.NewUnionStore(&dbSnapshot{
		db:      s.db,
		version: beginVer,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return txn, nil
}
Example #16
func (txn *hbaseTxn) Inc(k kv.Key, step int64) (int64, error) {
	log.Debugf("Inc %q, step %d txn:%d", k, step, txn.tid)
	val, err := txn.UnionStore.Get(k)
	if kv.IsErrNotFound(err) {
		err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, errors.Trace(err)
		}

		return step, nil
	}

	if err != nil {
		return 0, errors.Trace(err)
	}

	intVal, err := strconv.ParseInt(string(val), 10, 64)
	if err != nil {
		return intVal, errors.Trace(err)
	}

	intVal += step
	err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, errors.Trace(err)
	}
	return intVal, nil
}
Example #17
File: kv.go Project: yzl11/vessel
// Begin transaction
func (s *dbStore) Begin() (kv.Transaction, error) {
	providerMu.RLock()
	beginVer, err := globalVersionProvider.CurrentVersion()
	providerMu.RUnlock()
	if err != nil {
		return nil, errors.Trace(err)
	}

	s.mu.RLock()
	closed := s.closed
	s.mu.RUnlock()
	if closed {
		return nil, errors.Trace(ErrDBClosed)
	}

	txn := &dbTxn{
		tid:          beginVer.Ver,
		valid:        true,
		store:        s,
		version:      kv.MinVersion,
		snapshotVals: make(map[string]struct{}),
		opts:         make(map[kv.Option]interface{}),
	}
	log.Debugf("Begin txn:%d", txn.tid)
	txn.UnionStore = kv.NewUnionStore(newSnapshot(s, s.db, beginVer), options(txn.opts))
	return txn, nil
}
Example #18
File: join.go Project: maserg/tidb
func (r *JoinPlan) nextRightJoin(ctx context.Context) (row *plan.Row, err error) {
	for {
		if r.cursor < len(r.matchedRows) {
			row = r.matchedRows[r.cursor]
			r.cursor++
			return
		}
		var rightRow *plan.Row
		rightRow, err = r.Right.Next(ctx)
		if rightRow == nil || err != nil {
			return nil, errors.Trace(err)
		}

		tempExpr := r.On.Clone()
		visitor := NewIdentEvalVisitor(r.Right.GetFields(), rightRow.Data)
		_, err = tempExpr.Accept(visitor)
		if err != nil {
			return nil, errors.Trace(err)
		}
		var filtered bool
		var p plan.Plan
		p, filtered, err = r.Left.Filter(ctx, tempExpr)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if filtered {
			log.Debugf("right join use index")
		}
		err = r.findMatchedRows(ctx, rightRow, p, true)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
}
Example #19
File: txn.go Project: yzl11/vessel
func (txn *dbTxn) Inc(k kv.Key, step int64) (int64, error) {
	log.Debugf("Inc %q, step %d txn:%d", k, step, txn.tid)
	k = kv.EncodeKey(k)

	txn.markOrigin(k)
	val, err := txn.UnionStore.Get(k)
	if kv.IsErrNotFound(err) {
		err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, errors.Trace(err)
		}

		return step, nil
	}
	if err != nil {
		return 0, errors.Trace(err)
	}

	intVal, err := strconv.ParseInt(string(val), 10, 0)
	if err != nil {
		return intVal, errors.Trace(err)
	}

	intVal += step
	err = txn.UnionStore.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, errors.Trace(err)
	}
	txn.store.compactor.OnSet(k)
	return intVal, nil
}
Example #20
File: slots.go Project: CowLeo/qdb
// SLOTSMGRTTAGONE host port timeout key
func (s *Store) SlotsMgrtTagOne(db uint32, args [][]byte) (int64, error) {
	if len(args) != 4 {
		return 0, errArguments("len(args) = %d, expect = 4", len(args))
	}

	host := string(args[0])
	port, err := ParseInt(args[1])
	if err != nil {
		return 0, errArguments("parse args failed - %s", err)
	}
	ttlms, err := ParseInt(args[2])
	if err != nil {
		return 0, errArguments("parse args failed - %s", err)
	}
	key := args[3]

	var timeout = time.Duration(ttlms) * time.Millisecond
	if timeout == 0 {
		timeout = time.Second
	}
	addr := fmt.Sprintf("%s:%d", host, port)

	if err := s.acquire(); err != nil {
		return 0, errors.Trace(err)
	}
	defer s.release()

	log.Debugf("migrate one with tag, addr = %s, timeout = %d, db = %d, key = %v", addr, timeout, db, key)

	if tag := HashTag(key); len(tag) == len(key) {
		return s.migrateOne(addr, timeout, db, key)
	} else {
		return s.migrateTag(addr, timeout, db, tag)
	}
}
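
HashTag presumably follows the usual Redis-style convention: if the key contains a non-empty {...} section, only that section determines the slot, so related keys can be colocated and migrated together. A sketch under that assumption (the real qdb implementation may differ in detail):

func HashTag(key []byte) []byte {
	if beg := bytes.IndexByte(key, '{'); beg >= 0 {
		// Only a non-empty tag between '{' and '}' counts.
		if end := bytes.IndexByte(key[beg+1:], '}'); end > 0 {
			return key[beg+1 : beg+1+end]
		}
	}
	return key
}
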
Example #21
func (cc *clientConn) handleQuery(sql string) (err error) {
	startTs := time.Now()
	rs, err := cc.ctx.Execute(sql)
	if err != nil {
		return errors.Trace(err)
	}
	if rs != nil {
		if len(rs) == 1 {
			err = cc.writeResultset(rs[0], false, false)
		} else {
			err = cc.writeMultiResultset(rs, false)
		}
	} else {
		loadDataInfo := cc.ctx.Value(executor.LoadDataVarKey)
		if loadDataInfo != nil {
			if err = cc.handleLoadData(loadDataInfo.(*executor.LoadDataInfo)); err != nil {
				return errors.Trace(err)
			}
		}
		err = cc.writeOK()
	}
	costTime := time.Since(startTs)
	if len(sql) > 1024 {
		sql = sql[:1024]
	}
	if costTime < time.Second {
		log.Debugf("[TIME_QUERY] %v %s", costTime, sql)
	} else {
		log.Warnf("[TIME_QUERY] %v %s", costTime, sql)
	}
	metrics.Query(costTime)
	return errors.Trace(err)
}
Example #22
// Next implements the Executor interface.
func (e *NewTableScanExec) Next() (*Row, error) {
	if e.result == nil {
		err := e.doRequest()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	for {
		if e.subResult == nil {
			var err error
			startTs := time.Now()
			e.subResult, err = e.result.Next()
			if err != nil {
				return nil, errors.Trace(err)
			}
			if e.subResult == nil {
				return nil, nil
			}
			log.Debugf("[TIME_TABLE_SCAN] %v", time.Now().Sub(startTs))
		}
		h, rowData, err := e.subResult.Next()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if rowData == nil {
			e.subResult = nil
			continue
		}
		return resultRowToRow(e.table, h, rowData, e.asName), nil
	}
}
Example #23
func main() {
	go http.ListenAndServe(":6666", nil)
	var wg sync.WaitGroup
	start := time.Now()
	for x := 0; x < clientCnt; x++ {
		c := client.NewClient(&client.Conf{ServerAddr: *serverAddress})
		wg.Add(1)
		go func() {
			defer wg.Done()
			cnt := total / clientCnt
			prs := make([]*client.PipelineRequest, cnt)
			for i := 0; i < cnt; i++ {
				pr := c.GoGetTimestamp()
				prs[i] = pr
			}

			for i := 0; i < cnt; i++ {
				prs[i].GetTS()
			}
		}()
	}
	wg.Wait()

	log.Debugf("Total %d, use %v/s", total, time.Since(start).Seconds())
}
Example #24
func (ps *perfSchema) EndStatement(state *StatementState) {
	if !enablePerfSchema {
		return
	}
	if state == nil {
		return
	}

	switch state.timerName {
	case timerNameNanosec:
		state.timerEnd = time.Now().UnixNano()
	case timerNameMicrosec:
		state.timerEnd = time.Now().UnixNano() / int64(time.Microsecond)
	case timerNameMillisec:
		state.timerEnd = time.Now().UnixNano() / int64(time.Millisecond)
	default:
		return
	}

	log.Debugf("EndStatement: sql %s, connection id %d, type %s", state.sqlText, state.connID, state.stmtType)

	record := state2Record(state)
	err := ps.updateEventsStmtsCurrent(state.connID, record)
	if err != nil {
		log.Error("Unable to update events_statements_current table")
	}
	err = ps.appendEventsStmtsHistory(record)
	if err != nil {
		log.Errorf("Unable to append to events_statements_history table %v", errors.ErrorStack(err))
	}
}
Example #25
func (session *Session) handleQuery(stmt parser.IStatement, sqlstmt string) error {

	if err := session.checkDB(stmt); err != nil {
		log.Debugf("check db error: %s", err.Error())
		return err
	}

	isread := false

	if s, ok := stmt.(parser.ISelect); ok {
		isread = !s.IsLocked()
	} else if _, sok := stmt.(parser.IShow); sok {
		isread = true
	}

	rs, err := session.Executor(isread).Query(sqlstmt)
	// TODO: handle the error here.
	if err != nil {
		return session.handleMySQLError(err)
	}

	defer rs.Close()

	return session.writeRows(rs)
}
Example #26
func (do *Domain) loadInfoSchema(m *meta.TMeta) (err error) {
	schemaMetaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	info := do.infoHandle.Get()
	if info != nil && schemaMetaVersion > 0 && schemaMetaVersion == info.SchemaMetaVersion() {
		log.Debugf("schema version is still %d, no need reload", schemaMetaVersion)
		return nil
	}

	schemas, err := m.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}

	for _, di := range schemas {
		tables, err := m.ListTables(di.ID)
		if err != nil {
			return errors.Trace(err)
		}

		di.Tables = tables
	}

	log.Infof("loadInfoSchema %d", schemaMetaVersion)
	do.infoHandle.Set(schemas, schemaMetaVersion)
	return
}
Example #27
// ExecRestrictedSQL implements RestrictedSQLExecutor interface.
// This is used for executing some restricted sql statements, usually executed during a normal statement execution.
// Unlike normal Exec, it doesn't reset statement status, doesn't commit or rollback the current transaction
// and doesn't write binlog.
func (s *session) ExecRestrictedSQL(ctx context.Context, sql string) (ast.RecordSet, error) {
	if err := s.checkSchemaValidOrRollback(); err != nil {
		return nil, errors.Trace(err)
	}
	charset, collation := s.sessionVars.GetCharsetInfo()
	rawStmts, err := s.ParseSQL(sql, charset, collation)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(rawStmts) != 1 {
		log.Errorf("ExecRestrictedSQL only executes one statement. Too many/few statement in %s", sql)
		return nil, errors.New("wrong number of statement")
	}
	// Some execution is done in compile stage, so we reset it before compile.
	st, err := Compile(s, rawStmts[0])
	if err != nil {
		log.Errorf("Compile %s with error: %v", sql, err)
		return nil, errors.Trace(err)
	}
	// Check statement for some restrictions.
	// For example only support DML on system meta table.
	// TODO: Add more restrictions.
	log.Debugf("Executing %s [%s]", st.OriginText(), sql)
	s.sessionVars.InRestrictedSQL = true
	rs, err := st.Exec(ctx)
	s.sessionVars.InRestrictedSQL = false
	return rs, errors.Trace(err)
}
Example #28
// loadInfoSchema loads the infoschema at startTS into handle. usedSchemaVersion is the
// currently used infoschema version; if it is the same as the schema version at startTS,
// we don't need to reload.
func (do *Domain) loadInfoSchema(handle *infoschema.Handle, usedSchemaVersion int64, startTS uint64) error {
	snapshot, err := do.store.GetSnapshot(kv.NewVersion(startTS))
	if err != nil {
		return errors.Trace(err)
	}
	m := meta.NewSnapshotMeta(snapshot)
	latestSchemaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	if usedSchemaVersion != 0 && usedSchemaVersion == latestSchemaVersion {
		log.Debugf("[ddl] schema version is still %d, no need reload", usedSchemaVersion)
		return nil
	}
	ok, err := do.tryLoadSchemaDiffs(m, usedSchemaVersion, latestSchemaVersion)
	if err != nil {
		// We can fall back to full load, don't need to return the error.
		log.Errorf("[ddl] failed to load schema diff %v", err)
	}
	if ok {
		log.Infof("[ddl] diff load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
		return nil
	}
	schemas, err := do.getAllSchemasWithTablesFromMeta(m)
	if err != nil {
		return errors.Trace(err)
	}

	newISBuilder, err := infoschema.NewBuilder(handle).InitWithDBInfos(schemas, latestSchemaVersion)
	if err != nil {
		return errors.Trace(err)
	}
	log.Infof("[ddl] full load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
	return newISBuilder.Build()
}
Example #29
// ExecRestrictedSQL implements SQLHelper interface.
// This is used for executing some restricted sql statements.
func (s *session) ExecRestrictedSQL(ctx context.Context, sql string) (rset.Recordset, error) {
	if ctx.Value(&sqlexec.RestrictedSQLExecutorKeyType{}) != nil {
		// We do not support running this function concurrently.
		// TODO: Maybe we should remove this restriction later.
		return nil, errors.New("Should not call ExecRestrictedSQL concurrently.")
	}
	statements, err := Compile(ctx, sql)
	if err != nil {
		log.Errorf("Compile %s with error: %v", sql, err)
		return nil, errors.Trace(err)
	}
	if len(statements) != 1 {
		log.Errorf("ExecRestrictedSQL only executes one statement. Too many/few statement in %s", sql)
		return nil, errors.New("Wrong number of statement.")
	}
	st := statements[0]
	// Check statement for some restriction
	// For example only support DML on system meta table.
	// TODO: Add more restrictions.
	log.Debugf("Executing %s [%s]", st.OriginText(), sql)
	ctx.SetValue(&sqlexec.RestrictedSQLExecutorKeyType{}, true)
	defer ctx.ClearValue(&sqlexec.RestrictedSQLExecutorKeyType{})
	rs, err := st.Exec(ctx)
	return rs, errors.Trace(err)
}
Example #30
func (e *XSelectIndexExec) nextForDoubleRead() (*Row, error) {
	var startTs time.Time
	if e.taskChan == nil {
		startTs = time.Now()
		idxResult, err := e.doIndexRequest()
		if err != nil {
			return nil, errors.Trace(err)
		}
		idxResult.IgnoreData()
		idxResult.Fetch()

		// Use a background goroutine to fetch index and put the result in e.taskChan.
		// e.taskChan serves as a pipeline, so fetching index and getting table data can
		// run concurrently.
		e.taskChan = make(chan *lookupTableTask, 50)
		go e.fetchHandles(idxResult, e.taskChan)
	}

	for {
		if e.taskCurr == nil {
			taskCurr, ok := <-e.taskChan
			if !ok {
				log.Debugf("[TIME_INDEX_TABLE_SCAN] time: %v", time.Since(startTs))
				return nil, e.tasksErr
			}
			e.taskCurr = taskCurr
		}

		row, err := e.taskCurr.getRow()
		if err != nil || row != nil {
			return row, errors.Trace(err)
		}
		e.taskCurr = nil
	}
}
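
The taskChan pipeline above is the standard Go producer/consumer shape: a background goroutine fills a buffered channel and closes it when done, while the caller drains it on demand. A self-contained sketch of the pattern (the channel size and element type are illustrative):

func pipeline() {
	tasks := make(chan int, 50) // buffered so the producer can run ahead
	go func() {
		defer close(tasks) // closing tells the consumer there are no more tasks
		for i := 0; i < 100; i++ {
			tasks <- i
		}
	}()
	for t := range tasks { // ranges until the channel is closed and drained
		_ = t // process the task
	}
}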