Example #1
func (c *client) watchLeader(leaderPath string, revision int64) {
	defer c.wg.Done()
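	// Watch leaderPath starting from revision; whenever the leader changes, look up the new
	// leader address and replace the RPC worker with one pointing at it.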
WATCH:
	for {
		log.Infof("[pd] start watch pd leader on path %v, revision %v", leaderPath, revision)
		rch := c.etcdClient.Watch(context.Background(), leaderPath, clientv3.WithRev(revision))
		select {
		case resp := <-rch:
			if resp.Canceled {
				log.Warn("[pd] leader watcher canceled")
				continue WATCH
			}
			leaderAddr, rev, err := getLeader(c.etcdClient, leaderPath)
			if err != nil {
				log.Warn(err)
				continue WATCH
			}
			log.Infof("[pd] found new pd-server leader addr: %v", leaderAddr)
			c.workerMutex.Lock()
			c.worker.stop(errors.New("[pd] leader change"))
			c.worker = newRPCWorker(leaderAddr, c.clusterID)
			c.workerMutex.Unlock()
			revision = rev
		case <-c.quit:
			return
		}
	}
}
Example #2
func (h *Handler) run() error {
	log.Infof("open listen address '%s' and start service", h.l.Addr())

	for {
		nc, err := h.l.Accept()
		if err != nil {
			return errors.Trace(err)
		}
		h.counters.clientsAccepted.Add(1)
		go func() {
			h.counters.clients.Add(1)
			defer h.counters.clients.Sub(1)

			c := newConn(nc, h, h.config.ConnTimeout)

			log.Infof("new connection: %s", c)
			if err := c.serve(h); err != nil {
				if errors.Cause(err) == io.EOF {
					log.Infof("connection lost: %s [io.EOF]", c)
				} else {
					log.Warningf("connection lost: %s, err = %s", c, err)
				}
			} else {
				log.Infof("connection exit: %s", c)
			}
		}()
	}
}
Example #3
func (s *Store) Reset() error {
	if err := s.acquire(); err != nil {
		return err
	}
	defer s.release()
	log.Infof("store is reseting...")
	for i := s.splist.Len(); i != 0; i-- {
		v := s.splist.Remove(s.splist.Front()).(*StoreSnapshot)
		v.Close()
	}
	for i := s.itlist.Len(); i != 0; i-- {
		v := s.itlist.Remove(s.itlist.Front()).(*storeIterator)
		v.Close()
	}
	if err := s.db.Clear(); err != nil {
		s.db.Close()
		s.db = nil
		log.Errorf("store reset failed - %s", err)
		return err
	}
	s.serial++
	log.Infof("store is reset")
	return nil
}
Example #4
// needFullReSync reports whether a full resync is required; if not, it also returns the offset to resume the partial sync from.
func (h *Handler) needFullReSync(c *conn, args [][]byte) (bool, int64) {
	masterRunID := args[0]

	if !bytes.EqualFold(masterRunID, h.runID) {
		if !bytes.Equal(masterRunID, []byte{'?'}) {
			log.Infof("Partial resynchronization not accepted, runid mismatch, server is %s, but client is %s", h.runID, masterRunID)
		} else {
			log.Infof("Full resync requested by slave.")
		}
		return true, 0
	}

	syncOffset, err := strconv.ParseInt(string(args[1]), 10, 64)
	if err != nil {
		log.Errorf("PSYNC parse sync offset err, try full resync - %s", err)
		return true, 0
	}

	r := &h.repl

	h.repl.RLock()
	defer h.repl.RUnlock()

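	// A partial resync is only possible if the requested offset still falls inside the current backlog window.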
	if r.backlogBuf == nil || syncOffset < r.backlogOffset ||
		syncOffset > (r.backlogOffset+int64(r.backlogBuf.Len())) {
		log.Infof("unable to partial resync with the slave for lack of backlog, slave offset %d", syncOffset)
		if syncOffset > r.masterOffset {
			log.Infof("slave tried to PSYNC with an offset %d larger than master offset %d", syncOffset, r.masterOffset)
		}

		return true, 0
	}

	return false, syncOffset
}
Example #5
func (c *Customer) run() error {
	ticker := time.NewTicker(1 * time.Second)
	cnt := 0
	defer func() {
		ticker.Stop()
		wg.Done()
		log.Infof("Customer_%s QUIT succ", c.id)
	}()
	for {
		select {
		case n := <-c.recvCh:
			c.balance += n
		case <-ticker.C:
			cnt += 1
			if cnt > maxActionCnt {
				return nil
			}
			log.Infof("[Customer_%s] round %d", c.id, cnt)
			err := c.randomDo()
			if err != nil {
				log.Errorf("ERRORRRRR!!!!")
				return errors.Trace(err)
			}
		}
	}
}
Example #6
func (w *GCWorker) start() {
	log.Infof("[gc worker] %s start.", w.uuid)
	ticker := time.NewTicker(gcWorkerTickInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			isLeader, err := w.checkLeader()
			if err != nil {
				log.Warnf("[gc worker] check leader err: %v", err)
				break
			}
			if isLeader {
				err = w.leaderTick()
				if err != nil {
					log.Warnf("[gc worker] leader tick err: %v", err)
				}
			}
		case err := <-w.done:
			w.gcIsRunning = false
			w.lastFinish = time.Now()
			if err != nil {
				log.Errorf("[gc worker] runGCJob error: %v", err)
				break
			}
		case <-w.quit:
			log.Infof("[gc worker] (%s) quit.", w.uuid)
			return
		}
	}
}
Example #7
// loadInfoSchema loads the infoschema at startTS into handle. usedSchemaVersion is the infoschema
// version currently in use; if it equals the schema version at startTS, there is no need to reload.
func (do *Domain) loadInfoSchema(handle *infoschema.Handle, usedSchemaVersion int64, startTS uint64) error {
	snapshot, err := do.store.GetSnapshot(kv.NewVersion(startTS))
	if err != nil {
		return errors.Trace(err)
	}
	m := meta.NewSnapshotMeta(snapshot)
	latestSchemaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	if usedSchemaVersion != 0 && usedSchemaVersion == latestSchemaVersion {
		log.Debugf("[ddl] schema version is still %d, no need reload", usedSchemaVersion)
		return nil
	}
	ok, err := do.tryLoadSchemaDiffs(m, usedSchemaVersion, latestSchemaVersion)
	if err != nil {
		// We can fall back to full load, don't need to return the error.
		log.Errorf("[ddl] failed to load schema diff %v", err)
	}
	if ok {
		log.Infof("[ddl] diff load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
		return nil
	}
	schemas, err := do.getAllSchemasWithTablesFromMeta(m)
	if err != nil {
		return errors.Trace(err)
	}

	newISBuilder, err := infoschema.NewBuilder(handle).InitWithDBInfos(schemas, latestSchemaVersion)
	if err != nil {
		return errors.Trace(err)
	}
	log.Infof("[ddl] full load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
	return newISBuilder.Build()
}
Example #8
func (c *Conf) CheckConfigUpdate() {
	if c.proxyConfig.Global.ConfAutoload == 1 {
		for {
			time.Sleep(time.Minute)
			log.Infof("CheckConfigUpdate checking")
			fileinfo, err := os.Stat(c.path)
			if err != nil {
				log.Errorf("CheckConfigUpdate error %s", err.Error())
				continue
			}
			// the config file has been modified since the last load
			if c.lastModifiedTime.Before(fileinfo.ModTime()) {
				log.Infof("CheckConfigUpdate config change and load new config")
				defaultProxyConfig := getDefaultProxyConfig()
				err = c.parseConfigFile(defaultProxyConfig)
				if err != nil {
					log.Errorf("CheckConfigUpdate error %s", err.Error())
					continue
				}
				c.lastModifiedTime = fileinfo.ModTime()
				// other goroutines read proxyConfig concurrently, so guard the swap with the mutex
				c.mu.Lock()
				c.proxyConfig = defaultProxyConfig
				c.mu.Unlock()
				log.Infof("CheckConfigUpdate new config load success")
			}
		}
	}
}
Example #9
func (d *ddl) doDDLJob(ctx context.Context, job *model.Job) error {
	// For every DDL, we must commit current transaction.
	if err := ctx.CommitTxn(); err != nil {
		return errors.Trace(err)
	}

	// Get a global job ID and put the DDL job in the queue.
	err := d.addDDLJob(ctx, job)
	if err != nil {
		return errors.Trace(err)
	}

	// Notify the worker that a new job has been pushed, then wait for the job to finish.
	asyncNotify(d.ddlJobCh)
	log.Infof("[ddl] start DDL job %s", job)

	var historyJob *model.Job
	jobID := job.ID
	// From start to finish, a job moves through the states none -> delete only -> write only -> reorganization -> public.
	// Each state change waits up to 2 * lease, so the ticker here checks every 10 * lease.
	ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second))
	startTime := time.Now()
	jobsGauge.WithLabelValues(JobType(ddlJobFlag).String(), job.Type.String()).Inc()
	defer func() {
		ticker.Stop()
		jobsGauge.WithLabelValues(JobType(ddlJobFlag).String(), job.Type.String()).Dec()
		retLabel := handleJobSucc
		if err != nil {
			retLabel = handleJobFailed
		}
		handleJobHistogram.WithLabelValues(JobType(ddlJobFlag).String(), job.Type.String(),
			retLabel).Observe(time.Since(startTime).Seconds())
	}()
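	// Poll the history queue: the job shows up there once it has finished or been cancelled.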
	for {
		select {
		case <-d.ddlJobDoneCh:
		case <-ticker.C:
		}

		historyJob, err = d.getHistoryDDLJob(jobID)
		if err != nil {
			log.Errorf("[ddl] get history DDL job err %v, check again", err)
			continue
		} else if historyJob == nil {
			log.Warnf("[ddl] DDL job %d is not in history, maybe not run", jobID)
			continue
		}

		// If a job is a history job, the state must be JobDone or JobCancel.
		if historyJob.State == model.JobDone {
			log.Infof("[ddl] DDL job %d is finished", jobID)
			return nil
		}

		return errors.Trace(historyJob.Error)
	}
}
Example #10
// execute executes the two-phase commit protocol.
func (c *twoPhaseCommitter) execute() error {
	ctx := context.Background()
	defer func() {
		// Always clean up all written keys if the txn does not commit.
		c.mu.RLock()
		writtenKeys := c.mu.writtenKeys
		committed := c.mu.committed
		c.mu.RUnlock()
		if !committed {
			go func() {
				err := c.cleanupKeys(NewBackoffer(cleanupMaxBackoff, ctx), writtenKeys)
				if err != nil {
					log.Infof("2PC cleanup err: %v, tid: %d", err, c.startTS)
				} else {
					log.Infof("2PC clean up done, tid: %d", c.startTS)
				}
			}()
		}
	}()

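	// Phase 1: prewrite all keys (and the binlog, if enabled) before asking for a commit timestamp.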
	binlogChan := c.prewriteBinlog()
	err := c.prewriteKeys(NewBackoffer(prewriteMaxBackoff, ctx), c.keys)
	if binlogChan != nil {
		binlogErr := <-binlogChan
		if binlogErr != nil {
			return errors.Trace(binlogErr)
		}
	}
	if err != nil {
		log.Warnf("2PC failed on prewrite: %v, tid: %d", err, c.startTS)
		return errors.Trace(err)
	}

	commitTS, err := c.store.getTimestampWithRetry(NewBackoffer(tsoMaxBackoff, ctx))
	if err != nil {
		log.Warnf("2PC get commitTS failed: %v, tid: %d", err, c.startTS)
		return errors.Trace(err)
	}
	c.commitTS = commitTS

	if c.store.oracle.IsExpired(c.startTS, maxTxnTimeUse) {
		err = errors.Errorf("txn takes too much time, start: %d, commit: %d", c.startTS, c.commitTS)
		return errors.Annotate(err, txnRetryableMark)
	}

	err = c.commitKeys(NewBackoffer(commitMaxBackoff, ctx), c.keys)
	if err != nil {
		if !c.mu.committed {
			log.Warnf("2PC failed on commit: %v, tid: %d", err, c.startTS)
			return errors.Trace(err)
		}
		log.Warnf("2PC succeed with error: %v, tid: %d", err, c.startTS)
	}
	return nil
}
Example #11
// DoGC sends the GC command to the KV store. It is exported for testing purposes.
func (w *GCWorker) DoGC(safePoint uint64) error {
	gcWorkerCounter.WithLabelValues("do_gc").Inc()

	req := &kvrpcpb.Request{
		Type: kvrpcpb.MessageType_CmdGC,
		CmdGcReq: &kvrpcpb.CmdGCRequest{
			SafePoint: safePoint,
		},
	}
	bo := NewBackoffer(gcMaxBackoff, goctx.Background())

	log.Infof("[gc worker] %s start gc, safePoint: %v.", w.uuid, safePoint)
	startTime := time.Now()
	regions := 0

	var key []byte
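	// Walk the key space region by region: locate the region containing key, GC it, then advance key to the region's end key.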
	for {
		select {
		case <-w.quit:
			return errors.New("[gc worker] gc job canceled")
		default:
		}

		loc, err := w.store.regionCache.LocateKey(bo, key)
		if err != nil {
			return errors.Trace(err)
		}
		resp, err := w.store.SendKVReq(bo, req, loc.Region, readTimeoutLong)
		if err != nil {
			return errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
			if err != nil {
				return errors.Trace(err)
			}
			continue
		}
		gcResp := resp.GetCmdGcResp()
		if gcResp == nil {
			return errors.Trace(errBodyMissing)
		}
		if gcResp.GetError() != nil {
			return errors.Errorf("unexpected gc error: %s", gcResp.GetError())
		}
		regions++
		key = loc.EndKey
		if len(key) == 0 {
			break
		}
	}
	log.Infof("[gc worker] %s finish gc, safePoint: %v, regions: %v, cost time: %s", w.uuid, safePoint, regions, time.Since(startTime))
	gcHistogram.WithLabelValues("do_gc").Observe(time.Since(startTime).Seconds())
	return nil
}
Example #12
// runDDLJob runs a DDL job.
func (d *ddl) runDDLJob(t *meta.Meta, job *model.Job) {
	log.Infof("[ddl] run DDL job %s", job)
	if job.IsFinished() {
		return
	}

	if job.State != model.JobRollback {
		job.State = model.JobRunning
	}

	var err error
	switch job.Type {
	case model.ActionCreateSchema:
		err = d.onCreateSchema(t, job)
	case model.ActionDropSchema:
		err = d.onDropSchema(t, job)
	case model.ActionCreateTable:
		err = d.onCreateTable(t, job)
	case model.ActionDropTable:
		err = d.onDropTable(t, job)
	case model.ActionAddColumn:
		err = d.onAddColumn(t, job)
	case model.ActionDropColumn:
		err = d.onDropColumn(t, job)
	case model.ActionModifyColumn:
		err = d.onModifyColumn(t, job)
	case model.ActionAddIndex:
		err = d.onCreateIndex(t, job)
	case model.ActionDropIndex:
		err = d.onDropIndex(t, job)
	case model.ActionAddForeignKey:
		err = d.onCreateForeignKey(t, job)
	case model.ActionDropForeignKey:
		err = d.onDropForeignKey(t, job)
	case model.ActionTruncateTable:
		err = d.onTruncateTable(t, job)
	default:
		// Invalid job, cancel it.
		job.State = model.JobCancelled
		err = errInvalidDDLJob.Gen("invalid ddl job %v", job)
	}

	// Save the error in the job, so that others can know an error happened.
	if err != nil {
		// If job is not cancelled, we should log this error.
		if job.State != model.JobCancelled {
			log.Errorf("[ddl] run ddl job err %v", errors.ErrorStack(err))
		} else {
			log.Infof("[ddl] the job is normal to cancel because %v", errors.ErrorStack(err))
		}

		job.Error = toTError(err)
		job.ErrorCount++
	}
}
Example #13
func logger(runmode string) macaron.Handler {
	return func(ctx *macaron.Context) {
		if runmode == "dev" {
			log.Debug("")
			log.Debug("----------------------------------------------------------------------------------")
		}

		log.Infof("[%s] [%s]", ctx.Req.Method, ctx.Req.RequestURI)
		log.Infof("[Header] %v", ctx.Req.Header)
	}
}
Example #14
func (w *GCWorker) doGC(safePoint uint64) error {
	req := &kvrpcpb.Request{
		Type: kvrpcpb.MessageType_CmdGC,
		CmdGcReq: &kvrpcpb.CmdGCRequest{
			SafePoint: safePoint,
		},
	}
	bo := NewBackoffer(gcMaxBackoff)

	log.Infof("[gc worker] %s start gc, safePoint: %v.", w.uuid, safePoint)
	startTime := time.Now()
	regions := 0

	var key []byte
	for {
		select {
		case <-w.quit:
			return errors.New("[gc worker] gc job canceled")
		default:
		}

		region, err := w.store.regionCache.GetRegion(bo, key)
		if err != nil {
			return errors.Trace(err)
		}
		resp, err := w.store.SendKVReq(bo, req, region.VerID())
		if err != nil {
			return errors.Trace(err)
		}
		if regionErr := resp.GetRegionError(); regionErr != nil {
			err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
			if err != nil {
				return errors.Trace(err)
			}
			continue
		}
		gcResp := resp.GetCmdGcResp()
		if gcResp == nil {
			return errors.Trace(errBodyMissing)
		}
		if gcResp.GetError() != nil {
			return errors.Errorf("unexpected gc error: %s", gcResp.GetError())
		}
		regions++
		key = region.EndKey()
		if len(key) == 0 {
			break
		}
	}
	log.Infof("[gc worker] %s finish gc, safePoint: %v, regions: %v, cost time: %s", w.uuid, safePoint, regions, time.Now().Sub(startTime))
	return nil
}
Example #15
func (s *Server) handleTopoEvent() {
	for {
		select {
		case r := <-s.reqCh:
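			// Requests whose slot is still in pre-migrate state are buffered; when a dispatchable
			// request arrives, the buffered requests are retried before it is dispatched.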
			if s.slots[r.slotIdx].slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE {
				s.bufferedReq.PushBack(r)
				continue
			}

			for e := s.bufferedReq.Front(); e != nil; {
				next := e.Next()
				if s.dispatch(e.Value.(*PipelineRequest)) {
					s.bufferedReq.Remove(e)
				}
				e = next
			}

			if !s.dispatch(r) {
				log.Fatalf("should never happend, %+v, %+v", r, s.slots[r.slotIdx].slotInfo)
			}
		case e := <-s.evtbus:
			switch ev := e.(type) {
			case *killEvent:
				s.handleMarkOffline()
				ev.done <- nil
			default:
				if s.top.IsSessionExpiredEvent(e) {
					log.Fatalf("session expired: %+v", e)
				}

				evtPath := GetEventPath(e)
				log.Infof("got event %s, %v, lastActionSeq %d", s.pi.ID, e, s.lastActionSeq)
				if strings.HasPrefix(evtPath, models.GetActionResponsePath(s.conf.ProductName)) {
					seq, err := strconv.Atoi(path.Base(evtPath))
					if err != nil {
						log.Warning(err)
					} else if seq < s.lastActionSeq {
						log.Infof("ignore, lastActionSeq %d, seq %d", s.lastActionSeq, seq)
						continue
					}
				}

				s.processAction(e)
			}
		}
	}
}
Example #16
func (t *haTask) doFailover(s *models.Server) error {
	// first get all slaves
	group := models.NewServerGroup(globalEnv.ProductName(), s.GroupId)

	var err error
	group.Servers, err = group.GetServers(globalConn)
	if err != nil {
		return errors.Trace(err)
	}

	slaves := make([]*models.Server, 0, len(group.Servers))
	slaveAddrs := make([]string, 0, len(group.Servers))

	for _, srv := range group.Servers {
		if srv.Type == models.SERVER_TYPE_SLAVE {
			slaves = append(slaves, srv)
			slaveAddrs = append(slaveAddrs, srv.Addr)
		}
	}

	// elect a new master
	log.Infof("elect a new master in %v", slaveAddrs)
	addr, err := t.electNewMaster(slaves)
	if err != nil {
		return errors.Trace(err)
	}

	// promote it as the new master
	log.Infof("promote %s as the new master", addr)
	if err := group.Promote(globalConn, addr, globalEnv.StoreAuth()); err != nil {
		// should we fatal here and require human intervention?
		return errors.Trace(err)
	}

	// let other slaves replicate from the new master
	for _, slave := range slaves {
		if slave.Addr == addr {
			continue
		}

		log.Infof("let %s slaveof new master %s", slave.Addr, addr)
		if err := utils.SlaveOf(slave.Addr, addr, globalEnv.StoreAuth()); err != nil {
			// should we fatal here and require human intervention?
			return errors.Trace(err)
		}
	}

	return nil
}
Example #17
func (s *Store) CompactAll() error {
	if err := s.acquire(); err != nil {
		return err
	}
	defer s.release()
	log.Infof("store is compacting all...")
	if err := s.compact([]byte{MetaCode}, []byte{MetaCode + 1}); err != nil {
		return err
	}
	if err := s.compact([]byte{DataCode}, []byte{DataCode + 1}); err != nil {
		return err
	}
	log.Infof("store is compacted")
	return nil
}
Example #18
func main() {
	flag.Parse()

	oracle, err := server.NewTimestampOracle(*addr)
	if err != nil {
		log.Fatal(err)
	}

	go http.ListenAndServe(":5555", nil)

	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)

	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		oracle.Close()
		os.Exit(0)
	}()

	oracle.Run()
}
Example #19
// Alloc allocates the next autoID for the table with the given tableID.
// It fetches a batch of IDs at a time, so it does not need to access storage on every call.
func (alloc *allocator) Alloc(tableID int64) (int64, error) {
	if tableID == 0 {
		return 0, errors.New("Invalid tableID")
	}
	metaKey := meta.AutoIDKey(tableID)
	alloc.mu.Lock()
	defer alloc.mu.Unlock()
	if alloc.base == alloc.end { // the cached batch is exhausted; reserve the next step IDs in a new txn
		err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
			end, err := meta.GenID(txn, []byte(metaKey), step)
			if err != nil {
				return errors.Trace(err)
			}

			alloc.end = end
			alloc.base = alloc.end - step
			return nil
		})

		if err != nil {
			return 0, errors.Trace(err)
		}
	}

	alloc.base++
	log.Infof("Alloc id %d, table ID:%d, from %p, store ID:%s", alloc.base, tableID, alloc, alloc.store.UUID())
	return alloc.base, nil
}
Example #20
func (d *ddl) CreateTable(ctx context.Context, ident table.Ident, colDefs []*coldef.ColumnDef, constraints []*coldef.TableConstraint) (err error) {
	is := d.GetInformationSchema()
	if !is.SchemaExists(ident.Schema) {
		return errors.Trace(qerror.ErrDatabaseNotExist)
	}
	if is.TableExists(ident.Schema, ident.Name) {
		return errors.Trace(ErrExists)
	}
	if err = checkDuplicateColumn(colDefs); err != nil {
		return errors.Trace(err)
	}

	cols, newConstraints, err := d.buildColumnsAndConstraints(colDefs, constraints)
	if err != nil {
		return errors.Trace(err)
	}

	tbInfo, err := d.buildTableInfo(ident.Name, cols, newConstraints)
	if err != nil {
		return errors.Trace(err)
	}
	log.Infof("New table: %+v", tbInfo)
	err = d.updateInfoSchema(ctx, ident.Schema, tbInfo)
	return errors.Trace(err)
}
Example #21
// If forceNew is true, GetTxn() must return a new transaction.
// In this situation, if the current transaction is still in progress,
// it is committed implicitly and a new transaction is created.
func (s *session) GetTxn(forceNew bool) (kv.Transaction, error) {
	var err error
	if s.txn == nil {
		s.resetHistory()
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, err
		}

		log.Infof("New txn:%s in session:%d", s.txn, s.sid)
		return s.txn, nil
	}
	if forceNew {
		err = s.txn.Commit()
		if err != nil {
			return nil, err
		}
		s.resetHistory()
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, err
		}
		log.Warnf("Force new txn:%s in session:%d", s.txn, s.sid)
	}
	return s.txn, nil
}
Example #22
// If forceNew is true, GetTxn() must return a new transaction.
// In this situation, if the current transaction is still in progress,
// it is committed implicitly and a new transaction is created.
func (s *session) GetTxn(forceNew bool) (kv.Transaction, error) {
	var err error
	if s.txn == nil {
		s.resetHistory()
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !s.isAutocommit(s) {
			variable.GetSessionVars(s).SetStatusFlag(mysql.ServerStatusInTrans, true)
		}
		log.Infof("New txn:%s in session:%d", s.txn, s.sid)
		return s.txn, nil
	}
	if forceNew {
		err = s.FinishTxn(false)
		if err != nil {
			return nil, errors.Trace(err)
		}
		s.txn, err = s.store.Begin()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !s.isAutocommit(s) {
			variable.GetSessionVars(s).SetStatusFlag(mysql.ServerStatusInTrans, true)
		}
		log.Warnf("Force new txn:%s in session:%d", s.txn, s.sid)
	}
	return s.txn, nil
}
Example #23
// Alloc allocates the next autoID for the table with the given tableID.
// It fetches a batch of IDs at a time, so it does not need to access storage on every call.
func (alloc *allocator) Alloc(tableID int64) (int64, error) {
	if tableID == 0 {
		return 0, errors.New("Invalid tableID")
	}
	alloc.mu.Lock()
	defer alloc.mu.Unlock()
	if alloc.base == alloc.end { // the cached batch is exhausted; reserve the next step IDs in a new txn
		err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
			m := meta.NewMeta(txn)
			// err1 is used for passing `go tool vet --shadow` check.
			end, err1 := m.GenAutoTableID(alloc.dbID, tableID, step)
			if err1 != nil {
				return errors.Trace(err1)
			}

			alloc.end = end
			alloc.base = alloc.end - step
			return nil
		})

		if err != nil {
			return 0, errors.Trace(err)
		}
	}

	alloc.base++
	log.Infof("Alloc id %d, table ID:%d, from %p, database ID:%d", alloc.base, tableID, alloc, alloc.dbID)
	return alloc.base, nil
}
Example #24
func newHandler(c *Config, s *store.Store) (*Handler, error) {
	h := &Handler{
		config:       c,
		master:       make(chan *conn, 0),
		slaveofReply: make(chan struct{}, 1),
		signal:       make(chan int, 0),
		store:        s,
		bgSaveSem:    sync2.NewSemaphore(1),
		conns:        make(map[*conn]struct{}),
	}

	h.runID = make([]byte, 40)
	getRandomHex(h.runID)
	log.Infof("server runid is %s", h.runID)

	l, err := net.Listen("tcp", h.config.Listen)
	if err != nil {
		return nil, errors.Trace(err)
	}
	h.l = l

	if err = h.initReplication(s); err != nil {
		h.close()
		return nil, errors.Trace(err)
	}

	h.htable = globalCommands

	go h.daemonSyncMaster()

	return h, nil
}
Example #25
func NewEtcdConn(zkAddr string, zkSessionTimeout int) (Conn, error) {
	singleInstanceLock.Lock()
	defer singleInstanceLock.Unlock()
	if etcdInstance != nil {
		return etcdInstance, nil
	}

	p := pools.NewResourcePool(func() (pools.Resource, error) {
		cluster := strings.Split(zkAddr, ",")
		for i, addr := range cluster {
			if !strings.HasPrefix(addr, "http://") {
				cluster[i] = "http://" + addr
			}
		}
		return &PooledEtcdClient{c: etcd.NewClient(cluster)}, nil
	}, 10, 10, 0)

	etcdInstance = &etcdImpl{
		cluster:  zkAddr,
		pool:     p,
		indexMap: make(map[string]uint64),
	}

	log.Infof("new etcd %s", zkAddr)
	return etcdInstance, nil
}
Example #26
func (e *SimpleExec) executeRollback(s *ast.RollbackStmt) error {
	sessVars := e.ctx.GetSessionVars()
	log.Infof("[%d] execute rollback statement", sessVars.ConnectionID)
	err := e.ctx.RollbackTxn()
	sessVars.SetStatusFlag(mysql.ServerStatusInTrans, false)
	return errors.Trace(err)
}
Example #27
func init() {
	poolmap.m = make(map[string]*list.List)
	go func() {
		for {
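			// Once per second, sweep every pool: connections idle longer than maxConnIdleTime are
			// closed, the rest are kept, and pools that end up empty are removed from the map.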
			time.Sleep(time.Second)
			poolmap.Lock()
			for addr, pool := range poolmap.m {
				for i := pool.Len(); i != 0; i-- {
					c := pool.Remove(pool.Front()).(*conn)
					if time.Now().Before(c.last.Add(maxConnIdleTime)) {
						pool.PushBack(c)
					} else {
						c.sock.Close()
						log.Infof("close connection %s : %s", addr, c)
					}
				}
				if pool.Len() != 0 {
					continue
				}
				delete(poolmap.m, addr)
			}
			poolmap.Unlock()
		}
	}()
}
Example #28
// NewClient creates a PD client.
func NewClient(pdAddrs []string, clusterID uint64) (Client, error) {
	log.Infof("[pd] create pd client with endpoints %v", pdAddrs)
	client := &client{
		worker: newRPCWorker(pdAddrs, clusterID),
	}
	return client, nil
}
Example #29
// NewServer creates a new Server.
func NewServer(cfg *Config, driver IDriver) (*Server, error) {
	s := &Server{
		cfg:               cfg,
		driver:            driver,
		concurrentLimiter: NewTokenLimiter(100),
		rwlock:            &sync.RWMutex{},
		clients:           make(map[uint32]*clientConn),
	}

	var err error
	if cfg.Socket != "" {
		cfg.SkipAuth = true
		s.listener, err = net.Listen("unix", cfg.Socket)
	} else {
		s.listener, err = net.Listen("tcp", s.cfg.Addr)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Init rand seed for randomBuf()
	rand.Seed(time.Now().UTC().UnixNano())
	log.Infof("Server run MySql Protocol Listen at [%s]", s.cfg.Addr)
	return s, nil
}
Example #30
func (s *Server) startStatusHTTP() {
	once.Do(func() {
		go func() {
			http.HandleFunc("/status", func(w http.ResponseWriter, req *http.Request) {
				w.Header().Set("Content-Type", "application/json")
				s := status{
					Connections: s.ConnectionCount(),
					Version:     mysql.ServerVersion,
					GitHash:     printer.TiDBGitHash,
				}
				js, err := json.Marshal(s)
				if err != nil {
					w.WriteHeader(http.StatusInternalServerError)
					log.Error("Encode json error", err)
				} else {
					w.Write(js)
				}

			})
			// HTTP path for prometheus.
			http.Handle("/metrics", prometheus.Handler())
			addr := s.cfg.StatusAddr
			if len(addr) == 0 {
				addr = defaultStatusAddr
			}
			log.Infof("Listening on %v for status and metrics report.", addr)
			err := http.ListenAndServe(addr, nil)
			if err != nil {
				log.Fatal(err)
			}
		}()
	})
}