Example 1
func (tso *TimestampOracle) handleConnection(s *session) {
	defer func() {
		tso.connLock.Lock()
		delete(tso.conns, s.conn)
		tso.connLock.Unlock()

		s.conn.Close()
		tso.wg.Done()
	}()

	var buf [1]byte

	for {
		_, err := s.r.Read(buf[:])
		if err != nil {
			log.Warn(err)
			return
		}

		resp := tso.getRespTS()
		resp.Encode(s.w)
		if s.r.Buffered() <= 0 {
			err = s.w.Flush()
			if err != nil {
				log.Warn(err)
				return
			}
		}
	}
}
Example 2
func mustNewConn(urls []string, quit chan struct{}) *conn {
	for {
		conn, err := rpcConnectLeader(urls)
		if err == nil {
			return newConn(conn)
		}
		log.Warn(err)

		conn, err = rpcConnect(urls)
		if err == nil {
			c := newConn(conn)
			c.wg.Add(1)
			go c.connectLeader(urls, reconnectPDTimeout)
			return c
		}
		log.Warn(err)

		select {
		case <-time.After(connectPDTimeout):
			// retry
		case <-quit:
			return nil
		}
	}
}
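
The loop above tries the leader first, falls back to any member while reconnecting to the leader in the background, and selects on time.After versus a quit channel so shutdown can interrupt the retry wait. Below is a minimal, self-contained sketch of the same cancelable-retry pattern; dial, retryInterval, and the address are illustrative stand-ins, not this project's API.

package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

// dial stands in for the real connect call (e.g. rpcConnectLeader).
func dial(addr string) (net.Conn, error) {
	return net.DialTimeout("tcp", addr, time.Second)
}

// connectWithRetry retries dial until it succeeds or quit is closed.
func connectWithRetry(addr string, retryInterval time.Duration, quit <-chan struct{}) (net.Conn, error) {
	for {
		conn, err := dial(addr)
		if err == nil {
			return conn, nil
		}
		fmt.Println("dial failed:", err)

		select {
		case <-time.After(retryInterval):
			// retry
		case <-quit:
			return nil, errors.New("connect canceled")
		}
	}
}

func main() {
	quit := make(chan struct{})
	go func() {
		time.Sleep(2 * time.Second)
		close(quit) // give up after a couple of retries
	}()
	if _, err := connectWithRetry("127.0.0.1:1", 500*time.Millisecond, quit); err != nil {
		fmt.Println(err)
	}
}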
Example 3
File: ds.go Project: qgweb/new
func (this *NSQDataStream) HandleMessage(message *nsq.Message) error {
	data := string(message.Body)
	if data == "" {
		log.Warn("数据丢失")
		return nil
	}

	data = encrypt.GetEnDecoder(encrypt.TYPE_BASE64).Decode(data)

	urlData, err := url.ParseQuery(data)
	if err != nil {
		log.Warn("解析数据失败")
		return nil
	}

	// the data is stored in a queue
	if urlData.Get("date") != time.Now().Format("2006-01-02") {
		return nil
	}

	this.Dispatch(urlData)

	// sleep for a random 200–1200ms to spread out downstream load
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	time.Sleep(time.Millisecond * time.Duration(r.Intn(1000)+200))
	return nil
}
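
HandleMessage decodes a base64 payload and then parses it as a URL query string. Here is a standard-library-only sketch of that decode-then-parse step, assuming the project's encrypt wrapper is plain base64 (a guess from its name):

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

func main() {
	// A query string such as nsq would deliver, base64-encoded.
	payload := base64.StdEncoding.EncodeToString([]byte("uids=a,b&date=2006-01-02"))

	raw, err := base64.StdEncoding.DecodeString(payload)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	values, err := url.ParseQuery(string(raw))
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(values.Get("uids"), values.Get("date"))
}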
Example 4
func (tso *TimestampOracle) handleConnection(s *session) {
	defer func() {
		tso.closeConn(s.conn)

		tso.wg.Done()
	}()

	var buf [1]byte

	for {
		_, err := s.r.Read(buf[:])
		if err != nil {
			log.Warn(err)
			return
		}

		resp := tso.getRespTS()
		if resp == nil {
			log.Errorf("get repsone timestamp timeout, close %v", s.conn.RemoteAddr())
			return
		}
		resp.Encode(s.w)
		if s.r.Buffered() <= 0 {
			err = s.w.Flush()
			if err != nil {
				log.Warn(err)
				return
			}
		}
	}
}
Example 5
func (c *client) watchLeader(leaderPath string, revision int64) {
	defer c.wg.Done()
WATCH:
	for {
		log.Infof("[pd] start watch pd leader on path %v, revision %v", leaderPath, revision)
		rch := c.etcdClient.Watch(context.Background(), leaderPath, clientv3.WithRev(revision))
		select {
		case resp := <-rch:
			if resp.Canceled {
				log.Warn("[pd] leader watcher canceled")
				continue WATCH
			}
			leaderAddr, rev, err := getLeader(c.etcdClient, leaderPath)
			if err != nil {
				log.Warn(err)
				continue WATCH
			}
			log.Infof("[pd] found new pd-server leader addr: %v", leaderAddr)
			c.workerMutex.Lock()
			c.worker.stop(errors.New("[pd] leader change"))
			c.worker = newRPCWorker(leaderAddr, c.clusterID)
			c.workerMutex.Unlock()
			revision = rev
		case <-c.quit:
			return
		}
	}
}
Example 6
func (s *TransactionTestSuit) TestAsyncCommit(c *C) {
	conf := defaultTxnConf
	conf.brokenCommitSecondaryTest = true

	tx := newTxn(s.cli, conf)
	// simulating broken commit
	for i := 0; i < 10; i++ {
		p := hbase.NewPut([]byte(fmt.Sprintf("test_%d", i)))
		p.AddValue(cf, q, []byte(fmt.Sprintf("%d", tx.(*themisTxn).GetStartTS())))
		tx.Put(themisTestTableName, p)
	}
	err := tx.Commit()
	c.Assert(err, Equals, nil)

	// wait until the lock expires.
	log.Warn("Waiting for the lock to expire. Sleeping...")
	tick := 6
	for tick > 0 {
		time.Sleep(1 * time.Second)
		tick--
		log.Infof("remain %ds...", tick)
	}

	log.Warn("Try commit again")
	// a new transaction will not see the lock
	for {
		tx = newTxn(s.cli, defaultTxnConf)
		for i := 0; i < 5; i++ {
			p := hbase.NewPut([]byte(fmt.Sprintf("test_%d", i)))
			p.AddValue(cf, q, []byte(fmt.Sprintf("%d", tx.(*themisTxn).GetStartTS())))
			tx.Put(themisTestTableName, p)
		}
		err = tx.Commit()
		if err == nil || !errorEqual(err, ErrRetryable) {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	c.Assert(err, Equals, nil)

	for {
		tx = newTxn(s.cli, defaultTxnConf)
		for i := 5; i < 10; i++ {
			p := hbase.NewPut([]byte(fmt.Sprintf("test_%d", i)))
			p.AddValue(cf, q, []byte(fmt.Sprintf("%d", tx.(*themisTxn).GetStartTS())))
			tx.Put(themisTestTableName, p)
		}
		err = tx.Commit()
		if err == nil || !errorEqual(err, ErrRetryable) {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	c.Assert(err, Equals, nil)
}
Example 7
func (tso *TimestampOracle) updateTimestamp() error {
	prev := tso.ts.Load().(*atomicObject)
	now := time.Now()

	// ms
	since := now.Sub(prev.physical).Nanoseconds() / 1e6
	if since > 2*updateTimestampStep {
		log.Warnf("clock offset: %v, prev: %v, now %v", since, prev.physical, now)
	}
	// Avoid the same physical time stamp
	if since <= 0 {
		log.Warn("invalid physical time stamp, re-update later again")
		return nil
	}

	if now.Sub(tso.lastSavedTime).Nanoseconds()/1e6 > tso.cfg.SaveInterval {
		if err := tso.saveTimestamp(now); err != nil {
			return errors.Trace(err)
		}
	}

	current := &atomicObject{
		physical: now,
	}
	tso.ts.Store(current)

	return nil
}
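
updateTimestamp only installs a new physical time when the clock has actually advanced, so the (physical, logical) pairs handed out stay strictly increasing even across clock jitter. Timestamp oracles commonly pack such a pair into one int64 by reserving the low bits for the logical counter; the 18-bit width below is an illustrative assumption, not necessarily this project's layout.

package main

import "fmt"

const logicalBits = 18 // assumed width of the logical counter

// composeTS packs a millisecond physical time and a logical counter
// into a single comparable int64 timestamp.
func composeTS(physicalMs, logical int64) int64 {
	return physicalMs<<logicalBits | logical
}

func main() {
	ts1 := composeTS(1700000000000, 1)
	ts2 := composeTS(1700000000000, 2)
	fmt.Println(ts2 > ts1) // true: same millisecond, later logical count
}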
Example 8
func (txn *themisTxn) batchCommitSecondary(wait bool) error {
	// batch-commit all rows in each region
	rsRowMap, err := txn.groupByRegion()
	if err != nil {
		return errors.Trace(err)
	}

	wg := sync.WaitGroup{}
	for _, regionRowMap := range rsRowMap {
		wg.Add(1)
		_, firstRowM := getFirstEntity(regionRowMap)
		go func(cli *themisRPC, tbl string, rMap map[string]*rowMutation, startTs, commitTs uint64) {
			defer wg.Done()
			err := cli.batchCommitSecondaryRows([]byte(tbl), rMap, startTs, commitTs)
			if err != nil {
				// a failed secondary commit does not stop the commits of the
				// remaining secondaries
				if isWrongRegionErr(err) {
					txn.client.CleanAllRegionCache()
					log.Warn("region info outdated when committing secondary rows, don't panic")
				}
			}
		}(txn.rpc, string(firstRowM.tbl), regionRowMap, txn.startTs, txn.commitTs)
	}
	if wait {
		wg.Wait()
	}
	return nil
}
Example 9
func NewServer(cfg *config.Conf) (*Server, error) {
	s := new(Server)

	s.cfg = cfg

	var err error

	s.fingerprints = make(map[string]*LimitReqNode)
	// s.users = make(map[string]*User)
	// s.qpsOnServer = &LimitReqNode{}
	s.mu = &sync.Mutex{}
	s.restart = false
	port := s.cfg.GetConfig().Global.Port

	// get the listen fd from the inherited file on graceful restart
	if os.Getenv("_GRACEFUL_RESTART") == "true" {
		log.Info("graceful restart with previous listenfd")

		// get the listenfd
		file := os.NewFile(3, "")
		s.listener, err = net.FileListener(file)
		if err != nil {
			log.Warn("get linstener err ")
		}

	} else {
		s.listener, err = net.Listen("tcp4", fmt.Sprintf(":%d", port))
	}
	if err != nil {
		return nil, err
	}

	log.Infof("Dbatman Listen(tcp4) at [%d]", port)
	return s, nil
}
Example 10
func extractLockInfoFromKeyErr(keyErr *pb.KeyError) (*pb.LockInfo, error) {
	if locked := keyErr.GetLocked(); locked != nil {
		return locked, nil
	}
	if keyErr.Retryable != nil {
		err := errors.Errorf("tikv restarts txn: %s", keyErr.GetRetryable())
		log.Warn(err)
		return nil, errors.Annotate(err, txnRetryableMark)
	}
	if keyErr.Abort != nil {
		err := errors.Errorf("tikv aborts txn: %s", keyErr.GetAbort())
		log.Warn(err)
		return nil, errors.Trace(err)
	}
	return nil, errors.Errorf("unexpected KeyError: %s", keyErr.String())
}
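
The function maps the mutually exclusive failure fields of a protobuf KeyError onto Go errors, marking the retryable case so callers can detect it. A dependency-free sketch of the same classification shape, using Go's error wrapping in place of errors.Annotate and the mark string (keyError and classify are illustrative names):

package main

import (
	"errors"
	"fmt"
)

// keyError mimics a protobuf message with mutually exclusive failure fields.
type keyError struct {
	Locked    *string
	Retryable *string
	Abort     *string
}

var errRetryableMark = errors.New("retryable")

// classify returns the lock info when present, otherwise an error that
// describes the failure; retryable failures are wrapped so callers can
// test for them with errors.Is.
func classify(ke *keyError) (string, error) {
	if ke.Locked != nil {
		return *ke.Locked, nil
	}
	if ke.Retryable != nil {
		return "", fmt.Errorf("txn restarted: %s: %w", *ke.Retryable, errRetryableMark)
	}
	if ke.Abort != nil {
		return "", fmt.Errorf("txn aborted: %s", *ke.Abort)
	}
	return "", fmt.Errorf("unexpected keyError: %+v", *ke)
}

func main() {
	msg := "write conflict"
	_, err := classify(&keyError{Retryable: &msg})
	fmt.Println(errors.Is(err, errRetryableMark)) // true
}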
Example 11
func (tso *TimestampOracle) updateTicker() {
	// TODO: save latest TS to persistent storage(zookeeper/etcd...)
	for {
		select {
		case <-tso.ticker.C:
			prev := tso.ts.Load().(*atomicObject)
			now := time.Now()

			// ms
			since := now.Sub(prev.physical).Nanoseconds() / 1e6
			if since > 2*updateTimestampStep {
				log.Warnf("clock offset: %v, prev: %v, now %v", since, prev.physical, now)
			}
			// Avoid the same physical time stamp
			if since <= 0 {
				log.Warn("invalid physical time stamp")
				continue
			}

			current := &atomicObject{
				physical: now,
			}
			tso.ts.Store(current)
		}
	}
}
Example 12
func (session *Session) Run() error {

	for {

		data, err := session.fc.ReadPacket()

		if err != nil {
			log.Warn(err)
			return err
		}

		if data[0] == ComQuit {
			return errSessionQuit
		}

		if err := session.dispatch(data); err != nil {
			if err == driver.ErrBadConn {
				// TODO handle error
			}

			log.Warnf("dispatch error: %s", err.Error())
			return err
		}

		session.fc.ResetSequence()

		if session.closed {
			// TODO return MySQL Go Away ?
			return errors.New("session closed!")
		}
	}
}
Example 13
File: ds.go Project: qgweb/new
func (this *NSQDataStream) Dispatch(data url.Values) {
	var (
		uidsAry  = strings.Split(data.Get("uids"), ",")
		info     = make(map[string]interface{})
		ginfoAry = this.GrabData(uidsAry)
	)

	data.Set("ua", encrypt.DefaultBase64.Decode(data.Get("ua")))

	for k := range data {
		info[k] = data.Get(k)
	}

	if len(ginfoAry) == 0 {
		return
	}

	info["ginfos"] = ginfoAry

	j, err := json.Marshal(&info)
	if err != nil {
		log.Warn(err)
		return
	}

	go this.Save(j)
}
Example 14
func (c *Client) cleanupPending(err error) {
	log.Warn(err)
	length := c.pending.Len()
	for i := 0; i < length; i++ {
		e := c.pending.Front()
		c.pending.Remove(e)
		e.Value.(*PipelineRequest).MarkDone(nil, err)
	}
}
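
cleanupPending snapshots the list length and pops from the front, so every in-flight request is completed with the same terminal error in send order. A small container/list drain sketch of that shape (PipelineRequest is replaced by a plain string for brevity):

package main

import (
	"container/list"
	"errors"
	"fmt"
)

func main() {
	pending := list.New()
	for i := 0; i < 3; i++ {
		pending.PushBack(fmt.Sprintf("req-%d", i))
	}

	err := errors.New("connection reset")
	// Drain front-to-back so requests fail in the order they were sent.
	for length := pending.Len(); length > 0; length-- {
		e := pending.Front()
		pending.Remove(e)
		fmt.Printf("%s done with error: %v\n", e.Value, err)
	}
}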
Example 15
func (c *Client) workerLoop() {
	for {
		err := c.do()
		if err != nil {
			log.Warn(err)
		}
		time.Sleep(time.Second)
	}
}
Example 16
File: ds.go Project: qgweb/new
func (this *NSQDataStream) Save(data []byte) {
	err := this.nsqproducer.Ping()
	if err != nil {
		log.Warn("无法和nsq通讯,错误信息为:", err)
		return
	}

	err = this.nsqproducer.Publish(this.config.SendKey, data)
	if err != nil {
		log.Warn("推送数据失败,错误信息为:", err)
	}

	// redundant copy of the data (temporary)
	err = this.nsqproducer.Publish(this.config.SendKey+"_es", data)
	if err != nil {
		log.Warn("推送数据失败,错误信息为:", err)
	}
}
Example 17
func (s *TransactionTestSuit) SetUpSuite(c *C) {
	var err error
	s.cli, err = createHBaseClient()
	c.Assert(err, Equals, nil)

	log.Warn("new test, reset tables")
	err = createNewTableAndDropOldTable(s.cli, themisTestTableName, string(cf), nil)
	c.Assert(err, IsNil)
}
Example 18
func (c *client) watchLeader(leaderPath string, revision int64) {
	defer c.wg.Done()

	for {
		log.Infof("[pd] start watch pd leader on path %v, revision %v", leaderPath, revision)
		ctx, cancel := context.WithTimeout(c.etcdClient.Ctx(), defaultWatchLeaderTimeout)
		rch := c.etcdClient.Watch(ctx, leaderPath, clientv3.WithRev(revision))

		for resp := range rch {
			if resp.Canceled {
				log.Warn("[pd] leader watcher canceled")
				break
			}

			// We didn't observe any change; no need to check the leader again.
			if len(resp.Events) == 0 {
				break
			}

			leaderAddr, rev, err := getLeader(c.etcdClient, leaderPath)
			if err != nil {
				log.Warn(err)
				break
			}

			log.Infof("[pd] found new pd-server leader addr: %v", leaderAddr)
			c.workerMutex.Lock()
			c.worker.stop(errors.New("[pd] leader change"))
			c.worker = newRPCWorker(leaderAddr, c.clusterID)
			c.workerMutex.Unlock()
			revision = rev
		}

		cancel()

		select {
		case <-c.quit:
			return
		default:
		}
	}
}
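
Compared with the earlier variant, this loop re-creates the etcd watch from the last known revision, so no leader change is missed across watch restarts. A minimal clientv3 watch loop built on the same idea; the endpoint and key are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer cli.Close()

	revision := int64(1)
	for {
		// Re-arm the watch at the last seen revision so restarts miss nothing.
		rch := cli.Watch(context.Background(), "/leader", clientv3.WithRev(revision))
		for resp := range rch {
			if resp.Canceled {
				break // channel is closing; re-create the watch
			}
			for _, ev := range resp.Events {
				fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
			}
			revision = resp.Header.Revision + 1
		}
	}
}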
Example 19
File: sh.go Project: qgweb/new
func NewShPut() *ShPut {
	var sh = &ShPut{}
	sh.kf = dbfactory.NewKVFile(fmt.Sprintf("./%s.txt", convert.ToString(time.Now().Unix())))
	sh.putTags = make(map[string]map[string]int)
	sh.Timestamp = timestamp.GetDayTimestamp(-1)
	sh.initPutAdverts()
	sh.initPutTags("TAGS_3*", "tb_", "mg_")
	sh.initPutTags("TAGS_5*", "url_", "")
	log.Warn(sh.putAdverts)
	return sh
}
Example 20
func (c *rpcClient) doSend(conn net.Conn, msg *msgpb.Message) error {
	curMsgID := atomic.AddUint64(&c.msgID, 1)
	log.Debugf("Send request msgID[%d] type[%v]", curMsgID, msg.GetMsgType())
	if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
		log.Warn("Set write deadline failed, it may be blocked.")
	}
	if err := util.WriteMessage(conn, curMsgID, msg); err != nil {
		return errors.Trace(err)
	}
	if err := conn.SetReadDeadline(time.Now().Add(readTimeout)); err != nil {
		log.Warn("Set read deadline failed, it may be blocked.")
	}
	msgID, err := util.ReadMessage(conn, msg)
	if err != nil {
		return errors.Trace(err)
	}
	if curMsgID != msgID {
		log.Errorf("Sent msgID[%d] mismatches recv msgID[%d]", curMsgID, msgID)
	}
	log.Debugf("Receive response msgID[%d] type[%v]", msgID, msg.GetMsgType())
	return nil
}
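
doSend brackets each blocking call with a deadline so a stalled peer surfaces as a timeout instead of hanging the worker. The same guard, demonstrated on an in-memory net.Pipe connection:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	// Echo peer: read the request, send a reply.
	go func() {
		buf := make([]byte, 4)
		if _, err := server.Read(buf); err != nil {
			return
		}
		server.Write([]byte("pong"))
	}()

	// Bound each phase of the round trip so a stuck peer becomes a timeout.
	if err := client.SetWriteDeadline(time.Now().Add(time.Second)); err != nil {
		fmt.Println("set write deadline:", err)
	}
	if _, err := client.Write([]byte("ping")); err != nil {
		fmt.Println(err)
		return
	}
	if err := client.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
		fmt.Println("set read deadline:", err)
	}
	buf := make([]byte, 4)
	if _, err := client.Read(buf); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(buf))
}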
Example 21
File: main.go Project: disksing/tso
func (tso *TimestampOracle) handleConnection(s *session) {
	var buf [1]byte
	defer s.conn.Close()
	resp := &proto.Response{}
	for {
		_, err := s.r.Read(buf[:])
		if err != nil {
			log.Warn(err)
			return
		}
		prev := tso.ts.Load().(*atomicObject)
		resp.Physical = prev.physical.UnixNano() / 1e6
		resp.Logical = atomic.AddInt64(&prev.logical, 1)
		binary.Write(s.w, binary.BigEndian, resp)
		if s.r.Buffered() <= 0 {
			err = s.w.Flush()
			if err != nil {
				log.Warn(err)
				return
			}
		}
	}
}
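
This older tso variant serializes the response directly with encoding/binary, which only works because every field has a fixed size. A sketch of the matching client-side decode under that assumption (the response struct here mirrors the two int64 fields, not the project's actual proto.Response):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// response mirrors a fixed-size wire struct (two int64 fields).
type response struct {
	Physical int64
	Logical  int64
}

func main() {
	var buf bytes.Buffer

	// Server side: write the struct in big-endian order.
	out := response{Physical: 1700000000000, Logical: 42}
	if err := binary.Write(&buf, binary.BigEndian, &out); err != nil {
		fmt.Println(err)
		return
	}

	// Client side: read it back with the same layout and byte order.
	var in response
	if err := binary.Read(&buf, binary.BigEndian, &in); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(in.Physical, in.Logical)
}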
Example 22
File: ds.go Project: qgweb/new
func (this *NSQDataStream) Receive() {
	cfg := nsq.NewConfig()
	cus, err := nsq.NewConsumer(this.config.ReceiveKey, this.config.ReceiveKey, cfg)
	if err != nil {
		log.Fatal("连接nsq失败")
	}

	cus.AddHandler(this)
	log.Warn(cus.ConnectToNSQD(fmt.Sprintf("%s:%s", this.config.NsqHost, this.config.NsqPort)))
	select {
	case <-cus.StopChan:
		return
	}
}
Example 23
File: ddl.go Project: H0bby/tidb
func (d *ddl) writeSchemaInfo(info *model.DBInfo, txn kv.Transaction) error {
	var b []byte
	b, err := json.Marshal(info)
	if err != nil {
		return errors.Trace(err)
	}
	key := []byte(meta.DBMetaKey(info.ID))
	if err := txn.LockKeys(key); err != nil {
		return errors.Trace(err)
	}
	if err := txn.Set(key, b); err != nil {
		return errors.Trace(err)
	}
	log.Warn("save schema", string(b))

	return nil
}
Example 24
func (c *Client) cleanupPending(err error) {
	log.Warn(err)
	length := c.pending.Len()
	for i := 0; i < length; i++ {
		e := c.pending.Front()
		c.pending.Remove(e)
		e.Value.(*PipelineRequest).MarkDone(nil, err)
	}

	// clear request in channel too
	length = len(c.requests)
	for i := 0; i < length; i++ {
		req := <-c.requests
		req.MarkDone(nil, err)
	}
}
Example 25
func (txn *themisTxn) brokenPrewriteSecondary() error {
	log.Warn("Simulating prewrite secondary failed")
	for i, rm := range txn.secondaryRows {
		if i == len(txn.secondary)-1 {
			if !txn.conf.brokenPrewriteSecondaryAndRollbackTest {
				// simulate a prewrite failure that requires rollback
				txn.rollbackRow(txn.primaryRow.tbl, txn.primaryRow)
				txn.rollbackSecondaryRow(i)
			}
			// the rollback itself may fail as well
			return ErrSimulated
		}
		txn.prewriteRowWithLockClean(rm.tbl, rm, false)
	}
	return nil
}
Example 26
func (d *ddl) writeSchemaInfo(info *model.DBInfo) error {
	var b []byte
	b, err := json.Marshal(info)
	if err != nil {
		return errors.Trace(err)
	}
	err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		key := []byte(meta.DBMetaKey(info.ID))
		if err := txn.LockKeys(key); err != nil {
			return errors.Trace(err)
		}
		return txn.Set(key, b)
	})
	log.Warn("save schema", string(b))
	return errors.Trace(err)
}
Example 27
func (txn *themisTxn) batchPrewriteSecondaries() error {
	wg := sync.WaitGroup{}
	// batch-prewrite all rows in each region
	rsRowMap := txn.groupByRegion()

	errChan := make(chan error, len(rsRowMap))
	defer close(errChan)
	successChan := make(chan map[string]*rowMutation, len(rsRowMap))
	defer close(successChan)

	for _, regionRowMap := range rsRowMap {
		wg.Add(1)
		_, firstRowM := getFirstEntity(regionRowMap)
		go func(tbl []byte, rMap map[string]*rowMutation) {
			defer wg.Done()
			err := txn.batchPrewriteSecondaryRowsWithLockClean(tbl, rMap)
			if err != nil {
				errChan <- err
			} else {
				successChan <- rMap
			}
		}(firstRowM.tbl, regionRowMap)
	}
	wg.Wait()

	if len(errChan) != 0 {
		// an error occurred; roll back the successfully prewritten mutations
		log.Warn("batch prewrite secondary rows error, rolling back", len(successChan))
		txn.rollbackRow(txn.primaryRow.tbl, txn.primaryRow)
	L:
		for {
			select {
			case succMutMap := <-successChan:
				{
					for _, rowMut := range succMutMap {
						txn.rollbackRow(rowMut.tbl, rowMut)
					}
				}
			default:
				break L
			}
		}

		return <-errChan
	}
	return nil
}
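
The prewrite fan-out sizes both channels to the number of goroutines so no sender can block, waits on the WaitGroup, and only then inspects the channels; on failure it drains the success channel to undo partial work. The same fan-out-and-collect shape in miniature:

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := []int{1, 2, 3, 4, 5}

	var wg sync.WaitGroup
	// Buffers sized to len(jobs) so workers never block on send.
	errCh := make(chan error, len(jobs))
	okCh := make(chan int, len(jobs))

	for _, j := range jobs {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			if j == 3 { // simulate one failure
				errCh <- fmt.Errorf("job %d failed", j)
				return
			}
			okCh <- j
		}(j)
	}
	wg.Wait()

	if len(errCh) != 0 {
		// Drain the successes to undo partial work, then report the error.
		for len(okCh) > 0 {
			fmt.Println("rolling back job", <-okCh)
		}
		fmt.Println(<-errCh)
		return
	}
	fmt.Println("all jobs committed")
}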
Example 28
func (txn *themisTxn) Commit() error {
	if txn.mutationCache.getMutationCount() == 0 {
		return nil
	}
	if txn.mutationCache.getRowCount() > txn.conf.MaxRowsInOneTxn {
		return ErrTooManyRows
	}

	txn.selectPrimaryAndSecondaries()
	err := txn.prewritePrimary()
	if err != nil {
		// no need to check for a wrong-region error here; the hbase client
		// retries when a single-row NotInRegion error occurs.
		log.Error(errors.ErrorStack(err))
		// it's safe to retry, because this transaction is not committed.
		return ErrRetryable
	}

	err = txn.prewriteSecondary()
	if err != nil {
		if isWrongRegionErr(err) {
			log.Warn("region info outdated")
			// reset hbase client buffered region info
			txn.client.CleanAllRegionCache()
		}
		return ErrRetryable
	}

	txn.commitTs, err = txn.oracle.GetTimestamp()
	if err != nil {
		log.Error(errors.ErrorStack(err))
		return ErrRetryable
	}
	err = txn.commitPrimary()
	if err != nil {
		// commit primary error, rollback
		log.Error("commit primary row failed", txn.startTs, err)
		txn.rollbackRow(txn.primaryRow.tbl, txn.primaryRow)
		txn.rollbackSecondaryRow(len(txn.secondaryRows) - 1)
		return ErrRetryable
	}
	txn.commitSecondary()
	log.Debug("themis txn commit successfully", txn.startTs, txn.commitTs)
	return nil
}
Example 29
File: conn.go Project: yzl11/vessel
func (c *connection) init() error {
	err := c.writeHead()
	if err != nil {
		return err
	}
	err = c.writeConnectionHeader()
	if err != nil {
		return err
	}
	go func() {
		err := c.processMessages()
		if err != nil {
			log.Warn(err)
			return
		}
	}()
	go c.dispatch()
	return nil
}
Example 30
func (txn *themisTxn) rollbackRow(tbl []byte, mutation *rowMutation) error {
	l := fmt.Sprintf("\nrolling back %q %d {\n", mutation.row, txn.startTs)
	for _, v := range mutation.getColumns() {
		l += fmt.Sprintf("\t%s:%s\n", string(v.Family), string(v.Qual))
	}
	l += "}\n"
	log.Warn(l)
	for _, col := range mutation.getColumns() {
		cc := &hbase.ColumnCoordinate{
			Table:  tbl,
			Row:    mutation.row,
			Column: col,
		}
		err := txn.lockCleaner.EraseLockAndData(cc, txn.startTs)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}