Example #1
0
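// handleSet processes a SET statement: AUTOCOMMIT assignments are applied
// locally first, then the whole statement is forwarded to handleOtherSet.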
func (c *Session) handleSet(stmt *parser.Set, sql string) error {
	if len(stmt.VarList) < 1 {
		return fmt.Errorf("must set one item at least")
	}

	var err error
	for _, v := range stmt.VarList {
		if strings.ToUpper(v.Name) == "AUTOCOMMIT" {
			log.Debug("handle autocommit")
			err = c.handleSetAutoCommit(v.Value) //??
		}
	}

	if err != nil {
		return err
	}

	defer func() {
		// only executed when autocommit went 0 -> 1: clear the pending auto-commit tx
		if c.autoCommit == 1 {
			log.Debug("clear autocommit tx")
			c.clearAutoCommitTx()
		}
	}()
	return c.handleOtherSet(stmt, sql)
}
Example #2
0
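// Serve accepts client connections in a loop until the server is stopped, then
// optionally fork-execs a new process (passing the listener fd) for a graceful
// restart and waits up to one minute for all session goroutines to finish.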
func (s *Server) Serve() error {
	log.Debug("this is ddbatman v4")
	s.running = true
	var sessionId int64 = 0
	for s.running {
		select {
		case sessionChan <- sessionId:
			//do nothing
		default:
			//warning!
			log.Warnf("TASK_CHANNEL is full!")
		}

		conn, err := s.Accept()
		if err != nil {
			log.Warnf("accept error %s", err.Error())
			continue
		}
		//allocate a sessionId for a session
		go s.onConn(conn)
		sessionId += 1
	}
	if s.restart {
		log.Debug("Beginning graceful restart")
		listenerFile, err := s.listener.(*net.TCPListener).File()
		if err != nil {
			log.Fatal("Failed to get socket file descriptor:", err)
		}
		listenerFd := listenerFile.Fd()

		os.Setenv("_GRACEFUL_RESTART", "true")
		execSpec := &syscall.ProcAttr{
			Env:   os.Environ(),
			Files: []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd(), listenerFd},
		}
		fork, err := syscall.ForkExec(os.Args[0], os.Args, execSpec)
		if err != nil {
			return fmt.Errorf("failed to forkexec: %v", err)
		}

		log.Infof("start new process success, pid %d.", fork)
	}
	timeout := time.NewTimer(time.Minute)
	wait := make(chan struct{})
	go func() {
		s.wg.Wait()
		wait <- struct{}{}
	}()

	select {
	case <-timeout.C:
		log.Error("server: wait timeout when closing the service")
		return nil
	case <-wait:
		log.Info("server: all goroutines have finished")
		return nil
	}
}
Example #3
0
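// logger returns a macaron.Handler that logs the method, URI and headers of
// every request; in "dev" mode it also prints a separator line between requests.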
func logger(runmode string) macaron.Handler {
	return func(ctx *macaron.Context) {
		if runmode == "dev" {
			log.Debug("")
			log.Debug("----------------------------------------------------------------------------------")
		}

		log.Infof("[%s] [%s]", ctx.Req.Method, ctx.Req.RequestURI)
		log.Infof("[Header] %v", ctx.Req.Header)
	}
}
Example #4
0
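// checkExpiredKeysWorker periodically compacts the recently written keys and
// exits when the stop channel is closed.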
func (gc *localstoreCompactor) checkExpiredKeysWorker() {
	defer gc.workerWaitGroup.Done()
	for {
		select {
		case <-gc.stopCh:
			log.Debug("[kv] GC stopped")
			return
		case <-gc.ticker.C:
			gc.mu.Lock()
			m := gc.recentKeys
			if len(m) == 0 {
				gc.mu.Unlock()
				continue
			}
			gc.recentKeys = make(map[string]struct{})
			gc.mu.Unlock()
			for k := range m {
				err := gc.Compact([]byte(k))
				if err != nil {
					log.Error(err)
				}
			}
		}
	}
}
Example #5
0
func (s *testStatisticsSuite) TestTable(c *C) {
	tblInfo := &model.TableInfo{
		ID: 1,
	}
	columns := []*model.ColumnInfo{
		{
			ID:        2,
			FieldType: *types.NewFieldType(mysql.TypeLonglong),
		},
	}
	tblInfo.Columns = columns
	timestamp := int64(10)
	bucketCount := int64(256)
	t, err := NewTable(tblInfo, timestamp, s.count, bucketCount, [][]types.Datum{s.samples})
	c.Check(err, IsNil)
	str := t.String()
	log.Debug(str)
	c.Check(len(str), Greater, 0)

	tpb, err := t.ToPB()
	c.Check(err, IsNil)
	data, err := proto.Marshal(tpb)
	c.Check(err, IsNil)
	ntpb := &TablePB{}
	err = proto.Unmarshal(data, ntpb)
	c.Check(err, IsNil)
	nt, err := TableFromPB(tblInfo, ntpb)
	c.Check(err, IsNil)
	c.Check(nt.String(), Equals, str)
}
Example #6
0
func cmdDashboard(argv []string) (err error) {
	usage := `usage: reborn-config dashboard [--addr=<address>] [--http-log=<log_file>]

options:
	--addr		listen ip:port, e.g. localhost:12345, :8086, [default: :8086]
	--http-log	http request log [default: request.log]
`

	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return errors.Trace(err)
	}
	log.Debug(args)

	logFileName := "request.log"
	if args["--http-log"] != nil {
		logFileName = args["--http-log"].(string)
	}

	addr := ":8086"
	if args["--addr"] != nil {
		addr = args["--addr"].(string)
	}

	runDashboard(addr, logFileName)
	return nil
}
Example #7
0
// DelKeyWithPrefix deletes keys with prefix.
func DelKeyWithPrefix(ctx context.Context, prefix string) error {
	log.Debug("delKeyWithPrefix", prefix)
	txn, err := ctx.GetTxn(false)
	if err != nil {
		return err
	}

	var keys []string
	iter, err := txn.Seek([]byte(prefix), hasPrefix([]byte(prefix)))
	if err != nil {
		return err
	}
	defer iter.Close()
	for {
		if err != nil {
			return err
		}

		if iter.Valid() && strings.HasPrefix(iter.Key(), prefix) {
			keys = append(keys, iter.Key())
			iter, err = iter.Next(hasPrefix([]byte(prefix)))
		} else {
			break
		}
	}

	for _, key := range keys {
		err := txn.Delete([]byte(key))
		if err != nil {
			return err
		}
	}

	return nil
}
Example #8
0
// init and get root region server addr and master addr
func (c *client) init() error {
	zkclient, _, err := zk.Connect(c.zkHosts, time.Second*30)
	if err != nil {
		return err
	}
	c.zkClient = zkclient

	res, _, _, err := c.zkClient.GetW(c.zkRoot + zkRootRegionPath)
	if err != nil {
		return err
	}
	c.rootServerName, err = c.decodeMeta(res)
	if err != nil {
		return err
	}
	log.Debug("connect root region server...", c.rootServerName)
	conn, err := newConnection(serverNameToAddr(c.rootServerName), false)
	if err != nil {
		return err
	}
	// set buffered regionserver conn
	c.cachedConns[serverNameToAddr(c.rootServerName)] = conn

	res, _, _, err = c.zkClient.GetW(c.zkRoot + zkMasterAddrPath)
	if err != nil {
		return err
	}
	c.masterServerName, err = c.decodeMeta(res)
	if err != nil {
		return err
	}
	return nil
}
Example #9
0
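// get looks up key in the pivot that covers it; if no message is found there
// and the node is not a leaf, the lookup recurses into the pivot's child node.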
func (n *Node) get(key string) (message *Message, err error) {
	index := n.getSuitablePivotIndex(key)
	if index >= int(n.pivotCount) {
		logger.Fatal("Unknown error, pivot index out of range")
	}

	pivot := n.pivots[index]

	if pivot == nil {
		logger.Fatal("Unknown error, pivot is nil; min pivot count is 1, but current = 0")
	}

	message = pivot.get(key)
	if message != nil {
		return
	}

	if n.isLeaf {
		return
	}

	if pivot.hasChild() {
		childNode := n.tree.nodes[pivot.childNodeID]
		if childNode == nil {
			logger.Debug("Get Node Error")
		}

		message, err = childNode.get(key)
	}

	return
}
Example #10
0
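// exec borrows a *sql.DB from the connection pool, executes sqlStmt on it and
// returns the connection to the pool.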
func exec(sqlStmt string) error {
	db := connPool.Get().(*sql.DB)
	defer connPool.Put(db)
	log.Debug("exec sql:", sqlStmt)
	_, err := db.Exec(sqlStmt)
	return err
}
Example #11
0
func statement(ctx context.Context, sql string) stmt.Statement {
	log.Debug("[ddl] Compile", sql)
	s, _ := parser.ParseOneStmt(sql, "", "")
	compiler := &executor.Compiler{}
	stm, _ := compiler.Compile(ctx, s)
	return stm
}
Example #12
0
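// deserializeBlockContainer reads the block container header fields
// (nodesOffset, blockCount, blockSize, blockUsedCount) from reader in
// little-endian order and stores them in blockContainer.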
func deserializeBlockContainer(reader *bytes.Reader, blockContainer *BlockContainer) {
	var nodesOffset uint64
	if err := binary.Read(reader, binary.LittleEndian, &nodesOffset); err != nil {
		logger.Fatal(err)
	}

	blockContainer.nodesOffset = nodesOffset

	logger.Debug("Serialize nodesOffset =", nodesOffset)

	var blockCount uint32
	if err := binary.Read(reader, binary.LittleEndian, &blockCount); err != nil {
		logger.Fatal(err)
	}
	blockContainer.blockCount = blockCount

	var blockSize uint64
	if err := binary.Read(reader, binary.LittleEndian, &blockSize); err != nil {
		logger.Fatal(err)
	}
	blockContainer.blockSize = blockSize

	var blockUsedCount uint32
	if err := binary.Read(reader, binary.LittleEndian, &blockUsedCount); err != nil {
		logger.Fatal(err)
	}
	blockContainer.blockUsedCount = blockUsedCount
}
Example #13
0
func cmdProxy(argv []string) (err error) {
	usage := `usage:
	reborn-config proxy list
	reborn-config proxy offline <proxy_name>
	reborn-config proxy online <proxy_name>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	if args["list"].(bool) {
		return runProxyList()
	}

	proxyName := args["<proxy_name>"].(string)
	if args["online"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_ONLINE)
	}
	if args["offline"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_MARK_OFFLINE)
	}
	return nil
}
Example #14
0
// backfillIndexInTxn deals with a part of backfilling index data in a Transaction.
// The number of index rows handled in one batch is at most defaultSmallBatchCnt.
func (d *ddl) backfillIndexInTxn(t table.Table, kvIdx table.Index, handles []int64, txn kv.Transaction) (int64, error) {
	idxRecords, err := d.fetchRowColVals(txn, t, handles, kvIdx.Meta())
	if err != nil {
		return 0, errors.Trace(err)
	}

	for _, idxRecord := range idxRecords {
		log.Debug("[ddl] backfill index...", idxRecord.handle)
		err = txn.LockKeys(idxRecord.key)
		if err != nil {
			return 0, errors.Trace(err)
		}

		// Create the index.
		handle, err := kvIdx.Create(txn, idxRecord.vals, idxRecord.handle)
		if err != nil {
			if terror.ErrorEqual(err, kv.ErrKeyExists) && idxRecord.handle == handle {
				// Index already exists, skip it.
				continue
			}
			return 0, errors.Trace(err)
		}
	}
	return idxRecords[len(idxRecords)-1].handle, nil
}
Example #15
0
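// isValid reports whether the block references a legal node id, i.e. one that
// is at least MinNodeId.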
func (b Block) isValid() bool {
	if b.nodeID < MinNodeId {
		logger.Debug("Block invalid")
		return false
	}

	return true
}
Example #16
0
func statement(sql string) stmt.Statement {
	log.Debug("Compile", sql)
	lexer := parser.NewLexer(sql)
	parser.YYParse(lexer)
	compiler := &optimizer.Compiler{}
	stm, _ := compiler.Compile(lexer.Stmts()[0])
	return stm
}
Example #17
0
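// Handshake runs the handshake on the session's underlying connection and
// logs any failure.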
func (session *Session) Handshake() error {

	if err := session.fc.Handshake(); err != nil {
		log.Debug("handshake error: %s", err)
		return err
	}

	return nil
}
Example #18
0
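// handleMigrateState migrates the given keys from the slot's migration source
// master to its destination master when the slot is in MIGRATE state, checking
// the reply for every key.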
func (s *Server) handleMigrateState(slotIndex int, keys ...[]byte) error {
	shd := s.slots[slotIndex]
	if shd.slotInfo.State.Status != models.SLOT_STATUS_MIGRATE {
		return nil
	}

	if shd.migrateFrom == nil {
		log.Fatalf("migrateFrom not exist %+v", shd)
	}

	if shd.dst.Master() == shd.migrateFrom.Master() {
		log.Fatalf("the same migrate src and dst, %+v", shd)
	}

	redisConn, err := s.pools.GetConn(shd.migrateFrom.Master())
	if err != nil {
		return errors.Trace(err)
	}

	defer s.pools.PutConn(redisConn)

	err = writeMigrateKeyCmd(redisConn, shd.dst.Master(), MigrateKeyTimeoutMs, keys...)
	if err != nil {
		redisConn.Close()
		log.Errorf("migrate key %s error, from %s to %s, err:%v",
			string(keys[0]), shd.migrateFrom.Master(), shd.dst.Master(), err)
		return errors.Trace(err)
	}

	redisReader := redisConn.BufioReader()

	// handle migrate result
	for i := 0; i < len(keys); i++ {
		resp, err := parser.Parse(redisReader)
		if err != nil {
			log.Errorf("migrate key %s error, from %s to %s, err:%v",
				string(keys[i]), shd.migrateFrom.Master(), shd.dst.Master(), err)
			redisConn.Close()
			return errors.Trace(err)
		}

		// the reply payload is only used for debug logging here, so a conversion error is ignored
		result, _ := resp.Bytes()

		log.Debug("migrate", string(keys[i]), "from", shd.migrateFrom.Master(), "to", shd.dst.Master(),
			string(result))

		if resp.Type == parser.ErrorResp {
			redisConn.Close()
			log.Error(string(keys[i]), string(resp.Raw), "migrateFrom", shd.migrateFrom.Master())
			return errors.New(string(resp.Raw))
		}
	}

	s.counter.Add("Migrate", int64(len(keys)))

	return nil
}
Example #19
0
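// available reports whether the block can hold compressedSize bytes: either
// its offset is still unset (0) or its block size is at least compressedSize.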
func (b Block) available(compressedSize uint32) bool {
	logger.Debug(b.blockSize)

	return b.offset == 0 || b.blockSize >= compressedSize
}
Example #20
0
// dispatch handles client request based on command which is the first byte of the data.
// It also gets a token from server which is used to limit the concurrently handling clients.
// The most frequently used command is ComQuery.
func (cc *clientConn) dispatch(data []byte) error {
	cmd := data[0]
	data = data[1:]
	cc.lastCmd = hack.String(data)

	token := cc.server.getToken()

	startTS := time.Now()
	defer func() {
		cc.server.releaseToken(token)
		log.Debugf("[TIME_CMD] %v %d", time.Since(startTS), cmd)
	}()

	switch cmd {
	case mysql.ComSleep:
		// TODO: According to mysql document, this command is supposed to be used only internally.
		// So it's just a temp fix, not sure if it's done right.
		// Investigate this command and write test case later.
		return nil
	case mysql.ComQuit:
		return io.EOF
	case mysql.ComQuery: // Most frequently used command.
		// For issue 1989.
		// The input payload may end with a '\0' byte. We found no related MySQL documentation, but the
		// MySQL implementation accepts that case, so trim a trailing '\0' here as if the payload were a
		// NUL-terminated string.
		// See http://dev.mysql.com/doc/internals/en/com-query.html
		if data[len(data)-1] == 0 {
			data = data[:len(data)-1]
		}
		return cc.handleQuery(hack.String(data))
	case mysql.ComPing:
		return cc.writeOK()
	case mysql.ComInitDB:
		log.Debug("init db", hack.String(data))
		if err := cc.useDB(hack.String(data)); err != nil {
			return errors.Trace(err)
		}
		return cc.writeOK()
	case mysql.ComFieldList:
		return cc.handleFieldList(hack.String(data))
	case mysql.ComStmtPrepare:
		return cc.handleStmtPrepare(hack.String(data))
	case mysql.ComStmtExecute:
		return cc.handleStmtExecute(data)
	case mysql.ComStmtClose:
		return cc.handleStmtClose(data)
	case mysql.ComStmtSendLongData:
		return cc.handleStmtSendLongData(data)
	case mysql.ComStmtReset:
		return cc.handleStmtReset(data)
	case mysql.ComSetOption:
		return cc.handleSetOption(data)
	default:
		return mysql.NewErrf(mysql.ErrUnknown, "command %d not supported now", cmd)
	}
}
Example #21
0
func (d *ddl) backfillTableIndex(t table.Table, indexInfo *model.IndexInfo, handles []int64, reorgInfo *reorgInfo) error {
	kvX := kv.NewKVIndex(t.IndexPrefix(), indexInfo.Name.L, indexInfo.ID, indexInfo.Unique)

	for _, handle := range handles {
		log.Debug("[ddl] building index...", handle)

		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}

			// first check row exists
			exist, err := checkRowExist(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			} else if !exist {
				// row doesn't exist, skip it.
				return nil
			}

			var vals []interface{}
			vals, err = fetchRowColVals(txn, t, handle, indexInfo)
			if err != nil {
				return errors.Trace(err)
			}

			exist, _, err = kvX.Exist(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			} else if exist {
				// index already exists, skip it.
				return nil
			}

			err = lockRow(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			}

			// create the index.
			err = kvX.Create(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			}

			// update reorg next handle
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})

		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}
Example #22
0
// Compile is safe for concurrent use by multiple goroutines.
func Compile(src string) ([]stmt.Statement, error) {
	log.Debug("compiling", src)
	l := parser.NewLexer(src)
	if parser.YYParse(l) != 0 {
		log.Warnf("compiling %s, error: %v", src, l.Errors()[0])
		return nil, errors.Trace(l.Errors()[0])
	}

	return l.Stmts(), nil
}
Example #23
0
// Parse parses a query string to raw ast.StmtNode.
func Parse(ctx context.Context, src string) ([]ast.StmtNode, error) {
	log.Debug("compiling", src)
	charset, collation := getCtxCharsetInfo(ctx)
	stmts, err := parser.Parse(src, charset, collation)
	if err != nil {
		log.Warnf("compiling %s, error: %v", src, err)
		return nil, errors.Trace(err)
	}
	return stmts, nil
}
Example #24
0
// Exec implements the stmt.Statement Exec interface.
func (s *CreateDatabaseStmt) Exec(ctx context.Context) (_ rset.Recordset, err error) {
	log.Debug("create database")
	err = sessionctx.GetDomain(ctx).DDL().CreateSchema(ctx, model.NewCIStr(s.Name))
	if err != nil {
		if terror.ErrorEqual(err, ddl.ErrExists) && s.IfNotExists {
			err = nil
		}
	}
	return nil, errors.Trace(err)
}
Example #25
0
func (b *executorBuilder) buildIndexScan(v *plan.IndexScan) Executor {
	txn, err := b.ctx.GetTxn(false)
	if err != nil {
		b.err = err
		return nil
	}
	tbl, _ := b.is.TableByID(v.Table.ID)
	client := txn.GetClient()
	var memDB bool
	switch v.Fields()[0].DBName.L {
	case "information_schema", "performance_schema":
		memDB = true
	}
	if !memDB && client.SupportRequestType(kv.ReqTypeIndex, 0) && txn.IsReadOnly() {
		log.Debug("xapi select index")
		e := &XSelectIndexExec{
			table:     tbl,
			ctx:       b.ctx,
			indexPlan: v,
		}
		where := conditionsToPBExpression(v.FilterConditions...)
		if xapi.SupportExpression(client, where) {
			e.where = where
			return e
		}
		return b.buildFilter(e, v.FilterConditions)
	}
	var idx *column.IndexedCol
	for _, val := range tbl.Indices() {
		if val.IndexInfo.Name.L == v.Index.Name.L {
			idx = val
			break
		}
	}
	e := &IndexScanExec{
		tbl:        tbl,
		idx:        idx,
		fields:     v.Fields(),
		ctx:        b.ctx,
		Desc:       v.Desc,
		valueTypes: make([]*types.FieldType, len(idx.Columns)),
	}

	for i, ic := range idx.Columns {
		col := tbl.Cols()[ic.Offset]
		e.valueTypes[i] = &col.FieldType
	}

	e.Ranges = make([]*IndexRangeExec, len(v.Ranges))
	for i, val := range v.Ranges {
		e.Ranges[i] = b.buildIndexRange(e, val)
	}
	return b.buildFilter(e, v.FilterConditions)
}
Example #26
0
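// handleSetAutoCommit applies "SET autocommit = <value>": numeric 1/0 or the
// strings ON/OFF update the session's autocommit status flag; any other value
// is rejected.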
func (c *Session) handleSetAutoCommit(val parser.IExpr) error {

	var stmt *parser.Predicate
	var ok bool
	if stmt, ok = val.(*parser.Predicate); !ok {
		return fmt.Errorf("set autocommit is not support for complicate expressions")
	}

	switch value := stmt.Expr.(type) {
	case parser.NumVal:
		if i, err := value.ParseInt(); err != nil {
			return err
		} else if i == 1 {
			c.fc.XORStatus(uint16(StatusInAutocommit))
			log.Debug("autocommit is set")
			// return c.handleBegin()
		} else if i == 0 {
			c.fc.AndStatus(^uint16(StatusInAutocommit))
			log.Debug("auto commit is unset")
		} else {
			return fmt.Errorf("Variable 'autocommit' can't be set to the value of '%s'", i)
		}
	case parser.StrVal:
		if s := value.Trim(); s == "" {
			return fmt.Errorf("Variable 'autocommit' can't be set to the value of ''")
		} else if us := strings.ToUpper(s); us == `ON` {
			c.fc.XORStatus(uint16(StatusInAutocommit))
			log.Debug("auto commit is set")
			// return c.handleBegin()
		} else if us == `OFF` {
			c.fc.AndStatus(^uint16(StatusInAutocommit))
			log.Debug("auto commit is unset")
		} else {
			return fmt.Errorf("Variable 'autocommit' can't be set to the value of '%s'", us)
		}
	default:
		return fmt.Errorf("set autocommit error, value type is %T", val)
	}

	return nil
}
Example #27
0
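// initialize opens the backing DB file, then either reads the existing header
// and root node or, for a new file, creates the root node and flushes a fresh
// header.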
func (t *Tree) initialize() {
	isNew := true

	if _, err := os.Stat(t.env.fullDBName); err == nil {
		isNew = false
	}

	t.openFile()
	t.header = newHeader(t)

	if !isNew {
		logger.Debug("Not New File")
		t.header.read()
		logger.Debug(t.header)
		t.initRootNode()
	} else {
		t.initRootNode()
		t.header.flush()
	}
}
Example #28
0
func (b *executorBuilder) buildTableScan(v *plan.TableScan) Executor {
	txn, err := b.ctx.GetTxn(false)
	if err != nil {
		b.err = err
		return nil
	}
	table, _ := b.is.TableByID(v.Table.ID)
	client := txn.GetClient()
	var memDB bool
	switch v.Fields()[0].DBName.L {
	case "information_schema", "performance_schema":
		memDB = true
	}
	supportDesc := client.SupportRequestType(kv.ReqTypeSelect, kv.ReqSubTypeDesc)
	if !memDB && client.SupportRequestType(kv.ReqTypeSelect, 0) {
		log.Debug("xapi select table")
		e := &XSelectTableExec{
			table:       table,
			ctx:         b.ctx,
			tablePlan:   v,
			supportDesc: supportDesc,
		}
		where, remained := b.conditionsToPBExpr(client, v.FilterConditions, v.TableName)
		if where != nil {
			e.where = where
		}
		if len(remained) == 0 {
			e.allFiltersPushed = true
		}
		var ex Executor
		if txn.IsReadOnly() {
			ex = e
		} else {
			ex = b.buildUnionScanExec(e)
		}
		return b.buildFilter(ex, remained)
	}

	e := &TableScanExec{
		t:           table,
		tableAsName: v.TableAsName,
		fields:      v.Fields(),
		ctx:         b.ctx,
		ranges:      v.Ranges,
		seekHandle:  math.MinInt64,
	}
	x := b.buildFilter(e, v.FilterConditions)
	if v.Desc {
		x = &ReverseExec{Src: x}
	}
	return x
}
Example #29
0
func (d *ddl) backfillTableIndex(t table.Table, indexInfo *model.IndexInfo, handles []int64, reorgInfo *reorgInfo) error {
	kvX := tables.NewIndex(t.Meta(), indexInfo)

	for _, handle := range handles {
		log.Debug("[ddl] building index...", handle)

		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}

			vals, err1 := fetchRowColVals(txn, t, handle, indexInfo)
			if terror.ErrorEqual(err1, kv.ErrNotExist) {
				// row doesn't exist, skip it.
				return nil
			}
			if err1 != nil {
				return errors.Trace(err1)
			}

			exist, _, err1 := kvX.Exist(txn, vals, handle)
			if err1 != nil {
				return errors.Trace(err1)
			} else if exist {
				// index already exists, skip it.
				return nil
			}
			rowKey := tablecodec.EncodeRecordKey(t.RecordPrefix(), handle)
			err1 = txn.LockKeys(rowKey)
			if err1 != nil {
				return errors.Trace(err1)
			}

			// create the index.
			err1 = kvX.Create(txn, vals, handle)
			if err1 != nil {
				return errors.Trace(err1)
			}

			// update reorg next handle
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})

		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}
Example #30
0
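// flushNode serializes node, gzip-compresses it and writes it into a suitably
// sized block, advancing the container's nodesOffset when the node is appended
// at the end of the file.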
func (b *BlockContainer) flushNode(node *Node) {
	if node == nil {
		logger.Fatal("FlushNode: node is nil")
	}
	logger.Debug("BlockContainer FlushNode", node.id)

	buffer := &bytes.Buffer{}
	node.writeTo(buffer)

	if buffer == nil {
		logger.Fatal("Node Serialize Buffer Is nil")
	}

	compressedBuffer := &bytes.Buffer{}

	w := gzip.NewWriter(compressedBuffer)
	_, err := w.Write(buffer.Bytes())
	if err != nil {
		logger.Fatal(err)
	}

	if err = w.Close(); err != nil {
		logger.Fatal(err)
	}

	compressedBufferSize := compressedBuffer.Len()

	bufferSize := buffer.Len()
	emptyBlock := b.getSuitableBlock(node.id, compressedBufferSize)
	logger.Debug(emptyBlock)

	newOffset := emptyBlock.writeNode(node, b.tree.file, b.nodesOffset, compressedBuffer, uint32(bufferSize), uint32(compressedBufferSize))

	if newOffset > b.nodesOffset {
		b.nodesOffset = newOffset
	}
}