Example #1
0
func (sq *SqlQuery) allowQueries(dbconfig *eproto.DBConfigs) {
	sq.statemu.Lock()
	v := sq.state.Get()
	switch v {
	case ABORT, SERVING:
		sq.statemu.Unlock()
		log.Info("Ignoring allowQueries request, current state: %v", v)
		return
	case INITIALIZING, SHUTTING_DOWN:
		panic("unreachable")
	}
	// state is NOT_SERVING
	sq.setState(INITIALIZING)

	defer func() {
		if x := recover(); x != nil {
			log.Error("%s", x.(*TabletError).Message)
			sq.setState(NOT_SERVING)
			return
		}
		sq.setState(SERVING)
	}()

	sq.qe.Open(dbconfig)
	sq.dbconfig = dbconfig
	sq.sessionId = Rand()
	log.Info("Session id: %d", sq.sessionId)
}
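A note on the pattern: allowQueries rolls the state back through the defer/recover block above if initialization panics. A minimal, self-contained sketch of that pattern (the server type and state names below are illustrative, not taken from the original code):

package main

import "log"

type server struct{ state string }

func (s *server) setState(st string) { s.state = st }

// start moves the server to SERVING, or back to NOT_SERVING if init panics.
func (s *server) start(init func()) {
	s.setState("INITIALIZING")
	defer func() {
		if x := recover(); x != nil {
			log.Printf("init failed: %v", x)
			s.setState("NOT_SERVING")
			return
		}
		s.setState("SERVING")
	}()
	init()
}

func main() {
	s := &server{}
	s.start(func() { panic("db unreachable") })
	log.Println("state:", s.state) // prints NOT_SERVING
}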
Example #2
0
func (em *Engine) Init(conf *proto.DBConfigs) error {
	log.Info("Begin init engine")
	if err := em.dbEngine.Init(conf); err != nil {
		log.Info("Init engine %v error, %v", em.dbEngine.Name(), err)
		return proto.ErrDbInitError
	}
	log.Info("Init engine %v complete", em.dbEngine.Name())
	return nil
}
Example #3
0
//-----------------------------------------------
// Execution
func (qe *QueryEngine) execInsertPK(logStats *sqlQueryStats, conn PoolConnection, plan *CompiledPlan) (qr *eproto.QueryResult) {
	log.Info("Execute insert pk sql %s", plan.Query)
	tableName := plan.TableName
	tableInfo := qe.schemaInfo.tables[plan.TableName]
	rowColumns := plan.RowColumns
	var key []byte
	var columnName string
	keys := make([][]byte, 0, len(rowColumns)*len(tableInfo.Columns))
	values := make([][]byte, 0, len(rowColumns)*len(tableInfo.Columns))
	pkList := buildValueList(tableInfo, plan.PKValues, plan.BindVars)
	for i, columnsMap := range rowColumns {
		pkvalue := buildPkValue(pkList[i])
		log.Info("Pk Value is %v", string(pkvalue))
		for _, columnDef := range tableInfo.Columns {
			columnName = columnDef.Name
			if columnDef.IsPk {
				key = buildTableRowColumnKey(tableName, columnName, pkvalue)
				log.Info("pk key is %v", string(key))
				keys = append(keys, key)
				values = append(values, []byte{'0'})
			} else if columnDef.IsAuto {
				if _, ok := columnsMap[columnName]; ok {
					panic(NewTabletErrorDB(FAIL, fmt.Errorf("field %s value is auto created", columnName)))
				}
				// TODO
			} else {
				value, ok := columnsMap[columnName]
				if !ok {
					if !columnDef.Nullable {
						panic(NewTabletErrorDB(FAIL, fmt.Errorf("column %s shouldn't be null", columnDef.Name)))
					}
					// Nullable column with no supplied value: skip it, otherwise the
					// type assertion below would run on a nil interface and panic.
					continue
				}
				if !value.(sqltypes.Value).IsNull() {
					key = buildTableRowColumnKey(tableName, columnName, pkvalue)
					log.Info("normal key is %v", string(key))
					keys = append(keys, key)
					values = append(values, value.(sqltypes.Value).Raw())
					log.Info("normal value is %v", value.(sqltypes.Value).String())
				}
			}
		}
	}
	atomic.AddInt64(&qe.activeConnection, 1)
	defer atomic.AddInt64(&qe.activeConnection, -1)
	err := conn.Puts(nil, keys, values)
	if err != nil {
		panic(NewTabletErrorDB(FAIL, err))
	}

	qr = &eproto.QueryResult{RowsAffected: uint64(len(rowColumns))}
	return qr
}
Example #4
0
func (si *SchemaInfo) GetPlan(logStats *sqlQueryStats, sql string) (plan *ExecPlan) {
	log.Warn("plan sql %v", sql)
	si.mu.Lock()
	defer si.mu.Unlock()
	if plan := si.getQuery(sql); plan != nil {
		return plan
	}

	var tableInfo *schema.Table
	GetTable := func(tableName string) (table *schema.Table, ok bool) {
		tableInfo, ok = si.tables[tableName]
		if !ok {
			return nil, false
		}
		return tableInfo, true
	}
	splan, err := sqlparser.ExecParse(sql, GetTable)
	if err != nil {
		log.Info("parse error %v", err.Error())
		panic(NewTabletError(FAIL, "%s", err))
	}
	plan = &ExecPlan{ExecPlan: splan, Table: tableInfo}
	if plan.PlanId.IsSelect() {
		fields := make([]eproto.Field, len(plan.ColumnNumbers))
		for i, cIdx := range plan.ColumnNumbers {
			column := si.tables[plan.TableName].Columns[cIdx]
			fields[i] = eproto.Field{column.Name, int64(column.Category)}
		}
		plan.Fields = fields
	} else if plan.PlanId == sqlparser.PLAN_DDL || plan.PlanId == sqlparser.PLAN_SET {
		return plan
	}
	si.queries.Set(sql, plan)
	return plan
}
Example #5
0
func (si *SchemaInfo) DropTable(tableName string) {
	si.mu.Lock()
	defer si.mu.Unlock()
	delete(si.tables, tableName)
	si.queries.Clear()
	log.Info("Table %s forgotten", tableName)
}
Example #6
0
func (qe *QueryEngine) selectAll(logStats *sqlQueryStats, plan *CompiledPlan) (result *eproto.QueryResult) {
	tableName := plan.TableName
	pks := qe.getAllPks(logStats, plan.TableName, nil)
	var keys, values [][]byte
	for _, pk := range pks {
		for _, field := range plan.Fields {
			keys = append(keys, buildTableRowColumnKey(tableName, field.Name, pk))
		}
	}

	result = &eproto.QueryResult{}
	result.Fields = plan.Fields

	if len(pks) == 0 {
		result.RowsAffected = 0
		return result
	}

	values = qe.fetch(logStats, keys)

	rowList := make([][]sqltypes.Value, len(pks))
	for i := range pks {
		rowList[i] = make([]sqltypes.Value, len(plan.Fields))
		for j, field := range plan.Fields {
			rowList[i][j] = buildValue(values[i*len(plan.Fields)+j], field.Type)
			log.Info(rowList[i][j].String())
		}
	}
	result.Rows = rowList
	result.RowsAffected = uint64(len(pks))
	return result
}
Example #7
0
func (node *Node) execAnalyzeValue() *Node {
	log.Info("node.Type %v", node.Type)
	switch node.Type {
	case STRING, NUMBER, VALUE_ARG:
		return node
	}
	return nil
}
Example #8
0
func ExecParse(sql string, getTable TableGetter) (plan *ExecPlan, err error) {
	defer handleError(&err)

	tree, err := Parse(sql)
	// Check the parse error before touching tree: on failure it may be nil.
	if err != nil {
		return nil, err
	}

	log.Info("tree: %v", tree.Type)
	log.Info("%v", tree.TreeString())

	plan = tree.execAnalyzeSql(getTable)
	if plan.PlanId == PLAN_PASS_DML {
		log.Warn("PASS_DML: %s", sql)
	}
	return plan, nil
}
Example #9
0
func (qe *QueryEngine) Open(config *eproto.DBConfigs) {
	// Wait for Close, in case it's running
	qe.mu.Lock()
	defer qe.mu.Unlock()
	err := qe.engine.Init(config)
	if err != nil {
		log.Info(err.Error())
		panic(NewTabletErrorDB(FATAL, err))
	}
	connFactory := ConnectionCreator(config.AppConnectParams, qe.engine)
	qe.connPool.Open(connFactory)
	// qe.streamConnPool.Open(connFactory)
	// qe.reservedPool.Open(connFactory)
	start := time.Now().UnixNano()
	qe.schemaInfo.Open(connFactory)
	log.Info("Time taken to load the schema: %v ms", (time.Now().UnixNano()-start)/1e6)
}
Example #10
0
// No condition in the sql.
// If modifyType is true this is an update, otherwise a delete.
func (qe *QueryEngine) execModifyAll(logStats *sqlQueryStats, conn PoolConnection, plan *CompiledPlan, modifyType bool) *eproto.QueryResult {
	pks := qe.getAllPks(logStats, plan.TableName, conn)
	log.Info("delete %d", len(pks))
	if modifyType {
		return qe.execUpdate(logStats, conn, plan, pks)
	} else {
		return qe.execDelete(logStats, conn, plan, pks)
	}
}
Example #11
0
func (qe *QueryEngine) getAllPks(logStats *sqlQueryStats, tableName string, conn PoolConnection) (pks [][]byte) {
	tableInfo := qe.schemaInfo.tables[tableName]
	pkStart := []byte(fmt.Sprintf("%s|%s|", tableName, tableInfo.GetPk().Name))
	pkEnd := []byte(fmt.Sprintf("%s|%s||", tableName, tableInfo.GetPk().Name))
	_, pks = qe.fetchIterate(logStats, conn, pkStart, pkEnd, 0, true, false)
	for _, pk := range pks {
		log.Info("pkpkpk:%s", string(pk))
	}
	return pks
}
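getAllPks scans keys that start with the prefix "<table>|<pk column>|". buildTableRowColumnKey itself is not shown in these examples, so the sketch below only assumes the "<table>|<column>|<value>" layout implied by that prefix:

package main

import "fmt"

// buildKey mirrors the "<table>|<column>|<value>" layout implied by the
// pkStart/pkEnd prefixes in getAllPks; the real buildTableRowColumnKey may differ.
func buildKey(table, column string, pk []byte) []byte {
	return []byte(fmt.Sprintf("%s|%s|%s", table, column, pk))
}

func main() {
	fmt.Println(string(buildKey("user", "id", []byte("42")))) // user|id|42
}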
Example #12
0
func (sq *SqlQuery) Execute(context *rpcproto.Context, query *proto.Query, reply *eproto.QueryResult) (err error) {
	log.Info("sql is %v", query.Sql)
	logStats := newSqlQueryStats("Execute", context)
	defer handleExecError(query, &err, logStats)

	sq.checkState(query.SessionId)

	*reply = *sq.qe.Execute(logStats, query)
	return nil
}
Example #13
0
func (em *Engine) Connect(params *proto.DbConnectParams) (conn *DBConnection, err error) {
	var c proto.DbConnection
	c, err = em.dbEngine.Connect(params)
	if err != nil {
		// conn has not been built yet; close the raw connection if one was returned.
		if c != nil {
			c.Close()
		}
		return nil, err
	}
	log.Info("Come to a new client %v, id is %d", params.UserName, c.Id())
	conn = &DBConnection{connectionParams: params, DbConnection: c}
	return conn, nil
}
Example #14
0
// Load loads the contents of a JSON file named
// filename into c.
func (c *cramMD5Credentials) Load(filename string) error {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return err
	}
	if err = json.Unmarshal(data, c); err != nil {
		return err
	}
	log.Info("Loaded credentials from %s.", filename)
	return nil
}
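Load is the usual read-file-then-unmarshal pattern. A self-contained sketch of the same pattern with a hypothetical credentials type (the real cramMD5Credentials type is not shown in these examples):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// credentials is a hypothetical stand-in for cramMD5Credentials.
type credentials map[string][]string

func load(filename string, c *credentials) error {
	data, err := os.ReadFile(filename) // the original uses ioutil.ReadFile
	if err != nil {
		return err
	}
	return json.Unmarshal(data, c)
}

func main() {
	var c credentials
	if err := load("creds.json", &c); err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("loaded", len(c), "entries")
}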
Example #15
0
func NewEngine(name string) (*Engine, error) {
	engineInit, ok := engineImpls[name]
	if !ok {
		return nil, proto.ErrUnknownDBEngineName
	}
	log.Info("Get Engine : %v", name)
	em := new(Engine)
	em.dbEngine = engineInit()
	return em, nil
}
Example #16
0
func main() {
	flag.Parse()
	servenv.Init()

	ts.InitQueryService()

	dbConfigs := &proto.DBConfigs{DataPath: "testrock"}
	dbConfigs.AppConnectParams = &proto.DbConnectParams{DbName: "test", UserName: "******"}
	ts.AllowQueries(dbConfigs)

	log.Info("starting vtocc %v", *port)
	servenv.OnClose(func() {
		time.Sleep(5 * time.Millisecond)
		ts.DisallowQueries()
	})
	servenv.Run(*port)
}
Example #17
0
// RunSecure is like Run, but it additionally listens for RPC and HTTP
// requests using TLS on securePort, using the passed certificate,
// key, and CA certificate.
func RunSecure(port int, securePort int, cert, key, caCert string) {
	onRunHooks.Fire()
	ServeRPC()

	l, err := proc.Listen(fmt.Sprintf("%v", port))
	if err != nil {
		log.Critical(err.Error())
		// Listening failed: bail out instead of calling http.Serve with a nil listener.
		return
	}

	go http.Serve(l, nil)

	if securePort != 0 {
		log.Info("listening on secure port %v", securePort)
		SecureServe(fmt.Sprintf(":%d", securePort), cert, key, caCert)
	}
	proc.Wait()
	Close()
}
Example #18
0
func (node *Node) execAnalyzeUpdateExpressions(pkIndex *schema.Index) (updateColumns []Pair, status updateStatus) {
	updateColumns = make([]Pair, node.Len())
	for i := 0; i < node.Len(); i++ {
		columnName := string(node.At(i).At(0).Value)
		index := pkIndex.FindColumn(columnName)
		if index != -1 {
			// update pkvalues not supported
			return nil, updatePkValue
		}
		value := node.At(i).At(1).execAnalyzeValue()
		if value == nil {
			log.Warn("unsupported update expression", node.At(i).At(0))
			return nil, updateUnsupport
		}
		log.Info(string(value.Value))
		updateColumns[i] = Pair{columnName, string(value.Value)}
	}
	return updateColumns, updateOk
}
Example #19
0
func (sq *SqlQuery) disallowQueries() {
	sq.statemu.Lock()
	defer sq.statemu.Unlock()
	switch sq.state.Get() {
	case NOT_SERVING, ABORT:
		return
	case INITIALIZING, SHUTTING_DOWN:
		panic("unreachable")
	}
	// state is SERVING
	sq.setState(SHUTTING_DOWN)
	defer func() {
		sq.setState(NOT_SERVING)
	}()

	log.Info("Stopping query service: %d", sq.sessionId)
	sq.qe.Close()
	sq.sessionId = 0
	sq.dbconfig = nil
}
Example #20
0
func (qe *QueryEngine) selectPkEqual(logStats *sqlQueryStats, plan *CompiledPlan) (result *eproto.QueryResult) {
	pkRows := buildPkValueList(plan.Table, plan.PKValues, plan.BindVars)
	if len(pkRows) != 1 || plan.Fields == nil {
		panic("unexpected")
	}

	tableName := plan.TableName
	keys := make([][]byte, len(plan.Fields))
	pkValue := buildCompositeValue(pkRows[0])
	log.Info("pk value is %s", string(pkValue))

	result = &eproto.QueryResult{}
	result.Fields = plan.Fields

	// check if pk exists
	if !qe.fetchExists(logStats, buildTableRowColumnKey(tableName, plan.Table.GetPk().Name, pkValue)) {
		// not exists
		result.RowsAffected = 0
		return result
	}

	for i, field := range plan.Fields {
		keys[i] = buildTableRowColumnKey(tableName, field.Name, pkValue)
	}

	values := qe.fetch(logStats, keys)

	rowValues := make([]sqltypes.Value, len(plan.Fields))
	for i, field := range plan.Fields {
		if len(values[i]) != 0 {
			rowValues[i] = buildValue(values[i], field.Type)
		} else {
			rowValues[i] = sqltypes.NULL
		}
	}

	result.Rows = make([][]sqltypes.Value, 1)
	result.Rows[0] = rowValues
	result.RowsAffected = 1
	return result
}
Example #21
0
func (si *SchemaInfo) Open(connFactory CreateConnectionFun) {
	si.connPool.Open(connFactory)
	conn := si.connPool.Get()
	defer conn.Recycle()
	tables, err := getTables(conn)
	if err != nil {
		panic(NewTabletError(FATAL, "Could not get table list: %v", err))
	} else {
		log.Info("get table %d", len(tables))
	}
	if len(tables) == 0 {
		tables = append(tables, testTables())
	}
	si.tables = make(map[string]*schema.Table)
	for _, tableInfo := range tables {
		si.tables[tableInfo.Name] = tableInfo
	}
	// Clear is not really needed. Doing it for good measure.
	si.queries.Clear()
	// si.ticks.Start(func() { si.Reload() })
}
Example #22
0
func (node *Node) execAnalyzeInsert(getTable TableGetter) (plan *ExecPlan) {
	plan = &ExecPlan{PlanId: PLAN_PASS_DML, FullQuery: node.GenerateFullQuery()}
	tableName := string(node.At(INSERT_TABLE_OFFSET).Value)
	tableInfo := plan.setTableInfo(tableName, getTable)

	pkColumnNumbers := node.At(INSERT_COLUMN_LIST_OFFSET).getInsertPKColumns(tableInfo)

	rowValues := node.At(INSERT_VALUES_OFFSET) // VALUES/SELECT
	if rowValues.Type == SELECT {
		plan.PlanId = PLAN_INSERT_SUBQUERY
		plan.OuterQuery = node.GenerateInsertOuterQuery()
		plan.Subquery = rowValues.GenerateSelectLimitQuery()
		// Column list syntax is a subset of select expressions
		if node.At(INSERT_COLUMN_LIST_OFFSET).Len() != 0 {
			plan.ColumnNumbers = node.At(INSERT_COLUMN_LIST_OFFSET).execAnalyzeSelectExpressions(tableInfo)
		} else {
			// SELECT_STAR node will expand into all columns
			n := NewSimpleParseNode(NODE_LIST, "")
			n.Push(NewSimpleParseNode(SELECT_STAR, "*"))
			plan.ColumnNumbers = n.execAnalyzeSelectExpressions(tableInfo)
		}
		plan.SubqueryPKColumns = pkColumnNumbers
		return plan
	}

	rowList := rowValues.At(0) // VALUES->NODE_LIST
	if pkValues := getInsertPKValues(pkColumnNumbers, rowList, tableInfo); pkValues != nil {
		plan.PlanId = PLAN_INSERT_PK
		plan.OuterQuery = plan.FullQuery
		plan.PKValues = pkValues
	}

	if rowColumns := node.At(INSERT_COLUMN_LIST_OFFSET).getInsertColumnValue(tableInfo, rowList); rowColumns != nil {
		plan.RowColumns = rowColumns
		log.Info("row column is %d", len(rowColumns))
	}

	return plan
}
Example #23
0
func (qe *QueryEngine) execDelete(logStats *sqlQueryStats, conn PoolConnection, plan *CompiledPlan, pks [][]byte) (qr *eproto.QueryResult) {
	tableName := plan.TableName
	tableInfo := plan.Table
	columnsLen := len(tableInfo.Columns)

	keys := make([][]byte, len(pks)*columnsLen)
	for i, pk := range pks {
		for j, column := range tableInfo.Columns {
			keys[i*columnsLen+j] = buildTableRowColumnKey(tableName, column.Name, pk)
			log.Info("delete key %v", string(keys[i*columnsLen+j]))
		}
	}

	atomic.AddInt64(&qe.activeConnection, 1)
	defer atomic.AddInt64(&qe.activeConnection, -1)
	err := conn.Deletes(nil, keys)
	if err != nil {
		panic(NewTabletErrorDB(FAIL, err))
	}

	qr = &eproto.QueryResult{RowsAffected: uint64(len(pks))}
	return qr
}
Example #24
0
//-----------------------------------------------
// Execution
func (qe *QueryEngine) execInsertPK(logStats *sqlQueryStats, conn PoolConnection, plan *CompiledPlan) (qr *eproto.QueryResult) {
	log.Info("Execute insert pk sql %s", plan.Query)
	tableName := plan.TableName
	tableInfo := qe.schemaInfo.tables[plan.TableName]
	rowColumns := plan.RowColumns
	var pkValue []byte
	var key []byte
	var columnName string
	keys := make([][]byte, 0, len(rowColumns)*len(tableInfo.Columns))
	values := make([][]byte, 0, len(rowColumns)*len(tableInfo.Columns))
	pkList := buildPkValueList(tableInfo, plan.PKValues, plan.BindVars)

	for i, columnsMap := range rowColumns {
		if len(tableInfo.PKColumns) > 1 { // multiple pk
			pkValue = buildCompositeValue(pkList[i])
		} else {
			pkColumn := tableInfo.GetPKColumn(0)
			if pkColumn.IsAuto {
				if plan.PKValues != nil {
					panic(NewTabletErrorDB(FAIL, fmt.Errorf("field %s value is auto created", pkColumn.Name)))
				}
				pkValue = []byte(strconv.FormatInt(pkColumn.GetNextId(), 10)) // base must be in 2..36; decimal ids
			} else if pkColumn.IsUUID {
				uid, err := uuid.NewV4()
				if err != nil {
					panic(NewTabletError(FATAL, "Make uuuid error"))
				}
				pkValue = []byte(uid.String())
			} else {
				pkValue = pkList[i][0].Raw() // single pk
			}
		}
		log.Info("Pk Value is %v", string(pkValue))
		for _, columnDef := range tableInfo.Columns {
			columnName = columnDef.Name
			if columnDef.IsPk {
				value := columnsMap[columnName]
				key = buildTableRowColumnKey(tableName, columnName, pkValue)
				keys = append(keys, key)
				values = append(values, value.Raw())

				// if column is auto increment, update the value
				if columnDef.IsAuto {
					keys = append(keys, buildTableColumnAutoKey(tableName, columnName))
					values = append(values, pkValue)
				}
			} else if columnDef.IsAuto {
				if _, ok := columnsMap[columnName]; ok {
					panic(NewTabletErrorDB(FAIL, fmt.Errorf("field %s value is auto created", columnName)))
				}
				keys = append(keys, buildTableRowColumnKey(tableName, columnName, pkValue))
				nextId := []byte(strconv.FormatInt(columnDef.GetNextId(), 10))
				values = append(values, nextId)

				keys = append(keys, buildTableColumnAutoKey(tableName, columnName))
				values = append(values, nextId)

			} else if columnDef.IsUUID {
				uid, err := uuid.NewV4()
				if err != nil {
					panic(NewTabletError(FATAL, "Make uuuid error"))
				}
				keys = append(keys, buildTableRowColumnKey(tableName, columnName, pkValue))
				values = append(values, []byte(uid.String()))
			} else {
				value, ok := columnsMap[columnName]
				if !ok {
					if !columnDef.Nullable {
						panic(NewTabletErrorDB(FAIL, fmt.Errorf("column %s shouldn't be null", columnDef.Name)))
					}
				}
				if !value.IsNull() {
					key = buildTableRowColumnKey(tableName, columnName, pkValue)
					keys = append(keys, key)
					values = append(values, value.Raw())
				}
			}
		}
		// secondary index
		for _, index := range tableInfo.Indexes {
			if index.Name == "PRIMARY" {
				continue
			}
			columnValues := make([]sqltypes.Value, len(index.Columns))
			for i, columnName := range index.Columns {
				columnValues[i] = columnsMap[columnName]
			}
			indexKey := buildSecondaryIndexKey(tableName, index.Name, buildCompositeValue(columnValues))
			log.Trace("idx key is %s", indexKey)
			keys = append(keys, indexKey)
			values = append(values, pkValue)
		}
	}
	atomic.AddInt64(&qe.activeConnection, 1)
	defer atomic.AddInt64(&qe.activeConnection, -1)
	err := conn.Puts(nil, keys, values)
	if err != nil {
		panic(NewTabletErrorDB(FAIL, err))
	}

	qr = &eproto.QueryResult{RowsAffected: uint64(len(rowColumns))}
	return qr
}
Example #25
0
func (qe *QueryEngine) Execute(logStats *sqlQueryStats, query *proto.Query) (reply *eproto.QueryResult) {
	qe.mu.RLock()
	defer qe.mu.RUnlock()

	if query.BindVariables == nil { // will help us avoid repeated nil checks
		query.BindVariables = make(map[string]interface{})
	}
	logStats.BindVariables = query.BindVariables
	logStats.OriginalSql = query.Sql
	// cheap hack: strip trailing comment into a special bind var
	stripTrailing(query)
	basePlan := qe.schemaInfo.GetPlan(logStats, query.Sql)
	planName := basePlan.PlanId.String()
	logStats.PlanType = planName
	defer func(start time.Time) {
		duration := time.Since(start)
		queryStats.Add(planName, duration)
		if reply == nil {
			basePlan.AddStats(1, duration, 0, 1)
		} else {
			basePlan.AddStats(1, duration, int64(len(reply.Rows)), 0)
		}
	}(time.Now())

	/*	if basePlan.PlanId == sqlparser.PLAN_DDL {
		return qe.execDDL(logStats, query.Sql)
	}*/
	plan := &CompiledPlan{query.Sql, basePlan, query.BindVariables, query.TransactionId, query.ConnectionId}
	log.Info("Plan type is %s, reason is %s", plan.PlanId, plan.Reason)
	if plan.Reason == sqlparser.REASON_ERR {
		panic(NewTabletError(FAIL, plan.Error.Error()))
	}
	if isDml(query.Sql) {
		conn := qe.connPool.Get()
		defer conn.Recycle()
		switch plan.PlanId {
		case sqlparser.PLAN_INSERT_PK:
			reply = qe.execInsertPK(logStats, conn, plan)
		case sqlparser.PLAN_UPDATE_ALL:
			reply = qe.execModifyAll(logStats, conn, plan, true)
		case sqlparser.PLAN_UPDATE_PK:
			reply = qe.execModifyPk(logStats, conn, plan, true)
		case sqlparser.PLAN_DELETE_ALL:
			reply = qe.execModifyAll(logStats, conn, plan, false)
		case sqlparser.PLAN_DELETE_PK:
			reply = qe.execModifyPk(logStats, conn, plan, false)

		default:
			panic(NewTabletError(FAIL, "sql currently not supported"))
		}
	} else {
		switch plan.PlanId {
		case sqlparser.PLAN_SELECT_ALL:
			reply = qe.selectAll(logStats, plan)
		case sqlparser.PLAN_PK_EQUAL:
			reply = qe.selectPkEqual(logStats, plan)
		case sqlparser.PLAN_PK_IN:
			reply = qe.selectPkIn(logStats, plan)
		default:
			panic(NewTabletError(FAIL, "sql currently not supported"))
		}
	}
	return reply
}
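Execute records per-plan timing with a deferred closure whose argument is evaluated at the defer statement itself; a minimal sketch of that idiom:

package main

import (
	"fmt"
	"time"
)

func work() {
	// time.Now() is evaluated here; the closure runs (and measures) on return.
	defer func(start time.Time) {
		fmt.Println("took", time.Since(start))
	}(time.Now())

	time.Sleep(10 * time.Millisecond)
}

func main() { work() }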
Example #26
0
// ServeLogs makes logs sent to logger available through HTTP at url.
func (logger *StreamLogger) ServeLogs(url string) {
	http.Handle(url, logger)
	log.Info("Streaming logs from %v at %v.", logger, url)
}
Example #27
0
func (sq *SqlQuery) setState(state int64) {
	log.Info("SqlQuery state: %v -> %v", stateName[sq.state.Get()], stateName[state])
	sq.state.Set(state)
}
Example #28
0
func (node *Node) getInsertColumnValue(tableInfo *schema.Table, rowList *Node) (rowColumns []map[string]sqltypes.Value) {
	log.Warn("node.Sub is ", len(node.Sub))

	rowLen := rowList.Len()
	rowColumns = make([]map[string]sqltypes.Value, rowLen)

	for i := 0; i < rowList.Len(); i++ {
		rowColumns[i] = make(map[string]sqltypes.Value)
	}
	if len(node.Sub) > 0 {

		// pkIndex := tableInfo.Indexes[0]
		for i, column := range node.Sub {
			// index := pkIndex.FindColumn(string(column.Value))
			// if index != -1 {
			// 	continue
			// }
			values := make([]sqltypes.Value, rowList.Len())
			for j := 0; j < rowList.Len(); j++ {
				node := rowList.At(j).At(0).At(i) // NODE_LIST->'('->NODE_LIST->Value
				value := node.execAnalyzeValue()
				if value == nil {
					log.Warn("insert is too complex %v", node)
					return nil
				}
				// values[j] = asInterface(value)
				values[j] = asValue(value)
				rowColumns[j][string(column.Value)] = values[j]
			}
			// if len(values) == 1 {
			// 	rowColumns = map[string]sqltypes.Value{string(column.Value): values[0]}
			// } else {
			// 	fmt.Println("ValuesLen: ", len(values))
			// 	// rowColumns[i] = map[string]sqltypes.Value{"name": string(column.Value), "value": values}
			// }
		}
	}

	if len(node.Sub) == 0 {
		tableColumns := tableInfo.Columns
		log.Info("tableColumns %v", tableColumns)
		for i := 0; i < len(tableColumns); i++ {
			column := tableColumns[i]
			values := make([]sqltypes.Value, rowList.Len())
			log.Info("values len %v", len(values))
			for j := 0; j < rowList.Len(); j++ {
				node := rowList.At(j).At(0).At(i) // NODE_LIST->'('->NODE_LIST->Value
				value := node.execAnalyzeValue()
				log.Info("value is %v", value)
				if value == nil {
					// log.Warningf("insert is too complex %v", node)
					return nil
				}
				// values[j] = asInterface(value)
				values[j] = asValue(value)
				rowColumns[j][column.Name] = values[j]
				log.Info("values[j] %v", values[j])
			}

		}
	}
	log.Info("rowColumns is ", rowColumns)
	return rowColumns
}
Example #29
0
func (node *Node) execAnalyzeSelect(getTable TableGetter) (plan *ExecPlan) {
	// Default plan
	plan = &ExecPlan{PlanId: PLAN_PASS_SELECT, FieldQuery: node.GenerateFieldQuery(), FullQuery: node.GenerateSelectLimitQuery()}

	// There are bind variables in the SELECT list
	if plan.FieldQuery == nil {
		plan.Reason = REASON_SELECT_LIST
		return plan
	}

	if !node.execAnalyzeSelectStructure() {
		plan.Reason = REASON_SELECT
		return plan
	}

	// from
	tableName, hasHints := node.At(SELECT_FROM_OFFSET).execAnalyzeFrom()
	if tableName == "" {
		plan.Reason = REASON_TABLE
		return plan
	}
	tableInfo := plan.setTableInfo(tableName, getTable)
	log.Info("hashints:%#v", hasHints)

	// Don't improve the plan if the select is for update
	if node.At(SELECT_FOR_UPDATE_OFFSET).Type == FOR_UPDATE {
		plan.Reason = REASON_FOR_UPDATE
		return plan
	}

	// Select expressions
	selects := node.At(SELECT_EXPR_OFFSET).execAnalyzeSelectExpressions(tableInfo)
	if selects == nil {
		plan.Reason = REASON_SELECT_LIST
		return plan
	}
	plan.ColumnNumbers = selects
	log.Info("select expre:%#v", selects)

	// where
	conditions := node.At(SELECT_WHERE_OFFSET).execAnalyzeWhere()
	if conditions == nil {
		plan.PlanId = PLAN_SELECT_ALL
		plan.Reason = REASON_WHERE
		return plan
	}
	for _, node := range conditions {
		log.Trace("%v", node)
	}

	// order
	if node.At(SELECT_ORDER_OFFSET).Len() != 0 {
		plan.Reason = REASON_ORDER
		return plan
	}

	// This check should never fail because we only cache tables with primary keys.
	if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" {
		panic("unexpected")
	}

	// Attempt PK match only if there's no limit clause
	if node.At(SELECT_LIMIT_OFFSET).Len() == 0 {
		planId, pkValues := getSelectPKValues(conditions, tableInfo.Indexes[0])
		switch planId {
		case PLAN_PK_EQUAL:
			plan.PlanId = PLAN_PK_EQUAL
			plan.OuterQuery = node.GenerateEqualOuterQuery(tableInfo)
			plan.PKValues = pkValues
			return plan
		case PLAN_PK_IN:
			plan.PlanId = PLAN_PK_IN
			plan.OuterQuery = node.GenerateInOuterQuery(tableInfo)
			plan.PKValues = pkValues
			return plan
		}
	}

	if len(tableInfo.Indexes[0].Columns) != 1 {
		plan.Reason = REASON_COMPOSITE_PK
		return plan
	}

	// TODO: Analyze hints to improve plan.
	if hasHints {
		plan.Reason = REASON_HAS_HINTS
		return plan
	}

	plan.IndexUsed = getIndexMatch(conditions, tableInfo.Indexes)
	if plan.IndexUsed == "" {
		panic(NewParserError("no index matches the sql"))
	}
	if plan.IndexUsed == "PRIMARY" {
		plan.Reason = REASON_PKINDEX
		if len(conditions) != 1 {
			plan.Conditions = conditions
		}
		return plan
	}
	// TODO: We can further optimize. Change this to pass-through if select list matches all columns in index.
	plan.PlanId = PLAN_SELECT_SUBQUERY
	plan.Conditions = conditions
	plan.OuterQuery = node.GenerateInOuterQuery(tableInfo)
	plan.Subquery = node.GenerateSelectSubquery(tableInfo, plan.IndexUsed)
	return plan
}