func getInsertPKValues(pkColumnNumbers []int, rowList *Node, tableInfo *schema.Table) (pkValues []interface{}) { pkValues = make([]interface{}, len(pkColumnNumbers)) for index, columnNumber := range pkColumnNumbers { if columnNumber == -1 { pkValues[index] = tableInfo.GetPKColumn(index).Default continue } values := make([]interface{}, rowList.Len()) for j := 0; j < rowList.Len(); j++ { if columnNumber >= rowList.At(j).At(0).Len() { // NODE_LIST->'('->NODE_LIST panic(NewParserError("Column count doesn't match value count")) } node := rowList.At(j).At(0).At(columnNumber) // NODE_LIST->'('->NODE_LIST->Value value := node.execAnalyzeValue() if value == nil { log.Warn("insert is too complex %v", node) return nil } values[j] = asInterface(value) } if len(values) == 1 { pkValues[index] = values[0] } else { pkValues[index] = values } } return pkValues }
// FmtBindVariables returns the map of bind variables as JSON. For // values that are strings or byte slices it only reports their type // and length. func (stats *sqlQueryStats) FmtBindVariables(full bool) string { var out map[string]interface{} if full { out = stats.BindVariables } else { // NOTE(szopa): I am getting rid of potentially large bind // variables. out = make(map[string]interface{}) for k, v := range stats.BindVariables { switch val := v.(type) { case string: out[k] = fmt.Sprintf("string %v", len(val)) case []byte: out[k] = fmt.Sprintf("bytes %v", len(val)) default: out[k] = v } } } b, err := json.Marshal(out) if err != nil { log.Warn("could not marshal %q", stats.BindVariables) return "" } return string(b) }
func (vtc *VtConn) handleErr(err error) (int, error) { now := time.Now() if vtc.timeFailed.IsZero() { vtc.timeFailed = now } else if now.Sub(vtc.timeFailed) > vtc.recoveryTimeout() { vtc.Close() return ErrTypeFatal, fmt.Errorf("vt: max recovery time exceeded: %v", err) } errType := ErrTypeApp if tabletErr, ok := err.(TabletError); ok { msg := strings.ToLower(tabletErr.err.Error()) if strings.HasPrefix(msg, "fatal") { errType = ErrTypeFatal } else if strings.HasPrefix(msg, "retry") { errType = ErrTypeRetry } } else if netErr, ok := err.(net.Error); ok && netErr.Temporary() { errType = ErrTypeRetry } if errType == ErrTypeRetry && vtc.TransactionId != 0 { errType = ErrTypeApp err = fmt.Errorf("vt: cannot retry within a transaction: %v", err) time.Sleep(vtc.reconnectDelay) vtc.Close() dialErr := vtc.dial() log.Warn("vt: redial error %v", dialErr) } return errType, err }
func (vtc *VtConn) Exec(query string, bindVars map[string]interface{}) (db.Result, error) { attempt := 0 for { result, err := vtc.Conn.Exec(query, bindVars) if err == nil { vtc.timeFailed = zeroTime return result, nil } errType, err := vtc.handleErr(err) if errType != ErrTypeRetry { return nil, err } for { attempt++ if attempt > vtc.maxAttempts { return nil, fmt.Errorf("vt: max recovery attempts exceeded: %v", err) } vtc.Close() time.Sleep(vtc.reconnectDelay) if err := vtc.dial(); err == nil { break } log.Warn("vt: error dialing on exec %v", vtc.Conn.dbi.Host) } } panic("unreachable") }
func (node *Node) getInsertColumnValue(tableInfo *schema.Table, rowList *Node) (rowColumns []map[string]interface{}) { rowColumns = make([]map[string]interface{}, len(node.Sub)) // pkIndex := tableInfo.Indexes[0] for i, column := range node.Sub { // index := pkIndex.FindColumn(string(column.Value)) // if index != -1 { // continue // } values := make([]interface{}, rowList.Len()) for j := 0; j < rowList.Len(); j++ { node := rowList.At(j).At(0).At(i) // NODE_LIST->'('->NODE_LIST->Value value := node.execAnalyzeValue() if value == nil { log.Warn("insert is too complex %v", node) return nil } values[j] = asInterface(value) } if len(values) == 1 { rowColumns[i] = map[string]interface{}{"name": string(column.Value), "value": values[0]} } else { fmt.Println("ValuesLen: ", len(values)) rowColumns[i] = map[string]interface{}{"name": string(column.Value), "value": values} } } return rowColumns }
func (vtc *VtConn) Begin() (db.Tx, error) { attempt := 0 for { tx, err := vtc.Conn.Begin() if err == nil { vtc.timeFailed = zeroTime return tx, nil } errType, err := vtc.handleErr(err) if errType != ErrTypeRetry { return nil, err } for { attempt++ if attempt > vtc.maxAttempts { return nil, fmt.Errorf("vt: max recovery attempts exceeded: %v", err) } vtc.Close() time.Sleep(vtc.reconnectDelay) if err := vtc.dial(); err == nil { break } log.Warn("vt: error dialing on begin %v", vtc.Conn.dbi.Host) } } panic("unreachable") }
func (node *Node) execAnalyzeDelete(getTable TableGetter) (plan *ExecPlan) { // Default plan plan = &ExecPlan{PlanId: PLAN_PASS_DML, FullQuery: node.GenerateFullQuery()} tableName := string(node.At(DELETE_TABLE_OFFSET).Value) tableInfo := plan.setTableInfo(tableName, getTable) if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name != "PRIMARY" { log.Warn("no primary key for table %s", tableName) plan.Reason = REASON_TABLE_NOINDEX return plan } plan.PlanId = PLAN_DML_SUBQUERY plan.OuterQuery = node.GenerateDeleteOuterQuery(tableInfo.Indexes[0]) plan.Subquery = node.GenerateDeleteSubquery(tableInfo) conditions := node.At(DELETE_WHERE_OFFSET).execAnalyzeWhere() if conditions == nil { plan.Reason = REASON_WHERE return plan } if pkValues := getPKValues(conditions, tableInfo.Indexes[0]); pkValues != nil { plan.PlanId = PLAN_DML_PK plan.OuterQuery = plan.FullQuery plan.PKValues = pkValues return plan } return plan }
// GetPlan returns the ExecPlan for sql, serving it from the query cache
// when possible and otherwise parsing sql and caching the new plan.
// The SchemaInfo mutex is held for the entire call, including parsing.
// Panics with a TabletError on parse failure.
func (si *SchemaInfo) GetPlan(logStats *sqlQueryStats, sql string) (plan *ExecPlan) {
	log.Warn("plan sql %v", sql)
	si.mu.Lock()
	defer si.mu.Unlock()
	// Fast path: a plan for this exact SQL text is already cached.
	if plan := si.getQuery(sql); plan != nil {
		return plan
	}
	// GetTable is handed to the parser; it also captures the last table
	// resolved into tableInfo so it can be attached to the plan below.
	var tableInfo *schema.Table
	GetTable := func(tableName string) (table *schema.Table, ok bool) {
		tableInfo, ok = si.tables[tableName]
		if !ok {
			return nil, false
		}
		return tableInfo, true
	}
	splan, err := sqlparser.ExecParse(sql, GetTable)
	if err != nil {
		log.Info("parse error %v", err.Error())
		panic(NewTabletError(FAIL, "%s", err))
	}
	plan = &ExecPlan{ExecPlan: splan, Table: tableInfo}
	if plan.PlanId.IsSelect() {
		// For SELECTs, precompute result field metadata from the schema.
		fields := make([]eproto.Field, len(plan.ColumnNumbers))
		for i, cIdx := range plan.ColumnNumbers {
			column := si.tables[plan.TableName].Columns[cIdx]
			fields[i] = eproto.Field{column.Name, int64(column.Category)}
		}
		plan.Fields = fields
	} else if plan.PlanId == sqlparser.PLAN_DDL || plan.PlanId == sqlparser.PLAN_SET {
		// DDL and SET plans are returned without being cached.
		return plan
	}
	si.queries.Set(sql, plan)
	return plan
}
// RegisterQueryService creates the singleton SqlQuery RPC service from
// qsConfig, registers it with the authenticated RPC layer, and exposes a
// /debug/health HTTP endpoint. Calling it again after the service is up
// is a no-op (logged as a warning).
func RegisterQueryService() {
	if SqlQueryRpcService != nil {
		log.Warn("RPC service already up %v", SqlQueryRpcService)
		return
	}
	SqlQueryRpcService = NewSqlQuery(qsConfig)
	proto.RegisterAuthenticated(SqlQueryRpcService)
	http.HandleFunc("/debug/health", healthCheck)
}
// Authenticate checks if the client proof is correct. func (a *AuthenticatorCRAMMD5) Authenticate(context *proto.Context, req *AuthenticateRequest, reply *AuthenticateReply) error { username := strings.SplitN(req.Proof, " ", 2)[0] secrets, ok := a.Credentials[username] if !ok { log.Warn("failed authentication attempt: wrong user: %#v", username) return AuthenticationFailed } if !req.state.challengeIssued { log.Warn("failed authentication attempt: challenge was not issued") return AuthenticationFailed } for _, secret := range secrets { if expected := CRAMMD5GetExpected(username, secret, req.state.challenge); expected == req.Proof { context.Username = username return nil } } log.Warn("failed authentication attempt: wrong proof") return AuthenticationFailed }
func validateKey(tableInfo *schema.Table, key string) (newKey string) { if key == "" { // TODO: Verify auto-increment table return } pieces := strings.Split(key, ".") if len(pieces) != len(tableInfo.PKColumns) { // TODO: Verify auto-increment table return "" } pkValues := make([]sqltypes.Value, len(tableInfo.PKColumns)) for i, piece := range pieces { if piece[0] == '\'' { s, err := base64.StdEncoding.DecodeString(piece[1 : len(piece)-1]) if err != nil { log.Warn("Error decoding key %s for table %s: %v", key, tableInfo.Name, err) errorStats.Add("Mismatch", 1) return } pkValues[i] = sqltypes.MakeString(s) } else if piece == "null" { // TODO: Verify auto-increment table return "" } else { n, err := sqltypes.BuildNumeric(piece) if err != nil { log.Warn("Error decoding key %s for table %s: %v", key, tableInfo.Name, err) errorStats.Add("Mismatch", 1) return } pkValues[i] = n } } if newKey = buildKey(pkValues); newKey != key { log.Warn("Error: Key mismatch, received: %s, computed: %s", key, newKey) errorStats.Add("Mismatch", 1) } return newKey }
func ExecParse(sql string, getTable TableGetter) (plan *ExecPlan, err error) { defer handleError(&err) tree, err := Parse(sql) if err != nil { return nil, err } plan = tree.execAnalyzeSql(getTable) if plan.PlanId == PLAN_PASS_DML { log.Warn("PASS_DML: %s", sql) } return plan, nil }
func (node *Node) execAnalyzeUpdateExpressions(pkIndex *schema.Index) (updateColumns []Pair, status updateStatus) { updateColumns = make([]Pair, node.Len()) for i := 0; i < node.Len(); i++ { columnName := string(node.At(i).At(0).Value) index := pkIndex.FindColumn(columnName) if index != -1 { // update pkvalues not supported return nil, updatePkValue } value := node.At(i).At(1).execAnalyzeValue() if value == nil { log.Warn("unsupported update expression", node.At(i).At(0)) return nil, updateUnsupport } log.Info(string(value.Value)) updateColumns[i] = Pair{columnName, string(value.Value)} } return updateColumns, updateOk }
func (node *Node) execAnalyzeUpdateExpressions(pkIndex *schema.Index) (pkValues []interface{}, ok bool) { for i := 0; i < node.Len(); i++ { columnName := string(node.At(i).At(0).Value) index := pkIndex.FindColumn(columnName) if index == -1 { continue } value := node.At(i).At(1).execAnalyzeValue() if value == nil { log.Warn("expression is too complex %v", node.At(i).At(0)) return nil, false } if pkValues == nil { pkValues = make([]interface{}, len(pkIndex.Columns)) } pkValues[index] = asInterface(value) } return pkValues, true }
func (node *Node) getInsertColumnValue(tableInfo *schema.Table, rowList *Node) (rowColumns []map[string]sqltypes.Value) { log.Warn("node.Sub is ", len(node.Sub)) rowLen := rowList.Len() rowColumns = make([]map[string]sqltypes.Value, rowLen) for i := 0; i < rowList.Len(); i++ { rowColumns[i] = make(map[string]sqltypes.Value) } if len(node.Sub) > 0 { // pkIndex := tableInfo.Indexes[0] for i, column := range node.Sub { // index := pkIndex.FindColumn(string(column.Value)) // if index != -1 { // continue // } values := make([]sqltypes.Value, rowList.Len()) for j := 0; j < rowList.Len(); j++ { node := rowList.At(j).At(0).At(i) // NODE_LIST->'('->NODE_LIST->Value value := node.execAnalyzeValue() if value == nil { log.Warn("insert is too complex %v", node) return nil } // values[j] = asInterface(value) values[j] = asValue(value) rowColumns[j][string(column.Value)] = values[j] } // if len(values) == 1 { // rowColumns = map[string]sqltypes.Value{string(column.Value): values[0]} // } else { // fmt.Println("ValuesLen: ", len(values)) // // rowColumns[i] = map[string]sqltypes.Value{"name": string(column.Value), "value": values} // } } } if len(node.Sub) == 0 { tableColumns := tableInfo.Columns log.Info("tableColumns %v", tableColumns) for i := 0; i < len(tableColumns); i++ { column := tableColumns[i] values := make([]sqltypes.Value, rowList.Len()) log.Info("values len %v", len(values)) for j := 0; j < rowList.Len(); j++ { node := rowList.At(j).At(0).At(i) // NODE_LIST->'('->NODE_LIST->Value value := node.execAnalyzeValue() log.Info("value is %v", value) if value == nil { // log.Warningf("insert is too complex %v", node) return nil } // values[j] = asInterface(value) values[j] = asValue(value) rowColumns[j][column.Name] = values[j] log.Info("values[j] %v", values[j]) } } } log.Info("rowColumns is ", rowColumns) return rowColumns }