func (self *Table) AddColumn(name string, columnType string, defval sqltypes.Value, extra string, isPk, nullable bool) {
	index := len(self.Columns)
	self.Columns = append(self.Columns, TableColumn{Name: name})
	self.Columns[index].IsPk = isPk
	self.Columns[index].Nullable = nullable
	if strings.Contains(columnType, "int") {
		self.Columns[index].Category = CAT_NUMBER
	} else if strings.HasPrefix(columnType, "varbinary") {
		self.Columns[index].Category = CAT_VARBINARY
	} else if strings.HasPrefix(columnType, "fractional") {
		self.Columns[index].Category = CAT_FRACTIONAL
	} else {
		self.Columns[index].Category = CAT_OTHER
	}
	if extra == "auto_increment" {
		self.Columns[index].IsAuto = true
		self.Columns[index].NextId = 0
		// Ignore default value, if any
		return
	} else if extra == "uuid" {
		self.Columns[index].IsUUID = true
	}
	if defval.IsNull() {
		return
	}
	if self.Columns[index].Category == CAT_NUMBER {
		self.Columns[index].Default = sqltypes.MakeNumeric(defval.Raw())
	} else {
		self.Columns[index].Default = sqltypes.MakeString(defval.Raw())
	}
}
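// Usage sketch (not part of the original source; table and column names are
// illustrative, and NewTable is assumed to be the package constructor used in
// the tests below): an "auto_increment" column ignores any default value, per
// the logic above, while other columns keep a typed default.
//
//	t := NewTable("user")
//	t.AddColumn("id", "bigint", sqltypes.Value{}, "auto_increment", true, false)
//	t.AddColumn("name", "varchar(64)", sqltypes.MakeString([]byte("anon")), "", false, true)
//	t.AddColumn("eid", "int", sqltypes.MakeString([]byte("0")), "", false, false)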
func buildValue(bytes []byte, fieldType uint32) sqltypes.Value {
	switch fieldType {
	case schema.TYPE_FRACTIONAL:
		return sqltypes.MakeFractional(bytes)
	case schema.TYPE_NUMERIC:
		return sqltypes.MakeNumeric(bytes)
	}
	return sqltypes.MakeString(bytes)
}
// buildValue variant keyed on the column category (schema.CAT_*) rather than
// the wire field type (schema.TYPE_*) used by the variant above.
func buildValue(bytes []byte, fieldType int64) sqltypes.Value {
	switch fieldType {
	case schema.CAT_FRACTIONAL:
		return sqltypes.MakeFractional(bytes)
	case schema.CAT_NUMBER:
		return sqltypes.MakeNumeric(bytes)
	}
	return sqltypes.MakeString(bytes)
}
func asValue(node *Node) sqltypes.Value {
	switch node.Type {
	case STRING:
		return sqltypes.MakeString(node.Value)
	case NUMBER:
		n, err := sqltypes.BuildNumeric(string(node.Value))
		if err != nil {
			panic(NewParserError("Type mismatch: %s", err))
		}
		return n
	}
	panic(NewParserError("Unexpected node %v", node))
}
func validateKey(tableInfo *schema.Table, key string) (newKey string) {
	if key == "" {
		// TODO: Verify auto-increment table
		return
	}
	pieces := strings.Split(key, ".")
	if len(pieces) != len(tableInfo.PKColumns) {
		// TODO: Verify auto-increment table
		return ""
	}
	pkValues := make([]sqltypes.Value, len(tableInfo.PKColumns))
	for i, piece := range pieces {
		if piece[0] == '\'' {
			s, err := base64.StdEncoding.DecodeString(piece[1 : len(piece)-1])
			if err != nil {
				log.Warn("Error decoding key %s for table %s: %v", key, tableInfo.Name, err)
				errorStats.Add("Mismatch", 1)
				return
			}
			pkValues[i] = sqltypes.MakeString(s)
		} else if piece == "null" {
			// TODO: Verify auto-increment table
			return ""
		} else {
			n, err := sqltypes.BuildNumeric(piece)
			if err != nil {
				log.Warn("Error decoding key %s for table %s: %v", key, tableInfo.Name, err)
				errorStats.Add("Mismatch", 1)
				return
			}
			pkValues[i] = n
		}
	}
	if newKey = buildKey(pkValues); newKey != key {
		log.Warn("Error: Key mismatch, received: %s, computed: %s", key, newKey)
		errorStats.Add("Mismatch", 1)
	}
	return newKey
}
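// Key format sketch (inferred from the parsing above; the encode helper shown
// here is hypothetical, not part of the source): the pieces of a composite
// primary key are joined by ".", with string values base64-encoded inside
// single quotes and numeric values written verbatim. buildKey is expected to
// regenerate the same encoding from the decoded pkValues.
//
//	// encodePiece mirrors what buildKey would produce for one PK value.
//	func encodePiece(v sqltypes.Value) string {
//		if v.IsNumeric() {
//			return v.String()
//		}
//		return "'" + base64.StdEncoding.EncodeToString(v.Raw()) + "'"
//	}
//	// e.g. a (name, id) key for ("foo", 42) would look like "'Zm9v'.42".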
func decodeRowBson(buf *bytes.Buffer, kind byte) []sqltypes.Value {
	switch kind {
	case bson.Array:
		// valid
	case bson.Null:
		return nil
	default:
		panic(bson.NewBsonError("Unexpected data type %v for Query.Row", kind))
	}
	bson.Next(buf, 4)
	row := make([]sqltypes.Value, 0, 8)
	kind = bson.NextByte(buf)
	for i := 0; kind != bson.EOO; i++ {
		bson.ExpectIndex(buf, i)
		if kind != bson.Null {
			row = append(row, sqltypes.MakeString(bson.DecodeBytes(buf, kind)))
		} else {
			row = append(row, sqltypes.Value{})
		}
		kind = bson.NextByte(buf)
	}
	return row
}
func FormatNode(buf *TrackedBuffer, node *Node) {
	switch node.Type {
	case SELECT:
		buf.Fprintf("select %v%v%v from %v%v%v%v%v%v%v",
			node.At(SELECT_COMMENT_OFFSET),
			node.At(SELECT_DISTINCT_OFFSET),
			node.At(SELECT_EXPR_OFFSET),
			node.At(SELECT_FROM_OFFSET),
			node.At(SELECT_WHERE_OFFSET),
			node.At(SELECT_GROUP_OFFSET),
			node.At(SELECT_HAVING_OFFSET),
			node.At(SELECT_ORDER_OFFSET),
			node.At(SELECT_LIMIT_OFFSET),
			node.At(SELECT_FOR_UPDATE_OFFSET),
		)
	case INSERT:
		buf.Fprintf("insert %vinto %v%v %v%v",
			node.At(INSERT_COMMENT_OFFSET),
			node.At(INSERT_TABLE_OFFSET),
			node.At(INSERT_COLUMN_LIST_OFFSET),
			node.At(INSERT_VALUES_OFFSET),
			node.At(INSERT_ON_DUP_OFFSET),
		)
	case UPDATE:
		buf.Fprintf("update %v%v set %v%v%v%v",
			node.At(UPDATE_COMMENT_OFFSET),
			node.At(UPDATE_TABLE_OFFSET),
			node.At(UPDATE_LIST_OFFSET),
			node.At(UPDATE_WHERE_OFFSET),
			node.At(UPDATE_ORDER_OFFSET),
			node.At(UPDATE_LIMIT_OFFSET),
		)
	case DELETE:
		buf.Fprintf("delete %vfrom %v%v%v%v",
			node.At(DELETE_COMMENT_OFFSET),
			node.At(DELETE_TABLE_OFFSET),
			node.At(DELETE_WHERE_OFFSET),
			node.At(DELETE_ORDER_OFFSET),
			node.At(DELETE_LIMIT_OFFSET),
		)
	case SET:
		buf.Fprintf("set %v%v", node.At(0), node.At(1))
	case CREATE, ALTER, DROP:
		buf.Fprintf("%s table %v", node.Value, node.At(0))
	case RENAME:
		buf.Fprintf("%s table %v %v", node.Value, node.At(0), node.At(1))
	case TABLE_EXPR:
		buf.Fprintf("%v", node.At(0))
		if node.At(1).Len() == 1 {
			buf.Fprintf(" as %v", node.At(1).At(0))
		}
		buf.Fprintf("%v", node.At(2))
	case USE, FORCE:
		if node.Len() != 0 {
			buf.Fprintf(" %s index %v", node.Value, node.At(0))
		}
	case WHERE, HAVING:
		if node.Len() > 0 {
			buf.Fprintf(" %s %v", node.Value, node.At(0))
		}
	case ORDER, GROUP:
		if node.Len() > 0 {
			buf.Fprintf(" %s by %v", node.Value, node.At(0))
		}
	case LIMIT:
		if node.Len() > 0 {
			buf.Fprintf(" %s %v", node.Value, node.At(0))
			if node.Len() > 1 {
				buf.Fprintf(", %v", node.At(1))
			}
		}
	case COLUMN_LIST:
		if node.Len() > 0 {
			buf.Fprintf("(%v", node.At(0))
			for i := 1; i < node.Len(); i++ {
				buf.Fprintf(", %v", node.At(i))
			}
			buf.WriteByte(')')
		}
	case NODE_LIST:
		if node.Len() > 0 {
			buf.Fprintf("%v", node.At(0))
			for i := 1; i < node.Len(); i++ {
				buf.Fprintf(", %v", node.At(i))
			}
		}
	case COMMENT_LIST:
		if node.Len() > 0 {
			for i := 0; i < node.Len(); i++ {
				buf.Fprintf("%v", node.At(i))
			}
		}
	case WHEN_LIST:
		buf.Fprintf("%v", node.At(0))
		for i := 1; i < node.Len(); i++ {
			buf.Fprintf(" %v", node.At(i))
		}
	case JOIN, STRAIGHT_JOIN, LEFT, RIGHT, CROSS, NATURAL:
		buf.Fprintf("%v %s %v", node.At(0), node.Value, node.At(1))
		if node.Len() > 2 {
			buf.Fprintf(" on %v", node.At(2))
		}
	case DUPLICATE:
		if node.Len() != 0 {
			buf.Fprintf(" on duplicate key update %v", node.At(0))
		}
	case NUMBER, NULL, SELECT_STAR, NO_DISTINCT, COMMENT, FOR_UPDATE, NOT_FOR_UPDATE, TABLE:
		buf.Fprintf("%s", node.Value)
	case ID:
		if _, ok := keywords[string(node.Value)]; ok {
			buf.Fprintf("`%s`", node.Value)
		} else {
			buf.Fprintf("%s", node.Value)
		}
	case VALUE_ARG:
		buf.WriteArg(string(node.Value[1:]))
	case STRING:
		s := sqltypes.MakeString(node.Value)
		s.EncodeSql(buf)
	case '+', '-', '*', '/', '%', '&', '|', '^', '.':
		buf.Fprintf("%v%s%v", node.At(0), node.Value, node.At(1))
	case CASE_WHEN:
		buf.Fprintf("case %v end", node.At(0))
	case CASE:
		buf.Fprintf("case %v %v end", node.At(0), node.At(1))
	case WHEN:
		buf.Fprintf("when %v then %v", node.At(0), node.At(1))
	case ELSE:
		buf.Fprintf("else %v", node.At(0))
	case '=', '>', '<', GE, LE, NE, NULL_SAFE_EQUAL, AS, AND, OR, UNION, UNION_ALL, MINUS, EXCEPT, INTERSECT, LIKE, NOT_LIKE, IN, NOT_IN:
		buf.Fprintf("%v %s %v", node.At(0), node.Value, node.At(1))
	case '(':
		buf.Fprintf("(%v)", node.At(0))
	case EXISTS:
		buf.Fprintf("%s (%v)", node.Value, node.At(0))
	case FUNCTION:
		if node.Len() == 2 {
			// DISTINCT
			buf.Fprintf("%s(%v%v)", node.Value, node.At(0), node.At(1))
		} else {
			buf.Fprintf("%s(%v)", node.Value, node.At(0))
		}
	case UPLUS, UMINUS, '~':
		buf.Fprintf("%s%v", node.Value, node.At(0))
	case NOT, VALUES:
		buf.Fprintf("%s %v", node.Value, node.At(0))
	case ASC, DESC, IS_NULL, IS_NOT_NULL:
		buf.Fprintf("%v %s", node.At(0), node.Value)
	case BETWEEN, NOT_BETWEEN:
		buf.Fprintf("%v %s %v and %v", node.At(0), node.Value, node.At(1), node.At(2))
	case DISTINCT:
		buf.Fprintf("%s ", node.Value)
	default:
		buf.Fprintf("Unknown: %s", node.Value)
	}
}
func testTables() *schema.Table {
	// t := new(schema.Table)
	// t.Name = "user"
	// t.AddColumn("id", "int(11)", sqltypes.NULL, "", true)
	// t.AddColumn("name", "varchar(255)", sqltypes.NULL, "", false)
	// index := t.AddIndex("PRIMARY")
	// index.AddColumn("id", 0)
	// t.PKColumns = []int{0}
	var schem map[string]*schema.Table
	schem = make(map[string]*schema.Table)
	var (
		SQLZERO = sqltypes.MakeString([]byte("0"))
	)
	a := schema.NewTable("a")
	a.AddColumn("id", "int", SQLZERO, "", true, false)
	a.AddColumn("eid", "int", SQLZERO, "", false, false)
	a.AddColumn("name", "varchar(10)", SQLZERO, "", false, true)
	a.AddColumn("foo", "varchar(10)", SQLZERO, "", false, true)
	acolumns := []string{"id", "eid", "name", "foo"}
	a.Indexes = append(a.Indexes, &schema.Index{Name: "PRIMARY", Columns: []string{"id"}, Cardinality: []uint64{1}, DataColumns: acolumns})
	a.Indexes = append(a.Indexes, &schema.Index{Name: "a_name", Columns: []string{"id", "name"}, Cardinality: []uint64{1, 1}, DataColumns: a.Indexes[0].Columns})
	a.Indexes = append(a.Indexes, &schema.Index{Name: "b_name", Columns: []string{"name"}, Cardinality: []uint64{3}, DataColumns: a.Indexes[0].Columns})
	a.Indexes = append(a.Indexes, &schema.Index{Name: "c_name", Columns: []string{"name"}, Cardinality: []uint64{2}, DataColumns: a.Indexes[0].Columns})
	a.PKColumns = append(a.PKColumns, 0)
	a.CacheType = schema.CACHE_RW
	schem["a"] = a
	// b := schema.NewTable("b")
	// b.AddColumn("eid", "int", SQLZERO, "")
	// b.AddColumn("id", "int", SQLZERO, "")
	// bcolumns := []string{"eid", "id"}
	// b.Indexes = append(a.Indexes, &schema.Index{Name: "PRIMARY", Columns: []string{"eid", "id"}, Cardinality: []uint64{1, 1}, DataColumns: bcolumns})
	// b.PKColumns = append(a.PKColumns, 0, 1)
	// b.CacheType = schema.CACHE_NONE
	// schem["b"] = b
	// c := schema.NewTable("c")
	// c.AddColumn("eid", "int", SQLZERO, "")
	// c.AddColumn("id", "int", SQLZERO, "")
	// c.CacheType = schema.CACHE_NONE
	// schem["c"] = c
	// d := schema.NewTable("d")
	// d.AddColumn("name", "varbinary(10)", SQLZERO, "")
	// d.AddColumn("id", "int", SQLZERO, "")
	// d.AddColumn("foo", "varchar(10)", SQLZERO, "")
	// d.AddColumn("bar", "varchar(10)", SQLZERO, "")
	// dcolumns := []string{"name"}
	// d.Indexes = append(d.Indexes, &schema.Index{Name: "PRIMARY", Columns: []string{"name"}, Cardinality: []uint64{1}, DataColumns: dcolumns})
	// d.Indexes = append(d.Indexes, &schema.Index{Name: "d_id", Columns: []string{"id"}, Cardinality: []uint64{1}, DataColumns: d.Indexes[0].Columns})
	// d.Indexes = append(d.Indexes, &schema.Index{Name: "d_bar_never", Columns: []string{"bar", "foo"}, Cardinality: []uint64{2, 1}, DataColumns: d.Indexes[0].Columns})
	// d.Indexes = append(d.Indexes, &schema.Index{Name: "d_bar", Columns: []string{"bar", "foo"}, Cardinality: []uint64{3, 1}, DataColumns: d.Indexes[0].Columns})
	// d.PKColumns = append(d.PKColumns, 0)
	// d.CacheType = schema.CACHE_RW
	// schem["d"] = d
	// e := schema.NewTable("e")
	// e.AddColumn("eid", "int", SQLZERO, "")
	// e.AddColumn("id", "int", SQLZERO, "")
	// ecolumns := []string{"eid", "id"}
	// e.Indexes = append(e.Indexes, &schema.Index{Name: "PRIMARY", Columns: []string{"eid", "id"}, Cardinality: []uint64{1, 1}, DataColumns: ecolumns})
	// e.PKColumns = append(a.PKColumns, 0, 1)
	// e.CacheType = schema.CACHE_W
	// schem["e"] = e
	return a
}
"github.com/senarukana/rationaldb/sqltypes" "github.com/youtube/vitess/go/testfiles" "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/schema" ) func TestGen(t *testing.T) { _, err := Parse("select :1 from a where a in (:1)") if err != nil { t.Error(err) } } var ( SQLZERO = sqltypes.MakeString([]byte("0")) ) var schem map[string]*schema.Table func initTables() { schem = make(map[string]*schema.Table) a := schema.NewTable("a") a.AddColumn("eid", "int", SQLZERO, "") a.AddColumn("id", "int", SQLZERO, "") a.AddColumn("name", "varchar(10)", SQLZERO, "") a.AddColumn("foo", "varchar(10)", SQLZERO, "") acolumns := []string{"eid", "id", "name", "foo"} a.Indexes = append(a.Indexes, &schema.Index{Name: "PRIMARY", Columns: []string{"eid", "id"}, Cardinality: []uint64{1, 1}, DataColumns: acolumns}) a.Indexes = append(a.Indexes, &schema.Index{Name: "a_name", Columns: []string{"eid", "name"}, Cardinality: []uint64{1, 1}, DataColumns: a.Indexes[0].Columns})