func benchmarkWriteArray(b *testing.B, format formatCode) {
	i1 := parser.NewDInt(1234)
	i2 := parser.NewDInt(1234)
	i3 := parser.NewDInt(1234)
	a := &parser.DArray{i1, i2, i3}
	benchmarkWriteType(b, a, format)
}
// Events is an endpoint that returns the latest event log entries, with the
// following optional URL parameters:
//
//	type=STRING  returns events with this type (e.g. "create_table")
//	targetID=INT returns events that have this targetID
func (s *adminServer) Events(
	ctx context.Context, req *serverpb.EventsRequest,
) (*serverpb.EventsResponse, error) {
	args := sql.SessionArgs{User: s.getUser(req)}
	session := s.NewSessionForRPC(ctx, args)
	defer session.Finish(s.server.sqlExecutor)

	// Execute the query.
	q := makeSQLQuery()
	q.Append("SELECT timestamp, eventType, targetID, reportingID, info, uniqueID ")
	q.Append("FROM system.eventlog ")
	q.Append("WHERE true ") // This simplifies the WHERE clause logic below.
	if len(req.Type) > 0 {
		q.Append("AND eventType = $ ", parser.NewDString(req.Type))
	}
	if req.TargetId > 0 {
		q.Append("AND targetID = $ ", parser.NewDInt(parser.DInt(req.TargetId)))
	}
	q.Append("ORDER BY timestamp DESC ")
	q.Append("LIMIT $", parser.NewDInt(parser.DInt(apiEventLimit)))
	if len(q.Errors()) > 0 {
		return nil, s.serverErrors(q.Errors())
	}
	r := s.server.sqlExecutor.ExecuteStatements(session, q.String(), q.QueryArguments())
	defer r.Close()
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return nil, s.serverError(err)
	}

	// Marshal response.
	var resp serverpb.EventsResponse
	scanner := makeResultScanner(r.ResultList[0].Columns)
	for i, nRows := 0, r.ResultList[0].Rows.Len(); i < nRows; i++ {
		row := r.ResultList[0].Rows.At(i)
		var event serverpb.EventsResponse_Event
		var ts time.Time
		if err := scanner.ScanIndex(row, 0, &ts); err != nil {
			return nil, err
		}
		event.Timestamp = serverpb.EventsResponse_Event_Timestamp{
			Sec: ts.Unix(), Nsec: uint32(ts.Nanosecond()),
		}
		if err := scanner.ScanIndex(row, 1, &event.EventType); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 2, &event.TargetID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 3, &event.ReportingID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 4, &event.Info); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 5, &event.UniqueID); err != nil {
			return nil, err
		}
		resp.Events = append(resp.Events, event)
	}
	return &resp, nil
}
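// For illustration only: a client exercises the optional filters through URL
// parameters. The route and port below are assumptions, not taken from this
// code; only the parameter names come from the handler above.
//
//	resp, err := http.Get(
//		"http://localhost:8080/_admin/v1/events?type=create_table&targetID=53")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()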
func checkEquivExpr(a, b parser.TypedExpr, sel *selectNode) error {
	// The expressions above only use the values 1 and 2. Verify that the
	// simplified expressions evaluate to the same value as the original
	// expression for interesting values.
	for _, v := range []parser.Datum{
		parser.NewDInt(0),
		parser.NewDInt(1),
		parser.NewDInt(2),
		parser.NewDInt(3),
		parser.DNull,
	} {
		for i := range sel.curSourceRow {
			sel.curSourceRow[i] = v
		}
		ctx := &parser.EvalContext{}
		da, err := a.Eval(ctx)
		if err != nil {
			return fmt.Errorf("%s: %v", a, err)
		}
		db, err := b.Eval(ctx)
		if err != nil {
			return fmt.Errorf("%s: %v", b, err)
		}
		// This is tricky: we don't require the expressions to produce identical
		// results, but to either both return true or both return not true
		// (either false or NULL).
		if (da == parser.DBoolTrue) != (db == parser.DBoolTrue) {
			return fmt.Errorf("%s: %s: expected %s, but found %s", a, v, da, db)
		}
	}
	return nil
}
// golangFillQueryArguments populates the placeholder map with
// types and values from an array of Go values.
// TODO: This does not support arguments of the SQL 'Date' type, as there is
// no equivalent type in Go's standard library. It's not currently needed by
// any of our internal tables.
func golangFillQueryArguments(pinfo *parser.PlaceholderInfo, args []interface{}) {
	pinfo.Clear()

	for i, arg := range args {
		k := fmt.Sprint(i + 1)
		if arg == nil {
			pinfo.SetValue(k, parser.DNull)
			continue
		}

		// A type switch to handle a few explicit types with special semantics:
		// - Datums are passed along as is.
		// - Time datatypes get special representation in the database.
		var d parser.Datum
		switch t := arg.(type) {
		case parser.Datum:
			d = t
		case time.Time:
			d = parser.MakeDTimestamp(t, time.Microsecond)
		case time.Duration:
			d = &parser.DInterval{Duration: duration.Duration{Nanos: t.Nanoseconds()}}
		case *inf.Dec:
			dd := &parser.DDecimal{}
			dd.Set(t)
			d = dd
		}
		if d == nil {
			// Handle all types which have an underlying type that can be
			// stored in the database.
			// Note: if this reflection becomes a performance concern in the
			// future, commonly used types could be added explicitly into the
			// type switch above for a performance gain.
			val := reflect.ValueOf(arg)
			switch val.Kind() {
			case reflect.Bool:
				d = parser.MakeDBool(parser.DBool(val.Bool()))
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				d = parser.NewDInt(parser.DInt(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				d = parser.NewDInt(parser.DInt(val.Uint()))
			case reflect.Float32, reflect.Float64:
				d = parser.NewDFloat(parser.DFloat(val.Float()))
			case reflect.String:
				d = parser.NewDString(val.String())
			case reflect.Slice:
				// Handle byte slices.
				if val.Type().Elem().Kind() == reflect.Uint8 {
					d = parser.NewDBytes(parser.DBytes(val.Bytes()))
				}
			}
			if d == nil {
				panic(fmt.Sprintf("unexpected type %T", arg))
			}
		}
		pinfo.SetValue(k, d)
	}
}
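// A minimal usage sketch for golangFillQueryArguments; exampleArgs is a
// hypothetical helper shown only to illustrate the call shape. The keys "1",
// "2", ... produced here line up with the $1, $2, ... placeholders of the
// statement being executed.
func exampleArgs() *parser.PlaceholderInfo {
	pinfo := parser.NewPlaceholderInfo()
	// Binds $1 = 42 (a DInt, via the reflection path), $2 = "hello"
	// (a DString), and $3 = NULL.
	golangFillQueryArguments(pinfo, []interface{}{42, "hello", nil})
	return pinfo
}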
func benchmarkWriteTuple(b *testing.B, format formatCode) {
	i := parser.NewDInt(1234)
	f := parser.NewDFloat(12.34)
	s := parser.NewDString("testing")
	t := &parser.DTuple{i, f, s}
	benchmarkWriteType(b, t, format)
}
// queryZone retrieves the specific ZoneConfig associated with the supplied ID,
// if it exists.
func (s *adminServer) queryZone(
	session *sql.Session, id sqlbase.ID,
) (config.ZoneConfig, bool, error) {
	const query = `SELECT config FROM system.zones WHERE id = $1`
	params := parser.NewPlaceholderInfo()
	params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
	r := s.server.sqlExecutor.ExecuteStatements(session, query, params)
	defer r.Close()
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return config.ZoneConfig{}, false, err
	}

	result := r.ResultList[0]
	if result.Rows.Len() == 0 {
		return config.ZoneConfig{}, false, nil
	}

	var zoneBytes []byte
	scanner := resultScanner{}
	err := scanner.ScanIndex(result.Rows.At(0), 0, &zoneBytes)
	if err != nil {
		return config.ZoneConfig{}, false, err
	}

	var zone config.ZoneConfig
	if err := zone.Unmarshal(zoneBytes); err != nil {
		return config.ZoneConfig{}, false, err
	}
	return zone, true, nil
}
// populateExplain invokes explain() with a makeRow method
// which populates a valuesNode.
func (e *explainer) populateExplain(v *valuesNode, plan planNode) error {
	e.makeRow = func(level int, name, field, description string, plan planNode) {
		if e.err != nil {
			return
		}
		row := parser.DTuple{
			parser.NewDInt(parser.DInt(level)),
			parser.NewDString(name),
			parser.NewDString(field),
			parser.NewDString(description),
		}
		if e.showMetadata {
			if plan != nil {
				row = append(row, parser.NewDString(formatColumns(plan.Columns(), e.showTypes)))
				row = append(row, parser.NewDString(plan.Ordering().AsString(plan.Columns())))
			} else {
				row = append(row, emptyString, emptyString)
			}
		}
		if _, err := v.rows.AddRow(row); err != nil {
			e.err = err
		}
	}

	e.err = nil
	e.explain(plan)
	return e.err
}
// queryNamespaceID queries for the ID of the namespace with the given name and
// parent ID.
func (s *adminServer) queryNamespaceID(
	session *sql.Session, parentID sqlbase.ID, name string,
) (sqlbase.ID, error) {
	const query = `SELECT id FROM system.namespace WHERE parentID = $1 AND name = $2`
	params := parser.NewPlaceholderInfo()
	params.SetValue(`1`, parser.NewDInt(parser.DInt(parentID)))
	params.SetValue(`2`, parser.NewDString(name))
	r := s.server.sqlExecutor.ExecuteStatements(session, query, params)
	defer r.Close()
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return 0, err
	}

	result := r.ResultList[0]
	if result.Rows.Len() == 0 {
		return 0, errors.Errorf("namespace %s with ParentID %d not found", name, parentID)
	}

	var id int64
	scanner := resultScanner{}
	err := scanner.ScanIndex(result.Rows.At(0), 0, &id)
	if err != nil {
		return 0, err
	}

	return sqlbase.ID(id), nil
}
func benchmarkWriteArray(b *testing.B, format formatCode) {
	a := parser.NewDArray(parser.TypeInt)
	for i := 0; i < 3; i++ {
		if err := a.Append(parser.NewDInt(parser.DInt(1234))); err != nil {
			b.Fatal(err)
		}
	}
	benchmarkWriteType(b, a, format)
}
func (o *ordinalityNode) Next() (bool, error) {
	hasNext, err := o.source.Next()
	if !hasNext || err != nil {
		return hasNext, err
	}
	copy(o.row, o.source.Values())
	// o.row was allocated one spot larger than o.source.Values().
	// Store the ordinality value there.
	o.row[len(o.row)-1] = parser.NewDInt(parser.DInt(o.curCnt))
	o.curCnt++
	return true, nil
}
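// In SQL terms (illustration only), this node backs WITH ORDINALITY: every
// source row is passed through unchanged with a row counter appended as an
// extra INT column, e.g.
//
//	SELECT * FROM t WITH ORDINALITY
//
// The counter's starting value is set where ordinalityNode is constructed
// (not shown here); Next only copies the source row and appends o.curCnt.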
// colIDArrayToDatum returns an int[] containing the ColumnIDs, or NULL if
// there are no ColumnIDs.
func colIDArrayToDatum(arr []sqlbase.ColumnID) (parser.Datum, error) {
	if len(arr) == 0 {
		return parser.DNull, nil
	}
	d := parser.NewDArray(parser.TypeInt)
	for _, val := range arr {
		if err := d.Append(parser.NewDInt(parser.DInt(val))); err != nil {
			return nil, err
		}
	}
	return d, nil
}
func TestRowContainer(t *testing.T) {
	defer leaktest.AfterTest(t)()

	for _, numCols := range []int{1, 2, 3, 5, 10, 15} {
		for _, numRows := range []int{5, 10, 100} {
			for _, numPops := range []int{0, 1, 2, numRows / 3, numRows / 2} {
				resCol := make(ResultColumns, numCols)
				for i := range resCol {
					resCol[i] = ResultColumn{Typ: parser.TypeInt}
				}
				m := mon.MakeUnlimitedMonitor(context.Background(), "test", nil, nil, math.MaxInt64)
				rc := NewRowContainer(m.MakeBoundAccount(context.Background()), resCol, 0)
				row := make(parser.DTuple, numCols)
				for i := 0; i < numRows; i++ {
					for j := range row {
						row[j] = parser.NewDInt(parser.DInt(i*numCols + j))
					}
					if err := rc.AddRow(row); err != nil {
						t.Fatal(err)
					}
				}

				for i := 0; i < numPops; i++ {
					rc.PopFirst()
				}

				// Given that we just deleted numPops rows, we have numRows -
				// numPops rows remaining.
				if rc.Len() != numRows-numPops {
					t.Fatalf("invalid length, expected %d got %d", numRows-numPops, rc.Len())
				}

				// What was previously rc.At(i + numPops) is now rc.At(i).
				for i := 0; i < rc.Len(); i++ {
					row := rc.At(i)
					for j := range row {
						dint, ok := row[j].(*parser.DInt)
						if !ok || int(*dint) != (i+numPops)*numCols+j {
							t.Fatalf("invalid value %+v on row %d, col %d", row[j], i+numPops, j)
						}
					}
				}
			}
		}
	}
}
// RandDatum generates a random Datum of the given type.
// If null is true, the datum can be DNull.
func RandDatum(rng *rand.Rand, typ ColumnType_Kind, null bool) parser.Datum {
	if null && rng.Intn(10) == 0 {
		return parser.DNull
	}
	switch typ {
	case ColumnType_BOOL:
		return parser.MakeDBool(rng.Intn(2) == 1)
	case ColumnType_INT:
		return parser.NewDInt(parser.DInt(rng.Int63()))
	case ColumnType_FLOAT:
		return parser.NewDFloat(parser.DFloat(rng.NormFloat64()))
	case ColumnType_DECIMAL:
		d := &parser.DDecimal{}
		d.Dec.SetScale(inf.Scale(rng.Intn(40) - 20))
		d.Dec.SetUnscaled(rng.Int63())
		return d
	case ColumnType_DATE:
		return parser.NewDDate(parser.DDate(rng.Intn(10000)))
	case ColumnType_TIMESTAMP:
		return &parser.DTimestamp{Time: time.Unix(rng.Int63n(1000000), rng.Int63n(1000000))}
	case ColumnType_INTERVAL:
		return &parser.DInterval{Duration: duration.Duration{
			Months: rng.Int63n(1000),
			Days:   rng.Int63n(1000),
			Nanos:  rng.Int63n(1000000),
		}}
	case ColumnType_STRING:
		// Generate a random ASCII string.
		p := make([]byte, rng.Intn(10))
		for i := range p {
			p[i] = byte(1 + rng.Intn(127))
		}
		return parser.NewDString(string(p))
	case ColumnType_BYTES:
		p := make([]byte, rng.Intn(10))
		_, _ = rng.Read(p)
		return parser.NewDBytes(parser.DBytes(p))
	case ColumnType_TIMESTAMPTZ:
		return &parser.DTimestampTZ{Time: time.Unix(rng.Int63n(1000000), rng.Int63n(1000000))}
	case ColumnType_INT_ARRAY:
		// TODO(cuongdo): we don't support persistence of arrays yet
		return parser.DNull
	default:
		panic(fmt.Sprintf("invalid type %s", typ))
	}
}
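// A small sketch of how RandDatum composes into a random row; randRow is a
// hypothetical helper and the column types chosen here are arbitrary.
func randRow(rng *rand.Rand) []parser.Datum {
	types := []ColumnType_Kind{ColumnType_INT, ColumnType_STRING, ColumnType_BOOL}
	row := make([]parser.Datum, len(types))
	for i, typ := range types {
		// Allow NULLs; per the code above, RandDatum returns DNull roughly
		// 10% of the time when null is true.
		row[i] = RandDatum(rng, typ, true)
	}
	return row
}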
// MakePrimaryIndexKey creates a key prefix that corresponds to a table row
// (in the primary index); it is intended for tests.
//
// The value types must match the primary key columns (or a prefix of them);
// supported types are:
//   - Datum
//   - bool (converts to DBool)
//   - int (converts to DInt)
//   - string (converts to DString)
func MakePrimaryIndexKey(desc *TableDescriptor, vals ...interface{}) (roachpb.Key, error) {
	index := &desc.PrimaryIndex
	if len(vals) > len(index.ColumnIDs) {
		return nil, errors.Errorf("got %d values, PK has %d columns", len(vals), len(index.ColumnIDs))
	}
	datums := make([]parser.Datum, len(vals))
	for i, v := range vals {
		switch v := v.(type) {
		case bool:
			datums[i] = parser.MakeDBool(parser.DBool(v))
		case int:
			datums[i] = parser.NewDInt(parser.DInt(v))
		case string:
			datums[i] = parser.NewDString(v)
		case parser.Datum:
			datums[i] = v
		default:
			return nil, errors.Errorf("unexpected value type %T", v)
		}
		// Check that the value type matches.
		colID := index.ColumnIDs[i]
		for _, c := range desc.Columns {
			if c.ID == colID {
				if t := DatumTypeToColumnKind(datums[i].ResolvedType()); t != c.Type.Kind {
					return nil, errors.Errorf("column %d of type %s, got value of type %s", i, c.Type.Kind, t)
				}
				break
			}
		}
	}
	// Create the ColumnID to index in datums slice map needed by
	// MakeIndexKeyPrefix.
	colIDToRowIndex := make(map[ColumnID]int)
	for i := range vals {
		colIDToRowIndex[index.ColumnIDs[i]] = i
	}

	keyPrefix := MakeIndexKeyPrefix(desc, index.ID)
	key, _, err := EncodeIndexKey(desc, index, colIDToRowIndex, datums, keyPrefix)
	if err != nil {
		return nil, err
	}
	return roachpb.Key(key), nil
}
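// A usage sketch, assuming a table whose primary key is (INT, STRING);
// examplePKKey is hypothetical and shown only to illustrate the call shape.
func examplePKKey(desc *TableDescriptor) (roachpb.Key, error) {
	// The int converts to a DInt and the string to a DString, per the list
	// in the comment above; a prefix of the PK columns also works.
	return MakePrimaryIndexKey(desc, 1, "foo")
}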
func (p *planner) validateCheckExpr(
	exprStr string, tableName parser.TableExpr, tableDesc *sqlbase.TableDescriptor,
) error {
	expr, err := parser.ParseExprTraditional(exprStr)
	if err != nil {
		return err
	}
	sel := &parser.SelectClause{
		Exprs: sqlbase.ColumnsSelectors(tableDesc.Columns),
		From:  &parser.From{Tables: parser.TableExprs{tableName}},
		Where: &parser.Where{Expr: &parser.NotExpr{Expr: expr}},
	}
	lim := &parser.Limit{Count: parser.NewDInt(1)}
	// This could potentially use a variant of planner.SelectClause that could
	// use the tableDesc we have, but this is a rare operation and the benefit
	// would be marginal compared to the work of the actual query, so the added
	// complexity seems unjustified.
	rows, err := p.SelectClause(sel, nil, lim, nil, publicColumns)
	if err != nil {
		return err
	}
	rows, err = p.optimizePlan(rows, allColumns(rows))
	if err != nil {
		return err
	}
	if err := p.startPlan(rows); err != nil {
		return err
	}
	next, err := rows.Next()
	if err != nil {
		return err
	}
	if next {
		return errors.Errorf("validation of CHECK %q failed on row: %s",
			expr.String(), labeledRowValues(tableDesc.Columns, rows.Values()))
	}
	return nil
}
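// For a constraint such as CHECK (a > 0) on table t, the plan built above is
// equivalent to running (illustration only):
//
//	SELECT <all public columns> FROM t WHERE NOT (a > 0) LIMIT 1
//
// Any row returned is a counterexample, which is why a successful Next()
// produces the validation error in the function above.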
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	//  | a        | b        | sum                 | s                   |
	//  |-----------------------------------------------------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |

	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	testCases := []struct {
		spec     TableReaderSpec
		expected string
	}{
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"},
				OutputColumns: []uint32{3}, // s
				HardLimit:     4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: TableReaderSpec{
				IndexIdx:      1,
				Reverse:       true,
				Spans:         []TableReaderSpan{makeIndexSpan(4, 6)},
				Filter:        Expression{Expr: "@1 < 3"}, // a < 3
				OutputColumns: []uint32{0, 1},
				SoftLimit:     1,
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		ts := c.spec
		ts.Table = *td

		flowCtx := FlowCtx{
			Context:  context.Background(),
			evalCtx:  &parser.EvalContext{},
			txnProto: &roachpb.Transaction{},
			clientDB: kvDB,
		}

		out := &RowBuffer{}
		tr, err := newTableReader(&flowCtx, &ts, out)
		if err != nil {
			t.Fatal(err)
		}
		tr.Run(nil)
		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s", result, c.expected)
		}
	}
}
func TestEncDatum(t *testing.T) {
	defer leaktest.AfterTest(t)()

	a := &DatumAlloc{}
	v := EncDatum{}
	if !v.IsUnset() {
		t.Errorf("empty EncDatum should be unset")
	}
	if _, ok := v.Encoding(); ok {
		t.Errorf("empty EncDatum has an encoding")
	}

	x := DatumToEncDatum(ColumnType{Kind: ColumnType_INT}, parser.NewDInt(5))
	if x.IsUnset() {
		t.Errorf("unset after DatumToEncDatum()")
	}
	if x.IsNull() {
		t.Errorf("null after DatumToEncDatum()")
	}

	encoded, err := x.Encode(a, DatumEncoding_ASCENDING_KEY, nil)
	if err != nil {
		t.Fatal(err)
	}

	y := EncDatumFromEncoded(ColumnType{Kind: ColumnType_INT}, DatumEncoding_ASCENDING_KEY, encoded)
	if y.IsUnset() {
		t.Errorf("unset after EncDatumFromEncoded")
	}
	if y.IsNull() {
		t.Errorf("null after EncDatumFromEncoded")
	}
	if enc, ok := y.Encoding(); !ok {
		t.Error("no encoding after EncDatumFromEncoded")
	} else if enc != DatumEncoding_ASCENDING_KEY {
		t.Errorf("invalid encoding %d", enc)
	}
	err = y.EnsureDecoded(a)
	if err != nil {
		t.Fatal(err)
	}
	if cmp := y.Datum.Compare(x.Datum); cmp != 0 {
		t.Errorf("Datums should be equal, cmp = %d", cmp)
	}

	enc2, err := y.Encode(a, DatumEncoding_DESCENDING_KEY, nil)
	if err != nil {
		t.Fatal(err)
	}
	// y's encoding should not change.
	if enc, ok := y.Encoding(); !ok {
		t.Error("no encoding")
	} else if enc != DatumEncoding_ASCENDING_KEY {
		t.Errorf("invalid encoding %d", enc)
	}

	z := EncDatumFromEncoded(ColumnType{Kind: ColumnType_INT}, DatumEncoding_DESCENDING_KEY, enc2)
	if enc, ok := z.Encoding(); !ok {
		t.Error("no encoding")
	} else if enc != DatumEncoding_DESCENDING_KEY {
		t.Errorf("invalid encoding %d", enc)
	}
	if z.IsNull() {
		t.Errorf("null after EncDatumFromEncoded")
	}
	err = z.EnsureDecoded(a)
	if err != nil {
		t.Fatal(err)
	}
	if cmp := y.Datum.Compare(z.Datum); cmp != 0 {
		t.Errorf("Datums should be equal, cmp = %d", cmp)
	}
	y.UnsetDatum()
	if !y.IsUnset() {
		t.Error("not unset after UnsetDatum()")
	}
}
func TestSorter(t *testing.T) {
	defer leaktest.AfterTest(t)()

	v := [6]sqlbase.EncDatum{}
	for i := range v {
		v[i].SetDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}

	asc := encoding.Ascending
	desc := encoding.Descending

	testCases := []struct {
		spec     SorterSpec
		input    sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			// No specified input ordering and unspecified limit.
			spec: SorterSpec{
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: asc},
						{ColIdx: 1, Direction: desc},
						{ColIdx: 2, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[4]},
				{v[3], v[2], v[0]},
				{v[4], v[4], v[5]},
				{v[3], v[3], v[0]},
				{v[0], v[0], v[0]},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0]},
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[3], v[3], v[0]},
				{v[3], v[2], v[0]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
			},
		},
		{
			// No specified input ordering but specified limit.
			spec: SorterSpec{
				Limit: 4,
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: asc},
						{ColIdx: 1, Direction: asc},
						{ColIdx: 2, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[3], v[3], v[0]},
				{v[3], v[4], v[1]},
				{v[1], v[0], v[4]},
				{v[0], v[0], v[0]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
				{v[3], v[2], v[0]},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0]},
				{v[1], v[0], v[4]},
				{v[3], v[2], v[0]},
				{v[3], v[3], v[0]},
			},
		},
		{
			// Specified match ordering length but no specified limit.
			spec: SorterSpec{
				OrderingMatchLen: 2,
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: asc},
						{ColIdx: 1, Direction: asc},
						{ColIdx: 2, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[0], v[1], v[2]},
				{v[0], v[1], v[0]},
				{v[1], v[0], v[5]},
				{v[1], v[1], v[5]},
				{v[1], v[1], v[4]},
				{v[3], v[4], v[3]},
				{v[3], v[4], v[2]},
				{v[3], v[5], v[1]},
				{v[4], v[4], v[5]},
				{v[4], v[4], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[1], v[0]},
				{v[0], v[1], v[2]},
				{v[1], v[0], v[5]},
				{v[1], v[1], v[4]},
				{v[1], v[1], v[5]},
				{v[3], v[4], v[2]},
				{v[3], v[4], v[3]},
				{v[3], v[5], v[1]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
			},
		},
		{
			// Specified input ordering but no specified limit.
			spec: SorterSpec{
				OrderingMatchLen: 2,
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 1, Direction: asc},
						{ColIdx: 2, Direction: asc},
						{ColIdx: 3, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[1], v[2], v[5]},
				{v[0], v[1], v[2], v[4]},
				{v[0], v[1], v[2], v[3]},
				{v[1], v[1], v[2], v[2]},
				{v[1], v[2], v[2], v[5]},
				{v[0], v[2], v[2], v[4]},
				{v[0], v[2], v[2], v[3]},
				{v[1], v[2], v[2], v[2]},
			},
			expected: sqlbase.EncDatumRows{
				{v[1], v[1], v[2], v[2]},
				{v[0], v[1], v[2], v[3]},
				{v[0], v[1], v[2], v[4]},
				{v[1], v[1], v[2], v[5]},
				{v[1], v[2], v[2], v[2]},
				{v[0], v[2], v[2], v[3]},
				{v[0], v[2], v[2], v[4]},
				{v[1], v[2], v[2], v[5]},
			},
		},
	}

	for _, c := range testCases {
		ss := c.spec
		in := &RowBuffer{rows: c.input}
		out := &RowBuffer{}
		flowCtx := FlowCtx{Context: context.Background()}

		s := newSorter(&flowCtx, &ss, in, out)
		s.Run(nil)

		var retRows sqlbase.EncDatumRows
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			retRows = append(retRows, row)
		}
		expStr := c.expected.String()
		retStr := retRows.String()
		if expStr != retStr {
			t.Errorf("invalid results; expected:\n %s\ngot:\n %s", expStr, retStr)
		}
	}
}
// TestAdminAPIZoneDetails verifies the zone configuration information returned
// for both DatabaseDetailsResponse AND TableDetailsResponse.
func TestAdminAPIZoneDetails(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	// Create database and table.
	ac := log.AmbientContext{Tracer: tracing.NewTracer()}
	ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test")
	defer span.Finish()
	session := sql.NewSession(
		ctx, sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil, &sql.MemoryMetrics{})
	session.StartUnlimitedMonitor()
	setupQueries := []string{
		"CREATE DATABASE test",
		"CREATE TABLE test.tbl (val STRING)",
	}
	for _, q := range setupQueries {
		res := ts.sqlExecutor.ExecuteStatements(session, q, nil)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
		}
	}

	// Function to verify the zone for table "test.tbl" as returned by the
	// Admin API.
	verifyTblZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.TableDetailsResponse
		if err := getAdminJSONProto(s, "databases/test/tables/tbl", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual table zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual table ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to verify the zone for database "test" as returned by the
	// Admin API.
	verifyDbZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.DatabaseDetailsResponse
		if err := getAdminJSONProto(s, "databases/test", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual db zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual db ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to store a zone config for a given object ID.
	setZone := func(zoneCfg config.ZoneConfig, id sqlbase.ID) {
		zoneBytes, err := zoneCfg.Marshal()
		if err != nil {
			t.Fatal(err)
		}
		const query = `INSERT INTO system.zones VALUES($1, $2)`
		params := parser.NewPlaceholderInfo()
		params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
		params.SetValue(`2`, parser.NewDBytes(parser.DBytes(zoneBytes)))
		res := ts.sqlExecutor.ExecuteStatements(session, query, params)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", query, res.ResultList[0].Err)
		}
	}

	// Verify zone matches cluster default.
	verifyDbZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)
	verifyTblZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)

	// Get ID path for table. This will be an array of three IDs, containing
	// the ID of the root namespace, the database, and the table (in that
	// order).
	idPath, err := ts.admin.queryDescriptorIDPath(session, []string{"test", "tbl"})
	if err != nil {
		t.Fatal(err)
	}

	// Apply zone configuration to database and check again.
	dbZone := config.ZoneConfig{
		RangeMinBytes: 456,
	}
	setZone(dbZone, idPath[1])
	verifyDbZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
	verifyTblZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)

	// Apply zone configuration to table and check again.
	tblZone := config.ZoneConfig{
		RangeMinBytes: 789,
	}
	setZone(tblZone, idPath[2])
	verifyDbZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
	verifyTblZone(tblZone, serverpb.ZoneConfigurationLevel_TABLE)
}
func TestDistinct(t *testing.T) {
	defer leaktest.AfterTest(t)()

	v := [15]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(sqlbase.ColumnType{Kind: sqlbase.ColumnType_INT},
			parser.NewDInt(parser.DInt(i)))
	}

	testCases := []struct {
		spec     DistinctSpec
		input    sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			spec: DistinctSpec{},
			input: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[5], v[6]},
				{v[2], v[3]},
				{v[5], v[6]},
				{v[2], v[6]},
				{v[3], v[5]},
				{v[2], v[9]},
			},
			expected: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[5], v[6]},
				{v[2], v[6]},
				{v[3], v[5]},
				{v[2], v[9]},
			},
		},
		{
			spec: DistinctSpec{
				OrderedColumns: []uint32{1},
			},
			input: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[2], v[3]},
				{v[2], v[6]},
				{v[2], v[9]},
				{v[3], v[5]},
				{v[5], v[6]},
				{v[5], v[6]},
			},
			expected: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[2], v[6]},
				{v[2], v[9]},
				{v[3], v[5]},
				{v[5], v[6]},
			},
		},
	}

	for _, c := range testCases {
		ds := c.spec

		in := NewRowBuffer(nil, c.input)
		out := &RowBuffer{}

		flowCtx := FlowCtx{
			Context: context.Background(),
		}

		d, err := newDistinct(&flowCtx, &ds, in, out)
		if err != nil {
			t.Fatal(err)
		}

		d.Run(nil)
		if out.Err != nil {
			t.Fatal(out.Err)
		}
		if !out.Closed {
			t.Fatalf("output RowReceiver not closed")
		}

		if result := out.Rows.String(); result != c.expected.String() {
			t.Errorf("invalid results: %s, expected %s", result, c.expected.String())
		}
	}
}
func TestMergeJoiner(t *testing.T) {
	defer leaktest.AfterTest(t)()

	v := [6]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}
	null := sqlbase.EncDatum{Datum: parser.DNull}

	testCases := []struct {
		spec     MergeJoinerSpec
		inputs   []sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
			},
		},
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[1]},
				{v[0], v[0], v[0]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[1]},
				{v[0], v[1], v[0]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
			},
		},
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				Expr:          Expression{Expr: "@4 >= 4"},
				// Implicit AND @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
					{v[1], v[0]},
					{v[1], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
					{v[1], v[4]},
					{v[1], v[1]},
					{v[1], v[0]},
					{v[1], v[5]},
					{v[1], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
				{v[1], v[0], v[4]},
				{v[1], v[0], v[5]},
				{v[1], v[0], v[4]},
				{v[1], v[1], v[4]},
				{v[1], v[1], v[5]},
				{v[1], v[1], v[4]},
			},
		},
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_LEFT_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_RIGHT_OUTER,
				OutputColumns: []uint32{3, 1, 2},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_FULL_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
					{v[5], v[5], v[1]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{null, v[5], v[1]},
			},
		},
	}

	for _, c := range testCases {
		ms := c.spec
		inputs := []RowSource{&RowBuffer{rows: c.inputs[0]}, &RowBuffer{rows: c.inputs[1]}}
		out := &RowBuffer{}
		flowCtx := FlowCtx{Context: context.Background(), evalCtx: &parser.EvalContext{}}

		m, err := newMergeJoiner(&flowCtx, &ms, inputs, out)
		if err != nil {
			t.Fatal(err)
		}

		m.Run(nil)

		var retRows sqlbase.EncDatumRows
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			retRows = append(retRows, row)
		}
		expStr := c.expected.String()
		retStr := retRows.String()
		if expStr != retStr {
			t.Errorf("invalid results; expected:\n %s\ngot:\n %s", expStr, retStr)
		}
	}
}
// RowIdxFn is a GenValueFn that returns the row number as a DInt.
func RowIdxFn(row int) parser.Datum {
	return parser.NewDInt(parser.DInt(row))
}
func TestEncDatumRowCompare(t *testing.T) {
	defer leaktest.AfterTest(t)()

	v := [5]EncDatum{}
	for i := range v {
		v[i] = DatumToEncDatum(ColumnType{Kind: ColumnType_INT}, parser.NewDInt(parser.DInt(i)))
	}

	asc := encoding.Ascending
	desc := encoding.Descending

	testCases := []struct {
		row1, row2 EncDatumRow
		ord        ColumnOrdering
		cmp        int
	}{
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{},
			cmp:  0,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{1, desc}},
			cmp:  0,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{0, asc}, {1, desc}},
			cmp:  0,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{2, asc}},
			cmp:  -1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[3]},
			row2: EncDatumRow{v[0], v[1], v[2]},
			ord:  ColumnOrdering{{2, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{2, asc}, {0, asc}, {1, asc}},
			cmp:  -1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{0, asc}, {2, desc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{1, desc}, {0, asc}, {2, desc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{0, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{1, desc}, {0, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{1, asc}, {0, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{1, asc}, {0, desc}},
			cmp:  -1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{0, desc}, {1, asc}},
			cmp:  -1,
		},
	}

	a := &DatumAlloc{}
	for _, c := range testCases {
		cmp, err := c.row1.Compare(a, c.ord, c.row2)
		if err != nil {
			t.Error(err)
		} else if cmp != c.cmp {
			t.Errorf("%s cmp %s ordering %v got %d, expected %d",
				c.row1, c.row2, c.ord, cmp, c.cmp)
		}
	}
}
func benchmarkWriteInt(b *testing.B, format formatCode) {
	benchmarkWriteType(b, parser.NewDInt(1234), format)
}
// RowModuloFn creates a GenValueFn that returns the row number modulo a given
// value as a DInt.
func RowModuloFn(modulo int) GenValueFn {
	return func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % modulo))
	}
}
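// A usage sketch combining the GenValueFn helpers with the table-creation
// utilities used elsewhere in these tests; the table name and schema here
// are hypothetical.
//
//	sqlutils.CreateTable(t, sqlDB, "seq",
//		"id INT PRIMARY KEY, m INT",
//		10, /* numRows */
//		sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(3)))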
// TODO(irfansharif): Add tests to verify the following aggregation functions:
//	AVG
//	BOOL_AND
//	BOOL_OR
//	CONCAT_AGG
//	STDDEV
//	VARIANCE
// TODO(irfansharif): Replicate sql/testdata and TestLogic for distsql; this
// kind of manual case-by-case testing is error prone, making it very easy to
// miss edge cases.
func TestAggregator(t *testing.T) {
	defer leaktest.AfterTest(t)()

	v := [15]sqlbase.EncDatum{}
	for i := range v {
		v[i].SetDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}

	testCases := []struct {
		spec     AggregatorSpec
		input    sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			// SELECT $1, COUNT($0), GROUP BY $1.
			spec: AggregatorSpec{
				Types:     []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				GroupCols: []uint32{1},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_IDENT,
						ColIdx: 1,
					},
					{
						Func:   AggregatorSpec_COUNT,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[2]},
				{v[3], v[4]},
				{v[6], v[2]},
				{v[7], v[2]},
				{v[8], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[4], v[2]},
				{v[2], v[3]},
			},
		},
		{
			// SELECT $1, SUM($0), GROUP BY $1.
			spec: AggregatorSpec{
				Types:     []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				GroupCols: []uint32{1},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_IDENT,
						ColIdx: 1,
					},
					{
						Func:   AggregatorSpec_SUM,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[2]},
				{v[3], v[4]},
				{v[6], v[2]},
				{v[7], v[2]},
				{v[8], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[2], v[14]},
				{v[4], v[11]},
			},
		},
		{
			// SELECT COUNT($0), SUM($0), GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_COUNT,
						ColIdx: 0,
					},
					{
						Func:   AggregatorSpec_SUM,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[2]},
				{v[1], v[4]},
				{v[3], v[2]},
				{v[4], v[2]},
				{v[5], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[5], v[14]},
			},
		},
		{
			// SELECT SUM DISTINCT ($0), GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:     AggregatorSpec_SUM,
						Distinct: true,
						ColIdx:   0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[2]},
				{v[4]},
				{v[2]},
				{v[2]},
				{v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[6]},
			},
		},
		{
			// SELECT $0, GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_IDENT,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1]},
				{v[1]},
				{v[1]},
			},
			expected: sqlbase.EncDatumRows{
				{v[1]},
			},
		},
		{
			// SELECT MAX($0), MIN($1), COUNT($1), COUNT DISTINCT ($1),
			// GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_MAX,
						ColIdx: 0,
					},
					{
						Func:   AggregatorSpec_MIN,
						ColIdx: 1,
					},
					{
						Func:   AggregatorSpec_COUNT,
						ColIdx: 1,
					},
					{
						Func:     AggregatorSpec_COUNT,
						Distinct: true,
						ColIdx:   1,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[2], v[2]},
				{v[1], v[4]},
				{v[3], v[2]},
				{v[4], v[2]},
				{v[5], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[5], v[2], v[5], v[2]},
			},
		},
	}

	for _, c := range testCases {
		ags := c.spec

		in := &RowBuffer{rows: c.input}
		out := &RowBuffer{}

		flowCtx := FlowCtx{
			Context: context.Background(),
			evalCtx: &parser.EvalContext{},
		}

		ag, err := newAggregator(&flowCtx, &ags, in, out)
		if err != nil {
			t.Fatal(err)
		}

		ag.Run(nil)

		var expected []string
		for _, row := range c.expected {
			expected = append(expected, row.String())
		}
		sort.Strings(expected)
		expStr := strings.Join(expected, "")

		var rets []string
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			rets = append(rets, row.String())
		}
		sort.Strings(rets)
		retStr := strings.Join(rets, "")

		if expStr != retStr {
			t.Errorf("invalid results; expected:\n %s\ngot:\n %s", expStr, retStr)
		}
	}
}
func TestJoinReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	//  | a        | b        | sum                 | s                   |
	//  |-----------------------------------------------------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |

	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	testCases := []struct {
		spec     JoinReaderSpec
		input    [][]parser.Datum
		expected string
	}{
		{
			spec: JoinReaderSpec{
				OutputColumns: []uint32{0, 1, 2},
			},
			input: [][]parser.Datum{
				{aFn(2), bFn(2)},
				{aFn(5), bFn(5)},
				{aFn(10), bFn(10)},
				{aFn(15), bFn(15)},
			},
			expected: "[[0 2 2] [0 5 5] [1 0 1] [1 5 6]]",
		},
		{
			spec: JoinReaderSpec{
				Filter:        Expression{Expr: "$2 <= 5"}, // sum <= 5
				OutputColumns: []uint32{3},
			},
			input: [][]parser.Datum{
				{aFn(1), bFn(1)},
				{aFn(25), bFn(25)},
				{aFn(5), bFn(5)},
				{aFn(21), bFn(21)},
				{aFn(34), bFn(34)},
				{aFn(13), bFn(13)},
				{aFn(51), bFn(51)},
				{aFn(50), bFn(50)},
			},
			expected: "[['one'] ['five'] ['two-one'] ['one-three'] ['five-zero']]",
		},
	}
	for _, c := range testCases {
		js := c.spec
		js.Table = *td

		txn := client.NewTxn(context.Background(), *kvDB)
		flowCtx := FlowCtx{
			Context: context.Background(),
			evalCtx: &parser.EvalContext{},
			txn:     txn,
		}

		in := &RowBuffer{}
		for _, row := range c.input {
			encRow := make(sqlbase.EncDatumRow, len(row))
			for i, d := range row {
				encRow[i].SetDatum(sqlbase.ColumnType_INT, d)
			}
			in.rows = append(in.rows, encRow)
		}

		out := &RowBuffer{}
		jr, err := newJoinReader(&flowCtx, &js, in, out)
		if err != nil {
			t.Fatal(err)
		}

		jr.Run(nil)

		if out.err != nil {
			t.Fatal(out.err)
		}
		if !in.done {
			t.Fatal("joinReader stopped accepting rows")
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s", result, c.expected)
		}
	}
}
func dumpTable(w io.Writer, conn *sqlConn, origDBName, origTableName string) error {
	const limit = 100

	// Escape names since they can't be used in placeholders.
	dbname := parser.Name(origDBName).String()
	tablename := parser.Name(origTableName).String()

	if err := conn.Exec(fmt.Sprintf("SET DATABASE = %s", dbname), nil); err != nil {
		return err
	}

	// Fetch all table metadata in a transaction, at a single timestamp, to
	// guarantee it doesn't change between the various SHOW statements.
	if err := conn.Exec("BEGIN", nil); err != nil {
		return err
	}

	vals, err := conn.QueryRow("SELECT cluster_logical_timestamp()", nil)
	if err != nil {
		return err
	}
	clusterTS := string(vals[0].([]byte))

	// A previous version of the code did a SELECT on system.descriptor. This
	// required the SELECT privilege to the descriptor table, which only root
	// has. Allowing non-root to do this would let users see other users' table
	// descriptors which is a problem in multi-tenancy.

	// Fetch column types.
	rows, err := conn.Query(fmt.Sprintf("SHOW COLUMNS FROM %s", tablename), nil)
	if err != nil {
		return err
	}
	vals = make([]driver.Value, 2)
	coltypes := make(map[string]string)
	for {
		if err := rows.Next(vals); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		nameI, typI := vals[0], vals[1]
		name, ok := nameI.(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", nameI)
		}
		typ, ok := typI.(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", typI)
		}
		coltypes[name] = typ
	}
	if err := rows.Close(); err != nil {
		return err
	}

	// index holds the names, in order, of the primary key columns.
	var index []string
	// Primary index is always the first index returned by SHOW INDEX.
	rows, err = conn.Query(fmt.Sprintf("SHOW INDEX FROM %s", tablename), nil)
	if err != nil {
		return err
	}
	vals = make([]driver.Value, 5)
	var primaryIndex string
	// Find the primary index columns.
	for {
		if err := rows.Next(vals); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		b, ok := vals[1].(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", vals[1])
		}
		if primaryIndex == "" {
			primaryIndex = b
		} else if primaryIndex != b {
			break
		}
		b, ok = vals[4].(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", vals[4])
		}
		index = append(index, parser.Name(b).String())
	}
	if err := rows.Close(); err != nil {
		return err
	}
	if len(index) == 0 {
		return fmt.Errorf("no primary key index found")
	}
	indexes := strings.Join(index, ", ")

	// Build the SELECT query.
	var sbuf bytes.Buffer
	fmt.Fprintf(&sbuf, "SELECT %s, * FROM %s@%s AS OF SYSTEM TIME %s", indexes, tablename, primaryIndex, clusterTS)

	var wbuf bytes.Buffer
	fmt.Fprintf(&wbuf, " WHERE ROW (%s) > ROW (", indexes)
	for i := range index {
		if i > 0 {
			wbuf.WriteString(", ")
		}
		fmt.Fprintf(&wbuf, "$%d", i+1)
	}
	wbuf.WriteString(")")
	// No WHERE clause first time, so add a place to inject it.
	fmt.Fprintf(&sbuf, "%%s ORDER BY %s LIMIT %d", indexes, limit)
	bs := sbuf.String()

	vals, err = conn.QueryRow(fmt.Sprintf("SHOW CREATE TABLE %s", tablename), nil)
	if err != nil {
		return err
	}
	create := vals[1].(string)
	if _, err := w.Write([]byte(create)); err != nil {
		return err
	}
	if _, err := w.Write([]byte(";\n")); err != nil {
		return err
	}

	if err := conn.Exec("COMMIT", nil); err != nil {
		return err
	}

	// pk holds the last values of the fetched primary keys.
	var pk []driver.Value
	q := fmt.Sprintf(bs, "")
	for {
		rows, err := conn.Query(q, pk)
		if err != nil {
			return err
		}
		cols := rows.Columns()
		pkcols := cols[:len(index)]
		cols = cols[len(index):]
		inserts := make([][]string, 0, limit)
		i := 0
		for i < limit {
			vals := make([]driver.Value, len(cols)+len(pkcols))
			if err := rows.Next(vals); err == io.EOF {
				break
			} else if err != nil {
				return err
			}
			if pk == nil {
				q = fmt.Sprintf(bs, wbuf.String())
			}
			pk = vals[:len(index)]
			vals = vals[len(index):]

			ivals := make([]string, len(vals))
			// Values need to be correctly encoded for INSERT statements in a
			// text file.
			for si, sv := range vals {
				switch t := sv.(type) {
				case nil:
					ivals[si] = "NULL"
				case bool:
					ivals[si] = parser.MakeDBool(parser.DBool(t)).String()
				case int64:
					ivals[si] = parser.NewDInt(parser.DInt(t)).String()
				case float64:
					ivals[si] = parser.NewDFloat(parser.DFloat(t)).String()
				case string:
					ivals[si] = parser.NewDString(t).String()
				case []byte:
					switch ct := coltypes[cols[si]]; ct {
					case "INTERVAL":
						ivals[si] = fmt.Sprintf("'%s'", t)
					case "BYTES":
						ivals[si] = parser.NewDBytes(parser.DBytes(t)).String()
					default:
						// STRING and DECIMAL types can have optional length
						// suffixes, so only examine the prefix of the type.
						if strings.HasPrefix(coltypes[cols[si]], "STRING") {
							ivals[si] = parser.NewDString(string(t)).String()
						} else if strings.HasPrefix(coltypes[cols[si]], "DECIMAL") {
							ivals[si] = string(t)
						} else {
							panic(errors.Errorf("unknown []byte type: %s, %v: %s", t, cols[si], coltypes[cols[si]]))
						}
					}
				case time.Time:
					var d parser.Datum
					ct := coltypes[cols[si]]
					switch ct {
					case "DATE":
						d = parser.NewDDateFromTime(t, time.UTC)
					case "TIMESTAMP":
						d = parser.MakeDTimestamp(t, time.Nanosecond)
					case "TIMESTAMP WITH TIME ZONE":
						d = parser.MakeDTimestampTZ(t, time.Nanosecond)
					default:
						panic(errors.Errorf("unknown timestamp type: %s, %v: %s", t, cols[si], coltypes[cols[si]]))
					}
					ivals[si] = fmt.Sprintf("'%s'", d)
				default:
					panic(errors.Errorf("unknown field type: %T (%s)", t, cols[si]))
				}
			}
			inserts = append(inserts, ivals)
			i++
		}
		for si, sv := range pk {
			b, ok := sv.([]byte)
			if ok && strings.HasPrefix(coltypes[pkcols[si]], "STRING") {
				// Primary key strings need to be converted to a go string,
				// but not SQL encoded since they aren't being written to a
				// text file.
				pk[si] = string(b)
			}
		}
		if err := rows.Close(); err != nil {
			return err
		}
		if i == 0 {
			break
		}
		fmt.Fprintf(w, "\nINSERT INTO %s VALUES", tablename)
		for idx, values := range inserts {
			if idx > 0 {
				fmt.Fprint(w, ",")
			}
			fmt.Fprint(w, "\n\t(")
			for vi, v := range values {
				if vi > 0 {
					fmt.Fprint(w, ", ")
				}
				fmt.Fprint(w, v)
			}
			fmt.Fprint(w, ")")
		}
		fmt.Fprintln(w, ";")
		if i < limit {
			break
		}
	}
	return nil
}
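// The generated dump is the CREATE TABLE statement followed by batches of at
// most `limit` rows per INSERT, matching the Fprintf calls above.
// Hypothetical output for a two-column table:
//
//	CREATE TABLE t (
//		i INT PRIMARY KEY,
//		s STRING
//	);
//
//	INSERT INTO t VALUES
//		(1, 'one'),
//		(2, 'two');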
// ShowIndex returns all the indexes for a table.
// Privileges: Any privilege on table.
//   Notes: postgres does not have a SHOW INDEXES statement.
//          mysql requires some privilege for any column.
func (p *planner) ShowIndex(n *parser.ShowIndex) (planNode, error) {
	tn, err := n.Table.NormalizeWithDatabaseName(p.session.Database)
	if err != nil {
		return nil, err
	}

	desc, err := p.mustGetTableDesc(tn)
	if err != nil {
		return nil, err
	}
	if err := p.anyPrivilege(desc); err != nil {
		return nil, err
	}

	columns := ResultColumns{
		{Name: "Table", Typ: parser.TypeString},
		{Name: "Name", Typ: parser.TypeString},
		{Name: "Unique", Typ: parser.TypeBool},
		{Name: "Seq", Typ: parser.TypeInt},
		{Name: "Column", Typ: parser.TypeString},
		{Name: "Direction", Typ: parser.TypeString},
		{Name: "Storing", Typ: parser.TypeBool},
	}
	return &delayedNode{
		p:       p,
		name:    "SHOW INDEX FROM " + tn.String(),
		columns: columns,
		constructor: func(p *planner) (planNode, error) {
			v := p.newContainerValuesNode(columns, 0)

			appendRow := func(index sqlbase.IndexDescriptor, colName string, sequence int,
				direction string, isStored bool) error {
				newRow := parser.DTuple{
					parser.NewDString(tn.Table()),
					parser.NewDString(index.Name),
					parser.MakeDBool(parser.DBool(index.Unique)),
					parser.NewDInt(parser.DInt(sequence)),
					parser.NewDString(colName),
					parser.NewDString(direction),
					parser.MakeDBool(parser.DBool(isStored)),
				}
				_, err := v.rows.AddRow(newRow)
				return err
			}

			for _, index := range append([]sqlbase.IndexDescriptor{desc.PrimaryIndex}, desc.Indexes...) {
				sequence := 1
				for i, col := range index.ColumnNames {
					if err := appendRow(index, col, sequence, index.ColumnDirections[i].String(), false); err != nil {
						v.rows.Close()
						return nil, err
					}
					sequence++
				}
				for _, col := range index.StoreColumnNames {
					if err := appendRow(index, col, sequence, "N/A", true); err != nil {
						v.rows.Close()
						return nil, err
					}
					sequence++
				}
			}
			return v, nil
		},
	}, nil
}
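// Illustrative output shape for a table with PRIMARY KEY (a) and
// INDEX bs (b) STORING (s); the values are assumptions based on how
// appendRow fills each column above (stored columns get direction "N/A"
// and Storing = true, and the sequence number keeps counting):
//
//	Table | Name    | Unique | Seq | Column | Direction | Storing
//	t     | primary | true   | 1   | a      | ASC       | false
//	t     | bs      | false  | 1   | b      | ASC       | false
//	t     | bs      | false  | 2   | s      | N/A       | true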
func TestHashJoiner(t *testing.T) {
	defer leaktest.AfterTest(t)()

	v := [10]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}
	null := sqlbase.EncDatum{Datum: parser.DNull}

	testCases := []struct {
		spec     HashJoinerSpec
		inputs   []sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[1]},
				{v[0], v[0], v[0]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[1]},
				{v[0], v[1], v[0]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				Expr:          Expression{Expr: "@4 >= 4"},
				// Implicit AND @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
					{v[1], v[0]},
					{v[1], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
					{v[1], v[4]},
					{v[1], v[1]},
					{v[1], v[0]},
					{v[1], v[5]},
					{v[1], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
				{v[1], v[0], v[4]},
				{v[1], v[0], v[5]},
				{v[1], v[0], v[4]},
				{v[1], v[1], v[4]},
				{v[1], v[1], v[5]},
				{v[1], v[1], v[4]},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_LEFT_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_RIGHT_OUTER,
				OutputColumns: []uint32{3, 1, 2},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_FULL_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
					{v[5], v[5], v[1]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{null, v[5], v[1]},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
			},
		},
		// Tests for behavior when input contains NULLs.
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_LEFT_OUTER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
				{v[1], null, null, null, null},
				{null, v[2], null, null, null},
				{null, null, null, null, null},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_RIGHT_OUTER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
				{null, null, v[1], null, v[5]},
				{null, null, null, v[2], v[6]},
				{null, null, null, null, v[7]},
			},
		},
		{
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_FULL_OUTER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
				{null, null, v[1], null, v[5]},
				{null, null, null, v[2], v[6]},
				{null, null, null, null, v[7]},
				{v[1], null, null, null, null},
				{null, v[2], null, null, null},
				{null, null, null, null, null},
			},
		},
	}

	for _, c := range testCases {
		hs := c.spec
		inputs := []RowSource{&RowBuffer{rows: c.inputs[0]}, &RowBuffer{rows: c.inputs[1]}}
		out := &RowBuffer{}
		flowCtx := FlowCtx{Context: context.Background(), evalCtx: &parser.EvalContext{}}

		h, err := newHashJoiner(&flowCtx, &hs, inputs, out)
		if err != nil {
			t.Fatal(err)
		}

		h.Run(nil)

		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}

		var expected []string
		for _, row := range c.expected {
			expected = append(expected, row.String())
		}
		sort.Strings(expected)
		expStr := strings.Join(expected, "")

		var rets []string
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			rets = append(rets, row.String())
		}
		sort.Strings(rets)
		retStr := strings.Join(rets, "")

		if expStr != retStr {
			t.Errorf("invalid results; expected:\n %s\ngot:\n %s", expStr, retStr)
		}
	}
}