// Events is an endpoint that returns the latest event log entries, with the following
// optional URL parameters:
//
// type=STRING returns events with this type (e.g. "create_table")
// targetID=INT returns events for that have this targetID
func (s *adminServer) Events(
	ctx context.Context, req *serverpb.EventsRequest,
) (*serverpb.EventsResponse, error) {
	// Run the query as the user named in the request; the session is torn
	// down (and its monitor settled) when the RPC completes.
	args := sql.SessionArgs{User: s.getUser(req)}
	session := s.NewSessionForRPC(ctx, args)
	defer session.Finish(s.server.sqlExecutor)

	// Execute the query. The "$" fragments are positional placeholders
	// filled in by the sqlQuery builder — presumably numbered in Append
	// order; TODO confirm against makeSQLQuery.
	q := makeSQLQuery()
	q.Append("SELECT timestamp, eventType, targetID, reportingID, info, uniqueID ")
	q.Append("FROM system.eventlog ")
	q.Append("WHERE true ") // This simplifies the WHERE clause logic below.
	if len(req.Type) > 0 {
		q.Append("AND eventType = $ ", parser.NewDString(req.Type))
	}
	if req.TargetId > 0 {
		q.Append("AND targetID = $ ", parser.NewDInt(parser.DInt(req.TargetId)))
	}
	q.Append("ORDER BY timestamp DESC ")
	// Final fragment, so no trailing space is needed. apiEventLimit caps the
	// response size.
	q.Append("LIMIT $", parser.NewDInt(parser.DInt(apiEventLimit)))
	// Placeholder/argument mismatches are accumulated by the builder rather
	// than returned per-Append; surface them all at once here.
	if len(q.Errors()) > 0 {
		return nil, s.serverErrors(q.Errors())
	}
	r := s.server.sqlExecutor.ExecuteStatements(session, q.String(), q.QueryArguments())
	defer r.Close()
	// Exactly one statement was issued, so exactly one result is expected.
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return nil, s.serverError(err)
	}

	// Marshal response: convert each SQL result row into a protobuf Event,
	// scanning columns in the same order they appear in the SELECT list.
	var resp serverpb.EventsResponse
	scanner := makeResultScanner(r.ResultList[0].Columns)
	for i, nRows := 0, r.ResultList[0].Rows.Len(); i < nRows; i++ {
		row := r.ResultList[0].Rows.At(i)
		var event serverpb.EventsResponse_Event
		var ts time.Time
		if err := scanner.ScanIndex(row, 0, &ts); err != nil {
			return nil, err
		}
		// Split the timestamp into whole seconds plus the in-second
		// nanosecond remainder for the wire representation.
		event.Timestamp = serverpb.EventsResponse_Event_Timestamp{Sec: ts.Unix(), Nsec: uint32(ts.Nanosecond())}
		if err := scanner.ScanIndex(row, 1, &event.EventType); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 2, &event.TargetID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 3, &event.ReportingID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 4, &event.Info); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 5, &event.UniqueID); err != nil {
			return nil, err
		}
		resp.Events = append(resp.Events, event)
	}
	return &resp, nil
}
// golangFillQueryArguments populates the placeholder map with
// types and values from an array of Go values. Placeholder keys are the
// 1-based argument positions rendered as strings ("1", "2", ...).
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
//
// Panics on an argument whose type is neither handled by the explicit type
// switch nor by the reflection fallback below.
func golangFillQueryArguments(pinfo *parser.PlaceholderInfo, args []interface{}) {
	pinfo.Clear()

	for i, arg := range args {
		k := fmt.Sprint(i + 1)
		if arg == nil {
			pinfo.SetValue(k, parser.DNull)
			continue
		}

		// A type switch to handle a few explicit types with special semantics:
		// - Datums are passed along as is.
		// - Time datatypes get special representation in the database.
		// Note the Datum case must stay first: a Datum also satisfies the
		// reflection fallback kinds below and must not be re-wrapped.
		var d parser.Datum
		switch t := arg.(type) {
		case parser.Datum:
			d = t
		case time.Time:
			// Timestamps are stored at microsecond precision.
			d = parser.MakeDTimestamp(t, time.Microsecond)
		case time.Duration:
			d = &parser.DInterval{Duration: duration.Duration{Nanos: t.Nanoseconds()}}
		case *inf.Dec:
			dd := &parser.DDecimal{}
			dd.Set(t)
			d = dd
		}
		if d == nil {
			// Handle all types which have an underlying type that can be stored in the
			// database.
			// Note: if this reflection becomes a performance concern in the future,
			// commonly used types could be added explicitly into the type switch above
			// for a performance gain.
			val := reflect.ValueOf(arg)
			switch val.Kind() {
			case reflect.Bool:
				d = parser.MakeDBool(parser.DBool(val.Bool()))
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				d = parser.NewDInt(parser.DInt(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				// Unsigned values are reinterpreted as DInt (int64); values above
				// math.MaxInt64 would wrap — presumably not passed in practice.
				d = parser.NewDInt(parser.DInt(val.Uint()))
			case reflect.Float32, reflect.Float64:
				d = parser.NewDFloat(parser.DFloat(val.Float()))
			case reflect.String:
				d = parser.NewDString(val.String())
			case reflect.Slice:
				// Handle byte slices.
				if val.Type().Elem().Kind() == reflect.Uint8 {
					d = parser.NewDBytes(parser.DBytes(val.Bytes()))
				}
			}
			if d == nil {
				// Programmer error: an unsupported argument type was passed.
				panic(fmt.Sprintf("unexpected type %T", arg))
			}
		}
		pinfo.SetValue(k, d)
	}
}
// populateExplain invokes explain() with a makeRow method
// which populates a valuesNode.
//
// The installed makeRow closure records the first AddRow error in e.err and
// becomes a no-op for all subsequent rows; the sticky error is returned after
// the plan walk completes.
func (e *explainer) populateExplain(v *valuesNode, plan planNode) error {
	e.makeRow = func(level int, name, field, description string, plan planNode) {
		// Once an error has occurred, drop further rows instead of
		// continuing to append.
		if e.err != nil {
			return
		}

		row := parser.DTuple{
			parser.NewDInt(parser.DInt(level)),
			parser.NewDString(name),
			parser.NewDString(field),
			parser.NewDString(description),
		}
		if e.showMetadata {
			if plan != nil {
				// Two extra columns: the plan's column list and its ordering.
				row = append(row, parser.NewDString(formatColumns(plan.Columns(), e.showTypes)))
				row = append(row, parser.NewDString(plan.Ordering().AsString(plan.Columns())))
			} else {
				// Keep the row width constant even when there is no plan.
				row = append(row, emptyString, emptyString)
			}
		}
		if _, err := v.rows.AddRow(row); err != nil {
			e.err = err
		}
	}

	e.err = nil
	e.explain(plan)
	return e.err
}
// queryZone retrieves the specific ZoneConfig associated with the supplied ID,
// if it exists. The second return value reports whether a zone row was found;
// a missing row is not an error.
func (s *adminServer) queryZone(
	session *sql.Session, id sqlbase.ID,
) (config.ZoneConfig, bool, error) {
	const query = `SELECT config FROM system.zones WHERE id = $1`
	params := parser.NewPlaceholderInfo()
	params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
	r := s.server.sqlExecutor.ExecuteStatements(session, query, params)
	defer r.Close()
	// Exactly one statement, so exactly one result is expected.
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return config.ZoneConfig{}, false, err
	}

	result := r.ResultList[0]
	if result.Rows.Len() == 0 {
		// No zone config stored for this ID.
		return config.ZoneConfig{}, false, nil
	}

	var zoneBytes []byte
	scanner := resultScanner{}
	err := scanner.ScanIndex(result.Rows.At(0), 0, &zoneBytes)
	if err != nil {
		return config.ZoneConfig{}, false, err
	}

	// The config column holds the protobuf-encoded ZoneConfig.
	var zone config.ZoneConfig
	if err := zone.Unmarshal(zoneBytes); err != nil {
		return config.ZoneConfig{}, false, err
	}

	return zone, true, nil
}
// queryNamespaceID queries for the ID of the namespace with the given name and // parent ID. func (s *adminServer) queryNamespaceID( session *sql.Session, parentID sqlbase.ID, name string, ) (sqlbase.ID, error) { const query = `SELECT id FROM system.namespace WHERE parentID = $1 AND name = $2` params := parser.NewPlaceholderInfo() params.SetValue(`1`, parser.NewDInt(parser.DInt(parentID))) params.SetValue(`2`, parser.NewDString(name)) r := s.server.sqlExecutor.ExecuteStatements(session, query, params) defer r.Close() if err := s.checkQueryResults(r.ResultList, 1); err != nil { return 0, err } result := r.ResultList[0] if result.Rows.Len() == 0 { return 0, errors.Errorf("namespace %s with ParentID %d not found", name, parentID) } var id int64 scanner := resultScanner{} err := scanner.ScanIndex(result.Rows.At(0), 0, &id) if err != nil { return 0, err } return sqlbase.ID(id), nil }
// DecodeTableValue decodes a value encoded by EncodeTableValue.
//
// Returns the decoded Datum, the remainder of b after the consumed bytes, and
// any decoding error. NULL is detected from the value tag before the type
// switch, regardless of valType. Datums are chunk-allocated through a where
// possible to reduce per-value allocations.
func DecodeTableValue(a *DatumAlloc, valType parser.Type, b []byte) (parser.Datum, []byte, error) {
	// Peek at the value tag first: a Null tag short-circuits to DNull without
	// consulting valType.
	_, dataOffset, _, typ, err := encoding.DecodeValueTag(b)
	if err != nil {
		return nil, b, err
	}
	if typ == encoding.Null {
		return parser.DNull, b[dataOffset:], nil
	}
	switch valType {
	case parser.TypeBool:
		var x bool
		b, x, err = encoding.DecodeBoolValue(b)
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(x)), b, err
	case parser.TypeInt:
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDInt(parser.DInt(i)), b, err
	case parser.TypeFloat:
		var f float64
		b, f, err = encoding.DecodeFloatValue(b)
		return a.NewDFloat(parser.DFloat(f)), b, err
	case parser.TypeDecimal:
		var d *inf.Dec
		b, d, err = encoding.DecodeDecimalValue(b)
		// Copy the decoded decimal into the chunk-allocated DDecimal.
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, b, err
	case parser.TypeString:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDString(parser.DString(data)), b, err
	case parser.TypeBytes:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDBytes(parser.DBytes(data)), b, err
	case parser.TypeDate:
		// Dates are stored as integers (presumably days since epoch — see
		// DDate's encoding for confirmation).
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDDate(parser.DDate(i)), b, err
	case parser.TypeTimestamp:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), b, err
	case parser.TypeTimestampTZ:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), b, err
	case parser.TypeInterval:
		var d duration.Duration
		b, d, err = encoding.DecodeDurationValue(b)
		return a.NewDInterval(parser.DInterval{Duration: d}), b, err
	default:
		return nil, nil, errors.Errorf("TODO(pmattis): decoded index value: %s", valType)
	}
}
func benchmarkWriteArray(b *testing.B, format formatCode) { a := parser.NewDArray(parser.TypeInt) for i := 0; i < 3; i++ { if err := a.Append(parser.NewDInt(parser.DInt(1234))); err != nil { b.Fatal(err) } } benchmarkWriteType(b, a, format) }
func (o *ordinalityNode) Next() (bool, error) { hasNext, err := o.source.Next() if !hasNext || err != nil { return hasNext, err } copy(o.row, o.source.Values()) // o.row was allocated one spot larger than o.source.Values(). // Store the ordinality value there. o.row[len(o.row)-1] = parser.NewDInt(parser.DInt(o.curCnt)) o.curCnt++ return true, nil }
// colIDArrayToDatum returns an int[] containing the ColumnIDs, or NULL if there // are no ColumnIDs. func colIDArrayToDatum(arr []sqlbase.ColumnID) (parser.Datum, error) { if len(arr) == 0 { return parser.DNull, nil } d := parser.NewDArray(parser.TypeInt) for _, val := range arr { if err := d.Append(parser.NewDInt(parser.DInt(val))); err != nil { return nil, err } } return d, nil }
// TestRowContainer exercises RowContainer across a grid of column counts, row
// counts, and pop counts: rows are filled with the deterministic value
// i*numCols+j, a prefix is popped, and the remaining rows are checked for
// correct length and shifted contents.
func TestRowContainer(t *testing.T) {
	defer leaktest.AfterTest(t)()

	for _, numCols := range []int{1, 2, 3, 5, 10, 15} {
		for _, numRows := range []int{5, 10, 100} {
			for _, numPops := range []int{0, 1, 2, numRows / 3, numRows / 2} {
				resCol := make(ResultColumns, numCols)
				for i := range resCol {
					resCol[i] = ResultColumn{Typ: parser.TypeInt}
				}
				// Unlimited memory monitor: this test is about container
				// semantics, not accounting limits.
				m := mon.MakeUnlimitedMonitor(context.Background(), "test", nil, nil, math.MaxInt64)
				rc := NewRowContainer(m.MakeBoundAccount(context.Background()), resCol, 0)
				// The same DTuple is reused for every AddRow; the container is
				// expected to copy it.
				row := make(parser.DTuple, numCols)
				for i := 0; i < numRows; i++ {
					for j := range row {
						row[j] = parser.NewDInt(parser.DInt(i*numCols + j))
					}
					if err := rc.AddRow(row); err != nil {
						t.Fatal(err)
					}
				}

				for i := 0; i < numPops; i++ {
					rc.PopFirst()
				}

				// Given that we just deleted numPops rows, we have numRows -
				// numPops rows remaining.
				if rc.Len() != numRows-numPops {
					t.Fatalf("invalid length, expected %d got %d", numRows-numPops, rc.Len())
				}

				// what was previously rc.At(i + numPops) is now rc.At(i).
				for i := 0; i < rc.Len(); i++ {
					row := rc.At(i)
					for j := range row {
						dint, ok := row[j].(*parser.DInt)
						if !ok || int(*dint) != (i+numPops)*numCols+j {
							t.Fatalf("invalid value %+v on row %d, col %d", row[j], i+numPops, j)
						}
					}
				}
			}
		}
	}
}
// RandDatum generates a random Datum of the given type.
// If null is true, the datum can be DNull (with probability 1/10).
// Panics on a type kind it does not know how to generate.
func RandDatum(rng *rand.Rand, typ ColumnType_Kind, null bool) parser.Datum {
	if null && rng.Intn(10) == 0 {
		return parser.DNull
	}
	switch typ {
	case ColumnType_BOOL:
		// rng.Intn(2) == 1 is an untyped boolean, assignable to DBool.
		return parser.MakeDBool(rng.Intn(2) == 1)
	case ColumnType_INT:
		return parser.NewDInt(parser.DInt(rng.Int63()))
	case ColumnType_FLOAT:
		return parser.NewDFloat(parser.DFloat(rng.NormFloat64()))
	case ColumnType_DECIMAL:
		// Random unscaled value with a scale in [-20, 19].
		d := &parser.DDecimal{}
		d.Dec.SetScale(inf.Scale(rng.Intn(40) - 20))
		d.Dec.SetUnscaled(rng.Int63())
		return d
	case ColumnType_DATE:
		return parser.NewDDate(parser.DDate(rng.Intn(10000)))
	case ColumnType_TIMESTAMP:
		return &parser.DTimestamp{Time: time.Unix(rng.Int63n(1000000), rng.Int63n(1000000))}
	case ColumnType_INTERVAL:
		return &parser.DInterval{Duration: duration.Duration{Months: rng.Int63n(1000),
			Days:  rng.Int63n(1000),
			Nanos: rng.Int63n(1000000),
		}}
	case ColumnType_STRING:
		// Generate a random ASCII string.
		p := make([]byte, rng.Intn(10))
		for i := range p {
			p[i] = byte(1 + rng.Intn(127))
		}
		return parser.NewDString(string(p))
	case ColumnType_BYTES:
		p := make([]byte, rng.Intn(10))
		_, _ = rng.Read(p)
		return parser.NewDBytes(parser.DBytes(p))
	case ColumnType_TIMESTAMPTZ:
		return &parser.DTimestampTZ{Time: time.Unix(rng.Int63n(1000000), rng.Int63n(1000000))}
	case ColumnType_INT_ARRAY:
		// TODO(cuongdo): we don't support for persistence of arrays yet
		return parser.DNull
	default:
		panic(fmt.Sprintf("invalid type %s", typ))
	}
}
// MakePrimaryIndexKey creates a key prefix that corresponds to a table row
// (in the primary index); it is intended for tests.
//
// The value types must match the primary key columns (or a prefix of them);
// supported types are: - Datum
//  - bool (converts to DBool)
//  - int (converts to DInt)
//  - string (converts to DString)
func MakePrimaryIndexKey(desc *TableDescriptor, vals ...interface{}) (roachpb.Key, error) {
	index := &desc.PrimaryIndex
	if len(vals) > len(index.ColumnIDs) {
		return nil, errors.Errorf("got %d values, PK has %d columns", len(vals), len(index.ColumnIDs))
	}
	// Convert each Go value to the corresponding Datum.
	datums := make([]parser.Datum, len(vals))
	for i, v := range vals {
		switch v := v.(type) {
		case bool:
			datums[i] = parser.MakeDBool(parser.DBool(v))
		case int:
			datums[i] = parser.NewDInt(parser.DInt(v))
		case string:
			datums[i] = parser.NewDString(v)
		case parser.Datum:
			datums[i] = v
		default:
			return nil, errors.Errorf("unexpected value type %T", v)
		}
		// Check that the value type matches the declared column type.
		colID := index.ColumnIDs[i]
		for _, c := range desc.Columns {
			if c.ID == colID {
				if t := DatumTypeToColumnKind(datums[i].ResolvedType()); t != c.Type.Kind {
					return nil, errors.Errorf("column %d of type %s, got value of type %s", i, c.Type.Kind, t)
				}
				break
			}
		}
	}
	// Create the ColumnID to index in datums slice map needed by
	// MakeIndexKeyPrefix.
	colIDToRowIndex := make(map[ColumnID]int)
	for i := range vals {
		colIDToRowIndex[index.ColumnIDs[i]] = i
	}

	keyPrefix := MakeIndexKeyPrefix(desc, index.ID)
	key, _, err := EncodeIndexKey(desc, index, colIDToRowIndex, datums, keyPrefix)
	if err != nil {
		return nil, err
	}
	return roachpb.Key(key), nil
}
func TestUnorderedSync(t *testing.T) { defer leaktest.AfterTest(t)() columnTypeInt := &sqlbase.ColumnType{Kind: sqlbase.ColumnType_INT} mrc := &MultiplexedRowChannel{} mrc.Init(5) for i := 1; i <= 5; i++ { go func(i int) { for j := 1; j <= 100; j++ { a := sqlbase.DatumToEncDatum(*columnTypeInt, parser.NewDInt(parser.DInt(i))) b := sqlbase.DatumToEncDatum(*columnTypeInt, parser.NewDInt(parser.DInt(j))) row := sqlbase.EncDatumRow{a, b} mrc.PushRow(row) } mrc.Close(nil) }(i) } var retRows sqlbase.EncDatumRows for { row, err := mrc.NextRow() if err != nil { t.Fatal(err) } if row == nil { break } retRows = append(retRows, row) } // Verify all elements. for i := 1; i <= 5; i++ { j := 1 for _, row := range retRows { if int(*(row[0].Datum.(*parser.DInt))) == i { if int(*(row[1].Datum.(*parser.DInt))) != j { t.Errorf("Expected [%d %d], got %s", i, j, row) } j++ } } if j != 101 { t.Errorf("Missing [%d %d]", i, j) } } // Test case when one source closes with an error. mrc = &MultiplexedRowChannel{} mrc.Init(5) for i := 1; i <= 5; i++ { go func(i int) { for j := 1; j <= 100; j++ { a := sqlbase.DatumToEncDatum(*columnTypeInt, parser.NewDInt(parser.DInt(i))) b := sqlbase.DatumToEncDatum(*columnTypeInt, parser.NewDInt(parser.DInt(j))) row := sqlbase.EncDatumRow{a, b} mrc.PushRow(row) } var err error if i == 3 { err = fmt.Errorf("Test error") } mrc.Close(err) }(i) } for { row, err := mrc.NextRow() if err != nil { if err.Error() != "Test error" { t.Error(err) } break } if row == nil { t.Error("Did not receive expected error") } } }
// TestAdminAPIZoneDetails verifies the zone configuration information returned
// for both DatabaseDetailsResponse AND TableDetailsResponse.
//
// It creates a database and table, then checks that the admin API reports the
// cluster-default zone, a database-level zone after one is installed, and
// finally a table-level zone overriding the database's.
func TestAdminAPIZoneDetails(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	// Create database and table.
	ac := log.AmbientContext{Tracer: tracing.NewTracer()}
	ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test")
	defer span.Finish()
	session := sql.NewSession(
		ctx, sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil, &sql.MemoryMetrics{})
	session.StartUnlimitedMonitor()
	setupQueries := []string{
		"CREATE DATABASE test",
		"CREATE TABLE test.tbl (val STRING)",
	}
	for _, q := range setupQueries {
		res := ts.sqlExecutor.ExecuteStatements(session, q, nil)
		// NOTE(review): defer-in-loop holds every result open until the test
		// ends; bounded here (two queries), so presumably intentional.
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
		}
	}

	// Function to verify the zone for table "test.tbl" as returned by the Admin
	// API.
	verifyTblZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.TableDetailsResponse
		if err := getAdminJSONProto(s, "databases/test/tables/tbl", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual table zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual table ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		// Stop immediately on mismatch so later checks don't pile on.
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to verify the zone for database "test" as returned by the Admin
	// API.
	verifyDbZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.DatabaseDetailsResponse
		if err := getAdminJSONProto(s, "databases/test", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual db zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual db ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to store a zone config for a given object ID.
	setZone := func(zoneCfg config.ZoneConfig, id sqlbase.ID) {
		zoneBytes, err := zoneCfg.Marshal()
		if err != nil {
			t.Fatal(err)
		}
		const query = `INSERT INTO system.zones VALUES($1, $2)`
		params := parser.NewPlaceholderInfo()
		params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
		params.SetValue(`2`, parser.NewDBytes(parser.DBytes(zoneBytes)))
		res := ts.sqlExecutor.ExecuteStatements(session, query, params)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", query, res.ResultList[0].Err)
		}
	}

	// Verify zone matches cluster default.
	verifyDbZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)
	verifyTblZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)

	// Get ID path for table. This will be an array of three IDs, containing the ID of the root namespace,
	// the database, and the table (in that order).
	idPath, err := ts.admin.queryDescriptorIDPath(session, []string{"test", "tbl"})
	if err != nil {
		t.Fatal(err)
	}

	// Apply zone configuration to database and check again.
	dbZone := config.ZoneConfig{
		RangeMinBytes: 456,
	}
	setZone(dbZone, idPath[1])
	verifyDbZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
	verifyTblZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)

	// Apply zone configuration to table and check again.
	tblZone := config.ZoneConfig{
		RangeMinBytes: 789,
	}
	setZone(tblZone, idPath[2])
	verifyDbZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
	verifyTblZone(tblZone, serverpb.ZoneConfigurationLevel_TABLE)
}
// TestOrderedSync verifies that makeOrderedSync merges multiple pre-sorted
// row sources into a single stream ordered by the given ColumnOrdering,
// including the case where one source is empty.
func TestOrderedSync(t *testing.T) {
	defer leaktest.AfterTest(t)()
	columnTypeInt := &sqlbase.ColumnType{Kind: sqlbase.ColumnType_INT}
	// v[i] is an EncDatum for the integer i, used to build test rows.
	v := [6]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(*columnTypeInt, parser.NewDInt(parser.DInt(i)))
	}

	asc := encoding.Ascending
	desc := encoding.Descending

	testCases := []struct {
		sources  []sqlbase.EncDatumRows
		ordering sqlbase.ColumnOrdering
		expected sqlbase.EncDatumRows
	}{
		{
			sources: []sqlbase.EncDatumRows{
				{
					{v[0], v[1], v[4]},
					{v[0], v[1], v[2]},
					{v[0], v[2], v[3]},
					{v[1], v[1], v[3]},
				},
				{
					{v[1], v[0], v[4]},
				},
				{
					{v[0], v[0], v[0]},
					{v[4], v[4], v[4]},
				},
			},
			ordering: sqlbase.ColumnOrdering{
				{ColIdx: 0, Direction: asc},
				{ColIdx: 1, Direction: asc},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[2]},
				{v[0], v[2], v[3]},
				{v[1], v[0], v[4]},
				{v[1], v[1], v[3]},
				{v[4], v[4], v[4]},
			},
		},
		{
			// Includes an empty source and a mixed asc/desc ordering.
			sources: []sqlbase.EncDatumRows{
				{},
				{
					{v[1], v[0], v[4]},
				},
				{
					{v[3], v[4], v[1]},
					{v[4], v[4], v[4]},
					{v[3], v[2], v[0]},
				},
				{
					{v[4], v[4], v[5]},
					{v[3], v[3], v[0]},
					{v[0], v[0], v[0]},
				},
			},
			ordering: sqlbase.ColumnOrdering{
				{ColIdx: 1, Direction: desc},
				{ColIdx: 0, Direction: asc},
				{ColIdx: 2, Direction: asc},
			},
			expected: sqlbase.EncDatumRows{
				{v[3], v[4], v[1]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
				{v[3], v[3], v[0]},
				{v[3], v[2], v[0]},
				{v[0], v[0], v[0]},
				{v[1], v[0], v[4]},
			},
		},
	}
	for testIdx, c := range testCases {
		var sources []RowSource
		for _, srcRows := range c.sources {
			rowBuf := &RowBuffer{rows: srcRows}
			sources = append(sources, rowBuf)
		}
		src, err := makeOrderedSync(c.ordering, sources)
		if err != nil {
			t.Fatal(err)
		}
		// Drain the merged stream; a nil row signals the end.
		var retRows sqlbase.EncDatumRows
		for {
			row, err := src.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			retRows = append(retRows, row)
		}
		// Compare via string representations for a readable failure message.
		expStr := c.expected.String()
		retStr := retRows.String()
		if expStr != retStr {
			t.Errorf("invalid results for case %d; expected:\n   %s\ngot:\n   %s",
				testIdx, expStr, retStr)
		}
	}
}
// TestMergeJoiner runs table-driven cases through newMergeJoiner covering
// INNER (with and without an extra ON expression), LEFT_OUTER, RIGHT_OUTER,
// and FULL_OUTER joins on sorted inputs, comparing the emitted rows against
// the expected output.
func TestMergeJoiner(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// v[i] is an EncDatum for the integer i; null marks an unmatched side in
	// outer joins.
	v := [6]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}
	null := sqlbase.EncDatum{Datum: parser.DNull}

	testCases := []struct {
		spec     MergeJoinerSpec
		inputs   []sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
			},
		},
		{
			// Duplicate keys on both sides: expect the full cross-product per
			// key group.
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[1]},
				{v[0], v[0], v[0]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[1]},
				{v[0], v[1], v[0]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
			},
		},
		{
			// INNER join with an additional ON expression filtering on the
			// right side's second column.
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				Expr:          Expression{Expr: "@4 >= 4"},
				// Implicit AND @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
					{v[1], v[0]},
					{v[1], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
					{v[1], v[4]},
					{v[1], v[1]},
					{v[1], v[0]},
					{v[1], v[5]},
					{v[1], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
				{v[1], v[0], v[4]},
				{v[1], v[0], v[5]},
				{v[1], v[0], v[4]},
				{v[1], v[1], v[4]},
				{v[1], v[1], v[5]},
				{v[1], v[1], v[4]},
			},
		},
		{
			// LEFT_OUTER: unmatched left rows appear with NULLs on the right.
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_LEFT_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			// RIGHT_OUTER: mirror of the LEFT_OUTER case with the inputs
			// swapped.
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_RIGHT_OUTER,
				OutputColumns: []uint32{3, 1, 2},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			// FULL_OUTER: unmatched rows from either side are padded with
			// NULLs.
			spec: MergeJoinerSpec{
				LeftOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: encoding.Ascending},
					}),
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_FULL_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
					{v[5], v[5], v[1]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{null, v[5], v[1]},
			},
		},
	}

	for _, c := range testCases {
		ms := c.spec
		inputs := []RowSource{&RowBuffer{rows: c.inputs[0]}, &RowBuffer{rows: c.inputs[1]}}
		out := &RowBuffer{}
		flowCtx := FlowCtx{Context: context.Background(), evalCtx: &parser.EvalContext{}}

		m, err := newMergeJoiner(&flowCtx, &ms, inputs, out)
		if err != nil {
			t.Fatal(err)
		}

		m.Run(nil)

		// Drain the output buffer; a nil row signals the end.
		var retRows sqlbase.EncDatumRows
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			retRows = append(retRows, row)
		}
		expStr := c.expected.String()
		retStr := retRows.String()
		if expStr != retStr {
			t.Errorf("invalid results; expected:\n   %s\ngot:\n   %s",
				expStr, retStr)
		}
	}
}
// TestSorter runs table-driven cases through newSorter covering a full sort,
// a sort with a Limit (top-K), and sorts where a prefix of the ordering is
// already satisfied (OrderingMatchLen).
func TestSorter(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// v[i] is an EncDatum for the integer i, used to build test rows.
	v := [6]sqlbase.EncDatum{}
	for i := range v {
		v[i].SetDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}

	asc := encoding.Ascending
	desc := encoding.Descending

	testCases := []struct {
		spec     SorterSpec
		input    sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			// No specified input ordering and unspecified limit.
			spec: SorterSpec{
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: asc},
						{ColIdx: 1, Direction: desc},
						{ColIdx: 2, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[4]},
				{v[3], v[2], v[0]},
				{v[4], v[4], v[5]},
				{v[3], v[3], v[0]},
				{v[0], v[0], v[0]},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0]},
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[3], v[3], v[0]},
				{v[3], v[2], v[0]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
			},
		},
		{
			// No specified input ordering but specified limit: only the top 4
			// rows are returned.
			spec: SorterSpec{
				Limit: 4,
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: asc},
						{ColIdx: 1, Direction: asc},
						{ColIdx: 2, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[3], v[3], v[0]},
				{v[3], v[4], v[1]},
				{v[1], v[0], v[4]},
				{v[0], v[0], v[0]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
				{v[3], v[2], v[0]},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0]},
				{v[1], v[0], v[4]},
				{v[3], v[2], v[0]},
				{v[3], v[3], v[0]},
			},
		},
		{
			// Specified match ordering length but no specified limit: the
			// first two columns already arrive sorted.
			spec: SorterSpec{
				OrderingMatchLen: 2,
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 0, Direction: asc},
						{ColIdx: 1, Direction: asc},
						{ColIdx: 2, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[0], v[1], v[2]},
				{v[0], v[1], v[0]},
				{v[1], v[0], v[5]},
				{v[1], v[1], v[5]},
				{v[1], v[1], v[4]},
				{v[3], v[4], v[3]},
				{v[3], v[4], v[2]},
				{v[3], v[5], v[1]},
				{v[4], v[4], v[5]},
				{v[4], v[4], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[1], v[0]},
				{v[0], v[1], v[2]},
				{v[1], v[0], v[5]},
				{v[1], v[1], v[4]},
				{v[1], v[1], v[5]},
				{v[3], v[4], v[2]},
				{v[3], v[4], v[3]},
				{v[3], v[5], v[1]},
				{v[4], v[4], v[4]},
				{v[4], v[4], v[5]},
			},
		},
		{
			// Specified input ordering but no specified limit.
			spec: SorterSpec{
				OrderingMatchLen: 2,
				OutputOrdering: convertToSpecOrdering(
					sqlbase.ColumnOrdering{
						{ColIdx: 1, Direction: asc},
						{ColIdx: 2, Direction: asc},
						{ColIdx: 3, Direction: asc},
					}),
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[1], v[2], v[5]},
				{v[0], v[1], v[2], v[4]},
				{v[0], v[1], v[2], v[3]},
				{v[1], v[1], v[2], v[2]},
				{v[1], v[2], v[2], v[5]},
				{v[0], v[2], v[2], v[4]},
				{v[0], v[2], v[2], v[3]},
				{v[1], v[2], v[2], v[2]},
			},
			expected: sqlbase.EncDatumRows{
				{v[1], v[1], v[2], v[2]},
				{v[0], v[1], v[2], v[3]},
				{v[0], v[1], v[2], v[4]},
				{v[1], v[1], v[2], v[5]},
				{v[1], v[2], v[2], v[2]},
				{v[0], v[2], v[2], v[3]},
				{v[0], v[2], v[2], v[4]},
				{v[1], v[2], v[2], v[5]},
			},
		},
	}

	for _, c := range testCases {
		ss := c.spec
		in := &RowBuffer{rows: c.input}
		out := &RowBuffer{}
		flowCtx := FlowCtx{Context: context.Background()}

		s := newSorter(&flowCtx, &ss, in, out)
		s.Run(nil)

		// Drain the output buffer; a nil row signals the end.
		var retRows sqlbase.EncDatumRows
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			retRows = append(retRows, row)
		}

		expStr := c.expected.String()
		retStr := retRows.String()
		if expStr != retStr {
			t.Errorf("invalid results; expected:\n   %s\ngot:\n   %s",
				expStr, retStr)
		}
	}
}
// RowModuloFn creates a GenValueFn that returns the row number modulo a given // value as a DInt func RowModuloFn(modulo int) GenValueFn { return func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row % modulo)) } }
// TestDistinct runs table-driven cases through newDistinct: full-row
// deduplication with no ordered columns, and deduplication that exploits a
// column known to arrive in sorted order (OrderedColumns).
func TestDistinct(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// v[i] is an EncDatum for the integer i, used to build test rows.
	v := [15]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(sqlbase.ColumnType{Kind: sqlbase.ColumnType_INT},
			parser.NewDInt(parser.DInt(i)))
	}

	testCases := []struct {
		spec     DistinctSpec
		input    sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			spec: DistinctSpec{},
			input: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[5], v[6]},
				{v[2], v[3]},
				{v[5], v[6]},
				{v[2], v[6]},
				{v[3], v[5]},
				{v[2], v[9]},
			},
			expected: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[5], v[6]},
				{v[2], v[6]},
				{v[3], v[5]},
				{v[2], v[9]},
			},
		},
		{
			// Column 1 is known-ordered, letting the processor forget earlier
			// groups as the sorted value advances.
			spec: DistinctSpec{
				OrderedColumns: []uint32{1},
			},
			input: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[2], v[3]},
				{v[2], v[6]},
				{v[2], v[9]},
				{v[3], v[5]},
				{v[5], v[6]},
				{v[5], v[6]},
			},
			expected: sqlbase.EncDatumRows{
				{v[2], v[3]},
				{v[2], v[6]},
				{v[2], v[9]},
				{v[3], v[5]},
				{v[5], v[6]},
			},
		},
	}

	for _, c := range testCases {
		ds := c.spec

		in := NewRowBuffer(nil, c.input)
		out := &RowBuffer{}
		flowCtx := FlowCtx{
			Context: context.Background(),
		}

		d, err := newDistinct(&flowCtx, &ds, in, out)
		if err != nil {
			t.Fatal(err)
		}

		d.Run(nil)
		if out.Err != nil {
			t.Fatal(out.Err)
		}
		// The processor must close its output when done.
		if !out.Closed {
			t.Fatalf("output RowReceiver not closed")
		}

		if result := out.Rows.String(); result != c.expected.String() {
			t.Errorf("invalid results: %s, expected %s'", result, c.expected.String())
		}
	}
}
// TestHashJoiner runs the hash joiner over a table of join specs (inner,
// left/right/full outer, with and without an extra ON expression, and with
// NULLs in the equality columns) and compares the emitted rows — order
// insensitively, by sorting their string forms — against the expected rows.
func TestHashJoiner(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// v[i] is an INT EncDatum with value i.
	v := [10]sqlbase.EncDatum{}
	for i := range v {
		v[i] = sqlbase.DatumToEncDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}
	null := sqlbase.EncDatum{Datum: parser.DNull}

	testCases := []struct {
		spec     HashJoinerSpec
		inputs   []sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			// Inner join on column 0; each left key matches at most one right row.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[1], v[0], v[4]},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
			},
		},
		{
			// Inner join where every row shares the same key: full cross
			// product of the two sides.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[1]},
				{v[0], v[0], v[0]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[1]},
				{v[0], v[1], v[0]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
			},
		},
		{
			// Inner join with an additional ON expression filtering on the
			// right side's second column.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 3},
				Expr:          Expression{Expr: "@4 >= 4"},
				// Implicit AND @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[0], v[1]},
					{v[1], v[0]},
					{v[1], v[1]},
				},
				{
					{v[0], v[4]},
					{v[0], v[1]},
					{v[0], v[0]},
					{v[0], v[5]},
					{v[0], v[4]},
					{v[1], v[4]},
					{v[1], v[1]},
					{v[1], v[0]},
					{v[1], v[5]},
					{v[1], v[4]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[4]},
				{v[0], v[0], v[5]},
				{v[0], v[0], v[4]},
				{v[0], v[1], v[4]},
				{v[0], v[1], v[5]},
				{v[0], v[1], v[4]},
				{v[1], v[0], v[4]},
				{v[1], v[0], v[5]},
				{v[1], v[0], v[4]},
				{v[1], v[1], v[4]},
				{v[1], v[1], v[5]},
				{v[1], v[1], v[4]},
			},
		},
		{
			// Left outer join: unmatched left rows are emitted padded with NULLs.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_LEFT_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			// Right outer join: mirror of the previous case with sides swapped.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_RIGHT_OUTER,
				OutputColumns: []uint32{3, 1, 2},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{v[5], null, null},
			},
		},
		{
			// Full outer join: unmatched rows from both sides are emitted.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_FULL_OUTER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], v[4]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
					{v[5], v[5], v[1]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], null, null},
				{v[1], v[0], v[4]},
				{v[2], null, null},
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
				{null, v[5], v[1]},
			},
		},
		{
			// Inner join where some build-side keys are absent from the probe side.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 3, 4},
				// Implicit @1 = @3 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[2], v[4]},
					{v[3], v[1]},
					{v[4], v[5]},
					{v[5], v[5]},
				},
				{
					{v[1], v[0], v[4]},
					{v[3], v[4], v[1]},
					{v[4], v[4], v[5]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[3], v[4], v[1]},
				{v[4], v[4], v[5]},
			},
		},
		// Tests for behavior when input contains NULLs.
		{
			// Inner join: rows with NULL in an equality column never match.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_INNER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
			},
		},
		{
			// Left outer join with NULL keys: NULL-keyed left rows are
			// unmatched and padded.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_LEFT_OUTER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
				{v[1], null, null, null, null},
				{null, v[2], null, null, null},
				{null, null, null, null, null},
			},
		},
		{
			// Right outer join with NULL keys: NULL-keyed right rows are
			// unmatched and padded.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_RIGHT_OUTER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
				{null, null, v[1], null, v[5]},
				{null, null, null, v[2], v[6]},
				{null, null, null, null, v[7]},
			},
		},
		{
			// Full outer join with NULL keys: both sides' NULL-keyed rows are
			// emitted unmatched.
			spec: HashJoinerSpec{
				LeftEqColumns: []uint32{0, 1},
				LeftTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				RightEqColumns: []uint32{0, 1},
				RightTypes: []sqlbase.ColumnType_Kind{
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
					sqlbase.ColumnType_INT,
				},
				Type:          JoinType_FULL_OUTER,
				OutputColumns: []uint32{0, 1, 2, 3, 4},
				// Implicit @1,@2 = @3,@4 constraint.
			},
			inputs: []sqlbase.EncDatumRows{
				{
					{v[0], v[0]},
					{v[1], null},
					{null, v[2]},
					{null, null},
				},
				{
					{v[0], v[0], v[4]},
					{v[1], null, v[5]},
					{null, v[2], v[6]},
					{null, null, v[7]},
				},
			},
			expected: sqlbase.EncDatumRows{
				{v[0], v[0], v[0], v[0], v[4]},
				{null, null, v[1], null, v[5]},
				{null, null, null, v[2], v[6]},
				{null, null, null, null, v[7]},
				{v[1], null, null, null, null},
				{null, v[2], null, null, null},
				{null, null, null, null, null},
			},
		},
	}

	for _, c := range testCases {
		hs := c.spec
		inputs := []RowSource{&RowBuffer{rows: c.inputs[0]}, &RowBuffer{rows: c.inputs[1]}}
		out := &RowBuffer{}
		flowCtx := FlowCtx{Context: context.Background(), evalCtx: &parser.EvalContext{}}

		h, err := newHashJoiner(&flowCtx, &hs, inputs, out)
		if err != nil {
			t.Fatal(err)
		}
		h.Run(nil)

		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}

		// The joiner's output order is unspecified: compare the sorted string
		// forms of expected vs. actual rows.
		var expected []string
		for _, row := range c.expected {
			expected = append(expected, row.String())
		}
		sort.Strings(expected)
		expStr := strings.Join(expected, "")

		var rets []string
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			rets = append(rets, row.String())
		}
		sort.Strings(rets)
		retStr := strings.Join(rets, "")

		if expStr != retStr {
			t.Errorf("invalid results; expected:\n %s\ngot:\n %s", expStr, retStr)
		}
	}
}
// TODO(irfansharif): Add tests to verify the following aggregation functions:
//      AVG
//      BOOL_AND
//      BOOL_OR
//      CONCAT_AGG
//      STDDEV
//      VARIANCE
// TODO(irfansharif): Replicate sql/testdata and TestLogic for distsql, this kind of manual
// case-by-case testing is error prone making it very easy to miss edge cases.

// TestAggregator runs the aggregator processor over a table of specs
// (IDENT/COUNT/SUM/MIN/MAX, with and without DISTINCT and grouping columns)
// and compares the emitted rows order-insensitively against the expected rows.
func TestAggregator(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// v[i] is an INT EncDatum with value i.
	v := [15]sqlbase.EncDatum{}
	for i := range v {
		v[i].SetDatum(sqlbase.ColumnType_INT, parser.NewDInt(parser.DInt(i)))
	}

	testCases := []struct {
		spec     AggregatorSpec
		input    sqlbase.EncDatumRows
		expected sqlbase.EncDatumRows
	}{
		{
			// SELECT $1, COUNT($0), GROUP BY $1.
			spec: AggregatorSpec{
				Types:     []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				GroupCols: []uint32{1},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_IDENT,
						ColIdx: 1,
					},
					{
						Func:   AggregatorSpec_COUNT,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[2]},
				{v[3], v[4]},
				{v[6], v[2]},
				{v[7], v[2]},
				{v[8], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[4], v[2]},
				{v[2], v[3]},
			},
		},
		{
			// SELECT $1, SUM($0), GROUP BY $1.
			spec: AggregatorSpec{
				Types:     []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				GroupCols: []uint32{1},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_IDENT,
						ColIdx: 1,
					},
					{
						Func:   AggregatorSpec_SUM,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[2]},
				{v[3], v[4]},
				{v[6], v[2]},
				{v[7], v[2]},
				{v[8], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[2], v[14]},
				{v[4], v[11]},
			},
		},
		{
			// SELECT COUNT($0), SUM($0), GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_COUNT,
						ColIdx: 0,
					},
					{
						Func:   AggregatorSpec_SUM,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1], v[2]},
				{v[1], v[4]},
				{v[3], v[2]},
				{v[4], v[2]},
				{v[5], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[5], v[14]},
			},
		},
		{
			// SELECT SUM DISTINCT ($0), GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:     AggregatorSpec_SUM,
						Distinct: true,
						ColIdx:   0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[2]},
				{v[4]},
				{v[2]},
				{v[2]},
				{v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[6]},
			},
		},
		{
			// SELECT $0, GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_IDENT,
						ColIdx: 0,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[1]},
				{v[1]},
				{v[1]},
			},
			expected: sqlbase.EncDatumRows{
				{v[1]},
			},
		},
		{
			// SELECT MAX($0), MIN($1), COUNT($1), COUNT DISTINCT ($1), GROUP BY [] (empty group key).
			spec: AggregatorSpec{
				Types: []sqlbase.ColumnType_Kind{sqlbase.ColumnType_INT, sqlbase.ColumnType_INT},
				Exprs: []AggregatorSpec_Expr{
					{
						Func:   AggregatorSpec_MAX,
						ColIdx: 0,
					},
					{
						Func:   AggregatorSpec_MIN,
						ColIdx: 1,
					},
					{
						Func:   AggregatorSpec_COUNT,
						ColIdx: 1,
					},
					{
						Func:     AggregatorSpec_COUNT,
						Distinct: true,
						ColIdx:   1,
					},
				},
			},
			input: sqlbase.EncDatumRows{
				{v[2], v[2]},
				{v[1], v[4]},
				{v[3], v[2]},
				{v[4], v[2]},
				{v[5], v[4]},
			},
			expected: sqlbase.EncDatumRows{
				{v[5], v[2], v[5], v[2]},
			},
		},
	}

	for _, c := range testCases {
		ags := c.spec

		in := &RowBuffer{rows: c.input}
		out := &RowBuffer{}

		flowCtx := FlowCtx{
			Context: context.Background(),
			evalCtx: &parser.EvalContext{},
		}

		ag, err := newAggregator(&flowCtx, &ags, in, out)
		if err != nil {
			t.Fatal(err)
		}
		ag.Run(nil)

		// Group output order is unspecified: compare the sorted string forms
		// of expected vs. actual rows.
		var expected []string
		for _, row := range c.expected {
			expected = append(expected, row.String())
		}
		sort.Strings(expected)
		expStr := strings.Join(expected, "")

		var rets []string
		for {
			row, err := out.NextRow()
			if err != nil {
				t.Fatal(err)
			}
			if row == nil {
				break
			}
			rets = append(rets, row.String())
		}
		sort.Strings(rets)
		retStr := strings.Join(rets, "")

		if expStr != retStr {
			t.Errorf("invalid results; expected:\n %s\ngot:\n %s", expStr, retStr)
		}
	}
}
// DecodeTableKey decodes a single value of the given type from the front of a
// table key, in the given column direction, and returns the decoded datum
// along with the remainder of the key. Datums are chunk-allocated from a
// (where the type permits) to amortize allocations.
func DecodeTableKey(
	a *DatumAlloc, valType parser.Type, key []byte, dir encoding.Direction,
) (parser.Datum, []byte, error) {
	if (dir != encoding.Ascending) && (dir != encoding.Descending) {
		return nil, nil, errors.Errorf("invalid direction: %d", dir)
	}
	// A NULL marker consumes its bytes and short-circuits to DNull.
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		return parser.DNull, key, nil
	}
	var rkey []byte
	var err error
	// Each case decodes with the Ascending or Descending variant matching the
	// column direction, then wraps the raw value in the appropriate datum.
	switch valType {
	case parser.TypeBool:
		// Booleans are key-encoded as varints (0 or 1).
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(i != 0)), rkey, err
	case parser.TypeInt:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		return a.NewDInt(parser.DInt(i)), rkey, err
	case parser.TypeFloat:
		var f float64
		if dir == encoding.Ascending {
			rkey, f, err = encoding.DecodeFloatAscending(key)
		} else {
			rkey, f, err = encoding.DecodeFloatDescending(key)
		}
		return a.NewDFloat(parser.DFloat(f)), rkey, err
	case parser.TypeDecimal:
		var d *inf.Dec
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDecimalAscending(key, nil)
		} else {
			rkey, d, err = encoding.DecodeDecimalDescending(key, nil)
		}
		// Copy the decoded decimal into a chunk-allocated DDecimal.
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, rkey, err
	case parser.TypeString:
		var r string
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeUnsafeStringAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil)
		}
		return a.NewDString(parser.DString(r)), rkey, err
	case parser.TypeBytes:
		var r []byte
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeBytesAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeBytesDescending(key, nil)
		}
		return a.NewDBytes(parser.DBytes(r)), rkey, err
	case parser.TypeDate:
		// Dates are key-encoded as varints (day counts).
		var t int64
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, t, err = encoding.DecodeVarintDescending(key)
		}
		return a.NewDDate(parser.DDate(t)), rkey, err
	case parser.TypeTimestamp:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), rkey, err
	case parser.TypeTimestampTZ:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), rkey, err
	case parser.TypeInterval:
		var d duration.Duration
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDurationAscending(key)
		} else {
			rkey, d, err = encoding.DecodeDurationDescending(key)
		}
		return a.NewDInterval(parser.DInterval{Duration: d}), rkey, err
	default:
		return nil, nil, errors.Errorf("TODO(pmattis): decoded index key: %s", valType)
	}
}
func TestTableReader(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() // Create a table where each row is: // // | a | b | sum | s | // |-----------------------------------------------------------------| // | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) | aFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row / 10)) } bFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row % 10)) } sumFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row/10 + row%10)) } sqlutils.CreateTable(t, sqlDB, "t", "a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)", 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) td := sqlbase.GetTableDescriptor(kvDB, "test", "t") makeIndexSpan := func(start, end int) TableReaderSpan { var span roachpb.Span prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID)) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) 
return TableReaderSpan{Span: span} } testCases := []struct { spec TableReaderSpec expected string }{ { spec: TableReaderSpec{ Filter: Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3 OutputColumns: []uint32{0, 1}, }, expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]", }, { spec: TableReaderSpec{ Filter: Expression{Expr: "@3 < 5 AND @2 != 3"}, OutputColumns: []uint32{3}, // s HardLimit: 4, }, expected: "[['one'] ['two'] ['four'] ['one-zero']]", }, { spec: TableReaderSpec{ IndexIdx: 1, Reverse: true, Spans: []TableReaderSpan{makeIndexSpan(4, 6)}, Filter: Expression{Expr: "@1 < 3"}, // sum < 8 OutputColumns: []uint32{0, 1}, SoftLimit: 1, }, expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]", }, } for _, c := range testCases { ts := c.spec ts.Table = *td flowCtx := FlowCtx{ Context: context.Background(), evalCtx: &parser.EvalContext{}, txnProto: &roachpb.Transaction{}, clientDB: kvDB, } out := &RowBuffer{} tr, err := newTableReader(&flowCtx, &ts, out) if err != nil { t.Fatal(err) } tr.Run(nil) if out.err != nil { t.Fatal(out.err) } if !out.closed { t.Fatalf("output RowReceiver not closed") } if result := out.rows.String(); result != c.expected { t.Errorf("invalid results: %s, expected %s'", result, c.expected) } } }
// dumpTable writes a SQL dump of the given table to w: first the table's
// CREATE TABLE statement, then batched INSERT statements for all rows. Rows
// are fetched in pages of `limit`, keyset-paginated on the primary key, at a
// fixed cluster timestamp so the dump is a consistent snapshot.
func dumpTable(w io.Writer, conn *sqlConn, origDBName, origTableName string) error {
	const limit = 100

	// Escape names since they can't be used in placeholders.
	dbname := parser.Name(origDBName).String()
	tablename := parser.Name(origTableName).String()

	if err := conn.Exec(fmt.Sprintf("SET DATABASE = %s", dbname), nil); err != nil {
		return err
	}

	// Fetch all table metadata in a transaction and its time to guarantee it
	// doesn't change between the various SHOW statements.
	if err := conn.Exec("BEGIN", nil); err != nil {
		return err
	}

	// Capture the transaction's timestamp; row pages are later read
	// AS OF SYSTEM TIME at this value.
	vals, err := conn.QueryRow("SELECT cluster_logical_timestamp()", nil)
	if err != nil {
		return err
	}
	clusterTS := string(vals[0].([]byte))

	// A previous version of the code did a SELECT on system.descriptor. This
	// required the SELECT privilege to the descriptor table, which only root
	// has. Allowing non-root to do this would let users see other users' table
	// descriptors which is a problem in multi-tenancy.

	// Fetch column types.
	rows, err := conn.Query(fmt.Sprintf("SHOW COLUMNS FROM %s", tablename), nil)
	if err != nil {
		return err
	}
	vals = make([]driver.Value, 2)
	// coltypes maps column name -> declared SQL type; used below to decide
	// how to render each value in the INSERTs.
	coltypes := make(map[string]string)
	for {
		if err := rows.Next(vals); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		nameI, typI := vals[0], vals[1]
		name, ok := nameI.(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", nameI)
		}
		typ, ok := typI.(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", typI)
		}
		coltypes[name] = typ
	}
	if err := rows.Close(); err != nil {
		return err
	}

	// index holds the names, in order, of the primary key columns.
	var index []string
	// Primary index is always the first index returned by SHOW INDEX.
	rows, err = conn.Query(fmt.Sprintf("SHOW INDEX FROM %s", tablename), nil)
	if err != nil {
		return err
	}
	vals = make([]driver.Value, 5)
	var primaryIndex string
	// Find the primary index columns: collect rows until the index name
	// (vals[1]) changes away from the first one seen.
	for {
		if err := rows.Next(vals); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		b, ok := vals[1].(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", vals[1])
		}
		if primaryIndex == "" {
			primaryIndex = b
		} else if primaryIndex != b {
			break
		}
		// vals[4] is the column name within the index.
		b, ok = vals[4].(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", vals[4])
		}
		index = append(index, parser.Name(b).String())
	}
	if err := rows.Close(); err != nil {
		return err
	}
	if len(index) == 0 {
		return fmt.Errorf("no primary key index found")
	}
	indexes := strings.Join(index, ", ")

	// Build the SELECT query.
	var sbuf bytes.Buffer
	fmt.Fprintf(&sbuf, "SELECT %s, * FROM %s@%s AS OF SYSTEM TIME %s", indexes, tablename, primaryIndex, clusterTS)

	// wbuf holds the keyset-pagination WHERE clause, with one placeholder per
	// primary key column; it is spliced into the query after the first page.
	var wbuf bytes.Buffer
	fmt.Fprintf(&wbuf, " WHERE ROW (%s) > ROW (", indexes)
	for i := range index {
		if i > 0 {
			wbuf.WriteString(", ")
		}
		fmt.Fprintf(&wbuf, "$%d", i+1)
	}
	wbuf.WriteString(")")
	// No WHERE clause first time, so add a place to inject it.
	fmt.Fprintf(&sbuf, "%%s ORDER BY %s LIMIT %d", indexes, limit)
	bs := sbuf.String()

	vals, err = conn.QueryRow(fmt.Sprintf("SHOW CREATE TABLE %s", tablename), nil)
	if err != nil {
		return err
	}
	create := vals[1].(string)
	if _, err := w.Write([]byte(create)); err != nil {
		return err
	}
	if _, err := w.Write([]byte(";\n")); err != nil {
		return err
	}

	// Metadata is gathered; row pages below read at clusterTS, so the
	// transaction is no longer needed.
	if err := conn.Exec("COMMIT", nil); err != nil {
		return err
	}

	// pk holds the last values of the fetched primary keys
	var pk []driver.Value
	q := fmt.Sprintf(bs, "")
	for {
		rows, err := conn.Query(q, pk)
		if err != nil {
			return err
		}
		cols := rows.Columns()
		// The first len(index) columns are the explicitly selected primary
		// key columns (for pagination); the rest are the table's own columns.
		pkcols := cols[:len(index)]
		cols = cols[len(index):]
		inserts := make([][]string, 0, limit)
		i := 0
		for i < limit {
			vals := make([]driver.Value, len(cols)+len(pkcols))
			if err := rows.Next(vals); err == io.EOF {
				break
			} else if err != nil {
				return err
			}
			if pk == nil {
				// First row ever fetched: switch to the paginated query for
				// all subsequent pages.
				q = fmt.Sprintf(bs, wbuf.String())
			}
			pk = vals[:len(index)]
			vals = vals[len(index):]
			ivals := make([]string, len(vals))
			// Values need to be correctly encoded for INSERT
			// statements in a text file.
			for si, sv := range vals {
				switch t := sv.(type) {
				case nil:
					ivals[si] = "NULL"
				case bool:
					ivals[si] = parser.MakeDBool(parser.DBool(t)).String()
				case int64:
					ivals[si] = parser.NewDInt(parser.DInt(t)).String()
				case float64:
					ivals[si] = parser.NewDFloat(parser.DFloat(t)).String()
				case string:
					ivals[si] = parser.NewDString(t).String()
				case []byte:
					// []byte is ambiguous at the driver level; disambiguate
					// using the declared column type.
					switch ct := coltypes[cols[si]]; ct {
					case "INTERVAL":
						ivals[si] = fmt.Sprintf("'%s'", t)
					case "BYTES":
						ivals[si] = parser.NewDBytes(parser.DBytes(t)).String()
					default:
						// STRING and DECIMAL types can have optional length
						// suffixes, so only examine the prefix of the type.
						if strings.HasPrefix(coltypes[cols[si]], "STRING") {
							ivals[si] = parser.NewDString(string(t)).String()
						} else if strings.HasPrefix(coltypes[cols[si]], "DECIMAL") {
							ivals[si] = string(t)
						} else {
							panic(errors.Errorf("unknown []byte type: %s, %v: %s", t, cols[si], coltypes[cols[si]]))
						}
					}
				case time.Time:
					var d parser.Datum
					ct := coltypes[cols[si]]
					switch ct {
					case "DATE":
						d = parser.NewDDateFromTime(t, time.UTC)
					case "TIMESTAMP":
						d = parser.MakeDTimestamp(t, time.Nanosecond)
					case "TIMESTAMP WITH TIME ZONE":
						d = parser.MakeDTimestampTZ(t, time.Nanosecond)
					default:
						panic(errors.Errorf("unknown timestamp type: %s, %v: %s", t, cols[si], coltypes[cols[si]]))
					}
					ivals[si] = fmt.Sprintf("'%s'", d)
				default:
					panic(errors.Errorf("unknown field type: %T (%s)", t, cols[si]))
				}
			}
			inserts = append(inserts, ivals)
			i++
		}
		for si, sv := range pk {
			b, ok := sv.([]byte)
			if ok && strings.HasPrefix(coltypes[pkcols[si]], "STRING") {
				// Primary key strings need to be converted to a go string, but not SQL
				// encoded since they aren't being written to a text file.
				pk[si] = string(b)
			}
		}
		if err := rows.Close(); err != nil {
			return err
		}
		if i == 0 {
			// Empty page: pagination is complete.
			break
		}
		// Emit one multi-row INSERT for the page.
		fmt.Fprintf(w, "\nINSERT INTO %s VALUES", tablename)
		for idx, values := range inserts {
			if idx > 0 {
				fmt.Fprint(w, ",")
			}
			fmt.Fprint(w, "\n\t(")
			for vi, v := range values {
				if vi > 0 {
					fmt.Fprint(w, ", ")
				}
				fmt.Fprint(w, v)
			}
			fmt.Fprint(w, ")")
		}
		fmt.Fprintln(w, ";")
		if i < limit {
			// Short page: no more rows after this one.
			break
		}
	}
	return nil
}
// TestEncDatumRowCompare verifies EncDatumRow.Compare against a table of row
// pairs and column orderings, including orderings that ignore the differing
// column, mixed ascending/descending directions, and rows of unequal length
// (only the ordering's columns are compared).
func TestEncDatumRowCompare(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// v[i] is an INT EncDatum with value i.
	v := [5]EncDatum{}
	for i := range v {
		v[i] = DatumToEncDatum(ColumnType{Kind: ColumnType_INT}, parser.NewDInt(parser.DInt(i)))
	}

	asc := encoding.Ascending
	desc := encoding.Descending

	testCases := []struct {
		row1, row2 EncDatumRow
		ord        ColumnOrdering
		cmp        int
	}{
		{
			// Empty ordering: rows always compare equal.
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{},
			cmp:  0,
		},
		{
			// Ordering ignores the column on which the rows differ.
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{1, desc}},
			cmp:  0,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{0, asc}, {1, desc}},
			cmp:  0,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{2, asc}},
			cmp:  -1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[3]},
			row2: EncDatumRow{v[0], v[1], v[2]},
			ord:  ColumnOrdering{{2, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{2, asc}, {0, asc}, {1, asc}},
			cmp:  -1,
		},
		{
			// Descending direction inverts the comparison on column 2.
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{0, asc}, {2, desc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[0], v[1], v[2]},
			row2: EncDatumRow{v[0], v[1], v[3]},
			ord:  ColumnOrdering{{1, desc}, {0, asc}, {2, desc}},
			cmp:  1,
		},
		{
			// Rows of different lengths: only ordering columns matter.
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{0, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{1, desc}, {0, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{1, asc}, {0, asc}},
			cmp:  1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{1, asc}, {0, desc}},
			cmp:  -1,
		},
		{
			row1: EncDatumRow{v[2], v[3]},
			row2: EncDatumRow{v[1], v[3], v[0]},
			ord:  ColumnOrdering{{0, desc}, {1, asc}},
			cmp:  -1,
		},
	}

	a := &DatumAlloc{}
	for _, c := range testCases {
		cmp, err := c.row1.Compare(a, c.ord, c.row2)
		if err != nil {
			t.Error(err)
		} else if cmp != c.cmp {
			t.Errorf("%s cmp %s ordering %v got %d, expected %d",
				c.row1, c.row2, c.ord, cmp, c.cmp)
		}
	}
}
func TestJoinReader(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() // Create a table where each row is: // // | a | b | sum | s | // |-----------------------------------------------------------------| // | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) | aFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row / 10)) } bFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row % 10)) } sumFn := func(row int) parser.Datum { return parser.NewDInt(parser.DInt(row/10 + row%10)) } sqlutils.CreateTable(t, sqlDB, "t", "a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)", 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) td := sqlbase.GetTableDescriptor(kvDB, "test", "t") testCases := []struct { spec JoinReaderSpec input [][]parser.Datum expected string }{ { spec: JoinReaderSpec{ OutputColumns: []uint32{0, 1, 2}, }, input: [][]parser.Datum{ {aFn(2), bFn(2)}, {aFn(5), bFn(5)}, {aFn(10), bFn(10)}, {aFn(15), bFn(15)}, }, expected: "[[0 2 2] [0 5 5] [1 0 1] [1 5 6]]", }, { spec: JoinReaderSpec{ Filter: Expression{Expr: "$2 <= 5"}, // sum <= 5 OutputColumns: []uint32{3}, }, input: [][]parser.Datum{ {aFn(1), bFn(1)}, {aFn(25), bFn(25)}, {aFn(5), bFn(5)}, {aFn(21), bFn(21)}, {aFn(34), bFn(34)}, {aFn(13), bFn(13)}, {aFn(51), bFn(51)}, {aFn(50), bFn(50)}, }, expected: "[['one'] ['five'] ['two-one'] ['one-three'] ['five-zero']]", }, } for _, c := range testCases { js := c.spec js.Table = *td txn := client.NewTxn(context.Background(), *kvDB) flowCtx := FlowCtx{ Context: context.Background(), evalCtx: &parser.EvalContext{}, txn: txn, } in := &RowBuffer{} for _, row := range c.input { encRow := make(sqlbase.EncDatumRow, len(row)) for i, d := range row { encRow[i].SetDatum(sqlbase.ColumnType_INT, d) } in.rows = append(in.rows, encRow) } out := &RowBuffer{} jr, err := newJoinReader(&flowCtx, &js, in, out) if err != nil { 
t.Fatal(err) } jr.Run(nil) if out.err != nil { t.Fatal(out.err) } if !in.done { t.Fatal("joinReader stopped accepting rows") } if !out.closed { t.Fatalf("output RowReceiver not closed") } if result := out.rows.String(); result != c.expected { t.Errorf("invalid results: %s, expected %s'", result, c.expected) } } }
// decodeOidDatum decodes bytes with specified Oid and format code into // a datum. func decodeOidDatum(id oid.Oid, code formatCode, b []byte) (parser.Datum, error) { var d parser.Datum switch id { case oid.T_bool: switch code { case formatText: v, err := strconv.ParseBool(string(b)) if err != nil { return d, err } d = parser.MakeDBool(parser.DBool(v)) case formatBinary: switch b[0] { case 0: d = parser.MakeDBool(false) case 1: d = parser.MakeDBool(true) default: return d, errors.Errorf("unsupported binary bool: %q", b) } default: return d, errors.Errorf("unsupported bool format code: %d", code) } case oid.T_int2: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) case formatBinary: if len(b) < 2 { return d, errors.Errorf("int2 requires 2 bytes for binary format") } i := int16(binary.BigEndian.Uint16(b)) d = parser.NewDInt(parser.DInt(i)) default: return d, errors.Errorf("unsupported int2 format code: %d", code) } case oid.T_int4: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) case formatBinary: if len(b) < 4 { return d, errors.Errorf("int4 requires 4 bytes for binary format") } i := int32(binary.BigEndian.Uint32(b)) d = parser.NewDInt(parser.DInt(i)) default: return d, errors.Errorf("unsupported int4 format code: %d", code) } case oid.T_int8: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) case formatBinary: if len(b) < 8 { return d, errors.Errorf("int8 requires 8 bytes for binary format") } i := int64(binary.BigEndian.Uint64(b)) d = parser.NewDInt(parser.DInt(i)) default: return d, errors.Errorf("unsupported int8 format code: %d", code) } case oid.T_float4: switch code { case formatText: f, err := strconv.ParseFloat(string(b), 64) if err != nil { return d, err } d = 
parser.NewDFloat(parser.DFloat(f)) case formatBinary: if len(b) < 4 { return d, errors.Errorf("float4 requires 4 bytes for binary format") } f := math.Float32frombits(binary.BigEndian.Uint32(b)) d = parser.NewDFloat(parser.DFloat(f)) default: return d, errors.Errorf("unsupported float4 format code: %d", code) } case oid.T_float8: switch code { case formatText: f, err := strconv.ParseFloat(string(b), 64) if err != nil { return d, err } d = parser.NewDFloat(parser.DFloat(f)) case formatBinary: if len(b) < 8 { return d, errors.Errorf("float8 requires 8 bytes for binary format") } f := math.Float64frombits(binary.BigEndian.Uint64(b)) d = parser.NewDFloat(parser.DFloat(f)) default: return d, errors.Errorf("unsupported float8 format code: %d", code) } case oid.T_numeric: switch code { case formatText: dd := &parser.DDecimal{} if _, ok := dd.SetString(string(b)); !ok { return nil, errors.Errorf("could not parse string %q as decimal", b) } d = dd case formatBinary: r := bytes.NewReader(b) alloc := struct { pgNum pgNumeric i16 int16 dd parser.DDecimal }{} for _, ptr := range []interface{}{ &alloc.pgNum.ndigits, &alloc.pgNum.weight, &alloc.pgNum.sign, &alloc.pgNum.dscale, } { if err := binary.Read(r, binary.BigEndian, ptr); err != nil { return d, err } } if alloc.pgNum.ndigits > 0 { decDigits := make([]byte, 0, alloc.pgNum.ndigits*pgDecDigits) nextDigit := func() error { if err := binary.Read(r, binary.BigEndian, &alloc.i16); err != nil { return err } numZeroes := pgDecDigits for i16 := alloc.i16; i16 > 0; i16 /= 10 { numZeroes-- } for ; numZeroes > 0; numZeroes-- { decDigits = append(decDigits, '0') } return nil } for i := int16(0); i < alloc.pgNum.ndigits-1; i++ { if err := nextDigit(); err != nil { return d, err } if alloc.i16 > 0 { decDigits = strconv.AppendUint(decDigits, uint64(alloc.i16), 10) } } // The last digit may contain padding, which we need to deal with. 
if err := nextDigit(); err != nil { return d, err } dscale := (alloc.pgNum.ndigits - (alloc.pgNum.weight + 1)) * pgDecDigits if overScale := dscale - alloc.pgNum.dscale; overScale > 0 { dscale -= overScale for i := int16(0); i < overScale; i++ { alloc.i16 /= 10 } } decDigits = strconv.AppendUint(decDigits, uint64(alloc.i16), 10) decString := string(decDigits) if _, ok := alloc.dd.UnscaledBig().SetString(decString, 10); !ok { return nil, errors.Errorf("could not parse string %q as decimal", decString) } alloc.dd.SetScale(inf.Scale(dscale)) } switch alloc.pgNum.sign { case pgNumericPos: case pgNumericNeg: alloc.dd.Neg(&alloc.dd.Dec) default: return d, errors.Errorf("unsupported numeric sign: %d", alloc.pgNum.sign) } d = &alloc.dd default: return d, errors.Errorf("unsupported numeric format code: %d", code) } case oid.T_text, oid.T_varchar: switch code { case formatText, formatBinary: d = parser.NewDString(string(b)) default: return d, errors.Errorf("unsupported text format code: %d", code) } case oid.T_bytea: switch code { case formatText: // http://www.postgresql.org/docs/current/static/datatype-binary.html#AEN5667 // Code cribbed from github.com/lib/pq. // We only support hex encoding. 
if len(b) >= 2 && bytes.Equal(b[:2], []byte("\\x")) { b = b[2:] // trim off leading "\\x" result := make([]byte, hex.DecodedLen(len(b))) _, err := hex.Decode(result, b) if err != nil { return d, err } d = parser.NewDBytes(parser.DBytes(result)) } else { return d, errors.Errorf("unsupported bytea encoding: %q", b) } case formatBinary: d = parser.NewDBytes(parser.DBytes(b)) default: return d, errors.Errorf("unsupported bytea format code: %d", code) } case oid.T_timestamp: switch code { case formatText: ts, err := parseTs(string(b)) if err != nil { return d, errors.Errorf("could not parse string %q as timestamp", b) } d = parser.MakeDTimestamp(ts, time.Microsecond) case formatBinary: if len(b) < 8 { return d, errors.Errorf("timestamp requires 8 bytes for binary format") } i := int64(binary.BigEndian.Uint64(b)) d = parser.MakeDTimestamp(pgBinaryToTime(i), time.Microsecond) default: return d, errors.Errorf("unsupported timestamp format code: %d", code) } case oid.T_timestamptz: switch code { case formatText: ts, err := parseTs(string(b)) if err != nil { return d, errors.Errorf("could not parse string %q as timestamp", b) } d = parser.MakeDTimestampTZ(ts, time.Microsecond) case formatBinary: if len(b) < 8 { return d, errors.Errorf("timestamptz requires 8 bytes for binary format") } i := int64(binary.BigEndian.Uint64(b)) d = parser.MakeDTimestampTZ(pgBinaryToTime(i), time.Microsecond) default: return d, errors.Errorf("unsupported timestamptz format code: %d", code) } case oid.T_date: switch code { case formatText: ts, err := parseTs(string(b)) if err != nil { res, err := parser.ParseDDate(string(b), time.UTC) if err != nil { return d, errors.Errorf("could not parse string %q as date", b) } d = res } else { daysSinceEpoch := ts.Unix() / secondsInDay d = parser.NewDDate(parser.DDate(daysSinceEpoch)) } case formatBinary: if len(b) < 4 { return d, errors.Errorf("date requires 4 bytes for binary format") } i := int32(binary.BigEndian.Uint32(b)) d = pgBinaryToDate(i) default: 
return d, errors.Errorf("unsupported date format code: %d", code) } case oid.T_interval: switch code { case formatText: d, err := parser.ParseDInterval(string(b)) if err != nil { return d, errors.Errorf("could not parse string %q as interval", b) } return d, nil default: return d, errors.Errorf("unsupported interval format code: %d", code) } default: return d, errors.Errorf("unsupported OID: %v", id) } return d, nil }
// UnmarshalColumnValue decodes the value from a key-value pair using the type // expected by the column. An error is returned if the value's type does not // match the column's type. func UnmarshalColumnValue( a *DatumAlloc, kind ColumnType_Kind, value *roachpb.Value, ) (parser.Datum, error) { if value == nil { return parser.DNull, nil } switch kind { case ColumnType_BOOL: v, err := value.GetBool() if err != nil { return nil, err } return parser.MakeDBool(parser.DBool(v)), nil case ColumnType_INT: v, err := value.GetInt() if err != nil { return nil, err } return a.NewDInt(parser.DInt(v)), nil case ColumnType_FLOAT: v, err := value.GetFloat() if err != nil { return nil, err } return a.NewDFloat(parser.DFloat(v)), nil case ColumnType_DECIMAL: v, err := value.GetDecimal() if err != nil { return nil, err } dd := a.NewDDecimal(parser.DDecimal{}) dd.Set(v) return dd, nil case ColumnType_STRING: v, err := value.GetBytes() if err != nil { return nil, err } return a.NewDString(parser.DString(v)), nil case ColumnType_BYTES: v, err := value.GetBytes() if err != nil { return nil, err } return a.NewDBytes(parser.DBytes(v)), nil case ColumnType_DATE: v, err := value.GetInt() if err != nil { return nil, err } return a.NewDDate(parser.DDate(v)), nil case ColumnType_TIMESTAMP: v, err := value.GetTime() if err != nil { return nil, err } return a.NewDTimestamp(parser.DTimestamp{Time: v}), nil case ColumnType_TIMESTAMPTZ: v, err := value.GetTime() if err != nil { return nil, err } return a.NewDTimestampTZ(parser.DTimestampTZ{Time: v}), nil case ColumnType_INTERVAL: d, err := value.GetDuration() if err != nil { return nil, err } return a.NewDInterval(parser.DInterval{Duration: d}), nil default: return nil, errors.Errorf("unsupported column type: %s", kind) } }
// ShowIndex returns all the indexes for a table. // Privileges: Any privilege on table. // Notes: postgres does not have a SHOW INDEXES statement. // mysql requires some privilege for any column. func (p *planner) ShowIndex(n *parser.ShowIndex) (planNode, error) { tn, err := n.Table.NormalizeWithDatabaseName(p.session.Database) if err != nil { return nil, err } desc, err := p.mustGetTableDesc(tn) if err != nil { return nil, err } if err := p.anyPrivilege(desc); err != nil { return nil, err } columns := ResultColumns{ {Name: "Table", Typ: parser.TypeString}, {Name: "Name", Typ: parser.TypeString}, {Name: "Unique", Typ: parser.TypeBool}, {Name: "Seq", Typ: parser.TypeInt}, {Name: "Column", Typ: parser.TypeString}, {Name: "Direction", Typ: parser.TypeString}, {Name: "Storing", Typ: parser.TypeBool}, } return &delayedNode{ p: p, name: "SHOW INDEX FROM " + tn.String(), columns: columns, constructor: func(p *planner) (planNode, error) { v := p.newContainerValuesNode(columns, 0) appendRow := func(index sqlbase.IndexDescriptor, colName string, sequence int, direction string, isStored bool) error { newRow := parser.DTuple{ parser.NewDString(tn.Table()), parser.NewDString(index.Name), parser.MakeDBool(parser.DBool(index.Unique)), parser.NewDInt(parser.DInt(sequence)), parser.NewDString(colName), parser.NewDString(direction), parser.MakeDBool(parser.DBool(isStored)), } _, err := v.rows.AddRow(newRow) return err } for _, index := range append([]sqlbase.IndexDescriptor{desc.PrimaryIndex}, desc.Indexes...) { sequence := 1 for i, col := range index.ColumnNames { if err := appendRow(index, col, sequence, index.ColumnDirections[i].String(), false); err != nil { v.rows.Close() return nil, err } sequence++ } for _, col := range index.StoreColumnNames { if err := appendRow(index, col, sequence, "N/A", true); err != nil { v.rows.Close() return nil, err } sequence++ } } return v, nil }, }, nil }
// RowIdxFn is a GenValueFn that returns the row number as a DInt func RowIdxFn(row int) parser.Datum { return parser.NewDInt(parser.DInt(row)) }