// golangFillQueryArguments populates the placeholder map with
// types and values from an array of Go values.
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func golangFillQueryArguments(pinfo *parser.PlaceholderInfo, args []interface{}) {
	pinfo.Clear()

	for i, arg := range args {
		// Placeholder names are 1-based strings: "1", "2", ...
		k := fmt.Sprint(i + 1)
		if arg == nil {
			pinfo.SetValue(k, parser.DNull)
			continue
		}

		// A type switch to handle a few explicit types with special semantics:
		// - Datums are passed along as is.
		// - Time datatypes get special representation in the database.
		var d parser.Datum
		switch t := arg.(type) {
		case parser.Datum:
			d = t
		case time.Time:
			// Timestamps are stored at microsecond precision.
			d = parser.MakeDTimestamp(t, time.Microsecond)
		case time.Duration:
			d = &parser.DInterval{Duration: duration.Duration{Nanos: t.Nanoseconds()}}
		case *inf.Dec:
			dd := &parser.DDecimal{}
			dd.Set(t)
			d = dd
		}
		if d == nil {
			// Handle all types which have an underlying type that can be stored in the
			// database.
			// Note: if this reflection becomes a performance concern in the future,
			// commonly used types could be added explicitly into the type switch above
			// for a performance gain.
			val := reflect.ValueOf(arg)
			switch val.Kind() {
			case reflect.Bool:
				d = parser.MakeDBool(parser.DBool(val.Bool()))
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				d = parser.NewDInt(parser.DInt(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				// NOTE(review): a uint64 larger than math.MaxInt64 wraps to a
				// negative DInt here — confirm callers never pass such values.
				d = parser.NewDInt(parser.DInt(val.Uint()))
			case reflect.Float32, reflect.Float64:
				d = parser.NewDFloat(parser.DFloat(val.Float()))
			case reflect.String:
				d = parser.NewDString(val.String())
			case reflect.Slice:
				// Handle byte slices.
				if val.Type().Elem().Kind() == reflect.Uint8 {
					d = parser.NewDBytes(parser.DBytes(val.Bytes()))
				}
			}
			if d == nil {
				// Non-byte slices (and any other kind) are unsupported:
				// programmer error, so panic rather than return an error.
				panic(fmt.Sprintf("unexpected type %T", arg))
			}
		}
		pinfo.SetValue(k, d)
	}
}
// Events is an endpoint that returns the latest event log entries, with the following
// optional URL parameters:
//
// type=STRING returns events with this type (e.g. "create_table")
// targetID=INT returns events for that have this targetID
func (s *adminServer) Events(c context.Context, req *EventsRequest) (*EventsResponse, error) {
	var session sql.Session
	user := s.getUser(req)

	// Execute the query. The "$" tokens are placeholders filled from the extra
	// Append arguments (presumably positional — see sqlQuery.Append; confirm).
	q := &sqlQuery{}
	q.Append("SELECT timestamp, eventType, targetID, reportingID, info, uniqueID ")
	q.Append("FROM system.eventlog ")
	q.Append("WHERE true ") // This simplifies the WHERE clause logic below.
	if len(req.Type) > 0 {
		q.Append("AND eventType = $ ", parser.DString(req.Type))
	}
	if req.TargetId > 0 {
		q.Append("AND targetID = $ ", parser.DInt(req.TargetId))
	}
	q.Append("ORDER BY timestamp DESC ")
	q.Append("LIMIT $", parser.DInt(apiEventLimit))
	if len(q.Errors()) > 0 {
		return nil, s.serverErrors(q.Errors())
	}
	r := s.sqlExecutor.ExecuteStatements(user, &session, q.String(), q.Params())
	// Exactly one statement was issued, so exactly one result is expected.
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return nil, s.serverError(err)
	}

	// Marshal response: one proto Event per result row, scanning columns in
	// the same order as the SELECT list above.
	var resp EventsResponse
	scanner := newResultScanner(r.ResultList[0].Columns)
	for _, row := range r.ResultList[0].Rows {
		var event EventsResponse_Event
		var ts time.Time
		if err := scanner.ScanIndex(row, 0, &ts); err != nil {
			return nil, err
		}
		// Split the wall time into whole seconds and remaining nanoseconds.
		nanos := ts.UnixNano()
		event.Timestamp = &EventsResponse_Event_Timestamp{Sec: nanos / 1e9, Nsec: uint32(nanos % 1e9)}
		if err := scanner.ScanIndex(row, 1, &event.EventType); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 2, &event.TargetID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 3, &event.ReportingID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 4, &event.Info); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 5, &event.UniqueID); err != nil {
			return nil, err
		}
		resp.Events = append(resp.Events, &event)
	}
	return &resp, nil
}
// Events is an endpoint that returns the latest event log entries, with the following
// optional URL parameters:
//
// type=STRING returns events with this type (e.g. "create_table")
// targetID=INT returns events for that have this targetID
func (s *adminServer) Events(ctx context.Context, req *serverpb.EventsRequest) (*serverpb.EventsResponse, error) {
	// Run the query as the requesting user in a fresh session.
	args := sql.SessionArgs{User: s.getUser(req)}
	session := sql.NewSession(ctx, args, s.server.sqlExecutor, nil)

	// Execute the query. The "$" tokens are placeholders filled from the extra
	// Append arguments (presumably positional — see sqlQuery.Append; confirm).
	q := makeSQLQuery()
	q.Append("SELECT timestamp, eventType, targetID, reportingID, info, uniqueID ")
	q.Append("FROM system.eventlog ")
	q.Append("WHERE true ") // This simplifies the WHERE clause logic below.
	if len(req.Type) > 0 {
		q.Append("AND eventType = $ ", parser.NewDString(req.Type))
	}
	if req.TargetId > 0 {
		q.Append("AND targetID = $ ", parser.NewDInt(parser.DInt(req.TargetId)))
	}
	q.Append("ORDER BY timestamp DESC ")
	q.Append("LIMIT $", parser.NewDInt(parser.DInt(apiEventLimit)))
	if len(q.Errors()) > 0 {
		return nil, s.serverErrors(q.Errors())
	}
	r := s.server.sqlExecutor.ExecuteStatements(session, q.String(), q.QueryArguments())
	// Exactly one statement was issued, so exactly one result is expected.
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return nil, s.serverError(err)
	}

	// Marshal response: one Event per result row, scanning columns in the same
	// order as the SELECT list above.
	var resp serverpb.EventsResponse
	scanner := makeResultScanner(r.ResultList[0].Columns)
	for _, row := range r.ResultList[0].Rows {
		var event serverpb.EventsResponse_Event
		var ts time.Time
		if err := scanner.ScanIndex(row, 0, &ts); err != nil {
			return nil, err
		}
		// Split the wall time into whole seconds and remaining nanoseconds.
		event.Timestamp = serverpb.EventsResponse_Event_Timestamp{Sec: ts.Unix(), Nsec: uint32(ts.Nanosecond())}
		if err := scanner.ScanIndex(row, 1, &event.EventType); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 2, &event.TargetID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 3, &event.ReportingID); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 4, &event.Info); err != nil {
			return nil, err
		}
		if err := scanner.ScanIndex(row, 5, &event.UniqueID); err != nil {
			return nil, err
		}
		resp.Events = append(resp.Events, event)
	}
	return &resp, nil
}
// Arg implements the parser.Args interface. It resolves the positional
// placeholder `name` against the stored Go values, converting the matching
// value to a parser.Datum. Returns (nil, false) when the name is not a valid
// in-range positional argument.
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func (gp golangParameters) Arg(name string) (parser.Datum, bool) {
	i, err := processPositionalArgument(name)
	if err != nil {
		return nil, false
	}
	// Positional arguments are 1-based.
	if i < 1 || int(i) > len(gp) {
		return nil, false
	}
	arg := gp[i-1]
	if arg == nil {
		return parser.DNull, true
	}

	// A type switch to handle a few explicit types with special semantics.
	switch t := arg.(type) {
	// Datums are passed along as is.
	case parser.Datum:
		return t, true
	// Time datatypes get special representation in the database.
	case time.Time:
		return parser.DTimestamp{Time: t}, true
	case time.Duration:
		return parser.DInterval{Duration: t}, true
	case *inf.Dec:
		dd := &parser.DDecimal{}
		dd.Set(t)
		return dd, true
	}

	// Handle all types which have an underlying type that can be stored in the
	// database.
	// Note: if this reflection becomes a performance concern in the future,
	// commonly used types could be added explicitly into the type switch above
	// for a performance gain.
	val := reflect.ValueOf(arg)
	switch val.Kind() {
	case reflect.Bool:
		return parser.DBool(val.Bool()), true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return parser.DInt(val.Int()), true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		// NOTE(review): a uint64 larger than math.MaxInt64 wraps to a
		// negative DInt here — confirm callers never pass such values.
		return parser.DInt(val.Uint()), true
	case reflect.Float32, reflect.Float64:
		return parser.DFloat(val.Float()), true
	case reflect.String:
		return parser.DString(val.String()), true
	case reflect.Slice:
		// Handle byte slices.
		if val.Type().Elem().Kind() == reflect.Uint8 {
			return parser.DBytes(val.Bytes()), true
		}
	}
	// Any other kind is a programmer error, so panic rather than return false.
	panic(fmt.Sprintf("unexpected type %T", arg))
}
// decodeTableKey decodes one key-encoded value from the front of key, using
// valType only as a type tag to select the decoder. It returns the decoded
// datum, the remaining undecoded bytes of the key, and any decode error.
// A NULL marker decodes to DNull regardless of the expected type.
func decodeTableKey(valType parser.Datum, key []byte) (parser.Datum, []byte, error) {
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		return parser.DNull, key, nil
	}
	switch valType.(type) {
	case parser.DBool:
		// Booleans are key-encoded as varints; any non-zero value is true.
		rkey, i, err := encoding.DecodeVarint(key)
		return parser.DBool(i != 0), rkey, err
	case parser.DInt:
		rkey, i, err := encoding.DecodeVarint(key)
		return parser.DInt(i), rkey, err
	case parser.DFloat:
		rkey, f, err := encoding.DecodeFloat(key, nil)
		return parser.DFloat(f), rkey, err
	case parser.DString:
		rkey, r, err := encoding.DecodeString(key, nil)
		return parser.DString(r), rkey, err
	case parser.DBytes:
		// Bytes share the string key encoding.
		rkey, r, err := encoding.DecodeString(key, nil)
		return parser.DBytes(r), rkey, err
	case parser.DDate:
		// Dates are key-encoded as times here.
		rkey, t, err := encoding.DecodeTime(key)
		return parser.DDate{Time: t}, rkey, err
	case parser.DTimestamp:
		rkey, t, err := encoding.DecodeTime(key)
		return parser.DTimestamp{Time: t}, rkey, err
	case parser.DInterval:
		// Intervals are key-encoded as their nanosecond count.
		rkey, d, err := encoding.DecodeVarint(key)
		return parser.DInterval{Duration: time.Duration(d)}, rkey, err
	default:
		return nil, nil, util.Errorf("TODO(pmattis): decoded index key: %s", valType.Type())
	}
}
func makeIndexKeyVals(desc *structured.TableDescriptor, index structured.IndexDescriptor) ([]parser.Datum, error) { vals := make([]parser.Datum, len(index.ColumnIDs)) for i, id := range index.ColumnIDs { col, err := desc.FindColumnByID(id) if err != nil { return nil, err } switch col.Type.Kind { case structured.ColumnType_BIT, structured.ColumnType_INT: vals[i] = parser.DInt(0) case structured.ColumnType_FLOAT: vals[i] = parser.DFloat(0) case structured.ColumnType_CHAR, structured.ColumnType_TEXT, structured.ColumnType_BLOB: vals[i] = parser.DString("") default: return nil, util.Errorf("TODO(pmattis): decoded index key: %s", col.Type.Kind) } } if !index.Unique { // Non-unique columns are suffixed by the primary index key. pkVals, err := makeIndexKeyVals(desc, desc.PrimaryIndex) if err != nil { return nil, err } vals = append(vals, pkVals...) } return vals, nil }
// datumFromProto converts a wire-format driver.Datum into the corresponding
// parser.Datum. A nil payload maps to DNull. Malformed decimal strings and
// unknown payload types panic, since they indicate a protocol-level bug
// rather than a user error.
func datumFromProto(d driver.Datum) parser.Datum {
	arg := d.Payload
	if arg == nil {
		return parser.DNull
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal)
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal)
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal)
	case *driver.Datum_DecimalVal:
		// Decimals travel as strings; re-parse into a DDecimal.
		dd := &parser.DDecimal{}
		if _, ok := dd.SetString(t.DecimalVal); !ok {
			panic(fmt.Sprintf("could not parse string %q as decimal", t.DecimalVal))
		}
		return dd
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal)
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal)
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal)
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}
	case *driver.Datum_IntervalVal:
		// Intervals travel as a nanosecond count.
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
func decodeTableKey(valType parser.Datum, key []byte) (parser.Datum, []byte, error) { var isNull bool if key, isNull = encoding.DecodeIfNull(key); isNull { return parser.DNull, key, nil } switch valType.(type) { case parser.DBool: var i int64 key, i = encoding.DecodeVarint(key) return parser.DBool(i != 0), key, nil case parser.DInt: var i int64 key, i = encoding.DecodeVarint(key) return parser.DInt(i), key, nil case parser.DFloat: var f float64 key, f = encoding.DecodeFloat(key, nil) return parser.DFloat(f), key, nil case parser.DString: var r string key, r = encoding.DecodeString(key, nil) return parser.DString(r), key, nil default: return nil, nil, util.Errorf("TODO(pmattis): decoded index key: %s", valType.Type()) } }
// datumFromProto converts a wire-format driver.Datum into the corresponding
// parser.Datum. A nil payload maps to DNull. Malformed decimal strings and
// unknown payload types panic, since they indicate a protocol-level bug
// rather than a user error.
func datumFromProto(d driver.Datum) parser.Datum {
	arg := d.Payload
	if arg == nil {
		return parser.DNull
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal)
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal)
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal)
	case *driver.Datum_DecimalVal:
		// Decimals travel as strings; re-parse into a decimal value.
		dec, err := decimal.NewFromString(t.DecimalVal)
		if err != nil {
			panic(fmt.Sprintf("could not parse decimal: %v", err))
		}
		return parser.DDecimal{Decimal: dec}
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal)
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal)
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal)
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}
	case *driver.Datum_IntervalVal:
		// Intervals travel as a nanosecond count.
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
func (n *scanNode) unmarshalValue(kv client.KeyValue) (parser.Datum, bool) { kind, ok := n.colKind[n.colID] if !ok { n.err = fmt.Errorf("column-id \"%d\" does not exist", n.colID) return nil, false } if kv.Exists() { switch kind { case ColumnType_INT: return parser.DInt(kv.ValueInt()), true case ColumnType_BOOL: return parser.DBool(kv.ValueInt() != 0), true case ColumnType_FLOAT: return parser.DFloat(math.Float64frombits(uint64(kv.ValueInt()))), true case ColumnType_STRING, ColumnType_BYTES: return parser.DString(kv.ValueBytes()), true case ColumnType_DATE: var t time.Time if err := t.UnmarshalBinary(kv.ValueBytes()); err != nil { return nil, false } return parser.DDate{Time: t}, true case ColumnType_TIMESTAMP: var t time.Time if err := t.UnmarshalBinary(kv.ValueBytes()); err != nil { return nil, false } return parser.DTimestamp{Time: t}, true case ColumnType_INTERVAL: return parser.DInterval{Duration: time.Duration(kv.ValueInt())}, true } } return parser.DNull, true }
// queryNamespaceID queries for the ID of the namespace with the given name and
// parent ID. It returns an error if the query fails or no matching namespace
// row exists.
func (s *adminServer) queryNamespaceID(
	session *sql.Session, parentID sqlbase.ID, name string,
) (sqlbase.ID, error) {
	const query = `SELECT id FROM system.namespace WHERE parentID = $1 AND name = $2`
	params := parser.NewPlaceholderInfo()
	params.SetValue(`1`, parser.NewDInt(parser.DInt(parentID)))
	params.SetValue(`2`, parser.NewDString(name))
	r := s.server.sqlExecutor.ExecuteStatements(session, query, params)
	// Exactly one statement was issued, so exactly one result is expected.
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return 0, err
	}

	result := r.ResultList[0]
	if len(result.Rows) == 0 {
		return 0, errors.Errorf("namespace %s with ParentID %d not found", name, parentID)
	}

	// Scan the single `id` column of the first row.
	var id int64
	scanner := resultScanner{}
	err := scanner.ScanIndex(result.Rows[0], 0, &id)
	if err != nil {
		return 0, err
	}

	return sqlbase.ID(id), nil
}
// queryZone retrieves the specific ZoneConfig associated with the supplied ID,
// if it exists. The second return value reports whether a zone row was found;
// a missing row is not an error.
func (s *adminServer) queryZone(
	session *sql.Session, id sqlbase.ID,
) (config.ZoneConfig, bool, error) {
	const query = `SELECT config FROM system.zones WHERE id = $1`
	params := parser.NewPlaceholderInfo()
	params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
	r := s.server.sqlExecutor.ExecuteStatements(session, query, params)
	// Exactly one statement was issued, so exactly one result is expected.
	if err := s.checkQueryResults(r.ResultList, 1); err != nil {
		return config.ZoneConfig{}, false, err
	}

	result := r.ResultList[0]
	if len(result.Rows) == 0 {
		// No zone config for this ID: report "not found" without error.
		return config.ZoneConfig{}, false, nil
	}

	// The config column holds the protobuf-marshaled ZoneConfig bytes.
	var zoneBytes []byte
	scanner := resultScanner{}
	err := scanner.ScanIndex(result.Rows[0], 0, &zoneBytes)
	if err != nil {
		return config.ZoneConfig{}, false, err
	}

	var zone config.ZoneConfig
	if err := zone.Unmarshal(zoneBytes); err != nil {
		return config.ZoneConfig{}, false, err
	}
	return zone, true, nil
}
func (vals *debugValues) AsRow() parser.DTuple { keyVal := parser.DNull if vals.key != "" { keyVal = parser.DString(vals.key) } // The "output" value is NULL for partial rows, or a DBool indicating if the row passed the // filtering. outputVal := parser.DNull switch vals.output { case debugValueFiltered: outputVal = parser.DBool(false) case debugValueRow: outputVal = parser.DBool(true) } return parser.DTuple{ parser.DInt(vals.rowIdx), keyVal, parser.DString(vals.value), outputVal, } }
// explainDebug fills in four extra debugging values in the current row:
// - the row index,
// - the key,
// - a value string,
// - a true bool if we are at the end of the row, or a NULL otherwise.
func (n *scanNode) explainDebug(endOfRow bool) {
	// Grow the row by four slots the first time through; afterwards the row is
	// already wider than visibleCols and the tail is reused.
	if len(n.row) == len(n.visibleCols) {
		n.row = append(n.row, nil, nil, nil, nil)
	}
	debugVals := n.row[len(n.row)-4:]
	debugVals[0] = parser.DInt(n.rowIndex)
	debugVals[1] = parser.DString(n.prettyKey())
	if n.implicitVals != nil {
		debugVals[2] = parser.DString(prettyDatums(n.implicitVals))
	} else {
		// This conversion to DString is odd. `n.explainValue` is already a
		// `Datum`, but logic_test currently expects EXPLAIN DEBUG output
		// to come out formatted using `encodeSQLString`. This is not
		// consistent across all printing of strings in logic_test, though.
		// TODO(tamird/pmattis): figure out a consistent story for string
		// printing in logic_test.
		debugVals[2] = parser.DString(n.explainValue.String())
	}
	if endOfRow {
		debugVals[3] = parser.DBool(true)
		// Only completed rows advance the row counter.
		n.rowIndex++
	} else {
		debugVals[3] = parser.DNull
	}
	n.explainValue = nil
}
// getQVal returns the cached qvalue expression for the given column, creating
// and memoizing it (keyed by column ID in n.qvals) on first use.
func (n *scanNode) getQVal(col ColumnDescriptor) parser.Expr {
	if n.qvals == nil {
		n.qvals = make(qvalMap)
	}
	qval := n.qvals[col.ID]
	if qval == nil {
		qval = &qvalue{col: col}
		// We initialize the qvalue expression to a datum of the type matching the
		// column. This allows type analysis to be performed on the expression
		// before we start retrieving rows.
		//
		// TODO(pmattis): Nullable columns can have NULL values. The type analysis
		// needs to take that into consideration, but how to surface that info?
		switch col.Type.Kind {
		case ColumnType_BIT, ColumnType_INT:
			qval.datum = parser.DInt(0)
		case ColumnType_BOOL:
			qval.datum = parser.DBool(true)
		case ColumnType_FLOAT:
			qval.datum = parser.DFloat(0)
		case ColumnType_CHAR, ColumnType_TEXT, ColumnType_BLOB:
			qval.datum = parser.DString("")
		default:
			// An unmapped column kind is a programmer error.
			panic(fmt.Sprintf("unsupported column type: %s", col.Type.Kind))
		}
		n.qvals[col.ID] = qval
	}
	return qval
}
// Arg implements the Args interface. It resolves numbered (1-based)
// positional parameters against the stored values, returning (nil, false)
// for names that are not valid in-range numbers. Unknown payload types
// panic, since they indicate a protocol-level bug.
func (p parameters) Arg(name string) (parser.Datum, bool) {
	if !unicode.IsDigit(rune(name[0])) {
		// TODO(pmattis): Add support for named parameters (vs the numbered
		// parameter support below).
		return nil, false
	}
	i, err := strconv.ParseInt(name, 10, 0)
	if err != nil {
		return nil, false
	}
	if i < 1 || int(i) > len(p) {
		return nil, false
	}
	arg := p[i-1].GetValue()
	if arg == nil {
		return parser.DNull, true
	}
	switch t := arg.(type) {
	case *bool:
		return parser.DBool(*t), true
	case *int64:
		return parser.DInt(*t), true
	case *float64:
		return parser.DFloat(*t), true
	case []byte:
		// NOTE(review): byte payloads are surfaced as DString here — confirm
		// this is intentional (there is no bytes datum in this code path).
		return parser.DString(t), true
	case *string:
		return parser.DString(*t), true
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
// Arg implements the parser.Args interface. It resolves a positional
// placeholder against the wire-format driver.Datum payloads, converting the
// matching payload to a parser.Datum. Returns (nil, false) when the name is
// not a valid in-range positional argument; panics on unknown payload types,
// which indicate a protocol-level bug.
func (p parameters) Arg(name string) (parser.Datum, bool) {
	i, err := processPositionalArgument(name)
	if err != nil {
		return nil, false
	}
	// Positional arguments are 1-based.
	if i < 1 || int(i) > len(p) {
		return nil, false
	}
	arg := p[i-1].Payload
	if arg == nil {
		return parser.DNull, true
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal), true
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal), true
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal), true
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal), true
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal), true
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal), true
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}, true
	case *driver.Datum_IntervalVal:
		// Intervals travel as a nanosecond count.
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}, true
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
// ShowIndex returns all the indexes for a table.
// Privileges: None.
//   Notes: postgres does not have a SHOW INDEX statement.
//          mysql requires some privilege for any column.
func (p *planner) ShowIndex(n *parser.ShowIndex) (planNode, error) {
	desc, err := p.getTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	v := &valuesNode{columns: []string{"Table", "Name", "Unique", "Seq", "Column", "Storing"}}

	name := n.Table.Table()
	// Emit one row per indexed column, covering the primary index followed by
	// all secondary indexes.
	for _, index := range append([]IndexDescriptor{desc.PrimaryIndex}, desc.Indexes...) {
		// j is the 1-based column sequence number within the index, spanning
		// both key columns and stored columns.
		j := 1
		// i == 0 iterates the key columns, i == 1 the stored columns; the
		// "Storing" output column is derived from which group we are in.
		for i, cols := range [][]string{index.ColumnNames, index.StoreColumnNames} {
			for _, col := range cols {
				v.rows = append(v.rows, []parser.Datum{
					parser.DString(name),
					parser.DString(index.Name),
					parser.DBool(index.Unique),
					parser.DInt(j),
					parser.DString(col),
					parser.DBool(i == 1),
				})
				j++
			}
		}
	}
	return v, nil
}
// checkEquivExpr evaluates expressions a and b over a range of interesting
// datum values (0..3 and NULL, substituted into every qval) and returns an
// error unless the two agree on truthiness for each value. It does not
// require identical results — only that both are true, or both are not true
// (false or NULL).
func checkEquivExpr(a, b parser.Expr, qvals qvalMap) error {
	// The expressions above only use the values 1 and 2. Verify that the
	// simplified expressions evaluate to the same value as the original
	// expression for interesting values.
	zero := parser.DInt(0)
	for _, v := range []parser.Datum{zero, zero + 1, zero + 2, zero + 3, parser.DNull} {
		// Substitute the candidate value into every query variable.
		for _, q := range qvals {
			q.datum = v
		}
		da, err := a.Eval(parser.EvalContext{})
		if err != nil {
			return fmt.Errorf("%s: %v", a, err)
		}
		db, err := b.Eval(parser.EvalContext{})
		if err != nil {
			return fmt.Errorf("%s: %v", b, err)
		}
		// This is tricky: we don't require the expressions to produce identical
		// results, but to either both return true or both return not true (either
		// false or NULL).
		if (da == parser.DBool(true)) != (db == parser.DBool(true)) {
			return fmt.Errorf("%s: %s: expected %s, but found %s", a, v, da, db)
		}
	}
	return nil
}
// explainDebug populates the current debug row with (row index, pretty key,
// value string, output indicator). The fourth column is a DBool (outputRow)
// when this KV pair completes a row, or NULL for a partial row.
func (n *scanNode) explainDebug(endOfRow, outputRow bool) {
	if n.row == nil {
		n.row = make([]parser.Datum, len(n.columns))
	}
	n.row[0] = parser.DInt(n.rowIndex)
	n.row[1] = parser.DString(n.prettyKey())
	if n.implicitVals != nil {
		n.row[2] = parser.DString(prettyKeyVals(n.implicitVals))
	} else {
		// This conversion to DString is odd. `n.explainValue` is already a
		// `Datum`, but logic_test currently expects EXPLAIN DEBUG output
		// to come out formatted using `encodeSQLString`. This is not
		// consistent across all printing of strings in logic_test, though.
		// TODO(tamird/pmattis): figure out a consistent story for string
		// printing in logic_test.
		n.row[2] = parser.DString(n.explainValue.String())
	}
	if endOfRow {
		n.row[3] = parser.DBool(outputRow)
		// Only completed rows advance the row counter.
		n.rowIndex++
	} else {
		n.row[3] = parser.DNull
	}
	n.explainValue = nil
}
// decodeKeyVals decodes the values that are part of the key. ValTypes is a
// slice returned from makeKeyVals. The decoded values are stored in the vals
// parameter while the valTypes parameter is unmodified. Note that len(vals) >=
// len(valTypes). The types of the decoded values will match the corresponding
// entry in the valTypes parameter with the exception that a value might also
// be parser.DNull. The remaining bytes in the key after decoding the values
// are returned.
func decodeKeyVals(valTypes, vals []parser.Datum, key []byte) ([]byte, error) {
	for j := range valTypes {
		// A NULL marker decodes to DNull regardless of the expected type.
		var isNull bool
		if key, isNull = encoding.DecodeIfNull(key); isNull {
			vals[j] = parser.DNull
			continue
		}
		switch valTypes[j].(type) {
		case parser.DInt:
			var i int64
			key, i = encoding.DecodeVarint(key)
			vals[j] = parser.DInt(i)
		case parser.DFloat:
			var f float64
			key, f = encoding.DecodeFloat(key, nil)
			vals[j] = parser.DFloat(f)
		case parser.DString:
			var r string
			key, r = encoding.DecodeString(key, nil)
			vals[j] = parser.DString(r)
		default:
			return nil, util.Errorf("TODO(pmattis): decoded index key: %s", valTypes[j].Type())
		}
	}
	return key, nil
}
// DecodeTableValue decodes a value encoded by EncodeTableValue. valType is
// used only as a type tag to select the decoder; decoded datums are chunk-
// allocated through the DatumAlloc. It returns the datum, the remaining
// undecoded bytes, and any decode error.
func DecodeTableValue(a *DatumAlloc, valType parser.Datum, b []byte) (parser.Datum, []byte, error) {
	// TODO(dan): Merge this and DecodeTableKey.
	if len(b) == 0 {
		return nil, nil, util.Errorf("empty slice")
	}
	// The first byte is the value-type tag; NULL short-circuits.
	if roachpb.ValueType(b[0]) == roachpb.ValueType_NULL {
		return parser.DNull, b[1:], nil
	}
	var err error
	switch valType.(type) {
	case *parser.DBool:
		var i int64
		b, i, err = roachpb.DecodeIntValue(b)
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(i != 0)), b, err
	case *parser.DInt:
		var i int64
		b, i, err = roachpb.DecodeIntValue(b)
		return a.NewDInt(parser.DInt(i)), b, err
	case *parser.DFloat:
		var f float64
		b, f, err = roachpb.DecodeFloatValue(b)
		return a.NewDFloat(parser.DFloat(f)), b, err
	case *parser.DDecimal:
		var d *inf.Dec
		b, d, err = roachpb.DecodeDecimalValue(b)
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, b, err
	case *parser.DString:
		var data []byte
		b, data, err = roachpb.DecodeBytesValue(b)
		return a.NewDString(parser.DString(data)), b, err
	case *parser.DBytes:
		var data []byte
		b, data, err = roachpb.DecodeBytesValue(b)
		return a.NewDBytes(parser.DBytes(data)), b, err
	case *parser.DDate:
		// Dates are value-encoded as integers here.
		var i int64
		b, i, err = roachpb.DecodeIntValue(b)
		return a.NewDDate(parser.DDate(i)), b, err
	case *parser.DTimestamp:
		var t time.Time
		b, t, err = roachpb.DecodeTimeValue(b)
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), b, err
	case *parser.DTimestampTZ:
		var t time.Time
		b, t, err = roachpb.DecodeTimeValue(b)
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), b, err
	case *parser.DInterval:
		var d duration.Duration
		b, d, err = roachpb.DecodeDurationValue(b)
		return a.NewDInterval(parser.DInterval{Duration: d}), b, err
	default:
		return nil, nil, util.Errorf("TODO(pmattis): decoded index value: %s", valType.Type())
	}
}
// unmarshalColumnValue decodes the value from a key-value pair using the type
// expected by the column. An error is returned if the value's type does not
// match the column's type. A nil value decodes to DNull.
func unmarshalColumnValue(kind ColumnType_Kind, value *proto.Value) (parser.Datum, error) {
	if value == nil {
		return parser.DNull, nil
	}

	switch kind {
	case ColumnType_BOOL:
		// Booleans are stored as integers; any non-zero value is true.
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.DBool(v != 0), nil
	case ColumnType_INT:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.DInt(v), nil
	case ColumnType_FLOAT:
		v, err := value.GetFloat()
		if err != nil {
			return nil, err
		}
		return parser.DFloat(v), nil
	case ColumnType_STRING:
		v, err := value.GetBytesChecked()
		if err != nil {
			return nil, err
		}
		return parser.DString(v), nil
	case ColumnType_BYTES:
		v, err := value.GetBytesChecked()
		if err != nil {
			return nil, err
		}
		return parser.DBytes(v), nil
	case ColumnType_DATE:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return parser.DDate{Time: v}, nil
	case ColumnType_TIMESTAMP:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return parser.DTimestamp{Time: v}, nil
	case ColumnType_INTERVAL:
		// Intervals are stored as a nanosecond count.
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.DInterval{Duration: time.Duration(v)}, nil
	default:
		return nil, util.Errorf("unsupported column type: %s", kind)
	}
}
// Next advances the EXPLAIN TRACE output by one row. It drives the wrapped
// plan forward, harvesting trace log entries from the transaction's collected
// spans into n.rows, and reports whether a row is available. Rows are buffered
// one ahead (the loop fills until len(n.rows) > 1) so that the last row can be
// emitted after the underlying plan is exhausted.
func (n *explainTraceNode) Next() bool {
	first := n.rows == nil
	if first {
		n.rows = []parser.DTuple{}
	}
	for !n.exhausted && len(n.rows) <= 1 {
		var vals debugValues
		if !n.plan.Next() {
			// Underlying plan is done: log any error, close out the trace, and
			// detach it from the transaction.
			n.exhausted = true
			if pErr := n.PErr(); pErr != nil {
				n.txn.Trace.LogEvent(pErr.GoError().Error())
			}
			n.txn.Trace.LogEvent("tracing completed")
			n.txn.Trace.Finish()
			n.txn.Trace = nil
		} else {
			vals = n.plan.DebugValues()
		}
		var basePos int
		if len(n.txn.CollectedSpans) == 0 {
			if !n.exhausted {
				// No new spans were collected for this step; synthesize one
				// carrying the previous timestamp so a row is still emitted.
				n.txn.CollectedSpans = append(n.txn.CollectedSpans, basictracer.RawSpan{
					Context: basictracer.Context{},
					Logs:    []opentracing.LogData{{Timestamp: n.lastTS}},
				})
			}
			basePos = n.lastPos + 1
		}
		// Emit one row per collected log entry, prefixed with timing/position
		// columns and suffixed with the plan's debug values.
		for _, sp := range n.txn.CollectedSpans {
			for i, entry := range sp.Logs {
				// The second column is the delta from the previous entry's
				// timestamp; empty for the first entry of a batch.
				var timeVal string
				if i > 0 {
					timeVal = time.Duration(entry.Timestamp.Sub(n.lastTS)).String()
				}

				n.rows = append(n.rows, append(parser.DTuple{
					parser.DTimestamp{Time: entry.Timestamp},
					parser.DString(timeVal),
					parser.DInt(basePos + i),
					parser.DString(sp.Operation),
					parser.DString(entry.Event),
				}, vals.AsRow()...))
				n.lastTS, n.lastPos = entry.Timestamp, i
			}
		}
		n.txn.CollectedSpans = nil
	}

	if first {
		return len(n.rows) > 0
	}
	if len(n.rows) <= 1 {
		return false
	}
	// Pop the row that was returned on the previous call.
	n.rows = n.rows[1:]
	return true
}
// DecodeTableValue decodes a value encoded by EncodeTableValue. valType is
// used only as a type tag to select the decoder; decoded datums are chunk-
// allocated through the DatumAlloc. It returns the datum, the remaining
// undecoded bytes, and any decode error.
func DecodeTableValue(a *DatumAlloc, valType parser.Datum, b []byte) (parser.Datum, []byte, error) {
	// Peek at the value tag; a NULL tag short-circuits past the tag bytes.
	_, dataOffset, _, typ, err := encoding.DecodeValueTag(b)
	if err != nil {
		return nil, b, err
	}
	if typ == encoding.Null {
		return parser.DNull, b[dataOffset:], nil
	}
	switch valType.(type) {
	case *parser.DBool:
		var x bool
		b, x, err = encoding.DecodeBoolValue(b)
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(x)), b, err
	case *parser.DInt:
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDInt(parser.DInt(i)), b, err
	case *parser.DFloat:
		var f float64
		b, f, err = encoding.DecodeFloatValue(b)
		return a.NewDFloat(parser.DFloat(f)), b, err
	case *parser.DDecimal:
		var d *inf.Dec
		b, d, err = encoding.DecodeDecimalValue(b)
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, b, err
	case *parser.DString:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDString(parser.DString(data)), b, err
	case *parser.DBytes:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDBytes(parser.DBytes(data)), b, err
	case *parser.DDate:
		// Dates are value-encoded as integers here.
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDDate(parser.DDate(i)), b, err
	case *parser.DTimestamp:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), b, err
	case *parser.DTimestampTZ:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), b, err
	case *parser.DInterval:
		var d duration.Duration
		b, d, err = encoding.DecodeDurationValue(b)
		return a.NewDInterval(parser.DInterval{Duration: d}), b, err
	default:
		return nil, nil, errors.Errorf("TODO(pmattis): decoded index value: %s", valType.Type())
	}
}
func makeIntTestDatum(count int) []parser.Datum { rng, _ := randutil.NewPseudoRand() vals := make([]parser.Datum, count) for i := range vals { vals[i] = parser.DInt(rng.Int63()) } return vals }
// ShowIndex returns all the indexes for a table.
// Privileges: Any privilege on table.
//   Notes: postgres does not have a SHOW INDEXES statement.
//          mysql requires some privilege for any column.
func (p *planner) ShowIndex(n *parser.ShowIndex) (planNode, error) {
	tn, err := n.Table.NormalizeWithDatabaseName(p.session.Database)
	if err != nil {
		return nil, err
	}

	desc, err := p.mustGetTableDesc(tn)
	if err != nil {
		return nil, err
	}
	if err := p.anyPrivilege(desc); err != nil {
		return nil, err
	}

	v := &valuesNode{
		columns: []ResultColumn{
			{Name: "Table", Typ: parser.TypeString},
			{Name: "Name", Typ: parser.TypeString},
			{Name: "Unique", Typ: parser.TypeBool},
			{Name: "Seq", Typ: parser.TypeInt},
			{Name: "Column", Typ: parser.TypeString},
			{Name: "Direction", Typ: parser.TypeString},
			{Name: "Storing", Typ: parser.TypeBool},
		},
	}

	// appendRow adds one output row describing a single column of an index.
	appendRow := func(index sqlbase.IndexDescriptor, colName string, sequence int,
		direction string, isStored bool) {
		v.rows = append(v.rows, []parser.Datum{
			parser.NewDString(tn.Table()),
			parser.NewDString(index.Name),
			parser.MakeDBool(parser.DBool(index.Unique)),
			parser.NewDInt(parser.DInt(sequence)),
			parser.NewDString(colName),
			parser.NewDString(direction),
			parser.MakeDBool(parser.DBool(isStored)),
		})
	}
	// Walk the primary index followed by all secondary indexes. Key columns
	// come first with their sort direction; stored columns follow with a
	// direction of "N/A" since they are not ordered.
	for _, index := range append([]sqlbase.IndexDescriptor{desc.PrimaryIndex}, desc.Indexes...) {
		sequence := 1
		for i, col := range index.ColumnNames {
			appendRow(index, col, sequence, index.ColumnDirections[i].String(), false)
			sequence++
		}
		for _, col := range index.StoreColumnNames {
			appendRow(index, col, sequence, "N/A", true)
			sequence++
		}
	}
	return v, nil
}
// prettyKey pretty-prints the specified key, skipping over the first skip
// fields. Each decoded field is rendered as "/value"; decode errors are
// rendered inline as "/<error>".
func prettyKey(key roachpb.Key, skip int) string {
	if !bytes.HasPrefix(key, keys.TableDataPrefix) {
		return fmt.Sprintf("index key missing table data prefix: %q vs %q",
			key, keys.TableDataPrefix)
	}
	key = key[len(keys.TableDataPrefix):]

	var buf bytes.Buffer
	for k := 0; len(key) > 0; k++ {
		var d interface{}
		var err error
		// Dispatch on the encoded type marker of the next field.
		switch encoding.PeekType(key) {
		case encoding.Null:
			key, _ = encoding.DecodeIfNull(key)
			d = parser.DNull
		case encoding.NotNull:
			key, _ = encoding.DecodeIfNotNull(key)
			d = "#"
		case encoding.Int:
			var i int64
			key, i, err = encoding.DecodeVarint(key)
			d = parser.DInt(i)
		case encoding.Float:
			var f float64
			key, f, err = encoding.DecodeFloat(key, nil)
			d = parser.DFloat(f)
		case encoding.Bytes:
			var s string
			key, s, err = encoding.DecodeString(key, nil)
			d = parser.DString(s)
		case encoding.Time:
			var t time.Time
			key, t, err = encoding.DecodeTime(key)
			d = parser.DTimestamp{Time: t}
		default:
			// This shouldn't ever happen, but if it does let the loop exit.
			key = nil
			d = "unknown"
		}
		// Decoding still advances past skipped fields; they are just not printed.
		if skip > 0 {
			skip--
			continue
		}
		if err != nil {
			fmt.Fprintf(&buf, "/<%v>", err)
			continue
		}
		fmt.Fprintf(&buf, "/%s", d)
	}
	return buf.String()
}
func unmarshalValue(col structured.ColumnDescriptor, kv client.KeyValue) parser.Datum { if kv.Exists() { switch col.Type.Kind { case structured.ColumnType_BIT, structured.ColumnType_INT: return parser.DInt(kv.ValueInt()) case structured.ColumnType_FLOAT: return parser.DFloat(math.Float64frombits(uint64(kv.ValueInt()))) case structured.ColumnType_CHAR, structured.ColumnType_TEXT, structured.ColumnType_BLOB: return parser.DString(kv.ValueBytes()) } } return parser.DNull }
func populateExplain(v *valuesNode, plan planNode, level int) { name, description, children := plan.ExplainPlan() row := parser.DTuple{ parser.DInt(level), parser.DString(name), parser.DString(description), } v.rows = append(v.rows, row) for _, child := range children { populateExplain(v, child, level+1) } }