// Arg implements the parser.Args interface.
func (p parameters) Arg(name string) (parser.Datum, bool) {
	i, err := processPositionalArgument(name)
	if err != nil {
		return nil, false
	}
	if i < 1 || int(i) > len(p) {
		return nil, false
	}
	arg := p[i-1].Payload
	if arg == nil {
		return parser.DNull, true
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal), true
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal), true
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal), true
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal), true
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal), true
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal), true
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}, true
	case *driver.Datum_IntervalVal:
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}, true
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
func datumFromProto(d driver.Datum) parser.Datum {
	arg := d.Payload
	if arg == nil {
		return parser.DNull
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal)
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal)
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal)
	case *driver.Datum_DecimalVal:
		dec, err := decimal.NewFromString(t.DecimalVal)
		if err != nil {
			panic(fmt.Sprintf("could not parse decimal: %v", err))
		}
		return parser.DDecimal{Decimal: dec}
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal)
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal)
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal)
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}
	case *driver.Datum_IntervalVal:
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
func decodeTableKey(valType parser.Datum, key []byte) (parser.Datum, []byte, error) {
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		return parser.DNull, key, nil
	}
	switch valType.(type) {
	case parser.DBool:
		rkey, i, err := encoding.DecodeVarint(key)
		return parser.DBool(i != 0), rkey, err
	case parser.DInt:
		rkey, i, err := encoding.DecodeVarint(key)
		return parser.DInt(i), rkey, err
	case parser.DFloat:
		rkey, f, err := encoding.DecodeFloat(key, nil)
		return parser.DFloat(f), rkey, err
	case parser.DString:
		rkey, r, err := encoding.DecodeString(key, nil)
		return parser.DString(r), rkey, err
	case parser.DBytes:
		rkey, r, err := encoding.DecodeString(key, nil)
		return parser.DBytes(r), rkey, err
	case parser.DDate:
		rkey, t, err := encoding.DecodeTime(key)
		return parser.DDate{Time: t}, rkey, err
	case parser.DTimestamp:
		rkey, t, err := encoding.DecodeTime(key)
		return parser.DTimestamp{Time: t}, rkey, err
	case parser.DInterval:
		rkey, d, err := encoding.DecodeVarint(key)
		return parser.DInterval{Duration: time.Duration(d)}, rkey, err
	default:
		return nil, nil, util.Errorf("TODO(pmattis): decoded index key: %s", valType.Type())
	}
}
func datumFromProto(d driver.Datum) parser.Datum {
	arg := d.Payload
	if arg == nil {
		return parser.DNull
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal)
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal)
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal)
	case *driver.Datum_DecimalVal:
		dd := &parser.DDecimal{}
		if _, ok := dd.SetString(t.DecimalVal); !ok {
			panic(fmt.Sprintf("could not parse string %q as decimal", t.DecimalVal))
		}
		return dd
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal)
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal)
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal)
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}
	case *driver.Datum_IntervalVal:
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
// unmarshalColumnValue decodes the value from a key-value pair using the type
// expected by the column. An error is returned if the value's type does not
// match the column's type.
func unmarshalColumnValue(kind ColumnType_Kind, value *proto.Value) (parser.Datum, error) {
	if value == nil {
		return parser.DNull, nil
	}
	switch kind {
	case ColumnType_BOOL:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.DBool(v != 0), nil
	case ColumnType_INT:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.DInt(v), nil
	case ColumnType_FLOAT:
		v, err := value.GetFloat()
		if err != nil {
			return nil, err
		}
		return parser.DFloat(v), nil
	case ColumnType_STRING:
		v, err := value.GetBytesChecked()
		if err != nil {
			return nil, err
		}
		return parser.DString(v), nil
	case ColumnType_BYTES:
		v, err := value.GetBytesChecked()
		if err != nil {
			return nil, err
		}
		return parser.DBytes(v), nil
	case ColumnType_DATE:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return parser.DDate{Time: v}, nil
	case ColumnType_TIMESTAMP:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return parser.DTimestamp{Time: v}, nil
	case ColumnType_INTERVAL:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.DInterval{Duration: time.Duration(v)}, nil
	default:
		return nil, util.Errorf("unsupported column type: %s", kind)
	}
}
// DecodeTableValue decodes a value encoded by EncodeTableValue.
func DecodeTableValue(a *DatumAlloc, valType parser.Datum, b []byte) (parser.Datum, []byte, error) {
	// TODO(dan): Merge this and DecodeTableKey.
	if len(b) == 0 {
		return nil, nil, util.Errorf("empty slice")
	}
	if roachpb.ValueType(b[0]) == roachpb.ValueType_NULL {
		return parser.DNull, b[1:], nil
	}
	var err error
	switch valType.(type) {
	case *parser.DBool:
		var i int64
		b, i, err = roachpb.DecodeIntValue(b)
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(i != 0)), b, err
	case *parser.DInt:
		var i int64
		b, i, err = roachpb.DecodeIntValue(b)
		return a.NewDInt(parser.DInt(i)), b, err
	case *parser.DFloat:
		var f float64
		b, f, err = roachpb.DecodeFloatValue(b)
		return a.NewDFloat(parser.DFloat(f)), b, err
	case *parser.DDecimal:
		var d *inf.Dec
		b, d, err = roachpb.DecodeDecimalValue(b)
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, b, err
	case *parser.DString:
		var data []byte
		b, data, err = roachpb.DecodeBytesValue(b)
		return a.NewDString(parser.DString(data)), b, err
	case *parser.DBytes:
		var data []byte
		b, data, err = roachpb.DecodeBytesValue(b)
		return a.NewDBytes(parser.DBytes(data)), b, err
	case *parser.DDate:
		var i int64
		b, i, err = roachpb.DecodeIntValue(b)
		return a.NewDDate(parser.DDate(i)), b, err
	case *parser.DTimestamp:
		var t time.Time
		b, t, err = roachpb.DecodeTimeValue(b)
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), b, err
	case *parser.DTimestampTZ:
		var t time.Time
		b, t, err = roachpb.DecodeTimeValue(b)
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), b, err
	case *parser.DInterval:
		var d duration.Duration
		b, d, err = roachpb.DecodeDurationValue(b)
		return a.NewDInterval(parser.DInterval{Duration: d}), b, err
	default:
		return nil, nil, util.Errorf("TODO(pmattis): decoded index value: %s", valType.Type())
	}
}
// DecodeTableValue decodes a value encoded by EncodeTableValue.
func DecodeTableValue(a *DatumAlloc, valType parser.Datum, b []byte) (parser.Datum, []byte, error) {
	_, dataOffset, _, typ, err := encoding.DecodeValueTag(b)
	if err != nil {
		return nil, b, err
	}
	if typ == encoding.Null {
		return parser.DNull, b[dataOffset:], nil
	}
	switch valType.(type) {
	case *parser.DBool:
		var x bool
		b, x, err = encoding.DecodeBoolValue(b)
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(x)), b, err
	case *parser.DInt:
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDInt(parser.DInt(i)), b, err
	case *parser.DFloat:
		var f float64
		b, f, err = encoding.DecodeFloatValue(b)
		return a.NewDFloat(parser.DFloat(f)), b, err
	case *parser.DDecimal:
		var d *inf.Dec
		b, d, err = encoding.DecodeDecimalValue(b)
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, b, err
	case *parser.DString:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDString(parser.DString(data)), b, err
	case *parser.DBytes:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDBytes(parser.DBytes(data)), b, err
	case *parser.DDate:
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDDate(parser.DDate(i)), b, err
	case *parser.DTimestamp:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), b, err
	case *parser.DTimestampTZ:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), b, err
	case *parser.DInterval:
		var d duration.Duration
		b, d, err = encoding.DecodeDurationValue(b)
		return a.NewDInterval(parser.DInterval{Duration: d}), b, err
	default:
		return nil, nil, errors.Errorf("TODO(pmattis): decoded index value: %s", valType.Type())
	}
}
// golangFillQueryArguments populates the placeholder map with
// types and values from an array of Go values.
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func golangFillQueryArguments(pinfo *parser.PlaceholderInfo, args []interface{}) {
	pinfo.Clear()
	for i, arg := range args {
		k := fmt.Sprint(i + 1)
		if arg == nil {
			pinfo.SetValue(k, parser.DNull)
			continue
		}

		// A type switch to handle a few explicit types with special semantics:
		// - Datums are passed along as is.
		// - Time datatypes get special representation in the database.
		var d parser.Datum
		switch t := arg.(type) {
		case parser.Datum:
			d = t
		case time.Time:
			d = parser.MakeDTimestamp(t, time.Microsecond)
		case time.Duration:
			d = &parser.DInterval{Duration: duration.Duration{Nanos: t.Nanoseconds()}}
		case *inf.Dec:
			dd := &parser.DDecimal{}
			dd.Set(t)
			d = dd
		}
		if d == nil {
			// Handle all types which have an underlying type that can be stored in the
			// database.
			// Note: if this reflection becomes a performance concern in the future,
			// commonly used types could be added explicitly into the type switch above
			// for a performance gain.
			val := reflect.ValueOf(arg)
			switch val.Kind() {
			case reflect.Bool:
				d = parser.MakeDBool(parser.DBool(val.Bool()))
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				d = parser.NewDInt(parser.DInt(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				d = parser.NewDInt(parser.DInt(val.Uint()))
			case reflect.Float32, reflect.Float64:
				d = parser.NewDFloat(parser.DFloat(val.Float()))
			case reflect.String:
				d = parser.NewDString(val.String())
			case reflect.Slice:
				// Handle byte slices.
				if val.Type().Elem().Kind() == reflect.Uint8 {
					d = parser.NewDBytes(parser.DBytes(val.Bytes()))
				}
			}
			if d == nil {
				panic(fmt.Sprintf("unexpected type %T", arg))
			}
		}
		pinfo.SetValue(k, d)
	}
}
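// The sketch below is an illustrative addition, not part of the original
// source: it shows how golangFillQueryArguments above is expected to be
// driven. The helper name exampleFillPlaceholders is hypothetical; the
// parser.NewPlaceholderInfo constructor is the one used elsewhere in this
// repository. Placeholders are keyed positionally as "1", "2", ... and each
// Go value is converted to the corresponding parser.Datum.
func exampleFillPlaceholders() *parser.PlaceholderInfo {
	pinfo := parser.NewPlaceholderInfo()
	golangFillQueryArguments(pinfo, []interface{}{
		true,         // $1 -> DBool
		int64(42),    // $2 -> DInt
		"hello",      // $3 -> DString
		[]byte{0xde}, // $4 -> DBytes
		time.Now(),   // $5 -> DTimestamp (microsecond precision)
		time.Second,  // $6 -> DInterval
	})
	return pinfo
}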
// SetUIData is an endpoint that sets the data associated with a key.
func (s *adminServer) SetUIData(_ context.Context, req *SetUIDataRequest) (*SetUIDataResponse, error) {
	if len(req.Key) == 0 {
		return nil, grpc.Errorf(codes.InvalidArgument, "key cannot be empty")
	}
	var session sql.Session
	user := s.getUser(req)

	// Do an upsert of the key.
	br := s.sqlExecutor.ExecuteStatements(user, &session, "BEGIN;", nil)
	if err := s.checkQueryResults(br.ResultList, 1); err != nil {
		return nil, s.serverError(err)
	}

	// See if the key already exists.
	alreadyExists := true
	if _, _, err := s.getUIData(&session, user, req.Key); err != nil {
		if err != errUIKeyNotFound {
			return nil, s.serverError(err)
		}
		alreadyExists = false
	}

	// INSERT or UPDATE as appropriate.
	ts := session.Txn.TxnTimestamp
	if alreadyExists {
		query := "UPDATE system.ui SET value = $1, lastUpdated = $2 WHERE key = $3; COMMIT;"
		params := []parser.Datum{
			parser.DString(req.Value),            // $1
			parser.DTimestamp{Time: ts.GoTime()}, // $2
			parser.DString(req.Key),              // $3
		}
		r := s.sqlExecutor.ExecuteStatements(user, &session, query, params)
		if err := s.checkQueryResults(r.ResultList, 2); err != nil {
			return nil, s.serverError(err)
		}
		if a, e := r.ResultList[0].RowsAffected, 1; a != e {
			return nil, s.serverErrorf("rows affected %d != expected %d", a, e)
		}
	} else {
		query := "INSERT INTO system.ui (key, value, lastUpdated) VALUES ($1, $2, $3); COMMIT;"
		params := []parser.Datum{
			parser.DString(req.Key),              // $1
			parser.DBytes(req.Value),             // $2
			parser.DTimestamp{Time: ts.GoTime()}, // $3
		}
		r := s.sqlExecutor.ExecuteStatements(user, &session, query, params)
		if err := s.checkQueryResults(r.ResultList, 2); err != nil {
			return nil, s.serverError(err)
		}
		if a, e := r.ResultList[0].RowsAffected, 1; a != e {
			return nil, s.serverErrorf("rows affected %d != expected %d", a, e)
		}
	}

	return &SetUIDataResponse{}, nil
}
// Arg implements the parser.Args interface.
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func (gp golangParameters) Arg(name string) (parser.Datum, bool) {
	i, err := processPositionalArgument(name)
	if err != nil {
		return nil, false
	}
	if i < 1 || int(i) > len(gp) {
		return nil, false
	}
	arg := gp[i-1]
	if arg == nil {
		return parser.DNull, true
	}

	// A type switch to handle a few explicit types with special semantics.
	switch t := arg.(type) {
	// Datums are passed along as is.
	case parser.Datum:
		return t, true
	// Time datatypes get special representation in the database.
	case time.Time:
		return parser.DTimestamp{Time: t}, true
	case time.Duration:
		return parser.DInterval{Duration: t}, true
	case *inf.Dec:
		dd := &parser.DDecimal{}
		dd.Set(t)
		return dd, true
	}

	// Handle all types which have an underlying type that can be stored in the
	// database.
	// Note: if this reflection becomes a performance concern in the future,
	// commonly used types could be added explicitly into the type switch above
	// for a performance gain.
	val := reflect.ValueOf(arg)
	switch val.Kind() {
	case reflect.Bool:
		return parser.DBool(val.Bool()), true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return parser.DInt(val.Int()), true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return parser.DInt(val.Uint()), true
	case reflect.Float32, reflect.Float64:
		return parser.DFloat(val.Float()), true
	case reflect.String:
		return parser.DString(val.String()), true
	case reflect.Slice:
		// Handle byte slices.
		if val.Type().Elem().Kind() == reflect.Uint8 {
			return parser.DBytes(val.Bytes()), true
		}
	}
	panic(fmt.Sprintf("unexpected type %T", arg))
}
// SetUIData is an endpoint that stores the given key/value pairs in the
// system.ui table. See GetUIData for more details on semantics.
func (s *adminServer) SetUIData(ctx context.Context, req *SetUIDataRequest) (*SetUIDataResponse, error) {
	if len(req.KeyValues) == 0 {
		return nil, grpc.Errorf(codes.InvalidArgument, "KeyValues cannot be empty")
	}
	session := sql.NewSession(sql.SessionArgs{User: s.getUser(req)}, s.sqlExecutor, nil)
	for key, val := range req.KeyValues {
		// Do an upsert of the key. We update each key in a separate transaction to
		// avoid long-running transactions and possible deadlocks.
		br := s.sqlExecutor.ExecuteStatements(ctx, session, "BEGIN;", nil)
		if err := s.checkQueryResults(br.ResultList, 1); err != nil {
			return nil, s.serverError(err)
		}

		// See if the key already exists.
		resp, err := s.getUIData(session, s.getUser(req), []string{key})
		if err != nil {
			return nil, s.serverError(err)
		}
		_, alreadyExists := resp.KeyValues[key]

		// INSERT or UPDATE as appropriate.
		if alreadyExists {
			query := "UPDATE system.ui SET value = $1, lastUpdated = NOW() WHERE key = $2; COMMIT;"
			params := []parser.Datum{
				parser.DString(val), // $1
				parser.DString(key), // $2
			}
			r := s.sqlExecutor.ExecuteStatements(ctx, session, query, params)
			if err := s.checkQueryResults(r.ResultList, 2); err != nil {
				return nil, s.serverError(err)
			}
			if a, e := r.ResultList[0].RowsAffected, 1; a != e {
				return nil, s.serverErrorf("rows affected %d != expected %d", a, e)
			}
		} else {
			query := "INSERT INTO system.ui (key, value, lastUpdated) VALUES ($1, $2, NOW()); COMMIT;"
			params := []parser.Datum{
				parser.DString(key), // $1
				parser.DBytes(val),  // $2
			}
			r := s.sqlExecutor.ExecuteStatements(ctx, session, query, params)
			if err := s.checkQueryResults(r.ResultList, 2); err != nil {
				return nil, s.serverError(err)
			}
			if a, e := r.ResultList[0].RowsAffected, 1; a != e {
				return nil, s.serverErrorf("rows affected %d != expected %d", a, e)
			}
		}
	}
	return &SetUIDataResponse{}, nil
}
// Arg implements the parser.Args interface.
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func (gp golangParameters) Arg(name string) (parser.Datum, bool) {
	i, err := processPositionalArgument(name)
	if err != nil {
		return nil, false
	}
	if i < 1 || int(i) > len(gp) {
		return nil, false
	}
	arg := gp[i-1]
	if arg == nil {
		return parser.DNull, true
	}

	// TODO: This type switch has been created primarily for convenience, but
	// the fact that multiple integer types are part of the switch is inelegant
	// and prone to errors (it has been described as "whack-a-mole"). There is
	// likely a better way to capture all of the various integer types using
	// reflection.
	switch t := arg.(type) {
	case bool:
		return parser.DBool(t), true
	case int:
		return parser.DInt(t), true
	case int32:
		return parser.DInt(t), true
	case uint32:
		return parser.DInt(t), true
	case int64:
		return parser.DInt(t), true
	case uint64:
		return parser.DInt(t), true
	case ID:
		return parser.DInt(t), true
	case []byte:
		return parser.DBytes(t), true
	case string:
		return parser.DString(t), true
	case time.Time:
		return parser.DTimestamp{Time: t}, true
	case time.Duration:
		return parser.DInterval{Duration: t}, true
	case parser.Datum:
		return t, true
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
// Arg implements the parser.Args interface.
func (p parameters) Arg(name string) (parser.Datum, bool) {
	if len(name) == 0 {
		// This shouldn't happen unless the parser let through an invalid parameter
		// specification.
		panic(fmt.Sprintf("invalid empty parameter name"))
	}
	if ch := name[0]; ch < '0' || ch > '9' {
		// TODO(pmattis): Add support for named parameters (vs the numbered
		// parameter support below).
		return nil, false
	}
	i, err := strconv.ParseInt(name, 10, 0)
	if err != nil {
		return nil, false
	}
	if i < 1 || int(i) > len(p) {
		return nil, false
	}
	arg := p[i-1].Payload
	if arg == nil {
		return parser.DNull, true
	}
	switch t := arg.(type) {
	case *driver.Datum_BoolVal:
		return parser.DBool(t.BoolVal), true
	case *driver.Datum_IntVal:
		return parser.DInt(t.IntVal), true
	case *driver.Datum_FloatVal:
		return parser.DFloat(t.FloatVal), true
	case *driver.Datum_BytesVal:
		return parser.DBytes(t.BytesVal), true
	case *driver.Datum_StringVal:
		return parser.DString(t.StringVal), true
	case *driver.Datum_DateVal:
		return parser.DDate(t.DateVal), true
	case *driver.Datum_TimeVal:
		return parser.DTimestamp{Time: t.TimeVal.GoTime()}, true
	case *driver.Datum_IntervalVal:
		return parser.DInterval{Duration: time.Duration(t.IntervalVal)}, true
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
// RandDatum generates a random Datum of the given type.
// If null is true, the datum can be DNull.
func RandDatum(rng *rand.Rand, typ ColumnType_Kind, null bool) parser.Datum {
	if null && rng.Intn(10) == 0 {
		return parser.DNull
	}
	switch typ {
	case ColumnType_BOOL:
		return parser.MakeDBool(rng.Intn(2) == 1)
	case ColumnType_INT:
		return parser.NewDInt(parser.DInt(rng.Int63()))
	case ColumnType_FLOAT:
		return parser.NewDFloat(parser.DFloat(rng.NormFloat64()))
	case ColumnType_DECIMAL:
		d := &parser.DDecimal{}
		d.Dec.SetScale(inf.Scale(rng.Intn(40) - 20))
		d.Dec.SetUnscaled(rng.Int63())
		return d
	case ColumnType_DATE:
		return parser.NewDDate(parser.DDate(rng.Intn(10000)))
	case ColumnType_TIMESTAMP:
		return &parser.DTimestamp{Time: time.Unix(rng.Int63n(1000000), rng.Int63n(1000000))}
	case ColumnType_INTERVAL:
		return &parser.DInterval{Duration: duration.Duration{
			Months: rng.Int63n(1000),
			Days:   rng.Int63n(1000),
			Nanos:  rng.Int63n(1000000),
		}}
	case ColumnType_STRING:
		// Generate a random ASCII string.
		p := make([]byte, rng.Intn(10))
		for i := range p {
			p[i] = byte(1 + rng.Intn(127))
		}
		return parser.NewDString(string(p))
	case ColumnType_BYTES:
		p := make([]byte, rng.Intn(10))
		_, _ = rng.Read(p)
		return parser.NewDBytes(parser.DBytes(p))
	case ColumnType_TIMESTAMPTZ:
		return &parser.DTimestampTZ{Time: time.Unix(rng.Int63n(1000000), rng.Int63n(1000000))}
	default:
		panic(fmt.Sprintf("invalid type %s", typ))
	}
}
// Arg implements the parser.Args interface.
func (p parameters) Arg(name string) (parser.Datum, bool) {
	if len(name) == 0 {
		// This shouldn't happen unless the parser let through an invalid parameter
		// specification.
		panic(fmt.Sprintf("invalid empty parameter name"))
	}
	if ch := name[0]; ch < '0' || ch > '9' {
		// TODO(pmattis): Add support for named parameters (vs the numbered
		// parameter support below).
		return nil, false
	}
	i, err := strconv.ParseInt(name, 10, 0)
	if err != nil {
		return nil, false
	}
	if i < 1 || int(i) > len(p) {
		return nil, false
	}
	arg := p[i-1].GetValue()
	if arg == nil {
		return parser.DNull, true
	}
	switch t := arg.(type) {
	case *bool:
		return parser.DBool(*t), true
	case *int64:
		return parser.DInt(*t), true
	case *float64:
		return parser.DFloat(*t), true
	case []byte:
		return parser.DBytes(t), true
	case *string:
		return parser.DString(*t), true
	case *driver.Datum_Timestamp:
		return parser.DTimestamp{Time: time.Unix((*t).Sec, int64((*t).Nsec)).UTC()}, true
	default:
		panic(fmt.Sprintf("unexpected type %T", t))
	}
}
func (n *scanNode) unmarshalValue(kv client.KeyValue) (parser.Datum, bool) {
	kind, ok := n.colKind[n.colID]
	if !ok {
		n.err = fmt.Errorf("column-id \"%d\" does not exist", n.colID)
		return nil, false
	}
	if kv.Exists() {
		switch kind {
		case ColumnType_INT:
			return parser.DInt(kv.ValueInt()), true
		case ColumnType_BOOL:
			return parser.DBool(kv.ValueInt() != 0), true
		case ColumnType_FLOAT:
			return parser.DFloat(math.Float64frombits(uint64(kv.ValueInt()))), true
		case ColumnType_STRING:
			return parser.DString(kv.ValueBytes()), true
		case ColumnType_BYTES:
			return parser.DBytes(kv.ValueBytes()), true
		case ColumnType_DATE:
			var t time.Time
			if err := t.UnmarshalBinary(kv.ValueBytes()); err != nil {
				return nil, false
			}
			return parser.DDate{Time: t}, true
		case ColumnType_TIMESTAMP:
			var t time.Time
			if err := t.UnmarshalBinary(kv.ValueBytes()); err != nil {
				return nil, false
			}
			return parser.DTimestamp{Time: t}, true
		case ColumnType_INTERVAL:
			return parser.DInterval{Duration: time.Duration(kv.ValueInt())}, true
		}
	}
	return parser.DNull, true
}
func dumpTable(w io.Writer, conn *sqlConn, origDBName, origTableName string) error { const limit = 100 // Escape names since they can't be used in placeholders. dbname := parser.Name(origDBName).String() tablename := parser.Name(origTableName).String() if err := conn.Exec(fmt.Sprintf("SET DATABASE = %s", dbname), nil); err != nil { return err } // Fetch all table metadata in a transaction and its time to guarantee it // doesn't change between the various SHOW statements. if err := conn.Exec("BEGIN", nil); err != nil { return err } vals, err := conn.QueryRow("SELECT cluster_logical_timestamp()::int", nil) if err != nil { return err } clusterTSStart := vals[0].(int64) clusterTS := time.Unix(0, clusterTSStart).Format(time.RFC3339Nano) // Fetch table descriptor. vals, err = conn.QueryRow(` SELECT descriptor FROM system.descriptor JOIN system.namespace tables ON tables.id = descriptor.id JOIN system.namespace dbs ON dbs.id = tables.parentid WHERE tables.name = $1 AND dbs.name = $2`, []driver.Value{origTableName, origDBName}) if err == io.EOF { return errors.Errorf("unknown database or table %s.%s", origTableName, origDBName) } else if err != nil { return err } b := vals[0].([]byte) var desc sqlbase.Descriptor if err := proto.Unmarshal(b, &desc); err != nil { return err } table := desc.GetTable() if table == nil { return errors.New("internal error: expected table descriptor") } coltypes := make(map[string]string) for _, c := range table.Columns { coltypes[c.Name] = c.Type.SQLString() } primaryIndex := table.PrimaryIndex.Name index := table.PrimaryIndex.ColumnNames indexes := strings.Join(index, ", ") // Build the SELECT query. var sbuf bytes.Buffer fmt.Fprintf(&sbuf, "SELECT %s, * FROM %s@%s AS OF SYSTEM TIME '%s'", indexes, tablename, primaryIndex, clusterTS) var wbuf bytes.Buffer fmt.Fprintf(&wbuf, " WHERE ROW (%s) > ROW (", indexes) for i := range index { if i > 0 { wbuf.WriteString(", ") } fmt.Fprintf(&wbuf, "$%d", i+1) } wbuf.WriteString(")") // No WHERE clause first time, so add a place to inject it. fmt.Fprintf(&sbuf, "%%s ORDER BY %s LIMIT %d", indexes, limit) bs := sbuf.String() vals, err = conn.QueryRow(fmt.Sprintf("SHOW CREATE TABLE %s", tablename), nil) if err != nil { return err } create := vals[1].([]byte) if _, err := w.Write(create); err != nil { return err } if _, err := w.Write([]byte(";\n")); err != nil { return err } if err := conn.Exec("COMMIT", nil); err != nil { return err } // pk holds the last values of the fetched primary keys var pk []driver.Value q := fmt.Sprintf(bs, "") for { rows, err := conn.Query(q, pk) if err != nil { return err } cols := rows.Columns() pkcols := cols[:len(index)] cols = cols[len(index):] inserts := make([][]string, 0, limit) i := 0 for i < limit { vals := make([]driver.Value, len(cols)+len(pkcols)) if err := rows.Next(vals); err == io.EOF { break } else if err != nil { return err } if pk == nil { q = fmt.Sprintf(bs, wbuf.String()) } pk = vals[:len(index)] vals = vals[len(index):] ivals := make([]string, len(vals)) // Values need to be correctly encoded for INSERT statements in a text file. 
for si, sv := range vals { switch t := sv.(type) { case nil: ivals[si] = "NULL" case bool: ivals[si] = parser.MakeDBool(parser.DBool(t)).String() case int64: ivals[si] = parser.NewDInt(parser.DInt(t)).String() case float64: ivals[si] = parser.NewDFloat(parser.DFloat(t)).String() case []byte: switch ct := coltypes[cols[si]]; ct { case "INTERVAL": ivals[si] = fmt.Sprintf("'%s'", t) case "DECIMAL": ivals[si] = fmt.Sprintf("%s", t) default: // STRING and BYTES types can have optional length suffixes, so only examine // the prefix of the type. if strings.HasPrefix(coltypes[cols[si]], "STRING") { ivals[si] = parser.NewDString(string(t)).String() } else if strings.HasPrefix(coltypes[cols[si]], "BYTES") { ivals[si] = parser.NewDBytes(parser.DBytes(t)).String() } else { panic(errors.Errorf("unknown []byte type: %s, %v: %s", t, cols[si], coltypes[cols[si]])) } } case time.Time: var d parser.Datum ct := coltypes[cols[si]] switch ct { case "DATE": d = parser.NewDDateFromTime(t, time.UTC) case "TIMESTAMP": d = parser.MakeDTimestamp(t, time.Nanosecond) case "TIMESTAMP WITH TIME ZONE": d = parser.MakeDTimestampTZ(t, time.Nanosecond) default: panic(errors.Errorf("unknown timestamp type: %s, %v: %s", t, cols[si], coltypes[cols[si]])) } ivals[si] = fmt.Sprintf("'%s'", d) default: panic(errors.Errorf("unknown field type: %T (%s)", t, cols[si])) } } inserts = append(inserts, ivals) i++ } for si, sv := range pk { b, ok := sv.([]byte) if ok && strings.HasPrefix(coltypes[pkcols[si]], "STRING") { // Primary key strings need to be converted to a go string, but not SQL // encoded since they aren't being written to a text file. pk[si] = string(b) } } if err := rows.Close(); err != nil { return err } if i == 0 { break } fmt.Fprintf(w, "\nINSERT INTO %s VALUES", tablename) for idx, values := range inserts { if idx > 0 { fmt.Fprint(w, ",") } fmt.Fprint(w, "\n\t(") for vi, v := range values { if vi > 0 { fmt.Fprint(w, ", ") } fmt.Fprint(w, v) } fmt.Fprint(w, ")") } fmt.Fprintln(w, ";") if i < limit { break } } return nil }
// decodeOidDatum decodes bytes with specified Oid and format code into // a datum. func decodeOidDatum(id oid.Oid, code formatCode, b []byte) (parser.Datum, error) { var d parser.Datum switch id { case oid.T_bool: switch code { case formatText: v, err := strconv.ParseBool(string(b)) if err != nil { return d, err } d = parser.DBool(v) default: return d, fmt.Errorf("unsupported bool format code: %d", code) } case oid.T_int2: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.DInt(i) case formatBinary: var i int16 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &i) if err != nil { return d, err } d = parser.DInt(i) default: return d, fmt.Errorf("unsupported int2 format code: %d", code) } case oid.T_int4: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.DInt(i) case formatBinary: var i int32 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &i) if err != nil { return d, err } d = parser.DInt(i) default: return d, fmt.Errorf("unsupported int4 format code: %d", code) } case oid.T_int8: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.DInt(i) case formatBinary: var i int64 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &i) if err != nil { return d, err } d = parser.DInt(i) default: return d, fmt.Errorf("unsupported int8 format code: %d", code) } case oid.T_float4: switch code { case formatText: f, err := strconv.ParseFloat(string(b), 64) if err != nil { return d, err } d = parser.DFloat(f) case formatBinary: var f float32 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &f) if err != nil { return d, err } d = parser.DFloat(f) default: return d, fmt.Errorf("unsupported float4 format code: %d", code) } case oid.T_float8: switch code { case formatText: f, err := strconv.ParseFloat(string(b), 64) if err != nil { return d, err } d = parser.DFloat(f) case formatBinary: var f float64 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &f) if err != nil { return d, err } d = parser.DFloat(f) default: return d, fmt.Errorf("unsupported float8 format code: %d", code) } case oid.T_numeric: switch code { case formatText: dd := &parser.DDecimal{} if _, ok := dd.SetString(string(b)); !ok { return nil, fmt.Errorf("could not parse string %q as decimal", b) } d = dd default: return d, fmt.Errorf("unsupported numeric format code: %d", code) } case oid.T_text, oid.T_varchar: switch code { case formatText: d = parser.DString(b) default: return d, fmt.Errorf("unsupported text format code: %d", code) } case oid.T_bytea: switch code { case formatText: // http://www.postgresql.org/docs/current/static/datatype-binary.html#AEN5667 // Code cribbed from github.com/lib/pq. // We only support hex encoding. 
if len(b) >= 2 && bytes.Equal(b[:2], []byte("\\x")) { b = b[2:] // trim off leading "\\x" result := make([]byte, hex.DecodedLen(len(b))) _, err := hex.Decode(result, b) if err != nil { return d, err } d = parser.DBytes(result) } else { return d, fmt.Errorf("unsupported bytea encoding: %q", b) } case formatBinary: d = parser.DBytes(b) default: return d, fmt.Errorf("unsupported bytea format code: %d", code) } case oid.T_timestamp: switch code { case formatText: ts, err := pq.ParseTimestamp(nil, string(b)) if err != nil { return d, fmt.Errorf("could not parse string %q as timestamp", b) } d = parser.DTimestamp{Time: ts} case formatBinary: return d, fmt.Errorf("unsupported timestamp format code: %d", code) } case oid.T_date: switch code { case formatText: ts, err := pq.ParseTimestamp(nil, string(b)) if err != nil { return d, fmt.Errorf("could not parse string %q as date", b) } daysSinceEpoch := ts.Unix() / secondsInDay d = parser.DDate(daysSinceEpoch) case formatBinary: return d, fmt.Errorf("unsupported date format code: %d", code) } default: return d, fmt.Errorf("unsupported OID: %v", id) } return d, nil }
func TestAdminAPITableDetailsZone(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	// Create database and table.
	session := sql.NewSession(sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil)
	setupQueries := []string{
		"CREATE DATABASE test",
		"CREATE TABLE test.tbl (val STRING)",
	}
	for _, q := range setupQueries {
		res := ts.sqlExecutor.ExecuteStatements(context.Background(), session, q, nil)
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
		}
	}

	// Function to verify the zone for test.tbl as returned by the Admin API.
	verifyZone := func(expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel) {
		var resp serverpb.TableDetailsResponse
		if err := apiGet(s, "databases/test/tables/tbl", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to store a zone config for a given object ID.
	setZone := func(zoneCfg config.ZoneConfig, id sqlbase.ID) {
		zoneBytes, err := zoneCfg.Marshal()
		if err != nil {
			t.Fatal(err)
		}
		const query = `INSERT INTO system.zones VALUES($1, $2)`
		params := parser.NewPlaceholderInfo()
		params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
		params.SetValue(`2`, parser.NewDBytes(parser.DBytes(zoneBytes)))
		res := ts.sqlExecutor.ExecuteStatements(context.Background(), session, query, params)
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", query, res.ResultList[0].Err)
		}
	}

	// Verify zone matches cluster default.
	verifyZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)

	// Get ID path for table. This will be an array of three IDs, containing the ID of the root namespace,
	// the database, and the table (in that order).
	idPath, err := ts.admin.queryDescriptorIDPath(context.Background(), session, []string{"test", "tbl"})
	if err != nil {
		t.Fatal(err)
	}

	// Apply zone configuration to database and check again.
	dbZone := config.ZoneConfig{
		RangeMinBytes: 456,
	}
	setZone(dbZone, idPath[1])
	verifyZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)

	// Apply zone configuration to table and check again.
	tblZone := config.ZoneConfig{
		RangeMinBytes: 789,
	}
	setZone(tblZone, idPath[2])
	verifyZone(tblZone, serverpb.ZoneConfigurationLevel_TABLE)
}
func TestValues(t *testing.T) {
	defer leaktest.AfterTest(t)
	p := planner{}

	vInt := int64(5)
	vNum := 3.14159
	vStr := "two furs one cub"
	vBool := true

	unsupp := &parser.RangeCond{}

	intVal := func(v int64) *parser.IntVal {
		return &parser.IntVal{Val: v}
	}
	asRow := func(datums ...parser.Datum) []parser.DTuple {
		return []parser.DTuple{datums}
	}

	testCases := []struct {
		stmt parser.Values
		rows []parser.DTuple
		ok   bool
	}{
		{
			parser.Values{{intVal(vInt)}},
			asRow(parser.DInt(vInt)),
			true,
		},
		{
			parser.Values{{intVal(vInt), intVal(vInt)}},
			asRow(parser.DInt(vInt), parser.DInt(vInt)),
			true,
		},
		{
			parser.Values{{parser.NumVal(fmt.Sprintf("%0.5f", vNum))}},
			asRow(parser.DFloat(vNum)),
			true,
		},
		{
			parser.Values{{parser.DString(vStr)}},
			asRow(parser.DString(vStr)),
			true,
		},
		{
			parser.Values{{parser.DBytes(vStr)}},
			asRow(parser.DBytes(vStr)),
			true,
		},
		{
			parser.Values{{parser.DBool(vBool)}},
			asRow(parser.DBool(vBool)),
			true,
		},
		{
			parser.Values{{unsupp}},
			nil,
			false,
		},
	}

	for i, tc := range testCases {
		plan, pErr := func() (_ planNode, pErr *roachpb.Error) {
			defer func() {
				if r := recover(); r != nil {
					pErr = roachpb.NewErrorf("%v", r)
				}
			}()
			return p.Values(tc.stmt)
		}()
		if pErr == nil != tc.ok {
			t.Errorf("%d: error_expected=%t, but got error %v", i, tc.ok, pErr)
		}
		if plan != nil {
			var rows []parser.DTuple
			for plan.Next() {
				rows = append(rows, plan.Values())
			}
			if !reflect.DeepEqual(rows, tc.rows) {
				t.Errorf("%d: expected rows:\n%+v\nactual rows:\n%+v", i, tc.rows, rows)
			}
		}
	}
}
// decodeOidDatum decodes bytes with specified Oid and format code into // a datum. func decodeOidDatum(id oid.Oid, code formatCode, b []byte) (parser.Datum, error) { var d parser.Datum switch id { case oid.T_bool: switch code { case formatText: v, err := strconv.ParseBool(string(b)) if err != nil { return d, err } d = parser.MakeDBool(parser.DBool(v)) case formatBinary: switch b[0] { case 0: d = parser.MakeDBool(false) case 1: d = parser.MakeDBool(true) default: return d, util.Errorf("unsupported binary bool: %q", b) } default: return d, util.Errorf("unsupported bool format code: %s", code) } case oid.T_int2: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) case formatBinary: var i int16 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &i) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) default: return d, util.Errorf("unsupported int2 format code: %s", code) } case oid.T_int4: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) case formatBinary: var i int32 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &i) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) default: return d, util.Errorf("unsupported int4 format code: %s", code) } case oid.T_int8: switch code { case formatText: i, err := strconv.ParseInt(string(b), 10, 64) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) case formatBinary: var i int64 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &i) if err != nil { return d, err } d = parser.NewDInt(parser.DInt(i)) default: return d, util.Errorf("unsupported int8 format code: %s", code) } case oid.T_float4: switch code { case formatText: f, err := strconv.ParseFloat(string(b), 64) if err != nil { return d, err } d = parser.NewDFloat(parser.DFloat(f)) case formatBinary: var f float32 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &f) if err != nil { return d, err } d = parser.NewDFloat(parser.DFloat(f)) default: return d, util.Errorf("unsupported float4 format code: %s", code) } case oid.T_float8: switch code { case formatText: f, err := strconv.ParseFloat(string(b), 64) if err != nil { return d, err } d = parser.NewDFloat(parser.DFloat(f)) case formatBinary: var f float64 err := binary.Read(bytes.NewReader(b), binary.BigEndian, &f) if err != nil { return d, err } d = parser.NewDFloat(parser.DFloat(f)) default: return d, util.Errorf("unsupported float8 format code: %s", code) } case oid.T_numeric: switch code { case formatText: dd := &parser.DDecimal{} if _, ok := dd.SetString(string(b)); !ok { return nil, util.Errorf("could not parse string %q as decimal", b) } d = dd case formatBinary: r := bytes.NewReader(b) alloc := struct { pgNum pgNumeric i16 int16 dd parser.DDecimal }{} for _, ptr := range []interface{}{ &alloc.pgNum.ndigits, &alloc.pgNum.weight, &alloc.pgNum.sign, &alloc.pgNum.dscale, } { if err := binary.Read(r, binary.BigEndian, ptr); err != nil { return d, err } } decDigits := make([]byte, 0, alloc.pgNum.ndigits*pgDecDigits) for i := int16(0); i < alloc.pgNum.ndigits-1; i++ { if err := binary.Read(r, binary.BigEndian, &alloc.i16); err != nil { return d, err } decDigits = strconv.AppendUint(decDigits, uint64(alloc.i16), 10) } // The last digit may contain padding, which we need to deal with. 
if err := binary.Read(r, binary.BigEndian, &alloc.i16); err != nil { return d, err } dscale := (alloc.pgNum.ndigits - 1 - alloc.pgNum.weight) * pgDecDigits if overScale := dscale - alloc.pgNum.dscale; overScale > 0 { dscale -= overScale for i := int16(0); i < overScale; i++ { alloc.i16 /= 10 } } decDigits = strconv.AppendUint(decDigits, uint64(alloc.i16), 10) alloc.dd.UnscaledBig().SetString(string(decDigits), 10) alloc.dd.SetScale(inf.Scale(dscale)) switch alloc.pgNum.sign { case pgNumericPos: case pgNumericNeg: alloc.dd.Neg(&alloc.dd.Dec) default: return d, util.Errorf("unsupported numeric sign: %s", alloc.pgNum.sign) } d = &alloc.dd default: return d, util.Errorf("unsupported numeric format code: %s", code) } case oid.T_text, oid.T_varchar: switch code { case formatText, formatBinary: d = parser.NewDString(string(b)) default: return d, util.Errorf("unsupported text format code: %s", code) } case oid.T_bytea: switch code { case formatText: // http://www.postgresql.org/docs/current/static/datatype-binary.html#AEN5667 // Code cribbed from github.com/lib/pq. // We only support hex encoding. if len(b) >= 2 && bytes.Equal(b[:2], []byte("\\x")) { b = b[2:] // trim off leading "\\x" result := make([]byte, hex.DecodedLen(len(b))) _, err := hex.Decode(result, b) if err != nil { return d, err } d = parser.NewDBytes(parser.DBytes(result)) } else { return d, util.Errorf("unsupported bytea encoding: %q", b) } case formatBinary: d = parser.NewDBytes(parser.DBytes(b)) default: return d, util.Errorf("unsupported bytea format code: %s", code) } case oid.T_timestamp, oid.T_timestamptz: switch code { case formatText: ts, err := parseTs(string(b)) if err != nil { return d, util.Errorf("could not parse string %q as timestamp", b) } d = parser.MakeDTimestamp(ts, time.Microsecond) default: return d, util.Errorf("unsupported timestamp format code: %s", code) } case oid.T_date: switch code { case formatText: ts, err := parseTs(string(b)) if err != nil { return d, util.Errorf("could not parse string %q as date", b) } daysSinceEpoch := ts.Unix() / secondsInDay d = parser.NewDDate(parser.DDate(daysSinceEpoch)) default: return d, util.Errorf("unsupported date format code: %s", code) } default: return d, util.Errorf("unsupported OID: %v", id) } return d, nil }
func dumpTable(w io.Writer, conn *sqlConn, origDBName, origTableName string) error { const limit = 100 // Escape names since they can't be used in placeholders. dbname := parser.Name(origDBName).String() tablename := parser.Name(origTableName).String() if err := conn.Exec(fmt.Sprintf("SET DATABASE = %s", dbname), nil); err != nil { return err } // Fetch all table metadata in a transaction and its time to guarantee it // doesn't change between the various SHOW statements. if err := conn.Exec("BEGIN", nil); err != nil { return err } vals, err := conn.QueryRow("SELECT cluster_logical_timestamp()::int", nil) if err != nil { return err } clusterTSStart := vals[0].(int64) clusterTS := time.Unix(0, clusterTSStart).Format(time.RFC3339Nano) // A previous version of the code did a SELECT on system.descriptor. This // required the SELECT privilege to the descriptor table, which only root // has. Allowing non-root to do this would let users see other users' table // descriptors which is a problem in multi-tenancy. // Fetch column types. rows, err := conn.Query(fmt.Sprintf("SHOW COLUMNS FROM %s", tablename), nil) if err != nil { return err } vals = make([]driver.Value, 2) coltypes := make(map[string]string) for { if err := rows.Next(vals); err == io.EOF { break } else if err != nil { return err } name, ok := vals[0].(string) if !ok { return fmt.Errorf("unexpected value: %T", vals[1]) } typ, ok := vals[1].(string) if !ok { return fmt.Errorf("unexpected value: %T", vals[4]) } coltypes[name] = typ } if err := rows.Close(); err != nil { return err } // index holds the names, in order, of the primary key columns. var index []string // Primary index is always the first index returned by SHOW INDEX. rows, err = conn.Query(fmt.Sprintf("SHOW INDEX FROM %s", tablename), nil) if err != nil { return err } vals = make([]driver.Value, 5) var primaryIndex string // Find the primary index columns. for { if err := rows.Next(vals); err == io.EOF { break } else if err != nil { return err } b, ok := vals[1].(string) if !ok { return fmt.Errorf("unexpected value: %T", vals[1]) } if primaryIndex == "" { primaryIndex = b } else if primaryIndex != b { break } b, ok = vals[4].(string) if !ok { return fmt.Errorf("unexpected value: %T", vals[4]) } index = append(index, parser.Name(b).String()) } if err := rows.Close(); err != nil { return err } if len(index) == 0 { return fmt.Errorf("no primary key index found") } indexes := strings.Join(index, ", ") // Build the SELECT query. var sbuf bytes.Buffer fmt.Fprintf(&sbuf, "SELECT %s, * FROM %s@%s AS OF SYSTEM TIME '%s'", indexes, tablename, primaryIndex, clusterTS) var wbuf bytes.Buffer fmt.Fprintf(&wbuf, " WHERE ROW (%s) > ROW (", indexes) for i := range index { if i > 0 { wbuf.WriteString(", ") } fmt.Fprintf(&wbuf, "$%d", i+1) } wbuf.WriteString(")") // No WHERE clause first time, so add a place to inject it. 
fmt.Fprintf(&sbuf, "%%s ORDER BY %s LIMIT %d", indexes, limit) bs := sbuf.String() vals, err = conn.QueryRow(fmt.Sprintf("SHOW CREATE TABLE %s", tablename), nil) if err != nil { return err } create := vals[1].(string) if _, err := w.Write([]byte(create)); err != nil { return err } if _, err := w.Write([]byte(";\n")); err != nil { return err } if err := conn.Exec("COMMIT", nil); err != nil { return err } // pk holds the last values of the fetched primary keys var pk []driver.Value q := fmt.Sprintf(bs, "") for { rows, err := conn.Query(q, pk) if err != nil { return err } cols := rows.Columns() pkcols := cols[:len(index)] cols = cols[len(index):] inserts := make([][]string, 0, limit) i := 0 for i < limit { vals := make([]driver.Value, len(cols)+len(pkcols)) if err := rows.Next(vals); err == io.EOF { break } else if err != nil { return err } if pk == nil { q = fmt.Sprintf(bs, wbuf.String()) } pk = vals[:len(index)] vals = vals[len(index):] ivals := make([]string, len(vals)) // Values need to be correctly encoded for INSERT statements in a text file. for si, sv := range vals { switch t := sv.(type) { case nil: ivals[si] = "NULL" case bool: ivals[si] = parser.MakeDBool(parser.DBool(t)).String() case int64: ivals[si] = parser.NewDInt(parser.DInt(t)).String() case float64: ivals[si] = parser.NewDFloat(parser.DFloat(t)).String() case string: ivals[si] = parser.NewDString(t).String() case []byte: switch ct := coltypes[cols[si]]; ct { case "INTERVAL": ivals[si] = fmt.Sprintf("'%s'", t) case "BYTES": ivals[si] = parser.NewDBytes(parser.DBytes(t)).String() default: // STRING and DECIMAL types can have optional length // suffixes, so only examine the prefix of the type. if strings.HasPrefix(coltypes[cols[si]], "STRING") { ivals[si] = parser.NewDString(string(t)).String() } else if strings.HasPrefix(coltypes[cols[si]], "DECIMAL") { ivals[si] = string(t) } else { panic(errors.Errorf("unknown []byte type: %s, %v: %s", t, cols[si], coltypes[cols[si]])) } } case time.Time: var d parser.Datum ct := coltypes[cols[si]] switch ct { case "DATE": d = parser.NewDDateFromTime(t, time.UTC) case "TIMESTAMP": d = parser.MakeDTimestamp(t, time.Nanosecond) case "TIMESTAMP WITH TIME ZONE": d = parser.MakeDTimestampTZ(t, time.Nanosecond) default: panic(errors.Errorf("unknown timestamp type: %s, %v: %s", t, cols[si], coltypes[cols[si]])) } ivals[si] = fmt.Sprintf("'%s'", d) default: panic(errors.Errorf("unknown field type: %T (%s)", t, cols[si])) } } inserts = append(inserts, ivals) i++ } for si, sv := range pk { b, ok := sv.([]byte) if ok && strings.HasPrefix(coltypes[pkcols[si]], "STRING") { // Primary key strings need to be converted to a go string, but not SQL // encoded since they aren't being written to a text file. pk[si] = string(b) } } if err := rows.Close(); err != nil { return err } if i == 0 { break } fmt.Fprintf(w, "\nINSERT INTO %s VALUES", tablename) for idx, values := range inserts { if idx > 0 { fmt.Fprint(w, ",") } fmt.Fprint(w, "\n\t(") for vi, v := range values { if vi > 0 { fmt.Fprint(w, ", ") } fmt.Fprint(w, v) } fmt.Fprint(w, ")") } fmt.Fprintln(w, ";") if i < limit { break } } return nil }
// unmarshalColumnValue decodes the value from a key-value pair using the type
// expected by the column. An error is returned if the value's type does not
// match the column's type.
func unmarshalColumnValue(kind ColumnType_Kind, value *roachpb.Value) (parser.Datum, *roachpb.Error) {
	if value == nil {
		return parser.DNull, nil
	}
	switch kind {
	case ColumnType_BOOL:
		v, err := value.GetInt()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DBool(v != 0), nil
	case ColumnType_INT:
		v, err := value.GetInt()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DInt(v), nil
	case ColumnType_FLOAT:
		v, err := value.GetFloat()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DFloat(v), nil
	case ColumnType_DECIMAL:
		v, err := value.GetDecimal()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		dd := parser.DDecimal{}
		dd.Set(v)
		return dd, nil
	case ColumnType_STRING:
		v, err := value.GetBytes()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DString(v), nil
	case ColumnType_BYTES:
		v, err := value.GetBytes()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DBytes(v), nil
	case ColumnType_DATE:
		v, err := value.GetInt()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DDate(v), nil
	case ColumnType_TIMESTAMP:
		v, err := value.GetTime()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DTimestamp{Time: v}, nil
	case ColumnType_INTERVAL:
		v, err := value.GetInt()
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		return parser.DInterval{Duration: time.Duration(v)}, nil
	default:
		return nil, roachpb.NewErrorf("unsupported column type: %s", kind)
	}
}
// UnmarshalColumnValue decodes the value from a key-value pair using the type
// expected by the column. An error is returned if the value's type does not
// match the column's type.
func UnmarshalColumnValue(
	a *DatumAlloc, kind ColumnType_Kind, value *roachpb.Value,
) (parser.Datum, error) {
	if value == nil {
		return parser.DNull, nil
	}
	switch kind {
	case ColumnType_BOOL:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return parser.MakeDBool(parser.DBool(v != 0)), nil
	case ColumnType_INT:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return a.NewDInt(parser.DInt(v)), nil
	case ColumnType_FLOAT:
		v, err := value.GetFloat()
		if err != nil {
			return nil, err
		}
		return a.NewDFloat(parser.DFloat(v)), nil
	case ColumnType_DECIMAL:
		v, err := value.GetDecimal()
		if err != nil {
			return nil, err
		}
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(v)
		return dd, nil
	case ColumnType_STRING:
		v, err := value.GetBytes()
		if err != nil {
			return nil, err
		}
		return a.NewDString(parser.DString(v)), nil
	case ColumnType_BYTES:
		v, err := value.GetBytes()
		if err != nil {
			return nil, err
		}
		return a.NewDBytes(parser.DBytes(v)), nil
	case ColumnType_DATE:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return a.NewDDate(parser.DDate(v)), nil
	case ColumnType_TIMESTAMP:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return a.NewDTimestamp(parser.DTimestamp{Time: v}), nil
	case ColumnType_TIMESTAMPTZ:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: v}), nil
	case ColumnType_INTERVAL:
		d, err := value.GetDuration()
		if err != nil {
			return nil, err
		}
		return a.NewDInterval(parser.DInterval{Duration: d}), nil
	default:
		return nil, util.Errorf("unsupported column type: %s", kind)
	}
}
// DecodeTableKey decodes a table key/value.
func DecodeTableKey(
	a *DatumAlloc, valType parser.Datum, key []byte, dir encoding.Direction,
) (parser.Datum, []byte, error) {
	if (dir != encoding.Ascending) && (dir != encoding.Descending) {
		return nil, nil, util.Errorf("invalid direction: %d", dir)
	}
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		return parser.DNull, key, nil
	}
	var rkey []byte
	var err error
	switch valType.(type) {
	case *parser.DBool:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(i != 0)), rkey, err
	case *parser.DInt:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		return a.NewDInt(parser.DInt(i)), rkey, err
	case *parser.DFloat:
		var f float64
		if dir == encoding.Ascending {
			rkey, f, err = encoding.DecodeFloatAscending(key)
		} else {
			rkey, f, err = encoding.DecodeFloatDescending(key)
		}
		return a.NewDFloat(parser.DFloat(f)), rkey, err
	case *parser.DDecimal:
		var d *inf.Dec
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDecimalAscending(key, nil)
		} else {
			rkey, d, err = encoding.DecodeDecimalDescending(key, nil)
		}
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, rkey, err
	case *parser.DString:
		var r string
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeUnsafeStringAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil)
		}
		return a.NewDString(parser.DString(r)), rkey, err
	case *parser.DBytes:
		var r []byte
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeBytesAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeBytesDescending(key, nil)
		}
		return a.NewDBytes(parser.DBytes(r)), rkey, err
	case *parser.DDate:
		var t int64
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, t, err = encoding.DecodeVarintDescending(key)
		}
		return a.NewDDate(parser.DDate(t)), rkey, err
	case *parser.DTimestamp:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), rkey, err
	case *parser.DTimestampTZ:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), rkey, err
	case *parser.DInterval:
		var d duration.Duration
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDurationAscending(key)
		} else {
			rkey, d, err = encoding.DecodeDurationDescending(key)
		}
		return a.NewDInterval(parser.DInterval{Duration: d}), rkey, err
	default:
		return nil, nil, util.Errorf("TODO(pmattis): decoded index key: %s", valType.Type())
	}
}
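// Illustrative sketch (not from the original source): DecodeTableKey above
// dispatches purely on the dynamic type of valType, so any Datum of the
// desired type serves as a prototype and its value is ignored. The helper
// name exampleDecodeIntKey is hypothetical; it assumes the util/encoding
// varint key encoder that pairs with DecodeVarintAscending.
func exampleDecodeIntKey(a *DatumAlloc) (parser.Datum, error) {
	key := encoding.EncodeVarintAscending(nil, 42) // ascending key encoding of 42
	prototype := a.NewDInt(0)                      // prototype only; its value is ignored
	d, remaining, err := DecodeTableKey(a, prototype, key, encoding.Ascending)
	if err != nil {
		return nil, err
	}
	if len(remaining) != 0 {
		return nil, util.Errorf("unexpected trailing key bytes")
	}
	return d, nil // expected to be a *parser.DInt with value 42
}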
func decodeTableKey(valType parser.Datum, key []byte, dir encoding.Direction) (
	parser.Datum, []byte, error) {
	if (dir != encoding.Ascending) && (dir != encoding.Descending) {
		return nil, nil, util.Errorf("invalid direction: %d", dir)
	}
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		return parser.DNull, key, nil
	}
	var rkey []byte
	var err error
	switch valType.(type) {
	case parser.DBool:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		return parser.DBool(i != 0), rkey, err
	case parser.DInt:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		return parser.DInt(i), rkey, err
	case parser.DFloat:
		var f float64
		if dir == encoding.Ascending {
			rkey, f, err = encoding.DecodeFloatAscending(key, nil)
		} else {
			rkey, f, err = encoding.DecodeFloatDescending(key, nil)
		}
		return parser.DFloat(f), rkey, err
	case *parser.DDecimal:
		var d *inf.Dec
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDecimalAscending(key, nil)
		} else {
			rkey, d, err = encoding.DecodeDecimalDescending(key, nil)
		}
		dd := &parser.DDecimal{}
		dd.Set(d)
		return dd, rkey, err
	case parser.DString:
		var r string
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeStringAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeStringDescending(key, nil)
		}
		return parser.DString(r), rkey, err
	case parser.DBytes:
		var r []byte
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeBytesAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeBytesDescending(key, nil)
		}
		return parser.DBytes(r), rkey, err
	case parser.DDate:
		var t int64
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, t, err = encoding.DecodeVarintDescending(key)
		}
		return parser.DDate(t), rkey, err
	case parser.DTimestamp:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return parser.DTimestamp{Time: t}, rkey, err
	case parser.DInterval:
		var d int64
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, d, err = encoding.DecodeVarintDescending(key)
		}
		return parser.DInterval{Duration: time.Duration(d)}, rkey, err
	default:
		return nil, nil, util.Errorf("TODO(pmattis): decoded index key: %s", valType.Type())
	}
}
func TestValues(t *testing.T) {
	defer leaktest.AfterTest(t)()
	p := makePlanner()

	vInt := int64(5)
	vNum := 3.14159
	vStr := "two furs one cub"
	vBool := true

	unsupp := &parser.RangeCond{}

	intVal := func(v int64) *parser.NumVal {
		return &parser.NumVal{Value: constant.MakeInt64(v)}
	}
	floatVal := func(f float64) *parser.CastExpr {
		return &parser.CastExpr{
			Expr: &parser.NumVal{Value: constant.MakeFloat64(f)},
			Type: &parser.FloatColType{},
		}
	}
	asRow := func(datums ...parser.Datum) []parser.DTuple {
		return []parser.DTuple{datums}
	}
	makeValues := func(tuples ...*parser.Tuple) *parser.ValuesClause {
		return &parser.ValuesClause{Tuples: tuples}
	}
	makeTuple := func(exprs ...parser.Expr) *parser.Tuple {
		return &parser.Tuple{Exprs: exprs}
	}

	testCases := []struct {
		stmt *parser.ValuesClause
		rows []parser.DTuple
		ok   bool
	}{
		{
			makeValues(makeTuple(intVal(vInt))),
			asRow(parser.NewDInt(parser.DInt(vInt))),
			true,
		},
		{
			makeValues(makeTuple(intVal(vInt), intVal(vInt))),
			asRow(parser.NewDInt(parser.DInt(vInt)), parser.NewDInt(parser.DInt(vInt))),
			true,
		},
		{
			makeValues(makeTuple(floatVal(vNum))),
			asRow(parser.NewDFloat(parser.DFloat(vNum))),
			true,
		},
		{
			makeValues(makeTuple(parser.NewDString(vStr))),
			asRow(parser.NewDString(vStr)),
			true,
		},
		{
			makeValues(makeTuple(parser.NewDBytes(parser.DBytes(vStr)))),
			asRow(parser.NewDBytes(parser.DBytes(vStr))),
			true,
		},
		{
			makeValues(makeTuple(parser.MakeDBool(parser.DBool(vBool)))),
			asRow(parser.MakeDBool(parser.DBool(vBool))),
			true,
		},
		{
			makeValues(makeTuple(unsupp)),
			nil,
			false,
		},
	}

	for i, tc := range testCases {
		plan, err := func() (_ planNode, err error) {
			defer func() {
				if r := recover(); r != nil {
					err = errors.Errorf("%v", r)
				}
			}()
			return p.ValuesClause(tc.stmt, nil)
		}()
		if err == nil != tc.ok {
			t.Errorf("%d: error_expected=%t, but got error %v", i, tc.ok, err)
		}
		if plan != nil {
			if err := plan.expandPlan(); err != nil {
				t.Errorf("%d: unexpected error in expandPlan: %v", i, err)
				continue
			}
			if err := plan.Start(); err != nil {
				t.Errorf("%d: unexpected error in Start: %v", i, err)
				continue
			}
			var rows []parser.DTuple
			next, err := plan.Next()
			for ; next; next, err = plan.Next() {
				rows = append(rows, plan.Values())
			}
			if err != nil {
				t.Error(err)
				continue
			}
			if !reflect.DeepEqual(rows, tc.rows) {
				t.Errorf("%d: expected rows:\n%+v\nactual rows:\n%+v", i, tc.rows, rows)
			}
		}
	}
}