Example #1
// golangFillQueryArguments populates the placeholder map with
// types and values from an array of Go values.
// TODO: This does not support arguments of the SQL 'Date' type, as there is no
// equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func golangFillQueryArguments(pinfo *parser.PlaceholderInfo, args []interface{}) {
	pinfo.Clear()

	for i, arg := range args {
		k := fmt.Sprint(i + 1)
		if arg == nil {
			pinfo.SetValue(k, parser.DNull)
			continue
		}

		// A type switch to handle a few explicit types with special semantics:
		// - Datums are passed along as is.
		// - Time datatypes get special representation in the database.
		var d parser.Datum
		switch t := arg.(type) {
		case parser.Datum:
			d = t
		case time.Time:
			d = parser.MakeDTimestamp(t, time.Microsecond)
		case time.Duration:
			d = &parser.DInterval{Duration: duration.Duration{Nanos: t.Nanoseconds()}}
		case *inf.Dec:
			dd := &parser.DDecimal{}
			dd.Set(t)
			d = dd
		}
		if d == nil {
			// Handle all types which have an underlying type that can be stored in the
			// database.
			// Note: if this reflection becomes a performance concern in the future,
			// commonly used types could be added explicitly into the type switch above
			// for a performance gain.
			val := reflect.ValueOf(arg)
			switch val.Kind() {
			case reflect.Bool:
				d = parser.MakeDBool(parser.DBool(val.Bool()))
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				d = parser.NewDInt(parser.DInt(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				d = parser.NewDInt(parser.DInt(val.Uint()))
			case reflect.Float32, reflect.Float64:
				d = parser.NewDFloat(parser.DFloat(val.Float()))
			case reflect.String:
				d = parser.NewDString(val.String())
			case reflect.Slice:
				// Handle byte slices.
				if val.Type().Elem().Kind() == reflect.Uint8 {
					d = parser.NewDBytes(parser.DBytes(val.Bytes()))
				}
			}
			if d == nil {
				panic(fmt.Sprintf("unexpected type %T", arg))
			}
		}
		pinfo.SetValue(k, d)
	}
}
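
The reflection fallback above is a common pattern for mapping arbitrary Go values onto a small set of storage types: handle a few concrete types explicitly, then dispatch on reflect.Kind for everything else. A minimal standalone sketch of that Kind switch, using only the standard library (all names here are illustrative and not part of the codebase):

package main

import (
	"fmt"
	"reflect"
)

// classify mirrors the Kind switch in golangFillQueryArguments, but only
// reports which storage category a value would map to.
func classify(arg interface{}) string {
	if arg == nil {
		return "NULL"
	}
	val := reflect.ValueOf(arg)
	switch val.Kind() {
	case reflect.Bool:
		return "BOOL"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return "INT"
	case reflect.Float32, reflect.Float64:
		return "FLOAT"
	case reflect.String:
		return "STRING"
	case reflect.Slice:
		if val.Type().Elem().Kind() == reflect.Uint8 {
			return "BYTES"
		}
	}
	return fmt.Sprintf("unsupported (%T)", arg)
}

func main() {
	for _, v := range []interface{}{nil, true, 42, 3.14, "hi", []byte{0x01}} {
		fmt.Printf("%v -> %s\n", v, classify(v))
	}
}
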
Example #2
// DecodeTableValue decodes a value encoded by EncodeTableValue.
func DecodeTableValue(a *DatumAlloc, valType parser.Type, b []byte) (parser.Datum, []byte, error) {
	_, dataOffset, _, typ, err := encoding.DecodeValueTag(b)
	if err != nil {
		return nil, b, err
	}
	if typ == encoding.Null {
		return parser.DNull, b[dataOffset:], nil
	}
	switch valType {
	case parser.TypeBool:
		var x bool
		b, x, err = encoding.DecodeBoolValue(b)
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(x)), b, err
	case parser.TypeInt:
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDInt(parser.DInt(i)), b, err
	case parser.TypeFloat:
		var f float64
		b, f, err = encoding.DecodeFloatValue(b)
		return a.NewDFloat(parser.DFloat(f)), b, err
	case parser.TypeDecimal:
		var d *inf.Dec
		b, d, err = encoding.DecodeDecimalValue(b)
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, b, err
	case parser.TypeString:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDString(parser.DString(data)), b, err
	case parser.TypeBytes:
		var data []byte
		b, data, err = encoding.DecodeBytesValue(b)
		return a.NewDBytes(parser.DBytes(data)), b, err
	case parser.TypeDate:
		var i int64
		b, i, err = encoding.DecodeIntValue(b)
		return a.NewDDate(parser.DDate(i)), b, err
	case parser.TypeTimestamp:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), b, err
	case parser.TypeTimestampTZ:
		var t time.Time
		b, t, err = encoding.DecodeTimeValue(b)
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), b, err
	case parser.TypeInterval:
		var d duration.Duration
		b, d, err = encoding.DecodeDurationValue(b)
		return a.NewDInterval(parser.DInterval{Duration: d}), b, err
	default:
		return nil, nil, errors.Errorf("TODO(pmattis): decoded index value: %s", valType)
	}
}
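
DecodeTableValue follows the decode-and-return-the-rest convention: each call consumes one value from the front of the buffer and hands back the remaining bytes, so a row can be decoded with a simple loop. A generic standalone sketch of that calling pattern (toy varint payload via encoding/binary, not the actual table-value encoding):

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeOne consumes one unsigned varint from the front of b and returns the
// value along with the remaining buffer, mirroring the (datum, rest, err) shape.
func decodeOne(b []byte) (uint64, []byte, error) {
	v, n := binary.Uvarint(b)
	if n <= 0 {
		return 0, b, fmt.Errorf("invalid varint")
	}
	return v, b[n:], nil
}

func main() {
	// Encode three values back to back.
	var buf []byte
	for _, v := range []uint64{7, 300, 12345} {
		tmp := make([]byte, binary.MaxVarintLen64)
		buf = append(buf, tmp[:binary.PutUvarint(tmp, v)]...)
	}

	// Decode them in a loop; buf shrinks as values are consumed.
	for len(buf) > 0 {
		v, rest, err := decodeOne(buf)
		if err != nil {
			panic(err)
		}
		fmt.Println(v)
		buf = rest
	}
}
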
Example #3
// ShowColumns of a table.
// Privileges: Any privilege on table.
//   Notes: postgres does not have a SHOW COLUMNS statement.
//          mysql only returns columns you have privileges on.
func (p *planner) ShowColumns(n *parser.ShowColumns) (planNode, error) {
	tn, err := n.Table.NormalizeWithDatabaseName(p.session.Database)
	if err != nil {
		return nil, err
	}

	desc, err := p.mustGetTableDesc(tn)
	if err != nil {
		return nil, err
	}
	if err := p.anyPrivilege(desc); err != nil {
		return nil, err
	}

	columns := ResultColumns{
		{Name: "Field", Typ: parser.TypeString},
		{Name: "Type", Typ: parser.TypeString},
		{Name: "Null", Typ: parser.TypeBool},
		{Name: "Default", Typ: parser.TypeString},
	}
	return &delayedNode{
		p:       p,
		name:    "SHOW COLUMNS FROM " + tn.String(),
		columns: columns,
		constructor: func(p *planner) (planNode, error) {
			v := p.newContainerValuesNode(columns, 0)

			for i, col := range desc.Columns {
				defaultExpr := parser.DNull
				if e := desc.Columns[i].DefaultExpr; e != nil {
					defaultExpr = parser.NewDString(*e)
				}
				newRow := parser.DTuple{
					parser.NewDString(desc.Columns[i].Name),
					parser.NewDString(col.Type.SQLString()),
					parser.MakeDBool(parser.DBool(desc.Columns[i].Nullable)),
					defaultExpr,
				}
				if err := v.rows.AddRow(newRow); err != nil {
					v.rows.Close()
					return nil, err
				}
			}
			return v, nil
		},
	}, nil
}
Example #4
// MakePrimaryIndexKey creates a key prefix that corresponds to a table row
// (in the primary index); it is intended for tests.
//
// The value types must match the primary key columns (or a prefix of them);
// supported types are:
//  - Datum
//  - bool (converts to DBool)
//  - int (converts to DInt)
//  - string (converts to DString)
func MakePrimaryIndexKey(desc *TableDescriptor, vals ...interface{}) (roachpb.Key, error) {
	index := &desc.PrimaryIndex
	if len(vals) > len(index.ColumnIDs) {
		return nil, errors.Errorf("got %d values, PK has %d columns", len(vals), len(index.ColumnIDs))
	}
	datums := make([]parser.Datum, len(vals))
	for i, v := range vals {
		switch v := v.(type) {
		case bool:
			datums[i] = parser.MakeDBool(parser.DBool(v))
		case int:
			datums[i] = parser.NewDInt(parser.DInt(v))
		case string:
			datums[i] = parser.NewDString(v)
		case parser.Datum:
			datums[i] = v
		default:
			return nil, errors.Errorf("unexpected value type %T", v)
		}
		// Check that the value type matches.
		colID := index.ColumnIDs[i]
		for _, c := range desc.Columns {
			if c.ID == colID {
				if t := DatumTypeToColumnKind(datums[i].ResolvedType()); t != c.Type.Kind {
					return nil, errors.Errorf("column %d of type %s, got value of type %s", i, c.Type.Kind, t)
				}
				break
			}
		}
	}
	// Create the ColumnID to index in datums slice map needed by
	// MakeIndexKeyPrefix.
	colIDToRowIndex := make(map[ColumnID]int)
	for i := range vals {
		colIDToRowIndex[index.ColumnIDs[i]] = i
	}

	keyPrefix := MakeIndexKeyPrefix(desc, index.ID)
	key, _, err := EncodeIndexKey(desc, index, colIDToRowIndex, datums, keyPrefix)
	if err != nil {
		return nil, err
	}
	return roachpb.Key(key), nil
}
Example #5
// ShowIndex returns all the indexes for a table.
// Privileges: Any privilege on table.
//   Notes: postgres does not have a SHOW INDEXES statement.
//          mysql requires some privilege for any column.
func (p *planner) ShowIndex(n *parser.ShowIndex) (planNode, error) {
	tn, err := n.Table.NormalizeWithDatabaseName(p.session.Database)
	if err != nil {
		return nil, err
	}

	desc, err := p.mustGetTableDesc(tn)
	if err != nil {
		return nil, err
	}
	if err := p.anyPrivilege(desc); err != nil {
		return nil, err
	}

	columns := ResultColumns{
		{Name: "Table", Typ: parser.TypeString},
		{Name: "Name", Typ: parser.TypeString},
		{Name: "Unique", Typ: parser.TypeBool},
		{Name: "Seq", Typ: parser.TypeInt},
		{Name: "Column", Typ: parser.TypeString},
		{Name: "Direction", Typ: parser.TypeString},
		{Name: "Storing", Typ: parser.TypeBool},
	}

	return &delayedNode{
		p:       p,
		name:    "SHOW INDEX FROM " + tn.String(),
		columns: columns,
		constructor: func(p *planner) (planNode, error) {
			v := p.newContainerValuesNode(columns, 0)

			appendRow := func(index sqlbase.IndexDescriptor, colName string, sequence int,
				direction string, isStored bool) error {
				newRow := parser.DTuple{
					parser.NewDString(tn.Table()),
					parser.NewDString(index.Name),
					parser.MakeDBool(parser.DBool(index.Unique)),
					parser.NewDInt(parser.DInt(sequence)),
					parser.NewDString(colName),
					parser.NewDString(direction),
					parser.MakeDBool(parser.DBool(isStored)),
				}
				_, err := v.rows.AddRow(newRow)
				return err
			}

			for _, index := range append([]sqlbase.IndexDescriptor{desc.PrimaryIndex}, desc.Indexes...) {
				sequence := 1
				for i, col := range index.ColumnNames {
					if err := appendRow(index, col, sequence, index.ColumnDirections[i].String(), false); err != nil {
						v.rows.Close()
						return nil, err
					}
					sequence++
				}
				for _, col := range index.StoreColumnNames {
					if err := appendRow(index, col, sequence, "N/A", true); err != nil {
						v.rows.Close()
						return nil, err
					}
					sequence++
				}
			}
			return v, nil
		},
	}, nil
}
Example #6
func dumpTable(w io.Writer, conn *sqlConn, origDBName, origTableName string) error {
	const limit = 100

	// Escape names since they can't be used in placeholders.
	dbname := parser.Name(origDBName).String()
	tablename := parser.Name(origTableName).String()

	if err := conn.Exec(fmt.Sprintf("SET DATABASE = %s", dbname), nil); err != nil {
		return err
	}

	// Fetch all table metadata inside a single transaction, along with the
	// transaction's timestamp, to guarantee it doesn't change between the
	// various SHOW statements.
	if err := conn.Exec("BEGIN", nil); err != nil {
		return err
	}

	vals, err := conn.QueryRow("SELECT cluster_logical_timestamp()", nil)
	if err != nil {
		return err
	}
	clusterTS := string(vals[0].([]byte))

	// A previous version of the code did a SELECT on system.descriptor. This
	// required the SELECT privilege to the descriptor table, which only root
	// has. Allowing non-root to do this would let users see other users' table
	// descriptors which is a problem in multi-tenancy.

	// Fetch column types.
	rows, err := conn.Query(fmt.Sprintf("SHOW COLUMNS FROM %s", tablename), nil)
	if err != nil {
		return err
	}
	vals = make([]driver.Value, 2)
	coltypes := make(map[string]string)
	for {
		if err := rows.Next(vals); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		nameI, typI := vals[0], vals[1]
		name, ok := nameI.(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", nameI)
		}
		typ, ok := typI.(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", typI)
		}
		coltypes[name] = typ
	}
	if err := rows.Close(); err != nil {
		return err
	}

	// index holds the names, in order, of the primary key columns.
	var index []string
	// Primary index is always the first index returned by SHOW INDEX.
	rows, err = conn.Query(fmt.Sprintf("SHOW INDEX FROM %s", tablename), nil)
	if err != nil {
		return err
	}
	vals = make([]driver.Value, 5)
	var primaryIndex string
	// Find the primary index columns.
	for {
		if err := rows.Next(vals); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		b, ok := vals[1].(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", vals[1])
		}
		if primaryIndex == "" {
			primaryIndex = b
		} else if primaryIndex != b {
			break
		}
		b, ok = vals[4].(string)
		if !ok {
			return fmt.Errorf("unexpected value: %T", vals[4])
		}
		index = append(index, parser.Name(b).String())
	}
	if err := rows.Close(); err != nil {
		return err
	}
	if len(index) == 0 {
		return fmt.Errorf("no primary key index found")
	}
	indexes := strings.Join(index, ", ")

	// Build the SELECT query.
	var sbuf bytes.Buffer
	fmt.Fprintf(&sbuf, "SELECT %s, * FROM %s@%s AS OF SYSTEM TIME %s", indexes, tablename, primaryIndex, clusterTS)

	var wbuf bytes.Buffer
	fmt.Fprintf(&wbuf, " WHERE ROW (%s) > ROW (", indexes)
	for i := range index {
		if i > 0 {
			wbuf.WriteString(", ")
		}
		fmt.Fprintf(&wbuf, "$%d", i+1)
	}
	wbuf.WriteString(")")
	// No WHERE clause first time, so add a place to inject it.
	fmt.Fprintf(&sbuf, "%%s ORDER BY %s LIMIT %d", indexes, limit)
	bs := sbuf.String()

	vals, err = conn.QueryRow(fmt.Sprintf("SHOW CREATE TABLE %s", tablename), nil)
	if err != nil {
		return err
	}
	create := vals[1].(string)
	if _, err := w.Write([]byte(create)); err != nil {
		return err
	}
	if _, err := w.Write([]byte(";\n")); err != nil {
		return err
	}

	if err := conn.Exec("COMMIT", nil); err != nil {
		return err
	}

	// pk holds the last values of the fetched primary keys
	var pk []driver.Value
	q := fmt.Sprintf(bs, "")
	for {
		rows, err := conn.Query(q, pk)
		if err != nil {
			return err
		}
		cols := rows.Columns()
		pkcols := cols[:len(index)]
		cols = cols[len(index):]
		inserts := make([][]string, 0, limit)
		i := 0
		for i < limit {
			vals := make([]driver.Value, len(cols)+len(pkcols))
			if err := rows.Next(vals); err == io.EOF {
				break
			} else if err != nil {
				return err
			}
			if pk == nil {
				q = fmt.Sprintf(bs, wbuf.String())
			}
			pk = vals[:len(index)]
			vals = vals[len(index):]
			ivals := make([]string, len(vals))
			// Values need to be correctly encoded for INSERT statements in a text file.
			for si, sv := range vals {
				switch t := sv.(type) {
				case nil:
					ivals[si] = "NULL"
				case bool:
					ivals[si] = parser.MakeDBool(parser.DBool(t)).String()
				case int64:
					ivals[si] = parser.NewDInt(parser.DInt(t)).String()
				case float64:
					ivals[si] = parser.NewDFloat(parser.DFloat(t)).String()
				case string:
					ivals[si] = parser.NewDString(t).String()
				case []byte:
					switch ct := coltypes[cols[si]]; ct {
					case "INTERVAL":
						ivals[si] = fmt.Sprintf("'%s'", t)
					case "BYTES":
						ivals[si] = parser.NewDBytes(parser.DBytes(t)).String()
					default:
						// STRING and DECIMAL types can have optional length
						// suffixes, so only examine the prefix of the type.
						if strings.HasPrefix(coltypes[cols[si]], "STRING") {
							ivals[si] = parser.NewDString(string(t)).String()
						} else if strings.HasPrefix(coltypes[cols[si]], "DECIMAL") {
							ivals[si] = string(t)
						} else {
							panic(errors.Errorf("unknown []byte type: %s, %v: %s", t, cols[si], coltypes[cols[si]]))
						}
					}
				case time.Time:
					var d parser.Datum
					ct := coltypes[cols[si]]
					switch ct {
					case "DATE":
						d = parser.NewDDateFromTime(t, time.UTC)
					case "TIMESTAMP":
						d = parser.MakeDTimestamp(t, time.Nanosecond)
					case "TIMESTAMP WITH TIME ZONE":
						d = parser.MakeDTimestampTZ(t, time.Nanosecond)
					default:
						panic(errors.Errorf("unknown timestamp type: %s, %v: %s", t, cols[si], coltypes[cols[si]]))
					}
					ivals[si] = fmt.Sprintf("'%s'", d)
				default:
					panic(errors.Errorf("unknown field type: %T (%s)", t, cols[si]))
				}
			}
			inserts = append(inserts, ivals)
			i++
		}
		for si, sv := range pk {
			b, ok := sv.([]byte)
			if ok && strings.HasPrefix(coltypes[pkcols[si]], "STRING") {
				// Primary key strings need to be converted to a go string, but not SQL
				// encoded since they aren't being written to a text file.
				pk[si] = string(b)
			}
		}
		if err := rows.Close(); err != nil {
			return err
		}
		if i == 0 {
			break
		}
		fmt.Fprintf(w, "\nINSERT INTO %s VALUES", tablename)
		for idx, values := range inserts {
			if idx > 0 {
				fmt.Fprint(w, ",")
			}
			fmt.Fprint(w, "\n\t(")
			for vi, v := range values {
				if vi > 0 {
					fmt.Fprint(w, ", ")
				}
				fmt.Fprint(w, v)
			}
			fmt.Fprint(w, ")")
		}
		fmt.Fprintln(w, ";")
		if i < limit {
			break
		}
	}
	return nil
}
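
The paging scheme above is easier to see in isolation: the base SELECT leaves a %s slot into which a keyset-pagination WHERE clause is injected from the second batch onward, so each batch resumes after the last primary key fetched. A standalone sketch of the query strings this builds, assuming a hypothetical table t with primary key (id, ts):

package main

import "fmt"

func main() {
	const limit = 100
	indexes := "id, ts"                           // primary-key columns, already quoted as needed
	clusterTS := "1475000000000000000.0000000000" // illustrative cluster_logical_timestamp() value

	base := fmt.Sprintf("SELECT %s, * FROM t@primary AS OF SYSTEM TIME %s%%s ORDER BY %s LIMIT %d",
		indexes, clusterTS, indexes, limit)
	where := fmt.Sprintf(" WHERE ROW (%s) > ROW ($1, $2)", indexes)

	fmt.Println(fmt.Sprintf(base, ""))    // first batch: no WHERE clause yet
	fmt.Println(fmt.Sprintf(base, where)) // later batches: resume after the last primary key seen
}
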
Example #7
// DecodeTableKey decodes a table key/value.
func DecodeTableKey(
	a *DatumAlloc, valType parser.Type, key []byte, dir encoding.Direction,
) (parser.Datum, []byte, error) {
	if (dir != encoding.Ascending) && (dir != encoding.Descending) {
		return nil, nil, errors.Errorf("invalid direction: %d", dir)
	}
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		return parser.DNull, key, nil
	}
	var rkey []byte
	var err error
	switch valType {
	case parser.TypeBool:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		// No need to chunk allocate DBool as MakeDBool returns either
		// parser.DBoolTrue or parser.DBoolFalse.
		return parser.MakeDBool(parser.DBool(i != 0)), rkey, err
	case parser.TypeInt:
		var i int64
		if dir == encoding.Ascending {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		return a.NewDInt(parser.DInt(i)), rkey, err
	case parser.TypeFloat:
		var f float64
		if dir == encoding.Ascending {
			rkey, f, err = encoding.DecodeFloatAscending(key)
		} else {
			rkey, f, err = encoding.DecodeFloatDescending(key)
		}
		return a.NewDFloat(parser.DFloat(f)), rkey, err
	case parser.TypeDecimal:
		var d *inf.Dec
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDecimalAscending(key, nil)
		} else {
			rkey, d, err = encoding.DecodeDecimalDescending(key, nil)
		}
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(d)
		return dd, rkey, err
	case parser.TypeString:
		var r string
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeUnsafeStringAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil)
		}
		return a.NewDString(parser.DString(r)), rkey, err
	case parser.TypeBytes:
		var r []byte
		if dir == encoding.Ascending {
			rkey, r, err = encoding.DecodeBytesAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeBytesDescending(key, nil)
		}
		return a.NewDBytes(parser.DBytes(r)), rkey, err
	case parser.TypeDate:
		var t int64
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, t, err = encoding.DecodeVarintDescending(key)
		}
		return a.NewDDate(parser.DDate(t)), rkey, err
	case parser.TypeTimestamp:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return a.NewDTimestamp(parser.DTimestamp{Time: t}), rkey, err
	case parser.TypeTimestampTZ:
		var t time.Time
		if dir == encoding.Ascending {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: t}), rkey, err
	case parser.TypeInterval:
		var d duration.Duration
		if dir == encoding.Ascending {
			rkey, d, err = encoding.DecodeDurationAscending(key)
		} else {
			rkey, d, err = encoding.DecodeDurationDescending(key)
		}
		return a.NewDInterval(parser.DInterval{Duration: d}), rkey, err
	default:
		return nil, nil, errors.Errorf("TODO(pmattis): decoded index key: %s", valType)
	}
}
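
The Ascending/Descending pairs exist because index keys must sort correctly as raw bytes; a descending encoding is typically the bitwise complement of the ascending one. A standalone illustration of that idea with fixed-width big-endian integers (not the actual util/encoding format):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func encAsc(v uint32) []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, v)
	return b
}

func encDesc(v uint32) []byte {
	b := encAsc(v)
	for i := range b {
		b[i] = ^b[i] // complementing the bytes flips the sort order
	}
	return b
}

func main() {
	fmt.Println(bytes.Compare(encAsc(10), encAsc(20)))   // -1: ascending order preserved
	fmt.Println(bytes.Compare(encDesc(10), encDesc(20))) // 1: order reversed
}
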
Example #8
// UnmarshalColumnValue decodes the value from a key-value pair using the type
// expected by the column. An error is returned if the value's type does not
// match the column's type.
func UnmarshalColumnValue(
	a *DatumAlloc, kind ColumnType_Kind, value *roachpb.Value,
) (parser.Datum, error) {
	if value == nil {
		return parser.DNull, nil
	}

	switch kind {
	case ColumnType_BOOL:
		v, err := value.GetBool()
		if err != nil {
			return nil, err
		}
		return parser.MakeDBool(parser.DBool(v)), nil
	case ColumnType_INT:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return a.NewDInt(parser.DInt(v)), nil
	case ColumnType_FLOAT:
		v, err := value.GetFloat()
		if err != nil {
			return nil, err
		}
		return a.NewDFloat(parser.DFloat(v)), nil
	case ColumnType_DECIMAL:
		v, err := value.GetDecimal()
		if err != nil {
			return nil, err
		}
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(v)
		return dd, nil
	case ColumnType_STRING:
		v, err := value.GetBytes()
		if err != nil {
			return nil, err
		}
		return a.NewDString(parser.DString(v)), nil
	case ColumnType_BYTES:
		v, err := value.GetBytes()
		if err != nil {
			return nil, err
		}
		return a.NewDBytes(parser.DBytes(v)), nil
	case ColumnType_DATE:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return a.NewDDate(parser.DDate(v)), nil
	case ColumnType_TIMESTAMP:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return a.NewDTimestamp(parser.DTimestamp{Time: v}), nil
	case ColumnType_TIMESTAMPTZ:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: v}), nil
	case ColumnType_INTERVAL:
		d, err := value.GetDuration()
		if err != nil {
			return nil, err
		}
		return a.NewDInterval(parser.DInterval{Duration: d}), nil
	default:
		return nil, errors.Errorf("unsupported column type: %s", kind)
	}
}
Example #9
// decodeOidDatum decodes bytes with specified Oid and format code into
// a datum.
func decodeOidDatum(id oid.Oid, code formatCode, b []byte) (parser.Datum, error) {
	var d parser.Datum
	switch id {
	case oid.T_bool:
		switch code {
		case formatText:
			v, err := strconv.ParseBool(string(b))
			if err != nil {
				return d, err
			}
			d = parser.MakeDBool(parser.DBool(v))
		case formatBinary:
			switch b[0] {
			case 0:
				d = parser.MakeDBool(false)
			case 1:
				d = parser.MakeDBool(true)
			default:
				return d, errors.Errorf("unsupported binary bool: %q", b)
			}
		default:
			return d, errors.Errorf("unsupported bool format code: %d", code)
		}
	case oid.T_int2:
		switch code {
		case formatText:
			i, err := strconv.ParseInt(string(b), 10, 64)
			if err != nil {
				return d, err
			}
			d = parser.NewDInt(parser.DInt(i))
		case formatBinary:
			if len(b) < 2 {
				return d, errors.Errorf("int2 requires 2 bytes for binary format")
			}
			i := int16(binary.BigEndian.Uint16(b))
			d = parser.NewDInt(parser.DInt(i))
		default:
			return d, errors.Errorf("unsupported int2 format code: %d", code)
		}
	case oid.T_int4:
		switch code {
		case formatText:
			i, err := strconv.ParseInt(string(b), 10, 64)
			if err != nil {
				return d, err
			}
			d = parser.NewDInt(parser.DInt(i))
		case formatBinary:
			if len(b) < 4 {
				return d, errors.Errorf("int4 requires 4 bytes for binary format")
			}
			i := int32(binary.BigEndian.Uint32(b))
			d = parser.NewDInt(parser.DInt(i))
		default:
			return d, errors.Errorf("unsupported int4 format code: %d", code)
		}
	case oid.T_int8:
		switch code {
		case formatText:
			i, err := strconv.ParseInt(string(b), 10, 64)
			if err != nil {
				return d, err
			}
			d = parser.NewDInt(parser.DInt(i))
		case formatBinary:
			if len(b) < 8 {
				return d, errors.Errorf("int8 requires 8 bytes for binary format")
			}
			i := int64(binary.BigEndian.Uint64(b))
			d = parser.NewDInt(parser.DInt(i))
		default:
			return d, errors.Errorf("unsupported int8 format code: %d", code)
		}
	case oid.T_float4:
		switch code {
		case formatText:
			f, err := strconv.ParseFloat(string(b), 64)
			if err != nil {
				return d, err
			}
			d = parser.NewDFloat(parser.DFloat(f))
		case formatBinary:
			if len(b) < 4 {
				return d, errors.Errorf("float4 requires 4 bytes for binary format")
			}
			f := math.Float32frombits(binary.BigEndian.Uint32(b))
			d = parser.NewDFloat(parser.DFloat(f))
		default:
			return d, errors.Errorf("unsupported float4 format code: %d", code)
		}
	case oid.T_float8:
		switch code {
		case formatText:
			f, err := strconv.ParseFloat(string(b), 64)
			if err != nil {
				return d, err
			}
			d = parser.NewDFloat(parser.DFloat(f))
		case formatBinary:
			if len(b) < 8 {
				return d, errors.Errorf("float8 requires 8 bytes for binary format")
			}
			f := math.Float64frombits(binary.BigEndian.Uint64(b))
			d = parser.NewDFloat(parser.DFloat(f))
		default:
			return d, errors.Errorf("unsupported float8 format code: %d", code)
		}
	case oid.T_numeric:
		switch code {
		case formatText:
			dd := &parser.DDecimal{}
			if _, ok := dd.SetString(string(b)); !ok {
				return nil, errors.Errorf("could not parse string %q as decimal", b)
			}
			d = dd
		case formatBinary:
			r := bytes.NewReader(b)

			alloc := struct {
				pgNum pgNumeric
				i16   int16

				dd parser.DDecimal
			}{}

			for _, ptr := range []interface{}{
				&alloc.pgNum.ndigits,
				&alloc.pgNum.weight,
				&alloc.pgNum.sign,
				&alloc.pgNum.dscale,
			} {
				if err := binary.Read(r, binary.BigEndian, ptr); err != nil {
					return d, err
				}
			}

			if alloc.pgNum.ndigits > 0 {
				decDigits := make([]byte, 0, alloc.pgNum.ndigits*pgDecDigits)
				nextDigit := func() error {
					if err := binary.Read(r, binary.BigEndian, &alloc.i16); err != nil {
						return err
					}
					numZeroes := pgDecDigits
					for i16 := alloc.i16; i16 > 0; i16 /= 10 {
						numZeroes--
					}
					for ; numZeroes > 0; numZeroes-- {
						decDigits = append(decDigits, '0')
					}
					return nil
				}

				for i := int16(0); i < alloc.pgNum.ndigits-1; i++ {
					if err := nextDigit(); err != nil {
						return d, err
					}
					if alloc.i16 > 0 {
						decDigits = strconv.AppendUint(decDigits, uint64(alloc.i16), 10)
					}
				}

				// The last digit may contain padding, which we need to deal with.
				if err := nextDigit(); err != nil {
					return d, err
				}
				dscale := (alloc.pgNum.ndigits - (alloc.pgNum.weight + 1)) * pgDecDigits
				if overScale := dscale - alloc.pgNum.dscale; overScale > 0 {
					dscale -= overScale
					for i := int16(0); i < overScale; i++ {
						alloc.i16 /= 10
					}
				}
				decDigits = strconv.AppendUint(decDigits, uint64(alloc.i16), 10)
				decString := string(decDigits)
				if _, ok := alloc.dd.UnscaledBig().SetString(decString, 10); !ok {
					return nil, errors.Errorf("could not parse string %q as decimal", decString)
				}
				alloc.dd.SetScale(inf.Scale(dscale))
			}

			switch alloc.pgNum.sign {
			case pgNumericPos:
			case pgNumericNeg:
				alloc.dd.Neg(&alloc.dd.Dec)
			default:
				return d, errors.Errorf("unsupported numeric sign: %d", alloc.pgNum.sign)
			}

			d = &alloc.dd
		default:
			return d, errors.Errorf("unsupported numeric format code: %d", code)
		}
	case oid.T_text, oid.T_varchar:
		switch code {
		case formatText, formatBinary:
			d = parser.NewDString(string(b))
		default:
			return d, errors.Errorf("unsupported text format code: %d", code)
		}
	case oid.T_bytea:
		switch code {
		case formatText:
			// http://www.postgresql.org/docs/current/static/datatype-binary.html#AEN5667
			// Code cribbed from github.com/lib/pq.

			// We only support hex encoding.
			if len(b) >= 2 && bytes.Equal(b[:2], []byte("\\x")) {
				b = b[2:] // trim off leading "\\x"
				result := make([]byte, hex.DecodedLen(len(b)))
				_, err := hex.Decode(result, b)
				if err != nil {
					return d, err
				}
				d = parser.NewDBytes(parser.DBytes(result))
			} else {
				return d, errors.Errorf("unsupported bytea encoding: %q", b)
			}
		case formatBinary:
			d = parser.NewDBytes(parser.DBytes(b))
		default:
			return d, errors.Errorf("unsupported bytea format code: %d", code)
		}
	case oid.T_timestamp:
		switch code {
		case formatText:
			ts, err := parseTs(string(b))
			if err != nil {
				return d, errors.Errorf("could not parse string %q as timestamp", b)
			}
			d = parser.MakeDTimestamp(ts, time.Microsecond)
		case formatBinary:
			if len(b) < 8 {
				return d, errors.Errorf("timestamp requires 8 bytes for binary format")
			}
			i := int64(binary.BigEndian.Uint64(b))
			d = parser.MakeDTimestamp(pgBinaryToTime(i), time.Microsecond)
		default:
			return d, errors.Errorf("unsupported timestamp format code: %d", code)
		}
	case oid.T_timestamptz:
		switch code {
		case formatText:
			ts, err := parseTs(string(b))
			if err != nil {
				return d, errors.Errorf("could not parse string %q as timestamp", b)
			}
			d = parser.MakeDTimestampTZ(ts, time.Microsecond)
		case formatBinary:
			if len(b) < 8 {
				return d, errors.Errorf("timestamptz requires 8 bytes for binary format")
			}
			i := int64(binary.BigEndian.Uint64(b))
			d = parser.MakeDTimestampTZ(pgBinaryToTime(i), time.Microsecond)
		default:
			return d, errors.Errorf("unsupported timestamptz format code: %d", code)
		}
	case oid.T_date:
		switch code {
		case formatText:
			ts, err := parseTs(string(b))
			if err != nil {
				res, err := parser.ParseDDate(string(b), time.UTC)
				if err != nil {
					return d, errors.Errorf("could not parse string %q as date", b)
				}
				d = res
			} else {
				daysSinceEpoch := ts.Unix() / secondsInDay
				d = parser.NewDDate(parser.DDate(daysSinceEpoch))
			}
		case formatBinary:
			if len(b) < 4 {
				return d, errors.Errorf("date requires 4 bytes for binary format")
			}
			i := int32(binary.BigEndian.Uint32(b))
			d = pgBinaryToDate(i)
		default:
			return d, errors.Errorf("unsupported date format code: %d", code)
		}
	case oid.T_interval:
		switch code {
		case formatText:
			d, err := parser.ParseDInterval(string(b))
			if err != nil {
				return d, errors.Errorf("could not parse string %q as interval", b)
			}
			return d, nil
		default:
			return d, errors.Errorf("unsupported interval format code: %d", code)
		}
	default:
		return d, errors.Errorf("unsupported OID: %v", id)
	}
	return d, nil
}
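
All of the binary format codes above boil down to fixed-width big-endian reads followed by a bit-pattern conversion. A standalone sketch of just that decoding step, using only the standard library (the byte slices are illustrative, not actual pgwire messages):

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// int8 (64-bit integer): eight big-endian bytes.
	intBuf := []byte{0, 0, 0, 0, 0, 0, 0x30, 0x39}
	i := int64(binary.BigEndian.Uint64(intBuf)) // 12345

	// float8 (64-bit float): eight big-endian bytes holding the IEEE 754 bit pattern.
	floatBuf := make([]byte, 8)
	binary.BigEndian.PutUint64(floatBuf, math.Float64bits(3.25))
	f := math.Float64frombits(binary.BigEndian.Uint64(floatBuf))

	fmt.Println(i, f) // 12345 3.25
}
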
Example #10
func typByVal(typ parser.Type) parser.Datum {
	_, variable := typ.Size()
	return parser.MakeDBool(parser.DBool(!variable))
}
Example #11
				addColumn := func(column *sqlbase.ColumnDescriptor, attRelID parser.Datum, colNum int) error {
					colTyp := column.Type.ToDatumType()
					return addRow(
						attRelID,                            // attrelid
						parser.NewDString(column.Name),      // attname
						typOid(colTyp),                      // atttypid
						zeroVal,                             // attstattarget
						typLen(colTyp),                      // attlen
						parser.NewDInt(parser.DInt(colNum)), // attnum
						zeroVal,      // attndims
						negOneVal,    // attcacheoff
						negOneVal,    // atttypmod
						parser.DNull, // attbyval (see pg_type.typbyval)
						parser.DNull, // attstorage
						parser.DNull, // attalign
						parser.MakeDBool(parser.DBool(!column.Nullable)),          // attnotnull
						parser.MakeDBool(parser.DBool(column.DefaultExpr != nil)), // atthasdef
						parser.MakeDBool(false),                                   // attisdropped
						parser.MakeDBool(true),                                    // attislocal
						zeroVal,                                                   // attinhcount
						parser.DNull,                                              // attacl
						parser.DNull,                                              // attoptions
						parser.DNull,                                              // attfdwoptions
					)
				}

				// Columns for table.
				colNum := 0
				if err := forEachColumnInTable(table, func(column *sqlbase.ColumnDescriptor) error {
					colNum++
					tableID := h.TableOid(db, table)
Example #12
	COLUMN_NAME STRING NOT NULL DEFAULT '',
	"COLLATION" STRING NOT NULL DEFAULT '',
	CARDINALITY INT NOT NULL DEFAULT 0,
	DIRECTION STRING NOT NULL DEFAULT '',
	STORING BOOL NOT NULL DEFAULT FALSE 
);`,
	populate: func(p *planner, addRow func(...parser.Datum) error) error {
		return forEachTableDesc(p,
			func(db *sqlbase.DatabaseDescriptor, table *sqlbase.TableDescriptor) error {
				appendRow := func(index *sqlbase.IndexDescriptor, colName string, sequence int,
					direction string, isStored bool) error {
					return addRow(
						defString,                                    // table_catalog
						parser.NewDString(db.GetName()),              // table_schema
						parser.NewDString(table.GetName()),           // table_name
						parser.MakeDBool(parser.DBool(index.Unique)), // non_unique
						parser.NewDString(db.GetName()),              // index_schema
						parser.NewDString(index.Name),                // index_name
						parser.NewDInt(parser.DInt(sequence)),        // seq_in_index
						parser.NewDString(colName),                   // column_name
						parser.DNull,                                 // collation
						parser.DNull,                                 // cardinality
						parser.NewDString(direction),                 // direction
						parser.MakeDBool(parser.DBool(isStored)),     // storing
					)
				}

				return forEachIndexInTable(table, func(index *sqlbase.IndexDescriptor) error {
					sequence := 1
					for i, col := range index.ColumnNames {
						// We add a row for each column of index.
Example #13
func TestValues(t *testing.T) {
	defer leaktest.AfterTest(t)()

	p := makeTestPlanner()

	vInt := int64(5)
	vNum := 3.14159
	vStr := "two furs one cub"
	vBool := true

	unsupp := &parser.RangeCond{}

	intVal := func(v int64) *parser.NumVal {
		return &parser.NumVal{Value: constant.MakeInt64(v)}
	}
	floatVal := func(f float64) *parser.CastExpr {
		return &parser.CastExpr{
			Expr: &parser.NumVal{Value: constant.MakeFloat64(f)},
			Type: &parser.FloatColType{},
		}
	}
	asRow := func(datums ...parser.Datum) []parser.DTuple {
		return []parser.DTuple{datums}
	}

	makeValues := func(tuples ...*parser.Tuple) *parser.ValuesClause {
		return &parser.ValuesClause{Tuples: tuples}
	}
	makeTuple := func(exprs ...parser.Expr) *parser.Tuple {
		return &parser.Tuple{Exprs: exprs}
	}

	testCases := []struct {
		stmt *parser.ValuesClause
		rows []parser.DTuple
		ok   bool
	}{
		{
			makeValues(makeTuple(intVal(vInt))),
			asRow(parser.NewDInt(parser.DInt(vInt))),
			true,
		},
		{
			makeValues(makeTuple(intVal(vInt), intVal(vInt))),
			asRow(parser.NewDInt(parser.DInt(vInt)), parser.NewDInt(parser.DInt(vInt))),
			true,
		},
		{
			makeValues(makeTuple(floatVal(vNum))),
			asRow(parser.NewDFloat(parser.DFloat(vNum))),
			true,
		},
		{
			makeValues(makeTuple(parser.NewDString(vStr))),
			asRow(parser.NewDString(vStr)),
			true,
		},
		{
			makeValues(makeTuple(parser.NewDBytes(parser.DBytes(vStr)))),
			asRow(parser.NewDBytes(parser.DBytes(vStr))),
			true,
		},
		{
			makeValues(makeTuple(parser.MakeDBool(parser.DBool(vBool)))),
			asRow(parser.MakeDBool(parser.DBool(vBool))),
			true,
		},
		{
			makeValues(makeTuple(unsupp)),
			nil,
			false,
		},
	}

	for i, tc := range testCases {
		plan, err := func() (_ planNode, err error) {
			defer func() {
				if r := recover(); r != nil {
					err = errors.Errorf("%v", r)
				}
			}()
			return p.ValuesClause(tc.stmt, nil)
		}()
		if err == nil != tc.ok {
			t.Errorf("%d: error_expected=%t, but got error %v", i, tc.ok, err)
		}
		if plan != nil {
			defer plan.Close()
			plan, err = p.optimizePlan(plan, allColumns(plan))
			if err != nil {
				t.Errorf("%d: unexpected error in optimizePlan: %v", i, err)
				continue
			}
			if err := p.startPlan(plan); err != nil {
				t.Errorf("%d: unexpected error in Start: %v", i, err)
				continue
			}
			var rows []parser.DTuple
			next, err := plan.Next()
			for ; next; next, err = plan.Next() {
				rows = append(rows, plan.Values())
			}
			if err != nil {
				t.Error(err)
				continue
			}
			if !reflect.DeepEqual(rows, tc.rows) {
				t.Errorf("%d: expected rows:\n%+v\nactual rows:\n%+v", i, tc.rows, rows)
			}
		}
	}
}
Example #14
// splitBoolExpr splits a boolean expression E into two boolean expressions RES and REM such that:
//
//  - RES only has variables known to the conversion function (it is "restricted" to a particular
//    set of variables)
//
//  - If weaker is true, for any setting of variables x:
//       E(x) = (RES(x) AND REM(x))
//    This implies E(x) => RES(x), i.e. RES is "weaker"
//
//  - If weaker is false:
//       E(x) = (RES(x) OR REM(x))
//    This implies RES(x) => E(x), i.e. RES is "stronger"
func splitBoolExpr(
	expr parser.TypedExpr, conv varConvertFunc, weaker bool,
) (restricted, remainder parser.TypedExpr) {
	// If the expression only contains "restricted" vars, the split is trivial.
	if exprCheckVars(expr, conv) {
		// An "empty" filter is always true in the weaker (normal) case (where the filter is
		// equivalent to RES AND REM) and always false in the stronger (inverted) case (where the
		// filter is equivalent to RES OR REM).
		return exprConvertVars(expr, conv), parser.MakeDBool(parser.DBool(weaker))
	}

	switch t := expr.(type) {
	case *parser.AndExpr:
		if weaker {
			// In the weaker (normal) case, we have
			//   E = (leftRes AND leftRem) AND (rightRes AND rightRem)
			// We can just rearrange:
			//   E = (leftRes AND rightRes) AND (leftRem AND rightRem)
			leftRes, leftRem := splitBoolExpr(t.TypedLeft(), conv, weaker)
			rightRes, rightRem := splitBoolExpr(t.TypedRight(), conv, weaker)
			return makeAnd(leftRes, rightRes), makeAnd(leftRem, rightRem)
		}

		// In the stronger (inverted) case, we have
		//   E = (leftRes OR leftRem) AND (rightRes OR rightRem)
		// We can't do more than:
		//   E = (leftRes AND rightRes) OR E
		leftRes, _ := splitBoolExpr(t.TypedLeft(), conv, weaker)
		rightRes, _ := splitBoolExpr(t.TypedRight(), conv, weaker)
		return makeAnd(leftRes, rightRes), expr

	case *parser.OrExpr:
		if !weaker {
			// In the stronger (inverted) case, we have
			//   E = (leftRes OR leftRem) OR (rightRes OR rightRem)
			// We can just rearrange:
			//   E = (leftRes OR rightRes) OR (leftRem OR rightRem)
			leftRes, leftRem := splitBoolExpr(t.TypedLeft(), conv, weaker)
			rightRes, rightRem := splitBoolExpr(t.TypedRight(), conv, weaker)
			return makeOr(leftRes, rightRes), makeOr(leftRem, rightRem)
		}

		// In the weaker (normal) case, we have
		//   E = (leftRes AND leftRem) OR (rightRes AND rightRem)
		// We can't do more than:
		//   E = (leftRes OR rightRes) AND E
		leftRes, _ := splitBoolExpr(t.TypedLeft(), conv, weaker)
		rightRes, _ := splitBoolExpr(t.TypedRight(), conv, weaker)
		return makeOr(leftRes, rightRes), expr

	case *parser.ParenExpr:
		return splitBoolExpr(t.TypedInnerExpr(), conv, weaker)

	case *parser.NotExpr:
		exprRes, exprRem := splitBoolExpr(t.TypedInnerExpr(), conv, !weaker)
		return makeNot(exprRes), makeNot(exprRem)

	default:
		// We can't split off anything (we already handled the case when expr contains only
		// restricted vars above).
		// For why we return DBool(weaker), see the comment above on "empty" filters.
		return parser.MakeDBool(parser.DBool(weaker)), expr
	}
}
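
The OR branch in the weaker case is the least obvious step: it returns RES = (leftRes OR rightRes) and REM = E, and the invariant E = RES AND REM still holds because E implies RES. A tiny standalone check of that claim for E = (a AND p) OR (b AND q), taking a and b as the restricted halves of the two AND terms:

package main

import "fmt"

func main() {
	bools := []bool{false, true}
	ok := true
	for _, a := range bools {
		for _, p := range bools {
			for _, b := range bools {
				for _, q := range bools {
					E := (a && p) || (b && q)
					RES := a || b // restricted part returned by the OR branch
					REM := E      // remainder returned by the OR branch
					if E != (RES && REM) {
						ok = false
					}
				}
			}
		}
	}
	fmt.Println("E == RES AND REM for every assignment:", ok) // prints: ... true
}
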
Example #15
// Test that distributing aggregation functions according to DistAggregationTable
// yields correct results. We're going to run each aggregation as either the
// two-stage process described by the DistAggregationTable or as a single global
// process, and verify that the results are the same.
func TestDistAggregationTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	tc := serverutils.StartTestCluster(t, 1, base.TestClusterArgs{})
	defer tc.Stopper().Stop()

	// Create a table with a few columns:
	//  - random integer values from 0 to numRows
	//  - random integer values (with some NULLs)
	//  - random bool value (mostly false)
	//  - random bool value (mostly true)
	//  - random decimals
	//  - random decimals (with some NULLs)
	rng, _ := randutil.NewPseudoRand()
	sqlutils.CreateTable(
		t, tc.ServerConn(0), "t",
		"k INT PRIMARY KEY, int1 INT, int2 INT, bool1 BOOL, bool2 BOOL, dec1 DECIMAL, dec2 DECIMAL",
		numRows,
		func(row int) []parser.Datum {
			return []parser.Datum{
				parser.NewDInt(parser.DInt(row)),
				parser.NewDInt(parser.DInt(rng.Intn(numRows))),
				sqlbase.RandDatum(rng, sqlbase.ColumnType{Kind: sqlbase.ColumnType_INT}, true),
				parser.MakeDBool(parser.DBool(rng.Intn(10) == 0)),
				parser.MakeDBool(parser.DBool(rng.Intn(10) != 0)),
				sqlbase.RandDatum(rng, sqlbase.ColumnType{Kind: sqlbase.ColumnType_DECIMAL}, false),
				sqlbase.RandDatum(rng, sqlbase.ColumnType{Kind: sqlbase.ColumnType_DECIMAL}, true),
			}
		},
	)

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	for fn, info := range DistAggregationTable {
		if info.LocalStage == distsqlrun.AggregatorSpec_IDENT &&
			info.FinalStage == distsqlrun.AggregatorSpec_IDENT {
			// IDENT only works as expected if all rows have the same value on the
			// relevant column; skip testing this trivial case.
			continue
		}
		// We're going to test each aggregation function on every column that can be
		// used as input for it.
		foundCol := false
		for colIdx := 1; colIdx < len(desc.Columns); colIdx++ {
			// See if this column works with this function.
			_, _, err := distsqlrun.GetAggregateInfo(fn, desc.Columns[colIdx].Type)
			if err != nil {
				continue
			}
			foundCol = true
			for _, numRows := range []int{5, numRows / 10, numRows / 2, numRows} {
				name := fmt.Sprintf("%s/%s/%d", fn, desc.Columns[colIdx].Name, numRows)
				t.Run(name, func(t *testing.T) {
					checkDistAggregationInfo(t, tc.Server(0), desc, colIdx, numRows, fn, info)
				})
			}
		}
		if !foundCol {
			t.Errorf("aggregation function %s was not tested (no suitable column)", fn)
		}
	}
}