// deleteRow adds to the batch the kv operations necessary to delete a table row
// with the given values.
func (rd *rowDeleter) deleteRow(b *client.Batch, values []parser.Datum) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}

	primaryIndexKey, secondaryIndexEntries, err := rd.helper.encodeIndexes(rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}

	for _, secondaryIndexEntry := range secondaryIndexEntries {
		if log.V(2) {
			log.Infof("Del %s", secondaryIndexEntry.Key)
		}
		b.Del(secondaryIndexEntry.Key)
	}

	// Delete the row.
	rd.startKey = roachpb.Key(primaryIndexKey)
	rd.endKey = roachpb.Key(encoding.EncodeNotNullDescending(primaryIndexKey))
	if log.V(2) {
		log.Infof("DelRange %s - %s", rd.startKey, rd.endKey)
	}
	b.DelRange(&rd.startKey, &rd.endKey, false)
	rd.startKey, rd.endKey = nil, nil

	return nil
}
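// deleteRowsExample is a hypothetical sketch, not part of the original source:
// a caller typically accumulates one deleteRow call per fetched row into a
// single batch and then runs the batch on the enclosing transaction. The
// function and variable names (txn, rows) are illustrative assumptions; the
// rowDeleter is assumed to have been constructed elsewhere.
func deleteRowsExample(txn *client.Txn, rd *rowDeleter, rows [][]parser.Datum) error {
	b := &client.Batch{}
	for _, row := range rows {
		// Queue the Del/DelRange operations for this row.
		if err := rd.deleteRow(b, row); err != nil {
			return err
		}
	}
	// Send all queued deletions in one round trip.
	return txn.Run(b)
}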
// StoreData writes the supplied time series data to the cockroach server.
// Stored data will be sampled at the supplied resolution.
func (db *DB) StoreData(r Resolution, data []tspb.TimeSeriesData) error {
	var kvs []roachpb.KeyValue

	// Process data collection: data is converted to internal format, and a key
	// is generated for each internal message.
	for _, d := range data {
		idatas, err := d.ToInternal(r.KeyDuration(), r.SampleDuration())
		if err != nil {
			return err
		}
		for _, idata := range idatas {
			var value roachpb.Value
			if err := value.SetProto(&idata); err != nil {
				return err
			}
			kvs = append(kvs, roachpb.KeyValue{
				Key:   MakeDataKey(d.Name, d.Source, r, idata.StartTimestampNanos),
				Value: value,
			})
		}
	}

	// Send the individual internal merge requests.
	b := client.Batch{}
	for _, kv := range kvs {
		b.AddRawRequest(&roachpb.MergeRequest{
			Span: roachpb.Span{
				Key: kv.Key,
			},
			Value: kv.Value,
		})
	}

	return db.db.Run(&b)
}
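// storeExample is a hypothetical sketch, not part of the original source,
// showing one way a caller might feed a single datapoint to StoreData. The
// metric name, the Datapoints field name on tspb.TimeSeriesData, and the
// Resolution10s constant are assumptions about the ts package, not confirmed
// by the snippet above.
func storeExample(db *DB) error {
	data := []tspb.TimeSeriesData{
		{
			Name:   "cr.node.sys.uptime", // illustrative metric name
			Source: "1",                  // e.g. a node ID
			Datapoints: []tspb.TimeSeriesDatapoint{
				{TimestampNanos: time.Now().UnixNano(), Value: 42},
			},
		},
	}
	// Each datapoint is converted to internal form and merged into the store.
	return db.StoreData(Resolution10s, data)
}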
// insertCPutFn is used by insertRow when conflicts should be respected.
// logValue is used for pretty printing.
func insertCPutFn(b *client.Batch, key *roachpb.Key, value *roachpb.Value) {
	// TODO(dan): We want to do this V(2) log everywhere in sql. Consider making
	// a client.Batch wrapper instead of inlining it everywhere.
	if log.V(2) {
		log.InfofDepth(1, "CPut %s -> %s", *key, value.PrettyPrint())
	}
	b.CPut(key, value, nil)
}
// createDescriptor implements the DescriptorAccessor interface.
func (p *planner) createDescriptor(plainKey sqlbase.DescriptorKey, descriptor sqlbase.DescriptorProto, ifNotExists bool) (bool, error) {
	idKey := plainKey.Key()
	// Check whether idKey exists.
	gr, err := p.txn.Get(idKey)
	if err != nil {
		return false, err
	}

	if gr.Exists() {
		if ifNotExists {
			// Noop.
			return false, nil
		}
		// Key exists, but we don't want it to: error out.
		return false, fmt.Errorf("%s %q already exists", descriptor.TypeName(), plainKey.Name())
	}

	// Increment unique descriptor counter.
	if ir, err := p.txn.Inc(keys.DescIDGenerator, 1); err == nil {
		descriptor.SetID(sqlbase.ID(ir.ValueInt() - 1))
	} else {
		return false, err
	}

	// TODO(pmattis): The error currently returned below is likely going to be
	// difficult to interpret.
	//
	// TODO(pmattis): Need to handle if-not-exists here as well.
	//
	// TODO(pmattis): This is writing the namespace and descriptor table entries,
	// but not going through the normal INSERT logic and not performing a precise
	// mimicry. In particular, we're only writing a single key per table, while
	// perfect mimicry would involve writing a sentinel key for each row as well.
	descKey := sqlbase.MakeDescMetadataKey(descriptor.GetID())

	b := client.Batch{}
	descID := descriptor.GetID()
	descDesc := sqlbase.WrapDescriptor(descriptor)
	if log.V(2) {
		log.Infof("CPut %s -> %d", idKey, descID)
		log.Infof("CPut %s -> %s", descKey, descDesc)
	}
	b.CPut(idKey, descID, nil)
	b.CPut(descKey, descDesc, nil)

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, idKey, descID); err != nil {
			return err
		}
		return expectDescriptor(systemConfig, descKey, descDesc)
	})

	return true, p.txn.Run(&b)
}
// truncateTable truncates the data of a table.
// It deletes a range of data for the table, which includes the PK and all
// indexes.
func truncateTable(tableDesc *sqlbase.TableDescriptor, txn *client.Txn) error {
	tablePrefix := keys.MakeTablePrefix(uint32(tableDesc.ID))

	// Delete rows and indexes starting with the table's prefix.
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if log.V(2) {
		log.Infof("DelRange %s - %s", tableStartKey, tableEndKey)
	}
	b := client.Batch{}
	b.DelRange(tableStartKey, tableEndKey, false)
	return txn.Run(&b)
}
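// truncateExample is a hypothetical sketch, not part of the original source,
// showing truncateTable being run inside a client transaction via the db.Txn
// closure pattern used elsewhere in these snippets. The function name is an
// assumption for illustration.
func truncateExample(db *client.DB, tableDesc *sqlbase.TableDescriptor) error {
	return db.Txn(func(txn *client.Txn) error {
		// The entire table key span (PK and all indexes) is removed in one
		// DelRange batched on this transaction.
		return truncateTable(tableDesc, txn)
	})
}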
// deleteIndexRow adds to the batch the kv operations necessary to delete a
// table row from the given index.
func (rd *rowDeleter) deleteIndexRow(
	ctx context.Context, b *client.Batch, idx *sqlbase.IndexDescriptor, values []parser.Datum,
) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}
	secondaryIndexEntry, err := sqlbase.EncodeSecondaryIndex(
		rd.helper.tableDesc, idx, rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}
	if log.V(2) {
		log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
	}
	b.Del(secondaryIndexEntry.Key)
	return nil
}
func convertBatchError(tableDesc *sqlbase.TableDescriptor, b *client.Batch) error {
	origPErr := b.MustPErr()
	if origPErr.Index == nil {
		return origPErr.GoError()
	}
	index := origPErr.Index.Index
	if index >= int32(len(b.Results)) {
		panic(fmt.Sprintf("index %d outside of results: %+v", index, b.Results))
	}
	result := b.Results[index]
	var alloc sqlbase.DatumAlloc
	if _, ok := origPErr.GetDetail().(*roachpb.ConditionFailedError); ok {
		for _, row := range result.Rows {
			// TODO(dan): There's too much internal knowledge of the sql table
			// encoding here (and this callsite is the only reason
			// DecodeIndexKeyPrefix is exported). Refactor this bit out.
			indexID, key, err := sqlbase.DecodeIndexKeyPrefix(&alloc, tableDesc, row.Key)
			if err != nil {
				return err
			}
			index, err := tableDesc.FindIndexByID(indexID)
			if err != nil {
				return err
			}
			valTypes, err := sqlbase.MakeKeyVals(tableDesc, index.ColumnIDs)
			if err != nil {
				return err
			}
			dirs := make([]encoding.Direction, 0, len(index.ColumnIDs))
			for _, dir := range index.ColumnDirections {
				convertedDir, err := dir.ToEncodingDirection()
				if err != nil {
					return err
				}
				dirs = append(dirs, convertedDir)
			}
			vals := make([]parser.Datum, len(valTypes))
			if _, err := sqlbase.DecodeKeyVals(&alloc, valTypes, vals, dirs, key); err != nil {
				return err
			}
			return sqlbase.NewUniquenessConstraintViolationError(index, vals)
		}
	}
	return origPErr.GoError()
}
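// convertBatchErrorExample is a hypothetical sketch, not part of the original
// source. convertBatchError is only meaningful after running a batch has
// failed, since it inspects the batch's stored error via MustPErr; a plausible
// call site therefore looks like the following.
func convertBatchErrorExample(txn *client.Txn, tableDesc *sqlbase.TableDescriptor, b *client.Batch) error {
	if err := txn.Run(b); err != nil {
		// Translate a ConditionFailedError from a CPut on a unique index into
		// a uniqueness constraint violation error for the client.
		return convertBatchError(tableDesc, b)
	}
	return nil
}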
func runDel(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		mustUsage(cmd)
		return
	}

	var b client.Batch
	for _, arg := range args {
		b.Del(unquoteArg(arg, true /* disallow system keys */))
	}

	kvDB, stopper := makeDBClient()
	defer stopper.Stop()
	if err := kvDB.Run(&b); err != nil {
		panicf("delete failed: %s", err)
	}
}
// RenameDatabase renames the database.
// Privileges: security.RootUser user.
// Notes: postgres requires superuser, db owner, or "CREATEDB".
//        mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
	if n.Name == "" || n.NewName == "" {
		return nil, errEmptyDatabaseName
	}

	if p.session.User != security.RootUser {
		return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
	}

	dbDesc, err := p.getDatabaseDesc(string(n.Name))
	if err != nil {
		return nil, err
	}
	if dbDesc == nil {
		return nil, sqlbase.NewUndefinedDatabaseError(string(n.Name))
	}

	if n.Name == n.NewName {
		// Noop.
		return &emptyNode{}, nil
	}

	// Now update the nameMetadataKey and the descriptor.
	descKey := sqlbase.MakeDescMetadataKey(dbDesc.GetID())
	dbDesc.SetName(string(n.NewName))

	if err := dbDesc.Validate(); err != nil {
		return nil, err
	}

	newKey := databaseKey{string(n.NewName)}.Key()
	oldKey := databaseKey{string(n.Name)}.Key()
	descID := dbDesc.GetID()
	descDesc := sqlbase.WrapDescriptor(dbDesc)

	b := client.Batch{}
	b.CPut(newKey, descID, nil)
	b.Put(descKey, descDesc)
	b.Del(oldKey)

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*roachpb.ConditionFailedError); ok {
			return nil, fmt.Errorf("the new database name %q already exists", string(n.NewName))
		}
		return nil, err
	}

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, newKey, descID); err != nil {
			return err
		}
		if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil {
			return err
		}
		return expectDeleted(systemConfig, oldKey)
	})

	return &emptyNode{}, nil
}
func runPut(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args)%2 == 1 {
		mustUsage(cmd)
		return
	}

	var b client.Batch
	for i := 0; i < len(args); i += 2 {
		b.Put(
			unquoteArg(args[i], true /* disallow system keys */),
			unquoteArg(args[i+1], false),
		)
	}

	kvDB, stopper := makeDBClient()
	defer stopper.Stop()
	if err := kvDB.Run(&b); err != nil {
		panicf("put failed: %s", err)
	}
}
// TestAuthentication tests authentication for the KV endpoint.
func TestAuthentication(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	var b1 client.Batch
	b1.Put("a", "b")

	// Create a node user client and call Run() on it which lets us build our own
	// request, specifying the user.
	db1 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser)
	if err := db1.Run(&b1); err != nil {
		t.Fatal(err)
	}

	var b2 client.Batch
	b2.Put("c", "d")

	// Try again, but this time with certs for a non-node user (even the root
	// user has no KV permissions).
	db2 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.RootUser)
	if err := db2.Run(&b2); !testutils.IsError(err, "is not allowed") {
		t.Fatal(err)
	}
}
// flush writes all dirty nodes and the tree to the transaction.
func (tc *treeContext) flush(b *client.Batch) {
	if tc.dirty {
		b.Put(keys.RangeTreeRoot, tc.tree)
	}
	for key, cachedNode := range tc.nodes {
		if cachedNode.dirty {
			if cachedNode.node == nil {
				b.Del(keys.RangeTreeNodeKey(roachpb.RKey(key)))
			} else {
				b.Put(keys.RangeTreeNodeKey(roachpb.RKey(key)), cachedNode.node)
			}
		}
	}
}
func delMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) {
	b.Del(key)
}
func putMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) {
	b.Put(key, desc)
}
// Execute the entire schema change in steps. startBackfillNotification is
// called before the backfill starts; it can be nil.
func (sc SchemaChanger) exec(
	startBackfillNotification func() error,
	oldNameNotInUseNotification func(),
) error {
	// Acquire lease.
	lease, err := sc.AcquireLease()
	if err != nil {
		return err
	}
	needRelease := true
	// Always try to release lease.
	defer func(l *sqlbase.TableDescriptor_SchemaChangeLease) {
		// If the schema changer deleted the descriptor, there's no longer a
		// lease to be released.
		if !needRelease {
			return
		}
		if err := sc.ReleaseLease(*l); err != nil {
			log.Warning(err)
		}
	}(&lease)

	// Increment the version and unset tableDescriptor.UpVersion.
	desc, err := sc.MaybeIncrementVersion()
	if err != nil {
		return err
	}

	if desc.GetTable().Deleted() {
		lease, err = sc.ExtendLease(lease)
		if err != nil {
			return err
		}
		// Wait for everybody to see the version with the deleted bit set. When
		// this returns, nobody has any leases on the table, nor can get new
		// leases, so the table will no longer be modified.
		if err := sc.waitToUpdateLeases(); err != nil {
			return err
		}

		// Truncate the table and delete the descriptor.
		if err := sc.truncateAndDropTable(&lease, desc.GetTable()); err != nil {
			return err
		}
		needRelease = false
		return nil
	}

	if desc.GetTable().Renamed() {
		lease, err = sc.ExtendLease(lease)
		if err != nil {
			return err
		}
		// Wait for everyone to see the version with the new name. When this
		// returns, no new transactions will be using the old name for the
		// table, so the old name can now be re-used (by CREATE).
		if err := sc.waitToUpdateLeases(); err != nil {
			return err
		}

		if oldNameNotInUseNotification != nil {
			oldNameNotInUseNotification()
		}
		// Free up the old name(s).
		err := sc.db.Txn(func(txn *client.Txn) error {
			b := client.Batch{}
			for _, renameDetails := range desc.GetTable().Renames {
				tbKey := tableKey{
					sqlbase.ID(renameDetails.OldParentID), renameDetails.OldName}.Key()
				b.Del(tbKey)
			}
			if err := txn.Run(&b); err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}

		// Clean up - clear the descriptor's state.
		_, err = sc.leaseMgr.Publish(sc.tableID, func(desc *sqlbase.TableDescriptor) error {
			desc.Renames = nil
			return nil
		}, nil)
		if err != nil {
			return err
		}
	}

	// Wait for the schema change to propagate to all nodes after this function
	// returns, so that the new schema is live everywhere. This is not needed for
	// correctness but is done to make the UI experience/tests predictable.
	defer func() {
		if err := sc.waitToUpdateLeases(); err != nil {
			log.Warning(err)
		}
	}()

	if sc.mutationID == sqlbase.InvalidMutationID {
		// Nothing more to do.
		return nil
	}

	// Another transaction might set the up_version bit again,
	// but we're no longer responsible for taking care of that.

	// Run through mutation state machine and backfill.
	err = sc.runStateMachineAndBackfill(&lease, startBackfillNotification)

	// Purge the mutations if the application of the mutations failed due to
	// an integrity constraint violation. All other errors are transient
	// errors that are resolved by retrying the backfill.
	if sqlbase.IsIntegrityConstraintError(err) {
		log.Warningf("reversing schema change due to irrecoverable error: %s", err)
		if errReverse := sc.reverseMutations(err); errReverse != nil {
			// Although the backfill did hit an integrity constraint violation
			// and made a decision to reverse the mutations,
			// reverseMutations() failed. If exec() is called again the entire
			// schema change will be retried.
			return errReverse
		}

		// After this point the schema change has been reversed and any retry
		// of the schema change will act upon the reversed schema change.
		if errPurge := sc.runStateMachineAndBackfill(
			&lease, startBackfillNotification,
		); errPurge != nil {
			// Don't return this error because we do want the caller to know
			// that an integrity constraint was violated with the original
			// schema change. The reversed schema change will be
			// retried via the async schema change manager.
			log.Warningf("error purging mutation: %s, after error: %s", errPurge, err)
		}
	}

	return err
}
// updateRow adds to the batch the kv operations necessary to update a table row
// with the given values.
//
// The row corresponding to oldValues is updated with the ones in updateValues.
// Note that updateValues only contains the ones that are changing.
//
// The return value is only good until the next call to UpdateRow.
func (ru *rowUpdater) updateRow(
	b *client.Batch,
	oldValues []parser.Datum,
	updateValues []parser.Datum,
) ([]parser.Datum, error) {
	if len(oldValues) != len(ru.fetchCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(oldValues), len(ru.fetchCols))
	}
	if len(updateValues) != len(ru.updateCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(updateValues), len(ru.updateCols))
	}

	primaryIndexKey, secondaryIndexEntries, err := ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, oldValues)
	if err != nil {
		return nil, err
	}

	// The secondary index entries returned by rowHelper.encodeIndexes are only
	// valid until the next call to encodeIndexes. We need to copy them so that
	// we can compare against the new secondary index entries.
	secondaryIndexEntries = append(ru.indexEntriesBuf[:0], secondaryIndexEntries...)
	ru.indexEntriesBuf = secondaryIndexEntries

	// Check that the new value types match the column types. This needs to
	// happen before index encoding because certain datum types (i.e. tuple)
	// cannot be used as index values.
	for i, val := range updateValues {
		if ru.marshalled[i], err = sqlbase.MarshalColumnValue(ru.updateCols[i], val); err != nil {
			return nil, err
		}
	}

	// Update the row values.
	copy(ru.newValues, oldValues)
	for i, updateCol := range ru.updateCols {
		ru.newValues[ru.fetchColIDtoRowIndex[updateCol.ID]] = updateValues[i]
	}

	rowPrimaryKeyChanged := false
	var newSecondaryIndexEntries []sqlbase.IndexEntry
	if ru.primaryKeyColChange {
		var newPrimaryIndexKey []byte
		newPrimaryIndexKey, newSecondaryIndexEntries, err =
			ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
		rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
	} else {
		newSecondaryIndexEntries, err =
			ru.helper.encodeSecondaryIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
	}

	if rowPrimaryKeyChanged {
		if err := ru.fks.checkIdx(ru.helper.tableDesc.PrimaryIndex.ID, oldValues, ru.newValues); err != nil {
			return nil, err
		}
		for i := range newSecondaryIndexEntries {
			if !bytes.Equal(newSecondaryIndexEntries[i].Key, secondaryIndexEntries[i].Key) {
				if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
					return nil, err
				}
			}
		}

		if err := ru.rd.deleteRow(b, oldValues); err != nil {
			return nil, err
		}
		if err := ru.ri.insertRow(b, ru.newValues, false); err != nil {
			return nil, err
		}
		return ru.newValues, nil
	}

	// Add the new values.
	// TODO(dan): This has gotten very similar to the loop in insertRow, see if
	// they can be DRY'd. Ideally, this would also work for
	// truncateAndBackfillColumnsChunk, which is currently abusing rowUpdater.
	for i, family := range ru.helper.tableDesc.Families {
		update := false
		for _, colID := range family.ColumnIDs {
			if _, ok := ru.updateColIDtoRowIndex[colID]; ok {
				update = true
				break
			}
		}
		if !update {
			continue
		}

		if i > 0 {
			// HACK: MakeFamilyKey appends to its argument, so on every loop
			// iteration after the first, trim primaryIndexKey so nothing gets
			// overwritten.
			// TODO(dan): Instead of this, use something like engine.ChunkAllocator.
			primaryIndexKey = primaryIndexKey[:len(primaryIndexKey):len(primaryIndexKey)]
		}

		if len(family.ColumnIDs) == 1 && family.ColumnIDs[0] == family.DefaultColumnID {
			// Storage optimization to store DefaultColumnID directly as a value. Also
			// backwards compatible with the original BaseFormatVersion.
			idx, ok := ru.updateColIDtoRowIndex[family.DefaultColumnID]
			if !ok {
				continue
			}

			ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
			if log.V(2) {
				log.Infof("Put %s -> %v", ru.key, ru.marshalled[idx].PrettyPrint())
			}
			b.Put(&ru.key, &ru.marshalled[idx])
			ru.key = nil

			continue
		}

		ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
		ru.valueBuf = ru.valueBuf[:0]

		var lastColID sqlbase.ColumnID
		familySortedColumnIDs, ok := ru.helper.sortedColumnFamily(family.ID)
		if !ok {
			panic("invalid family sorted column id map")
		}
		for _, colID := range familySortedColumnIDs {
			if ru.helper.columnInPK(colID) {
				if family.ID != 0 {
					return nil, errors.Errorf("primary index column %d must be in family 0, was %d", colID, family.ID)
				}
				// Skip primary key columns as their values are encoded in the key of
				// each family. Family 0 is guaranteed to exist and acts as a sentinel.
				continue
			}

			idx, ok := ru.fetchColIDtoRowIndex[colID]
			if !ok {
				return nil, errors.Errorf("column %d was expected to be fetched, but wasn't", colID)
			}
			col := ru.fetchCols[idx]

			if ru.newValues[idx].Compare(parser.DNull) == 0 {
				continue
			}

			if lastColID > col.ID {
				panic(fmt.Errorf("cannot write column id %d after %d", col.ID, lastColID))
			}
			colIDDiff := col.ID - lastColID
			lastColID = col.ID
			ru.valueBuf, err = sqlbase.EncodeTableValue(ru.valueBuf, colIDDiff, ru.newValues[idx])
			if err != nil {
				return nil, err
			}
		}

		if family.ID != 0 && len(ru.valueBuf) == 0 {
			// The family might have already existed but every column in it is being
			// set to NULL, so delete it.
			if log.V(2) {
				log.Infof("Del %s", ru.key)
			}
			b.Del(&ru.key)
		} else {
			ru.value.SetTuple(ru.valueBuf)
			if log.V(2) {
				log.Infof("Put %s -> %v", ru.key, ru.value.PrettyPrint())
			}
			b.Put(&ru.key, &ru.value)
		}

		ru.key = nil
	}

	// Update secondary indexes.
	for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
		secondaryIndexEntry := secondaryIndexEntries[i]
		secondaryKeyChanged := !bytes.Equal(newSecondaryIndexEntry.Key, secondaryIndexEntry.Key)
		if secondaryKeyChanged {
			if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
				return nil, err
			}

			if log.V(2) {
				log.Infof("Del %s", secondaryIndexEntry.Key)
			}
			b.Del(secondaryIndexEntry.Key)

			// Do not update Indexes in the DELETE_ONLY state.
			if _, ok := ru.deleteOnlyIndex[i]; !ok {
				if log.V(2) {
					log.Infof("CPut %s -> %v", newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value.PrettyPrint())
				}
				b.CPut(newSecondaryIndexEntry.Key, &newSecondaryIndexEntry.Value, nil)
			}
		}
	}

	return ru.newValues, nil
}
// insertPutFn is used by insertRow when conflicts should be ignored.
// logValue is used for pretty printing.
func insertPutFn(b *client.Batch, key *roachpb.Key, value *roachpb.Value) {
	if log.V(2) {
		log.InfofDepth(1, "Put %s -> %s", *key, value.PrettyPrint())
	}
	b.Put(key, value)
}
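// insertFnsExample is a hypothetical sketch, not part of the original source,
// contrasting the two insert helpers: insertPutFn writes blindly, while
// insertCPutFn (passing a nil expected value to CPut) fails with a
// ConditionFailedError if the key already exists. The key and value below are
// illustrative only.
func insertFnsExample(b *client.Batch) {
	key := roachpb.Key("example-key")
	var value roachpb.Value
	value.SetBytes([]byte("example-value"))

	insertPutFn(b, &key, &value)  // unconditional write: last writer wins
	insertCPutFn(b, &key, &value) // conditional write: the key must not exist yet
}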
// Query returns datapoints for the named time series during the supplied time
// span. Data is returned as a series of consecutive data points.
//
// Data is queried only at the Resolution supplied: if data for the named time
// series is not stored at the given resolution, an empty result will be
// returned.
//
// All data stored on the server is downsampled to some degree; the data points
// returned represent the average value within a sample period. Each datapoint's
// timestamp falls in the middle of the sample period it represents.
//
// If data for the named time series was collected from multiple sources, each
// returned datapoint will represent the sum of datapoints from all sources at
// the same time. The returned string slice contains a list of all sources for
// the metric which were aggregated to produce the result.
func (db *DB) Query(query tspb.Query, r Resolution, startNanos, endNanos int64) ([]tspb.TimeSeriesDatapoint, []string, error) {
	// Normalize startNanos and endNanos to the nearest SampleDuration boundary.
	startNanos -= startNanos % r.SampleDuration()

	var rows []client.KeyValue
	if len(query.Sources) == 0 {
		// Based on the supplied timestamps and resolution, construct start and end
		// keys for a scan that will return every key with data relevant to the
		// query.
		startKey := MakeDataKey(query.Name, "" /* source */, r, startNanos)
		endKey := MakeDataKey(query.Name, "" /* source */, r, endNanos).PrefixEnd()
		var b client.Batch
		b.Header.ReadConsistency = roachpb.INCONSISTENT
		b.Scan(startKey, endKey, 0)

		if err := db.db.Run(&b); err != nil {
			return nil, nil, err
		}
		rows = b.Results[0].Rows
	} else {
		b := db.db.NewBatch()
		b.Header.ReadConsistency = roachpb.INCONSISTENT
		// Iterate over all key timestamps which may contain data for the given
		// sources, based on the given start/end time and the resolution.
		kd := r.KeyDuration()
		startKeyNanos := startNanos - (startNanos % kd)
		endKeyNanos := endNanos - (endNanos % kd)
		for currentTimestamp := startKeyNanos; currentTimestamp <= endKeyNanos; currentTimestamp += kd {
			for _, source := range query.Sources {
				key := MakeDataKey(query.Name, source, r, currentTimestamp)
				b.Get(key)
			}
		}
		err := db.db.Run(b)
		if err != nil {
			return nil, nil, err
		}
		for _, result := range b.Results {
			row := result.Rows[0]
			if row.Value == nil {
				continue
			}
			rows = append(rows, row)
		}
	}

	// Convert the queried source data into a set of data spans, one for each
	// source.
	sourceSpans, err := makeDataSpans(rows, startNanos)
	if err != nil {
		return nil, nil, err
	}

	// Compute a downsample function which will be used to return values from
	// each source for each sample period.
	downsampler, err := getDownsampleFunction(query.GetDownsampler())
	if err != nil {
		return nil, nil, err
	}

	// If we are returning a derivative, iteration needs to start at offset -1
	// (in order to correctly compute the rate of change at offset 0).
	var startOffset int32
	isDerivative := query.GetDerivative() != tspb.TimeSeriesQueryDerivative_NONE
	if isDerivative {
		startOffset = -1
	}

	// Create an interpolatingIterator for each dataSpan, adding each iterator
	// into a unionIterator collection. This is also where we compute a list of
	// all sources with data present in the query.
	sources := make([]string, 0, len(sourceSpans))
	iters := make(unionIterator, 0, len(sourceSpans))
	for name, span := range sourceSpans {
		sources = append(sources, name)
		iters = append(iters, span.newIterator(startOffset, downsampler))
	}

	// Choose an aggregation function to use when taking values from the
	// unionIterator.
	var valueFn func() float64
	switch query.GetSourceAggregator() {
	case tspb.TimeSeriesQueryAggregator_SUM:
		valueFn = iters.sum
	case tspb.TimeSeriesQueryAggregator_AVG:
		valueFn = iters.avg
	case tspb.TimeSeriesQueryAggregator_MAX:
		valueFn = iters.max
	case tspb.TimeSeriesQueryAggregator_MIN:
		valueFn = iters.min
	}

	// Iterate over all requested offsets, recording a value from the
	// unionIterator at each offset encountered. If the query is requesting a
	// derivative, a rate of change is recorded instead of the actual values.
	iters.init()
	var last tspb.TimeSeriesDatapoint
	if isDerivative {
		last = tspb.TimeSeriesDatapoint{
			TimestampNanos: iters.timestamp(),
			Value:          valueFn(),
		}
		// For derivatives, the iterator was initialized at offset -1 in order
		// to calculate the rate of change at offset zero. However, in some
		// cases (such as the very first value recorded) offset -1 is not
		// available. In this case, we treat the rate-of-change at the first
		// offset as zero.
		if iters.offset() < 0 {
			iters.advance()
		}
	}
	var responseData []tspb.TimeSeriesDatapoint
	for iters.isValid() && iters.timestamp() <= endNanos {
		current := tspb.TimeSeriesDatapoint{
			TimestampNanos: iters.timestamp(),
			Value:          valueFn(),
		}
		response := current
		if isDerivative {
			dTime := (current.TimestampNanos - last.TimestampNanos) / time.Second.Nanoseconds()
			if dTime == 0 {
				response.Value = 0
			} else {
				response.Value = (current.Value - last.Value) / float64(dTime)
			}
			if response.Value < 0 &&
				query.GetDerivative() == tspb.TimeSeriesQueryDerivative_NON_NEGATIVE_DERIVATIVE {
				response.Value = 0
			}
		}
		responseData = append(responseData, response)
		last = current
		iters.advance()
	}

	return responseData, sources, nil
}
// RenameTable renames the table.
// Privileges: DROP on source table, CREATE on destination database.
// Notes: postgres requires the table owner.
//        mysql requires ALTER, DROP on the original table, and CREATE, INSERT
//        on the new table (and does not copy privileges over).
func (p *planner) RenameTable(n *parser.RenameTable) (planNode, error) {
	if err := n.NewName.NormalizeTableName(p.session.Database); err != nil {
		return nil, err
	}

	if n.NewName.Table() == "" {
		return nil, errEmptyTableName
	}

	if err := n.Name.NormalizeTableName(p.session.Database); err != nil {
		return nil, err
	}

	dbDesc, err := p.getDatabaseDesc(n.Name.Database())
	if err != nil {
		return nil, err
	}
	if dbDesc == nil {
		return nil, sqlbase.NewUndefinedDatabaseError(n.Name.Database())
	}

	tbKey := tableKey{dbDesc.ID, n.Name.Table()}.Key()

	// Check if table exists.
	gr, err := p.txn.Get(tbKey)
	if err != nil {
		return nil, err
	}
	if !gr.Exists() {
		if n.IfExists {
			// Noop.
			return &emptyNode{}, nil
		}
		// Key does not exist, but we want it to: error out.
		return nil, fmt.Errorf("table %q does not exist", n.Name.Table())
	}

	targetDbDesc, err := p.getDatabaseDesc(n.NewName.Database())
	if err != nil {
		return nil, err
	}
	if targetDbDesc == nil {
		return nil, sqlbase.NewUndefinedDatabaseError(n.NewName.Database())
	}

	if err := p.checkPrivilege(targetDbDesc, privilege.CREATE); err != nil {
		return nil, err
	}

	if n.Name.Database() == n.NewName.Database() && n.Name.Table() == n.NewName.Table() {
		// Noop.
		return &emptyNode{}, nil
	}

	tableDesc, err := p.getTableDesc(n.Name)
	if err != nil {
		return nil, err
	}
	if tableDesc == nil || tableDesc.State != sqlbase.TableDescriptor_PUBLIC {
		return nil, sqlbase.NewUndefinedTableError(n.Name.String())
	}

	if err := p.checkPrivilege(tableDesc, privilege.DROP); err != nil {
		return nil, err
	}

	tableDesc.SetName(n.NewName.Table())
	tableDesc.ParentID = targetDbDesc.ID

	descKey := sqlbase.MakeDescMetadataKey(tableDesc.GetID())
	newTbKey := tableKey{targetDbDesc.ID, n.NewName.Table()}.Key()

	if err := tableDesc.Validate(); err != nil {
		return nil, err
	}

	descID := tableDesc.GetID()
	descDesc := sqlbase.WrapDescriptor(tableDesc)

	if err := tableDesc.SetUpVersion(); err != nil {
		return nil, err
	}
	renameDetails := sqlbase.TableDescriptor_RenameInfo{
		OldParentID: uint32(dbDesc.ID),
		OldName:     n.Name.Table()}
	tableDesc.Renames = append(tableDesc.Renames, renameDetails)
	if err := p.writeTableDesc(tableDesc); err != nil {
		return nil, err
	}

	// We update the descriptor to the new name, but also leave the mapping of the
	// old name to the id, so that the name is not reused until the schema changer
	// has made sure it's not in use any more.
	b := client.Batch{}
	b.Put(descKey, descDesc)
	b.CPut(newTbKey, descID, nil)

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*roachpb.ConditionFailedError); ok {
			return nil, fmt.Errorf("table name %q already exists", n.NewName.Table())
		}
		return nil, err
	}

	p.notifySchemaChange(tableDesc.ID, sqlbase.InvalidMutationID)

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, newTbKey, descID); err != nil {
			return err
		}
		if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil {
			return err
		}
		return nil
	})

	return &emptyNode{}, nil
}