func (sc *SchemaChanger) maybeWriteResumeSpan(
	txn *client.Txn,
	version sqlbase.DescriptorVersion,
	resume roachpb.Span,
	mutationIdx int,
	lastCheckpoint *time.Time,
) error {
	// Start from the package-level default interval; the testing knob, if
	// set, overrides it.
	checkpointInterval := checkpointInterval
	if sc.testingKnobs.WriteCheckpointInterval > 0 {
		checkpointInterval = sc.testingKnobs.WriteCheckpointInterval
	}
	if timeutil.Since(*lastCheckpoint) < checkpointInterval {
		return nil
	}
	tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
	if err != nil {
		return err
	}
	if tableDesc.Version != version {
		return errVersionMismatch
	}
	tableDesc.Mutations[mutationIdx].ResumeSpan = resume
	txn.SetSystemConfigTrigger()
	if err := txn.Put(
		sqlbase.MakeDescMetadataKey(tableDesc.GetID()),
		sqlbase.WrapDescriptor(tableDesc),
	); err != nil {
		return err
	}
	*lastCheckpoint = timeutil.Now()
	return nil
}
// GenerateUniqueDescID returns the next available Descriptor ID and
// increments the counter.
func GenerateUniqueDescID(txn *client.Txn) (sqlbase.ID, error) {
	// Increment unique descriptor counter.
	ir, err := txn.Inc(keys.DescIDGenerator, 1)
	if err != nil {
		return 0, err
	}
	// Inc returns the post-increment value, so the ID handed out is the
	// pre-increment one.
	return sqlbase.ID(ir.ValueInt() - 1), nil
}
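
// A minimal usage sketch (the function name and surrounding plumbing are
// illustrative, not from the source): allocating an ID for a new descriptor
// inside an existing transaction.
func exampleAllocateDescID(txn *client.Txn) error {
	id, err := GenerateUniqueDescID(txn)
	if err != nil {
		return err
	}
	// The new ID can now be used to key the descriptor, e.g. via
	// sqlbase.MakeDescMetadataKey(id).
	_ = id
	return nil
}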
func getDescriptorID(txn *client.Txn, key sqlbase.DescriptorKey) (sqlbase.ID, error) {
	// TODO(dan): Share this with (*planner).getDescriptor.
	idValue, err := txn.Get(key.Key())
	if err != nil {
		return 0, err
	}
	if !idValue.Exists() {
		return 0, errors.Errorf("no descriptor for key: %s", key)
	}
	return sqlbase.ID(idValue.ValueInt()), nil
}
// writeCmd sums values from the env (and possibly numeric constants)
// and writes the value to the db. "c.endKey" here needs to be parsed
// in the context of this command, which is a "+"-separated list of
// keys from the env or numeric constants to sum.
func writeCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	sum := int64(0)
	for _, sp := range strings.Split(c.endKey, "+") {
		if constant, err := strconv.Atoi(sp); err != nil {
			// Not a numeric constant: treat it as an env key.
			sum += c.env[sp]
		} else {
			sum += int64(constant)
		}
	}
	err := txn.Put(c.getKey(), sum)
	c.debug = fmt.Sprintf("[%d]", sum)
	return err
}
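
// A worked illustration of the "+"-separated spec format (the values and the
// helper are assumptions, not from the source): with env = {"a": 2, "b": 3},
// the spec "a+b+1" sums to 2+3+1 = 6.
func exampleSumSpec() int64 {
	env := map[string]int64{"a": 2, "b": 3}
	sum := int64(0)
	for _, sp := range strings.Split("a+b+1", "+") {
		if constant, err := strconv.Atoi(sp); err != nil {
			sum += env[sp] // env key
		} else {
			sum += int64(constant) // numeric constant
		}
	}
	return sum // 6
}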
// readCmd reads a value from the db and stores it in the env.
func readCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	r, err := txn.Get(c.getKey())
	if err != nil {
		return err
	}
	var value int64
	if r.Value != nil {
		value = r.ValueInt()
	}
	c.env[c.key] = value
	c.debug = fmt.Sprintf("[%d]", value)
	return nil
}
// incCmd adds one to the value of c.key in the env (as determined by
// a previous read or write) and writes it to the db. It panics if the
// key has not yet been read into the env.
func incCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	val, ok := c.env[c.key]
	if !ok {
		panic(fmt.Sprintf("can't increment key %q; not yet read", c.key))
	}
	r := val + 1
	if err := txn.Put(c.getKey(), r); err != nil {
		return err
	}
	c.env[c.key] = r
	c.debug = fmt.Sprintf("[%d]", r)
	return nil
}
// resolveName resolves a table name to a descriptor ID by looking in the
// database. If the mapping is not found, sqlbase.ErrDescriptorNotFound is
// returned.
func (m *LeaseManager) resolveName(
	txn *client.Txn, dbID sqlbase.ID, tableName string,
) (sqlbase.ID, error) {
	nameKey := tableKey{dbID, tableName}
	key := nameKey.Key()
	gr, err := txn.Get(key)
	if err != nil {
		return 0, err
	}
	if !gr.Exists() {
		return 0, sqlbase.ErrDescriptorNotFound
	}
	return sqlbase.ID(gr.ValueInt()), nil
}
// AllRangeDescriptors fetches all meta2 RangeDescriptors using the given txn.
func AllRangeDescriptors(txn *client.Txn) ([]roachpb.RangeDescriptor, error) {
	// TODO(dan): Iterate with some batch size.
	rows, err := txn.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
	if err != nil {
		return nil, errors.Wrap(err, "unable to scan range descriptors")
	}
	rangeDescs := make([]roachpb.RangeDescriptor, len(rows))
	for i, row := range rows {
		if err := row.ValueProto(&rangeDescs[i]); err != nil {
			return nil, errors.Wrapf(err, "%s: unable to unmarshal range descriptor", row.Key)
		}
	}
	return rangeDescs, nil
}
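
// A sketch of the batched iteration the TODO above suggests (the batch size
// and resumption logic are assumptions, not from the source): scan at most
// maxRows at a time and resume from just past the last key returned.
func allRangeDescriptorsBatched(txn *client.Txn) ([]roachpb.RangeDescriptor, error) {
	const maxRows = 1000 // hypothetical batch size
	var rangeDescs []roachpb.RangeDescriptor
	startKey := keys.Meta2Prefix
	for {
		rows, err := txn.Scan(startKey, keys.MetaMax, maxRows)
		if err != nil {
			return nil, errors.Wrap(err, "unable to scan range descriptors")
		}
		for _, row := range rows {
			var desc roachpb.RangeDescriptor
			if err := row.ValueProto(&desc); err != nil {
				return nil, errors.Wrapf(err, "%s: unable to unmarshal range descriptor", row.Key)
			}
			rangeDescs = append(rangeDescs, desc)
		}
		// A short batch means the scan is exhausted.
		if int64(len(rows)) < maxRows {
			return rangeDescs, nil
		}
		startKey = rows[len(rows)-1].Key.Next()
	}
}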
// scanCmd reads the values from the db from [key, endKey).
func scanCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	rows, err := txn.Scan(c.getKey(), c.getEndKey(), 0)
	if err != nil {
		return err
	}
	var vals []string
	keyPrefix := []byte(fmt.Sprintf("%d.", c.historyIdx))
	for _, kv := range rows {
		key := bytes.TrimPrefix(kv.Key, keyPrefix)
		c.env[string(key)] = kv.ValueInt()
		vals = append(vals, fmt.Sprintf("%d", kv.ValueInt()))
	}
	c.debug = fmt.Sprintf("[%s]", strings.Join(vals, " "))
	return nil
}
func restoreTableDesc(
	ctx context.Context,
	txn *client.Txn,
	database sqlbase.DatabaseDescriptor,
	table sqlbase.TableDescriptor,
) error {
	// Run getDescriptorID again to make sure the database hasn't been dropped
	// while we were importing.
	var err error
	if table.ParentID, err = getDescriptorID(txn, tableKey{name: database.Name}); err != nil {
		return err
	}
	tableIDKey := tableKey{parentID: table.ParentID, name: table.Name}.Key()
	tableDescKey := sqlbase.MakeDescMetadataKey(table.ID)

	// Check for an existing table.
	var existingDesc sqlbase.Descriptor
	existingIDKV, err := txn.Get(tableIDKey)
	if err != nil {
		return err
	}
	if existingIDKV.Value != nil {
		existingID, err := existingIDKV.Value.GetInt()
		if err != nil {
			return err
		}
		existingDescKV, err := txn.Get(sqlbase.MakeDescMetadataKey(sqlbase.ID(existingID)))
		if err != nil {
			return err
		}
		if err := existingDescKV.Value.GetProto(&existingDesc); err != nil {
			return err
		}
	}

	// Write the new descriptors. First the ID -> TableDescriptor for the new
	// table, then flip (or initialize) the name -> ID entry so any new queries
	// will use the new one. If there was an existing table, it can now be
	// cleaned up.
	b := txn.NewBatch()
	b.CPut(tableDescKey, sqlbase.WrapDescriptor(&table), nil)
	if existingTable := existingDesc.GetTable(); existingTable == nil {
		b.CPut(tableIDKey, table.ID, nil)
	} else {
		existingIDKV.Value.ClearChecksum()
		b.CPut(tableIDKey, table.ID, existingIDKV.Value)
		// TODO(dan): This doesn't work for interleaved tables. Fix it when we
		// fix the empty range interleaved table TODO below.
		existingDataPrefix := roachpb.Key(keys.MakeTablePrefix(uint32(existingTable.ID)))
		b.DelRange(existingDataPrefix, existingDataPrefix.PrefixEnd(), false)
		zoneKey, _, descKey := GetKeysForTableDescriptor(existingTable)
		// Delete the desc and zone entries. Leave the name because the new
		// table is using it.
		b.Del(descKey)
		b.Del(zoneKey)
	}
	return txn.Run(b)
}
func TestRemoveLeaseIfExpiring(t *testing.T) {
	defer leaktest.AfterTest(t)()

	p := planner{session: &Session{context: context.Background()}}
	mc := hlc.NewManualClock(123)
	p.leaseMgr = &LeaseManager{LeaseStore: LeaseStore{clock: hlc.NewClock(mc.UnixNano, time.Nanosecond)}}
	p.leases = make([]*LeaseState, 0)
	txn := client.Txn{Context: context.Background()}
	p.setTxn(&txn)

	if p.removeLeaseIfExpiring(nil) {
		t.Error("expected false with nil input")
	}

	// Add a lease to the planner.
	d := int64(LeaseDuration)
	l1 := &LeaseState{expiration: parser.DTimestamp{Time: time.Unix(0, mc.UnixNano()+d+1)}}
	p.leases = append(p.leases, l1)
	et := hlc.Timestamp{WallTime: l1.Expiration().UnixNano()}
	txn.UpdateDeadlineMaybe(et)

	if p.removeLeaseIfExpiring(l1) {
		t.Error("expected false with a non-expiring lease")
	}
	if !p.txn.GetDeadline().Equal(et) {
		t.Errorf("expected deadline %s but got %s", et, p.txn.GetDeadline())
	}

	// Advance the clock so that l1 will be expired.
	mc.Increment(d + 1)

	// Add another lease.
	l2 := &LeaseState{expiration: parser.DTimestamp{Time: time.Unix(0, mc.UnixNano()+d+1)}}
	p.leases = append(p.leases, l2)

	if !p.removeLeaseIfExpiring(l1) {
		t.Error("expected true with an expiring lease")
	}
	et = hlc.Timestamp{WallTime: l2.Expiration().UnixNano()}
	txn.UpdateDeadlineMaybe(et)

	if !(len(p.leases) == 1 && p.leases[0] == l2) {
		t.Errorf("expected leases to contain %s but has %s", l2, p.leases)
	}
	if !p.txn.GetDeadline().Equal(et) {
		t.Errorf("expected deadline %s, but got %s", et, p.txn.GetDeadline())
	}
}
func allSQLDescriptors(txn *client.Txn) ([]sqlbase.Descriptor, error) {
	startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
	endKey := startKey.PrefixEnd()
	// TODO(dan): Iterate with some batch size.
	rows, err := txn.Scan(startKey, endKey, 0)
	if err != nil {
		return nil, errors.Wrap(err, "unable to scan SQL descriptors")
	}
	sqlDescs := make([]sqlbase.Descriptor, len(rows))
	for i, row := range rows {
		if err := row.ValueProto(&sqlDescs[i]); err != nil {
			return nil, errors.Wrapf(err, "%s: unable to unmarshal SQL descriptor", row.Key)
		}
	}
	return sqlDescs, nil
}
func getDescriptor(
	txn *client.Txn, plainKey sqlbase.DescriptorKey, descriptor sqlbase.DescriptorProto,
) (bool, error) {
	gr, err := txn.Get(plainKey.Key())
	if err != nil {
		return false, err
	}
	if !gr.Exists() {
		return false, nil
	}
	descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
	desc := &sqlbase.Descriptor{}
	if err := txn.GetProto(descKey, desc); err != nil {
		return false, err
	}
	switch t := descriptor.(type) {
	case *sqlbase.TableDescriptor:
		table := desc.GetTable()
		if table == nil {
			return false, errors.Errorf("%q is not a table", plainKey.Name())
		}
		table.MaybeUpgradeFormatVersion()
		// TODO(dan): Write the upgraded TableDescriptor back to kv. This will
		// break the ability to use a previous version of cockroach with the
		// on-disk data, but it's worth it to avoid having to do the upgrade
		// every time the descriptor is fetched. Our current test for this
		// enforces compatibility backward and forward, so that'll have to be
		// extended before this is done.
		if err := table.Validate(txn); err != nil {
			return false, err
		}
		*t = *table
	case *sqlbase.DatabaseDescriptor:
		database := desc.GetDatabase()
		if database == nil {
			return false, errors.Errorf("%q is not a database", plainKey.Name())
		}
		if err := database.Validate(); err != nil {
			return false, err
		}
		*t = *database
	}
	return true, nil
}
// InsertEventRecord inserts a single event into the event log as part of the
// provided transaction.
func (ev EventLogger) InsertEventRecord(
	txn *client.Txn, eventType EventLogType, targetID, reportingID int32, info interface{},
) error {
	// Record event record insertion in local log output.
	txn.AddCommitTrigger(func() {
		log.Infof(txn.Context, "Event: %q, target: %d, info: %+v", eventType, targetID, info)
	})

	const insertEventTableStmt = `
INSERT INTO system.eventlog (
  timestamp, eventType, targetID, reportingID, info
)
VALUES(
  $1, $2, $3, $4, $5
)
`
	args := []interface{}{
		ev.selectEventTimestamp(txn.Proto.Timestamp),
		eventType,
		targetID,
		reportingID,
		nil, // info
	}
	if info != nil {
		infoBytes, err := json.Marshal(info)
		if err != nil {
			return err
		}
		args[4] = string(infoBytes)
	}

	rows, err := ev.ExecuteStatementInTransaction("log-event", txn, insertEventTableStmt, args...)
	if err != nil {
		return err
	}
	if rows != 1 {
		return errors.Errorf("%d rows affected by log insertion; expected exactly one row affected", rows)
	}
	return nil
}
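
// A minimal usage sketch (the event type string, IDs, and payload are
// illustrative assumptions, not from the source): logging an event from
// within an existing transaction; the info struct is marshaled to JSON.
func exampleLogEvent(ev EventLogger, txn *client.Txn) error {
	info := struct {
		DatabaseName string
	}{DatabaseName: "test"}
	// 51 and 1 stand in for hypothetical target and reporting node IDs.
	return ev.InsertEventRecord(txn, EventLogType("create_database"), 51, 1, info)
}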
// Ingest loads some data in an sstable into an empty range. Only the keys
// between startKey and endKey are loaded. If newTableID is non-zero, every
// row's key is rewritten to be for that table.
func Ingest(
	ctx context.Context,
	txn *client.Txn,
	path string,
	checksum uint32,
	startKey, endKey roachpb.Key,
	newTableID sqlbase.ID,
) error {
	// TODO(mjibson): An appropriate value for this should be determined. The
	// current value was guessed at but appears to work well.
	const batchSize = 10000

	// TODO(dan): Check if the range being ingested into is empty. If
	// newTableID is non-zero, it'll have to be derived from startKey and
	// endKey.

	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	if _, err := io.Copy(crc, f); err != nil {
		return err
	}
	if c := crc.Sum32(); c != checksum {
		return errors.Errorf("%s: checksum mismatch got %d expected %d", path, c, checksum)
	}

	sst, err := engine.MakeRocksDBSstFileReader()
	if err != nil {
		return err
	}
	defer sst.Close()
	if err := sst.AddFile(path); err != nil {
		return err
	}

	b := txn.NewBatch()
	var v roachpb.Value
	count := 0
	ingestFunc := func(kv engine.MVCCKeyValue) (bool, error) {
		v = roachpb.Value{RawBytes: kv.Value}
		v.ClearChecksum()
		if log.V(3) {
			log.Infof(ctx, "Put %s %s\n", kv.Key.Key, v.PrettyPrint())
		}
		b.Put(kv.Key.Key, &v)
		count++
		if count > batchSize {
			if err := txn.Run(b); err != nil {
				return true, err
			}
			b = txn.NewBatch()
			count = 0
		}
		return false, nil
	}
	if newTableID != 0 {
		// MakeRekeyMVCCKeyValFunc modifies the keys, but this is safe because
		// the one we get back from rocksDBIterator.Key is a copy (not a
		// reference to the mmaped file).
		ingestFunc = MakeRekeyMVCCKeyValFunc(newTableID, ingestFunc)
	}
	startKeyMVCC, endKeyMVCC := engine.MVCCKey{Key: startKey}, engine.MVCCKey{Key: endKey}
	if err := sst.Iterate(startKeyMVCC, endKeyMVCC, ingestFunc); err != nil {
		return err
	}
	return txn.Run(b)
}
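
// A minimal usage sketch (the path, checksum, and table ID are hypothetical,
// not from the source): ingesting a whole sstable, rewriting every key for
// the table with ID 51.
func exampleIngest(ctx context.Context, txn *client.Txn) error {
	start := roachpb.Key(keys.MakeTablePrefix(51))
	return Ingest(ctx, txn, "/tmp/data.sst", 0xDEADBEEF, start, start.PrefixEnd(), sqlbase.ID(51))
}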
func (ti *tableInserter) init(txn *client.Txn) error {
	ti.txn = txn
	ti.b = txn.NewBatch()
	return nil
}
// commitCmd commits the transaction.
func commitCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	return txn.Commit()
}
// deleteRngCmd deletes the range of values from the db from [key, endKey).
func deleteRngCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	return txn.DelRange(c.getKey(), c.getEndKey())
}
// deleteCmd deletes the value at the given key from the db.
func deleteCmd(c *cmd, txn *client.Txn, t *testing.T) error {
	return txn.Del(c.getKey())
}
func (tu *tableUpdater) init(txn *client.Txn) error {
	tu.txn = txn
	tu.b = txn.NewBatch()
	return nil
}
// setTxnTimestamps sets the transaction's proto timestamps and deadline to
// ts. This is for use with AS OF queries, and should be called in the retry
// block (except in the case of prepare, which doesn't use retry). The
// deadline-checking code checks that the `Timestamp` field of the proto
// hasn't exceeded the deadline. Since we set the Timestamp field on each
// retry, it won't ever exceed the deadline, so setting the deadline here is
// not strictly needed. However, it doesn't do anything incorrect and it may
// catch problems if things change in the future, so it is left in.
func setTxnTimestamps(txn *client.Txn, ts hlc.Timestamp) {
	txn.Proto.Timestamp = ts
	txn.Proto.OrigTimestamp = ts
	txn.Proto.MaxTimestamp = ts
	txn.UpdateDeadlineMaybe(ts)
}
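
// A minimal usage sketch (the wrapper and the chosen timestamp are
// assumptions, not from the source): pinning a transaction to a historical
// timestamp before executing an AS OF query against it.
func exampleAsOf(txn *client.Txn) {
	ts := hlc.Timestamp{WallTime: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()}
	setTxnTimestamps(txn, ts)
}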
func (td *tableDeleter) init(txn *client.Txn) error {
	td.txn = txn
	td.b = txn.NewBatch()
	return nil
}