Example 1
0
// BenchmarkClusterRestore measures the cost of restoring a previously
// backed-up bank table into the same cluster, for several table sizes.
// Throughput is reported against the backup's logical data size.
func BenchmarkClusterRestore(b *testing.B) {
	defer tracing.Disable()()

	// TODO(dan): count=10000 has some issues replicating. Investigate.
	for _, count := range []int{10, 100, 1000} {
		b.Run(strconv.Itoa(count), func(b *testing.B) {
			ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(b, count)
			defer cleanupFn()

			// TODO(dan): Once mjibson's sql -> kv function is committed, use it
			// here on the output of bankDataInsert to generate the backup data
			// instead of this call.
			backupDesc, backupErr := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
			if backupErr != nil {
				b.Fatal(backupErr)
			}
			// Report bytes/op relative to the logical size of the backup.
			b.SetBytes(backupDesc.DataSize)

			rebalanceLeases(b, tc)

			table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
			// Only the restore calls themselves are timed.
			b.ResetTimer()
			for iter := 0; iter < b.N; iter++ {
				if _, err := sql.Restore(ctx, *kvDB, dir, table); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
Example 2
0
// TestBackupRestoreOnce backs up a bank table once, sanity-checks the
// reported backup data size, then restores the backup into a fresh
// cluster and verifies the row count survives the round trip.
func TestBackupRestoreOnce(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// TODO(dan): Actually invalidate the descriptor cache and delete this line.
	defer sql.TestDisableTableLeases()()
	const numAccounts = 1000

	ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(t, numAccounts)
	defer cleanupFn()

	{
		desc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
		if err != nil {
			t.Fatal(err)
		}
		// The backup should contain roughly the raw row payloads; allow up to
		// 2x for key and encoding overhead. The previous condition compared
		// against 2*max (i.e. 4x the approximate size), which made the upper
		// bound vacuous and disagreed with the interval reported in the error
		// message; compare against max itself.
		approxDataSize := int64(backupRestoreRowPayloadSize) * numAccounts
		if max := approxDataSize * 2; desc.DataSize < approxDataSize || desc.DataSize > max {
			t.Errorf("expected data size in [%d,%d] but was %d", approxDataSize, max, desc.DataSize)
		}
	}

	// Start a new cluster to restore into.
	{
		tcRestore := testcluster.StartTestCluster(t, backupRestoreClusterSize, base.TestClusterArgs{})
		defer tcRestore.Stopper().Stop()
		sqlDBRestore := sqlutils.MakeSQLRunner(t, tcRestore.Conns[0])
		kvDBRestore := tcRestore.Server(0).KVClient().(*client.DB)

		// Restore assumes the database exists.
		sqlDBRestore.Exec(bankCreateDatabase)

		table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
		if _, err := sql.Restore(ctx, *kvDBRestore, dir, table); err != nil {
			t.Fatal(err)
		}

		// Every account row written before the backup must be present.
		var rowCount int
		sqlDBRestore.QueryRow(`SELECT COUNT(*) FROM bench.bank`).Scan(&rowCount)
		if rowCount != numAccounts {
			t.Fatalf("expected %d rows but found %d", numAccounts, rowCount)
		}
	}
}
Example 3
0
// BenchmarkClusterBackup measures the cost of backing up a bank table of
// several sizes, reporting throughput against the backup's data size.
func BenchmarkClusterBackup(b *testing.B) {
	defer tracing.Disable()()

	for _, count := range []int{10, 100, 1000, 10000} {
		b.Run(strconv.Itoa(count), func(b *testing.B) {
			ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(b, count)
			defer cleanupFn()
			rebalanceLeases(b, tc)

			// Time only the backup calls; setup and rebalancing are excluded.
			b.ResetTimer()
			for iter := 0; iter < b.N; iter++ {
				backupDesc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
				if err != nil {
					b.Fatal(err)
				}
				b.SetBytes(backupDesc.DataSize)
			}
		})
	}
}
Example 4
0
// runBackup implements the CLI backup command. It takes a single
// positional argument, the output basepath, backs up the cluster as of
// the current wall time, and prints a one-line summary.
func runBackup(cmd *cobra.Command, args []string) error {
	if len(args) != 1 {
		return errors.New("output basepath argument is required")
	}
	basepath := args[0]

	ctx := context.Background()
	kvDB, stopper, err := makeDBClient()
	if err != nil {
		return err
	}
	defer stopper.Stop()

	// Back up at "now"; hlc.UnixNano gives the current wall time.
	now := hlc.Timestamp{WallTime: hlc.UnixNano()}
	desc, err := sql.Backup(ctx, *kvDB, basepath, now)
	if err != nil {
		return err
	}

	fmt.Printf("Backed up %d data bytes in %d ranges to %s\n", desc.DataSize, len(desc.Ranges), basepath)
	return nil
}
Example 5
0
// TestBackupRestoreBank repeatedly backs up and restores a bank table
// while concurrent transfers mutate it in the background, checking after
// each iteration that the total balance invariant (sum == 0) holds and
// that the data actually changed between steps.
func TestBackupRestoreBank(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// TODO(dan): Actually invalidate the descriptor cache and delete this line.
	defer sql.TestDisableTableLeases()()

	const numAccounts = 10
	const backupRestoreIterations = 10

	ctx, baseDir, tc, kvDB, sqlDB, cleanupFn := backupRestoreTestSetup(t, numAccounts)
	defer cleanupFn()

	tc.Stopper().RunWorker(func() {
		// Use a different sql gateway to make sure leasing is right.
		startBankTransfers(t, tc.Stopper(), tc.Conns[len(tc.Conns)-1], numAccounts)
	})

	// Loop continually doing backup and restores while the bank transfers are
	// running in a goroutine. After each iteration, check the invariant that
	// all balances sum to zero. Make sure the data changes a bit between each
	// backup and restore as well as after the restore before checking the
	// invariant by checking the sum of squares of balances, which is chosen to
	// be likely to change if any balances change.
	var squaresSum int64
	table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
	for i := 0; i < backupRestoreIterations; i++ {
		// Each iteration writes its backup to a distinct subdirectory.
		dir := filepath.Join(baseDir, strconv.Itoa(i))

		_, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
		if err != nil {
			t.Fatal(err)
		}

		// Wait until the background transfers have changed the data since the
		// backup was taken (detected via the sum-of-squares checksum).
		var newSquaresSum int64
		util.SucceedsSoon(t, func() error {
			sqlDB.QueryRow(`SELECT SUM(balance*balance) FROM bench.bank`).Scan(&newSquaresSum)
			if squaresSum == newSquaresSum {
				return errors.Errorf("squared deviation didn't change, still %d", newSquaresSum)
			}
			return nil
		})
		squaresSum = newSquaresSum

		if _, err := sql.Restore(ctx, *kvDB, dir, table); err != nil {
			t.Fatal(err)
		}

		// After the restore, wait for the transfers to mutate the restored
		// data again before checking the invariant, so the check exercises
		// post-restore writes and not just the restored snapshot.
		util.SucceedsSoon(t, func() error {
			sqlDB.QueryRow(`SELECT SUM(balance*balance) FROM bench.bank`).Scan(&newSquaresSum)
			if squaresSum == newSquaresSum {
				return errors.Errorf("squared deviation didn't change, still %d", newSquaresSum)
			}
			return nil
		})
		squaresSum = newSquaresSum

		// Invariant: transfers move money between accounts, so balances must
		// always sum to zero regardless of interleaving with backup/restore.
		var sum int64
		sqlDB.QueryRow(`SELECT SUM(balance) FROM bench.bank`).Scan(&sum)
		if sum != 0 {
			t.Fatalf("The bank is not in good order. Total value: %d", sum)
		}
	}
}