// TestPlan_Steps_NewCluster_Default verifies that planning a brand-new
// two-node cluster yields the expected ordered step types.
func TestPlan_Steps_NewCluster_Default(t *testing.T) {
	t.Parallel()

	prefix := "TestPlan_Steps_NewCluster_Default"
	logger := testutil.NewTestLogger(prefix, t)

	cfg := config.Scheduler{
		Cells: []*config.Cell{
			{GUID: "cell1"},
			{GUID: "cell2"},
			{GUID: "cell3"},
			{GUID: "cell4"},
		},
		Etcd: testutil.LocalEtcdConfig,
	}
	s, err := NewScheduler(cfg, new(fakes.FakePatroni), logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	// A fresh (empty) cluster model: the plan must create every node.
	model := state.NewClusterModel(&state.StateEtcd{}, structs.ClusterState{})
	plan, err := s.newPlan(model, structs.ClusterFeatures{NodeCount: 2})
	if err != nil {
		t.Fatalf("scheduler.newPlan error: %v", err)
	}

	want := []string{"AddNode", "AddNode", "WaitForAllMembers", "WaitForLeader"}
	if got := plan.stepTypes(); !reflect.DeepEqual(got, want) {
		t.Fatalf("plan should have steps %v, got %v", want, got)
	}
}
// TestRouter_RemoveClusterAssignement verifies that removing a cluster's
// port assignment deletes the allocation key from etcd.
// NOTE(review): "Assignement" is a typo for "Assignment" in the test name;
// kept as-is so `go test -run` selections keep working.
func TestRouter_RemoveClusterAssignement(t *testing.T) {
	t.Parallel()

	testPrefix := "TestRouter_RemoveClusterAssignement"
	etcdApi := resetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	clusterID := structs.ClusterID("clusterID")
	port := 30000

	// Seed an existing allocation directly in etcd. The Set error was
	// previously ignored; a failed seed would make the rest meaningless.
	key := fmt.Sprintf("%s/routing/allocation/%s", testPrefix, clusterID)
	_, err := etcdApi.Set(context.Background(), key, fmt.Sprintf("%d", port), &etcd.SetOptions{})
	if err != nil {
		t.Fatalf("Could not seed allocation in etcd: %v", err)
	}

	router, err := NewRouterWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		t.Fatalf("Could not create a new router: %v", err)
	}

	err = router.RemoveClusterAssignment(clusterID)
	if err != nil {
		t.Fatalf("Could not remove the assignment: %v", err)
	}

	// The key must be gone: a successful Get means the delete failed.
	// (err is nil in this branch, so it is not printed.)
	_, err = etcdApi.Get(context.Background(), key, &etcd.GetOptions{})
	if err == nil {
		t.Fatalf("port assignment for %s was not deleted", clusterID)
	}
}
// TestState_SaveAndLoadCluster round-trips a minimal ClusterState through
// SaveCluster and LoadCluster.
//
// Renamed from TestState_LoadCluster: another function with that exact name
// is declared later in this file, which is a duplicate declaration and does
// not compile.
func TestState_SaveAndLoadCluster(t *testing.T) {
	t.Parallel()

	testPrefix := "TestState_LoadClusterState"
	testutil.ResetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	state, err := NewStateEtcdWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		t.Fatalf("Could not create state: %v", err)
	}

	instanceID := structs.ClusterID(uuid.New())
	planID := uuid.New()
	clusterState := structs.ClusterState{
		InstanceID:       instanceID,
		OrganizationGUID: "OrganizationGUID",
		PlanID:           planID,
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
	}
	err = state.SaveCluster(clusterState)
	if err != nil {
		t.Fatalf("SaveCluster failed %s", err)
	}

	// The load error was previously discarded; check it before comparing.
	loadedState, err := state.LoadCluster(instanceID)
	if err != nil {
		t.Fatalf("LoadCluster failed %s", err)
	}
	if !reflect.DeepEqual(clusterState, loadedState) {
		t.Fatalf("Failed to load ClusterState")
	}
}
// TestScheduler_filterCellsByCellGUIDs verifies that requesting specific
// cell GUIDs narrows the plan's available cells to the known matches while
// allCells still lists every configured cell.
func TestScheduler_filterCellsByCellGUIDs(t *testing.T) {
	t.Parallel()

	testPrefix := "TestScheduler_filterCellsByCellGUIDs"
	logger := testutil.NewTestLogger(testPrefix, t)

	schedulerConfig := config.Scheduler{
		Cells: []*config.Cell{
			&config.Cell{GUID: "cell-guid1"},
			&config.Cell{GUID: "cell-guid2"},
		},
		Etcd: testutil.LocalEtcdConfig,
	}
	// Fixed: NewScheduler takes a patroni client between the config and the
	// logger (see the other call sites in this file); it was missing here.
	scheduler, err := NewScheduler(schedulerConfig, new(fakes.FakePatroni), logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	features := structs.ClusterFeatures{
		// "unknown-cell-guid" is not configured and must be filtered out.
		CellGUIDs: []string{"cell-guid1", "unknown-cell-guid"},
	}
	clusterModel := state.NewClusterModel(&state.StateEtcd{}, structs.ClusterState{InstanceID: "test"})
	plan, err := scheduler.newPlan(clusterModel, features)
	if err != nil {
		t.Fatalf("scheduler.newPlan error: %v", err)
	}

	if len(plan.availableCells) != 1 {
		t.Fatalf("Plan should only have one filtered cell")
	}
	if len(plan.allCells) != 2 {
		t.Fatalf("Plan should only have two cells")
	}
}
// TestScheduler_allCells verifies that with no CellGUIDs filter every
// configured cell is available to the plan.
func TestScheduler_allCells(t *testing.T) {
	t.Parallel()

	testPrefix := "TestScheduler_allCells"
	logger := testutil.NewTestLogger(testPrefix, t)

	schedulerConfig := config.Scheduler{
		Cells: []*config.Cell{
			&config.Cell{GUID: "cell-guid1"},
			&config.Cell{GUID: "cell-guid2"},
		},
		Etcd: testutil.LocalEtcdConfig,
	}
	scheduler, err := NewScheduler(schedulerConfig, new(fakes.FakePatroni), logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	clusterModel := state.NewClusterModel(&state.StateEtcd{}, structs.ClusterState{InstanceID: "test"})
	features := structs.ClusterFeatures{}
	plan, err := scheduler.newPlan(clusterModel, features)
	if err != nil {
		t.Fatalf("scheduler.newPlan error: %v", err)
	}

	if len(plan.availableCells) != 2 {
		// Fixed garbled failure message ("both cell cells").
		t.Fatalf("Plan should have both cells available")
	}
	if len(plan.allCells) != 2 {
		t.Fatalf("Plan should only have two cells")
	}
}
// TestScheduler_VerifyClusterFeatures verifies that a feature set whose
// requested cell GUIDs are all configured passes validation.
func TestScheduler_VerifyClusterFeatures(t *testing.T) {
	t.Parallel()

	prefix := "TestScheduler_VerifyClusterFeatures"
	logger := testutil.NewTestLogger(prefix, t)

	cfg := config.Scheduler{
		Cells: []*config.Cell{
			{GUID: "a"},
			{GUID: "b"},
			{GUID: "c"},
			{GUID: "d"},
		},
		Etcd: testutil.LocalEtcdConfig,
	}
	s, err := NewScheduler(cfg, new(fakes.FakePatroni), logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	// Every requested GUID exists in the scheduler's cell list.
	features := structs.ClusterFeatures{
		NodeCount: 3,
		CellGUIDs: []string{"a", "b", "c"},
	}
	if err = s.VerifyClusterFeatures(features); err != nil {
		t.Fatalf("Cluster features %v should be valid", features)
	}
}
// TestState_ClusterExists verifies ClusterExists reports true for a saved
// cluster and false for an unknown ID.
func TestState_ClusterExists(t *testing.T) {
	t.Parallel()

	testPrefix := "TestState_ClusterExists"
	testutil.ResetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	state, err := NewStateEtcdWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		// Fixed: Fatalf had an err argument but no format verb (go vet).
		t.Fatalf("Could not create state: %v", err)
	}

	clusterID := structs.ClusterID(uuid.New())
	planID := uuid.New()
	clusterState := structs.ClusterState{
		InstanceID:       clusterID,
		OrganizationGUID: "OrganizationGUID",
		PlanID:           planID,
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
	}
	err = state.SaveCluster(clusterState)
	if err != nil {
		t.Fatalf("SaveCluster failed %s", err)
	}

	if !state.ClusterExists(clusterID) {
		t.Fatalf("Cluster %s should exist", clusterID)
	}

	if state.ClusterExists("fakeID") {
		t.Fatalf("Cluster %s should not exist", "fakeID")
	}
}
// TestRouter_AssignPortToCluster verifies that assigning a port writes the
// expected allocation key/value into etcd.
func TestRouter_AssignPortToCluster(t *testing.T) {
	t.Parallel()

	testPrefix := "TestRouter_AssignPortToCluster"
	etcdApi := resetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	router, err := NewRouterWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		// Fixed: Fatalf had an err argument but no format verb (go vet).
		t.Fatalf("Could not create a new router: %v", err)
	}

	clusterID := structs.ClusterID("clusterID")
	port := 30000
	err = router.AssignPortToCluster(clusterID, port)
	if err != nil {
		t.Fatalf("Assigning port failed: %v", err)
	}

	key := fmt.Sprintf("%s/routing/allocation/%s", testPrefix, clusterID)
	resp, err := etcdApi.Get(context.Background(), key, &etcd.GetOptions{})
	if err != nil {
		t.Fatalf("Could not read port from etcd: %v", err)
	}

	// The Atoi error was previously discarded; a non-numeric value would
	// have reported a misleading "expected 30000, got 0".
	retrievedPort, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		t.Fatalf("Stored port %q is not a number: %v", resp.Node.Value, err)
	}
	if want, got := port, retrievedPort; want != got {
		t.Fatalf("Routing was not initialized. Expected %d, got %d", want, got)
	}
}
// TestCallbacks_Configures verifies that callbacks built from an empty
// config report themselves as not configured.
func TestCallbacks_Configures(t *testing.T) {
	t.Parallel()

	// Fixed: prefix was copy-pasted from TestCallbacks_WriteRecreationData,
	// mislabeling this test's log output.
	testPrefix := "TestCallbacks_Configures"
	logger := testutil.NewTestLogger(testPrefix, t)

	callbacks := NewCallbacks(config.Callbacks{}, logger)
	if want, got := false, callbacks.Configured(); want != got {
		t.Fatalf("Callbacks should not be configured")
	}
}
// TestAddNode_PrioritizeCells_SecondNodeDiffAZ verifies that when the
// cluster's first node lives in AZ z1, cells in z2 are prioritized ahead of
// z1 cells for the second node.
func TestAddNode_PrioritizeCells_SecondNodeDiffAZ(t *testing.T) {
	t.Parallel()

	prefix := "TestAddNode_PrioritizeCells_SecondNodeDiffAZ"
	logger := testutil.NewTestLogger(prefix, t)

	// Existing clusters give the cells their relative load: cell-n1-z1 is
	// the busiest, cell-n4-z2 is empty.
	loader := &FakeClusterLoader{
		Clusters: []*structs.ClusterState{
			{
				Nodes: []*structs.Node{
					{CellGUID: "cell-n1-z1"},
					{CellGUID: "cell-n3-z2"},
				},
			},
			{
				Nodes: []*structs.Node{
					{CellGUID: "cell-n1-z1"},
					{CellGUID: "cell-n3-z2"},
				},
			},
			{
				Nodes: []*structs.Node{
					{CellGUID: "cell-n1-z1"},
					{CellGUID: "cell-n2-z1"},
				},
			},
		},
	}
	available := cells.NewCells([]*config.Cell{
		{GUID: "cell-n1-z1", AvailabilityZone: "z1"},
		{GUID: "cell-n2-z1", AvailabilityZone: "z1"},
		{GUID: "cell-n3-z2", AvailabilityZone: "z2"},
		{GUID: "cell-n4-z2", AvailabilityZone: "z2"},
	}, loader)

	// The cluster under scheduling already has one node in z1.
	existingNodes := []*structs.Node{
		{ID: "node-1", CellGUID: "cell-n1-z1"},
	}

	step := AddNode{logger: logger, availableCells: available}
	ordered, _ := step.prioritizeCellsToTry(existingNodes)

	var got []string
	for _, c := range ordered {
		got = append(got, c.GUID)
	}

	// Expect all z2 cells first, then z1, each group least-loaded first.
	want := []string{"cell-n4-z2", "cell-n3-z2", "cell-n2-z1", "cell-n1-z1"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("Expected prioritized cells %v to be %v", got, want)
	}
}
// TestCallbacks_WriteRecreationData verifies that the cluster-data-backup
// callback (here: `tee` into a temp file) receives the recreation data as
// JSON on stdin.
func TestCallbacks_WriteRecreationData(t *testing.T) {
	t.Parallel()

	testPrefix := "TestCallbacks_WriteRecreationData"
	logger := testutil.NewTestLogger(testPrefix, t)

	testDir, err := ioutil.TempDir("", testPrefix)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Fixed: os.Remove fails on a non-empty directory, leaking the fixture;
	// RemoveAll deletes the directory and the file written into it.
	defer os.RemoveAll(testDir)
	fileName := fmt.Sprintf("%s/%s", testDir, testPrefix)

	config := config.Callbacks{
		ClusterDataBackup: &config.CallbackCommand{
			Command:   "tee",
			Arguments: []string{fileName},
		},
	}

	recreationData := &structs.ClusterRecreationData{
		InstanceID:       "instance-id",
		OrganizationGUID: "OrganizationGUID",
		PlanID:           "PlanID",
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
		AdminCredentials: structs.PostgresCredentials{
			Username: "******",
			Password: "******",
		},
		AllocatedPort: 1234,
	}

	callbacks := NewCallbacks(config, logger)
	callbacks.WriteRecreationData(recreationData)

	rawData, err := ioutil.ReadFile(fileName)
	if err != nil {
		t.Fatalf("Could not open file %s, Err: %s", fileName, err)
	}

	writtenData := &structs.ClusterRecreationData{}
	// Fixed: the Unmarshal error was ignored; garbage in the file would
	// have produced a confusing DeepEqual failure instead.
	if err := json.Unmarshal(rawData, &writtenData); err != nil {
		t.Fatalf("Could not unmarshal written data: %v", err)
	}

	if !reflect.DeepEqual(recreationData, writtenData) {
		t.Fatalf("Written Data doesn't equal original. %v != %v", writtenData, recreationData)
	}
}
// TestPlan_Steps_NewCluster_MoveEverything verifies the plan produced when
// every existing node sits on an unavailable cell: both nodes are replaced,
// with the leader ("a", per the patroni stub) failed over before removal.
func TestPlan_Steps_NewCluster_MoveEverything(t *testing.T) {
	t.Parallel()

	prefix := "TestPlan_Steps_NewCluster_MoveEverything"
	logger := testutil.NewTestLogger(prefix, t)

	cfg := config.Scheduler{
		Cells: []*config.Cell{
			{GUID: "cell1"},
			{GUID: "cell2"},
		},
		Etcd: testutil.LocalEtcdConfig,
	}

	// Stub patroni so node "a" is reported as the cluster leader.
	patroni := new(fakes.FakePatroni)
	patroni.ClusterLeaderStub = func(structs.ClusterID) (string, error) { return "a", nil }

	s, err := NewScheduler(cfg, patroni, logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	// Both current nodes live on cells that are no longer available.
	current := structs.ClusterState{
		InstanceID: "test",
		Nodes: []*structs.Node{
			{ID: "a", CellGUID: "cell-x-unavailable"},
			{ID: "b", CellGUID: "cell-y-unavailable"},
		},
	}
	features := structs.ClusterFeatures{
		NodeCount: 2,
		CellGUIDs: []string{"cell1", "cell2"},
	}

	model := state.NewClusterModel(&state.StateEtcd{}, current)
	plan, err := s.newPlan(model, features)
	if err != nil {
		t.Fatalf("scheduler.newPlan error: %v", err)
	}

	want := []string{"AddNode", "AddNode", "WaitForAllMembers", "RemoveNode(b)", "WaitForAllMembers", "FailoverFrom(a)", "RemoveNode(a)", "WaitForLeader"}
	if got := plan.stepTypes(); !reflect.DeepEqual(got, want) {
		t.Fatalf("plan should have steps %v, got %v", want, got)
	}
}
// TestState_DeleteCluster verifies that deleting a saved cluster removes
// its state key from etcd (the follow-up Get fails with "Key not found").
func TestState_DeleteCluster(t *testing.T) {
	t.Parallel()

	testPrefix := "TestState_DeleteClusterState"
	etcdApi := testutil.ResetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	state, err := NewStateEtcdWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		// Fixed: Fatalf had an err argument but no format verb (go vet).
		t.Fatalf("Could not create state: %v", err)
	}

	instanceID := structs.ClusterID(uuid.New())
	planID := uuid.New()
	clusterState := structs.ClusterState{
		InstanceID:       instanceID,
		OrganizationGUID: "OrganizationGUID",
		PlanID:           planID,
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
	}
	err = state.SaveCluster(clusterState)
	if err != nil {
		t.Fatalf("SaveCluster failed %s", err)
	}

	err = state.DeleteCluster(instanceID)
	if err != nil {
		t.Fatalf("DeleteClusterState failed %s", err)
	}

	key := fmt.Sprintf("%s/service/%s/state", testPrefix, instanceID)
	_, err = etcdApi.Get(context.Background(), key, &etcd.GetOptions{})
	if err == nil {
		t.Fatalf("Was expecting error 'Key not found'")
	}
	// Any other error (connection refused, etc.) is a test-setup problem,
	// not a successful delete.
	notFoundRegExp := regexp.MustCompile("Key not found")
	if !notFoundRegExp.MatchString(err.Error()) {
		t.Fatalf("An error other than 'Key not found' occurred %s", err)
	}
}
// TestState_LoadCluster verifies LoadCluster reconstructs a saved cluster,
// including a node record written directly under the cluster's nodes/ key.
func TestState_LoadCluster(t *testing.T) {
	t.Parallel()

	testPrefix := "TestState_LoadClusterState"
	etcdApi := testutil.ResetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	state, err := NewStateEtcdWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		// Fixed: Fatalf had an err argument but no format verb (go vet).
		t.Fatalf("Could not create state: %v", err)
	}

	instanceID := structs.ClusterID(uuid.New())
	planID := uuid.New()

	node := structs.Node{ID: "node_id", CellGUID: "cell_guid"}
	clusterState := structs.ClusterState{
		InstanceID:       instanceID,
		OrganizationGUID: "OrganizationGUID",
		PlanID:           planID,
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
		SchedulingInfo: structs.SchedulingInfo{
			Status: structs.SchedulingStatusInProgress,
		},
	}
	clusterState.Nodes = []*structs.Node{&node}
	err = state.SaveCluster(clusterState)
	if err != nil {
		t.Fatalf("SaveCluster failed %s", err)
	}

	// Write the node record directly into etcd, as the cells would.
	// Fixed: the Marshal and Set errors were previously ignored.
	data, err := json.Marshal(node)
	if err != nil {
		t.Fatalf("Could not marshal node: %v", err)
	}
	key := fmt.Sprintf(
		"/%s/service/%s/nodes/%s", testPrefix, clusterState.InstanceID, node.ID)
	if _, err = etcdApi.Set(context.Background(), key, string(data), &etcd.SetOptions{}); err != nil {
		t.Fatalf("Could not write node to etcd: %v", err)
	}

	// Fixed: the load error was previously discarded.
	loadedState, err := state.LoadCluster(instanceID)
	if err != nil {
		t.Fatalf("LoadCluster failed %s", err)
	}
	if !reflect.DeepEqual(clusterState, loadedState) {
		t.Fatalf("Failed to load ClusterState. Expected: %v, actual: %v", clusterState, loadedState)
	}
}
// TestRouter_InitialPort verifies that the very first port allocation on a
// fresh etcd prefix returns the configured initial port.
func TestRouter_InitialPort(t *testing.T) {
	t.Parallel()

	prefix := "TestRouter_InitialPort"
	resetEtcd(t, prefix)
	logger := testutil.NewTestLogger(prefix, t)

	r, err := NewRouterWithPrefix(testutil.LocalEtcdConfig, prefix, logger)
	if err != nil {
		t.Fatal("Could not create a new router", err)
	}

	got, err := r.AllocatePort()
	if err != nil {
		t.Fatal("Could not allocate port", err)
	}

	if got != initialPort {
		t.Errorf("%s was not initialized in etcd", nextPortKey)
	}
}
// TestRouter_WithoutPrefix verifies that a router built without an etcd
// key prefix still allocates the initial port on first use.
func TestRouter_WithoutPrefix(t *testing.T) {
	t.Parallel()

	prefix := ""
	// The unprefixed router writes under "routing"; reset that subtree.
	resetEtcd(t, "routing")
	logger := testutil.NewTestLogger(prefix, t)

	r, err := NewRouter(testutil.LocalEtcdConfig, logger)
	if err != nil {
		t.Fatal("Could not create a new router", err)
	}

	got, err := r.AllocatePort()
	if err != nil {
		t.Fatal("Could not allocate port", err)
	}

	if got != initialPort {
		t.Errorf("Allocated port did not equal initial port. Want %d, got %d", initialPort, got)
	}
}
// TestPlan_Steps_NewCluster_MoveLeader verifies the plan produced when the
// current leader sits on an unavailable cell: a replacement node is added
// and the leader is removed.
func TestPlan_Steps_NewCluster_MoveLeader(t *testing.T) {
	t.Parallel()

	testPrefix := "TestPlan_Steps_NewCluster_MoveLeader"
	logger := testutil.NewTestLogger(testPrefix, t)

	schedulerConfig := config.Scheduler{
		Cells: []*config.Cell{
			&config.Cell{GUID: "cell1"},
			&config.Cell{GUID: "cell2"},
		},
		Etcd: testutil.LocalEtcdConfig,
	}
	// Fixed: NewScheduler takes a patroni client between the config and the
	// logger (see the other call sites in this file); it was missing here.
	scheduler, err := NewScheduler(schedulerConfig, new(fakes.FakePatroni), logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	clusterState := structs.ClusterState{
		InstanceID: "test",
		Nodes: []*structs.Node{
			&structs.Node{ID: "a", CellGUID: "cell-unavailable", Role: state.LeaderRole},
			&structs.Node{ID: "b", CellGUID: "cell2", Role: state.ReplicaRole},
		},
	}
	clusterFeatures := structs.ClusterFeatures{
		NodeCount: 2,
		CellGUIDs: []string{"cell1", "cell2"},
	}
	clusterModel := state.NewClusterModel(&state.StateEtcd{}, clusterState)
	plan, err := scheduler.newPlan(clusterModel, clusterFeatures)
	if err != nil {
		t.Fatalf("scheduler.newPlan error: %v", err)
	}
	expectedStepTypes := []string{"AddNode", "WaitForAllMembers", "RemoveLeader(a)", "WaitForAllMembers", "WaitForLeader"}
	stepTypes := plan.stepTypes()
	if !reflect.DeepEqual(stepTypes, expectedStepTypes) {
		t.Fatalf("plan should have steps %v, got %v", expectedStepTypes, stepTypes)
	}
}
// TestScheduler_VerifyClusterFeatures_UnknownCellGUIDs verifies that
// requesting cell GUIDs a scheduler with no cells cannot satisfy is
// rejected by VerifyClusterFeatures.
func TestScheduler_VerifyClusterFeatures_UnknownCellGUIDs(t *testing.T) {
	t.Parallel()

	// Fixed: prefix previously reused "TestScheduler_VerifyClusterFeatures",
	// mislabeling this test's log output.
	testPrefix := "TestScheduler_VerifyClusterFeatures_UnknownCellGUIDs"
	logger := testutil.NewTestLogger(testPrefix, t)
	scheduler, err := NewScheduler(config.Scheduler{
		Cells: []*config.Cell{},
		Etcd:  testutil.LocalEtcdConfig,
	}, new(fakes.FakePatroni), logger)
	if err != nil {
		t.Fatalf("NewScheduler error: %v", err)
	}

	features := structs.ClusterFeatures{
		NodeCount: 3,
		CellGUIDs: []string{"a", "b", "c"},
	}
	err = scheduler.VerifyClusterFeatures(features)
	if err == nil {
		t.Fatalf("Expect 'Cell GUIDs do not match available cells' error")
	}
}
// TestState_SaveCluster verifies that SaveCluster writes the cluster state
// to etcd as JSON under <prefix>/service/<id>/state.
func TestState_SaveCluster(t *testing.T) {
	t.Parallel()

	testPrefix := "TestState_SaveCluster"
	etcdApi := testutil.ResetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	state, err := NewStateEtcdWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		// Fixed: Fatalf had an err argument but no format verb (go vet).
		t.Fatalf("Could not create state: %v", err)
	}

	clusterID := structs.ClusterID(uuid.New())
	planID := uuid.New()
	clusterState := structs.ClusterState{
		InstanceID:       clusterID,
		OrganizationGUID: "OrganizationGUID",
		PlanID:           planID,
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
	}
	err = state.SaveCluster(clusterState)
	if err != nil {
		t.Fatalf("SaveCluster failed %s", err)
	}

	resp, err := etcdApi.Get(context.Background(), fmt.Sprintf("%s/service/%s/state", testPrefix, clusterID), &etcd.GetOptions{})
	if err != nil {
		t.Fatalf("Could not load state from etcd %s", err)
	}

	retrievedState := structs.ClusterState{}
	// Fixed: the Unmarshal error was ignored; corrupt stored JSON would
	// have surfaced as a confusing DeepEqual failure.
	if err := json.Unmarshal([]byte(resp.Node.Value), &retrievedState); err != nil {
		t.Fatalf("Could not unmarshal stored state: %v", err)
	}
	if !reflect.DeepEqual(clusterState, retrievedState) {
		t.Fatalf("Retrieved State does not match. Want %v, Got %v", clusterState, retrievedState)
	}
}
// TestRouter_ConcurrentPortAllocation verifies that five concurrent
// AllocatePort calls hand out five consecutive ports starting at
// initialPort, with no duplicates.
func TestRouter_ConcurrentPortAllocation(t *testing.T) {
	t.Parallel()

	testPrefix := "TestRouter_ConcurrentPortAllocation"
	resetEtcd(t, testPrefix)
	logger := testutil.NewTestLogger(testPrefix, t)

	router, err := NewRouterWithPrefix(testutil.LocalEtcdConfig, testPrefix, logger)
	if err != nil {
		// Fixed: Fatalf had an err argument but no format verb (go vet).
		t.Fatalf("Could not create a new router: %v", err)
	}

	portChan := make(chan int)
	for i := 0; i < 5; i++ {
		go func() {
			nextPort, err := router.AllocatePort()
			if err != nil {
				t.Error("Could not allocate port", err)
				// Fixed: the error path previously sent 0 AND fell through
				// to send nextPort — the second send on the unbuffered
				// channel blocked forever, leaking the goroutine and
				// skewing the collected results. Send exactly one value.
				portChan <- 0
				return
			}
			portChan <- nextPort
		}()
	}

	ports := []int{}
	for i := 0; i < 5; i++ {
		ports = append(ports, <-portChan)
	}

	sort.Ints(ports)
	for i := 0; i < 5; i++ {
		if want, got := initialPort+i, ports[i]; want != got {
			t.Errorf("Concurrent allocation of ports failed. Expected %d, got %d", want, got)
		}
	}
}
// TestCallbacks_RestoreRecreationData verifies two halves of the restore
// callback contract: (1) the callback's stdout is decoded into the
// recreation data, and (2) the instance ID is passed to the callback on
// stdin.
func TestCallbacks_RestoreRecreationData(t *testing.T) {
	t.Parallel()

	testPrefix := "TestCallbacks_WriteRecreationData"
	logger := testutil.NewTestLogger(testPrefix, t)

	testDir, err := ioutil.TempDir("", testPrefix)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Fixed: os.Remove fails on a non-empty directory, leaking the fixture;
	// RemoveAll deletes the directory and the file written into it.
	defer os.RemoveAll(testDir)
	fileName := fmt.Sprintf("%s/%s", testDir, testPrefix)

	instanceID := structs.ClusterID(uuid.New())
	recreationData := &structs.ClusterRecreationData{
		InstanceID:       instanceID,
		OrganizationGUID: "OrganizationGUID",
		PlanID:           "PlanID",
		ServiceID:        "ServiceID",
		SpaceGUID:        "SpaceGUID",
		AdminCredentials: structs.PostgresCredentials{
			Username: "******",
			Password: "******",
		},
		AllocatedPort: 1234,
	}

	dataRaw, err := json.Marshal(recreationData)
	if err != nil {
		t.Fatalf("Could not marshal recreation data: %v", err)
	}
	err = ioutil.WriteFile(fileName, dataRaw, os.ModePerm)
	if err != nil {
		t.Fatalf("Could not write file")
	}

	cfg := config.Callbacks{
		ClusterDataRestore: &config.CallbackCommand{
			Command:   "cat",
			Arguments: []string{fileName},
		},
	}

	// test that it retrieves the data from stdout
	callbacks := NewCallbacks(cfg, logger)
	restoredData, err := callbacks.RestoreRecreationData(instanceID)
	if err != nil {
		t.Fatalf("Could not open file %s, Err: %s", fileName, err)
	}

	if !reflect.DeepEqual(recreationData, restoredData) {
		t.Fatalf("Retrieved Data doesn't equal original. %v != %v", restoredData, recreationData)
	}

	// test that it passes instanceID to stdin (tee writes stdin to the file)
	cfg = config.Callbacks{
		ClusterDataRestore: &config.CallbackCommand{
			Command:   "tee",
			Arguments: []string{fileName},
		},
	}
	callbacks = NewCallbacks(cfg, logger)
	// Fixed: this error was silently overwritten by the ReadFile below.
	if _, err = callbacks.RestoreRecreationData(instanceID); err != nil {
		t.Fatalf("RestoreRecreationData via tee failed: %v", err)
	}

	fileContent, err := ioutil.ReadFile(fileName)
	if err != nil {
		t.Fatalf("Could not open file %s, Err: %s", fileName, err)
	}

	if structs.ClusterID(fileContent) != instanceID {
		t.Fatalf("InstanceID %s was not passed via stdin", instanceID)
	}
}