// Start 5 services that conflict with one another. Assert that only
// 3 of the 5 are started.
func TestScheduleGlobalConflicts(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	// Start with a simple three-node cluster
	if err := platform.CreateNClusterMembers(cluster, 3, platform.MachineConfig{}); err != nil {
		t.Fatal(err)
	}
	machines, err := cluster.WaitForNMachines(3)
	if err != nil {
		t.Fatal(err)
	}

	// Ensure we can SSH into each machine using fleetctl
	for _, machine := range machines {
		if _, _, err := cluster.Fleetctl("--strict-host-key-checking=false", "ssh", machine, "uptime"); err != nil {
			t.Errorf("Unable to SSH into fleet machine: %v", err)
		}
	}

	for i := 0; i < 5; i++ {
		unit := fmt.Sprintf("fixtures/units/conflict.%d.service", i)
		_, _, err := cluster.Fleetctl("start", "--no-block", unit)
		if err != nil {
			t.Errorf("Failed starting unit %s: %v", unit, err)
		}
	}

	// All 5 services should be visible immediately and 3 should become
	// ACTIVE shortly thereafter
	stdout, _, err := cluster.Fleetctl("list-units", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 5 {
		t.Fatalf("Did not find five units in cluster: \n%s", stdout)
	}

	states, err := cluster.WaitForNActiveUnits(3)
	if err != nil {
		t.Fatal(err)
	}
	machineSet := make(map[string]bool)
	for unit, unitState := range states {
		if len(unitState.Machine) == 0 {
			t.Errorf("Unit %s is not reporting machine", unit)
		}
		machineSet[unitState.Machine] = true
	}

	if len(machineSet) != 3 {
		t.Errorf("3 active units not running on 3 unique machines")
	}
}

// TestUnitRunnable is the simplest test possible, deploying a single-node
// cluster and ensuring a unit can enter an 'active' state
func TestUnitRunnable(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	m0, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Unable to start fleet unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	units, err := cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	_, found := units["hello.service"]
	if len(units) != 1 || !found {
		t.Fatalf("Expected hello.service to be sole active unit, got %v", units)
	}
}

// TestUnitRestart checks that a unit can be started and then restarted
// successfully: it starts a unit, restarts it, and verifies the restart.
func TestUnitRestart(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	numUnits := 3

	// first start units before restarting them
	unitFiles, err := launchUnitsCmd(cluster, m, "start", numUnits)
	if err != nil {
		t.Fatal(err)
	}
	if err := checkListUnits(cluster, m, "start", unitFiles, numUnits); err != nil {
		t.Fatal(err)
	}

	// now restart
	if err := unitStartCommon(cluster, m, "restart", numUnits); err != nil {
		t.Fatal(err)
	}
}

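// The helpers used above (launchUnitsCmd, checkListUnits, unitStartCommon)
// belong to the functional test harness and are not shown in this section.
// Below is a minimal sketch of how a unitStartCommon-style helper might
// compose the other two, purely for illustration; the real helper in the
// fleet repository may differ. It assumes the platform.Cluster and
// platform.Member types used throughout these tests.
func unitStartCommon(cluster platform.Cluster, m platform.Member, cmd string, numUnits int) error {
	// Launch numUnits units with the given fleetctl command ("start",
	// "restart", "submit", ...), then verify they show up in list-units.
	unitFiles, err := launchUnitsCmd(cluster, m, cmd, numUnits)
	if err != nil {
		return err
	}
	return checkListUnits(cluster, m, cmd, unitFiles, numUnits)
}
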
func TestMachinesListBadNextPageToken(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Send an invalid GET request, should return failure
	resp, err := getHTTPResponse("GET", m.Endpoint()+"/fleet/v1/machines?nextPageToken=EwBMLg==", "")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusBadRequest {
		t.Fatalf("Expected status %d, got %d.", http.StatusBadRequest, resp.StatusCode)
	}

	err = checkContentType(resp)
	if err != nil {
		t.Fatal(err)
	}
}

func TestMachinesList(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Get a normal machine list, should return OK
	resp, err := getHTTPResponse("GET", m.Endpoint()+"/fleet/v1/machines", "")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		t.Fatalf("Got HTTP response status %d.", resp.StatusCode)
	}

	err = checkContentType(resp)
	if err != nil {
		t.Fatal(err)
	}
}

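// getHTTPResponse and checkContentType are harness helpers not shown in this
// section. A minimal sketch of what they might look like follows, assuming
// only the standard library ("net/http", "strings", "fmt"); the real
// implementations in the fleet repository may differ.
func getHTTPResponse(method string, url string, body string) (*http.Response, error) {
	// Build and send a plain HTTP request against the fleet API endpoint.
	req, err := http.NewRequest(method, url, strings.NewReader(body))
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req)
}

func checkContentType(resp *http.Response) error {
	// The fleet API is expected to answer with a JSON content type.
	ct := resp.Header.Get("Content-Type")
	if !strings.HasPrefix(ct, "application/json") {
		return fmt.Errorf("unexpected Content-Type: %s", ct)
	}
	return nil
}
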
// Check clean shutdown of fleetd under normal circumstances
func TestShutdown(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m0, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Stop the fleet process.
	if _, err = cluster.MemberCommand(m0, "sudo", "systemctl", "stop", "fleet"); err != nil {
		t.Fatal(err)
	}

	// Check expected state after stop.
	stdout, _ := cluster.MemberCommand(m0, "systemctl", "show", "--property=ActiveState", "fleet")
	if strings.TrimSpace(stdout) != "ActiveState=inactive" {
		t.Fatalf("Fleet unit not reported as inactive: %s", stdout)
	}

	stdout, _ = cluster.MemberCommand(m0, "systemctl", "show", "--property=Result", "fleet")
	if strings.TrimSpace(stdout) != "Result=success" {
		t.Fatalf("Result for fleet unit not reported as success: %s", stdout)
	}
}

func TestScheduleGlobalUnits(t *testing.T) {
	// Create a three-member cluster
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)
	members, err := platform.CreateNClusterMembers(cluster, 3)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	machines, err := cluster.WaitForNMachines(m0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Launch a couple of simple units
	stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", "fixtures/units/hello.service", "fixtures/units/goodbye.service")
	if err != nil {
		t.Fatalf("Failed starting units: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Both units should show up active
	_, err = cluster.WaitForNActiveUnits(m0, 2)
	if err != nil {
		t.Fatal(err)
	}

	// Now add a global unit
	stdout, stderr, err = cluster.Fleetctl(m0, "start", "--no-block", "fixtures/units/global.service")
	if err != nil {
		t.Fatalf("Failed starting unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Should see the 2 original units plus one global unit per machine (2 + 3 = 5)
	states, err := cluster.WaitForNActiveUnits(m0, 5)
	if err != nil {
		t.Fatal(err)
	}

	// Each machine should have a single global unit
	us := states["global.service"]
	for _, mach := range machines {
		var found bool
		for _, state := range us {
			if state.Machine == mach {
				found = true
				break
			}
		}
		if !found {
			// Log the observed unit states before failing, since Fatalf stops the test
			t.Logf("Found unit states:")
			for _, state := range states {
				t.Logf("%#v", state)
			}
			t.Fatalf("Did not find global unit on machine %v", mach)
		}
	}
}

// Simulate the shutdown of a single fleet node
func TestNodeShutdown(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	// Start with a single node and wait for it to come up
	m0, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	machines, err := cluster.WaitForNMachines(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Start a unit and ensure it comes up quickly
	unit := fmt.Sprintf("fixtures/units/hello@%s.service", machines[0])
	_, _, err = cluster.Fleetctl(m0, "start", unit)
	if err != nil {
		t.Errorf("Failed starting unit: %v", err)
	}
	_, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Create a second node, waiting for it
	m1, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	if _, err = cluster.WaitForNMachines(m0, 2); err != nil {
		t.Fatal(err)
	}

	// Stop the fleet process on the first member
	if _, err = cluster.MemberCommand(m0, "sudo", "systemctl", "stop", "fleet"); err != nil {
		t.Fatal(err)
	}

	// The first member should quickly remove itself from the published
	// list of cluster members
	if _, err = cluster.WaitForNMachines(m1, 1); err != nil {
		t.Fatal(err)
	}

	// State for the member's unit should be purged from the Registry
	if _, err = cluster.WaitForNActiveUnits(m1, 0); err != nil {
		t.Fatal(err)
	}

	// The member's unit should actually stop running, too
	stdout, _ := cluster.MemberCommand(m0, "sudo", "systemctl", "status", "hello.service")
	if !strings.Contains(stdout, "Active: inactive") {
		t.Fatalf("Unit hello.service not reported as inactive:\n%s\n", stdout)
	}
}

func TestUnitSubmit(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	// submit a unit and assert it shows up
	if _, _, err := cluster.Fleetctl(m, "submit", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Unable to submit fleet unit: %v", err)
	}
	stdout, _, err := cluster.Fleetctl(m, "list-units", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 1 {
		t.Fatalf("Did not find 1 unit in cluster: \n%s", stdout)
	}

	// submitting the same unit should not fail
	if _, _, err = cluster.Fleetctl(m, "submit", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Expected no failure when double-submitting unit, got this: %v", err)
	}

	// destroy the unit and ensure it disappears from the unit list
	if _, _, err := cluster.Fleetctl(m, "destroy", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Failed to destroy unit: %v", err)
	}
	stdout, _, err = cluster.Fleetctl(m, "list-units", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}
	if strings.TrimSpace(stdout) != "" {
		t.Fatalf("Did not find 0 units in cluster: \n%s", stdout)
	}

	// submitting the unit after destruction should succeed
	if _, _, err := cluster.Fleetctl(m, "submit", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Unable to submit fleet unit: %v", err)
	}
	stdout, _, err = cluster.Fleetctl(m, "list-units", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}
	units = strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 1 {
		t.Fatalf("Did not find 1 unit in cluster: \n%s", stdout)
	}
}

func TestCluster(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatalf(err.Error())
	}
	defer cluster.DestroyAll()

	// Start with a simple three-node cluster
	if err := cluster.Create(3); err != nil {
		t.Fatalf(err.Error())
	}
	machines, err := waitForNMachines(3)
	if err != nil {
		t.Fatalf(err.Error())
	}

	// Ensure we can SSH into each machine using fleetctl
	for _, machine := range machines {
		if _, _, err := fleetctl("--strict-host-key-checking=false", "ssh", machine, "uptime"); err != nil {
			t.Errorf("Unable to SSH into fleet machine: %v", err)
		}
	}

	// Start the 5 services
	for i := 0; i < 5; i++ {
		unitName := fmt.Sprintf("fixtures/units/conflict.%d.service", i)
		_, _, err := fleetctl("start", "--no-block", unitName)
		if err != nil {
			t.Errorf("Failed starting %s: %v", unitName, err)
		}
	}

	// All 5 services should be visible immediately and become ACTIVE
	// shortly thereafter
	stdout, _, err := fleetctl("list-units", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 5 {
		t.Fatalf("Did not find five units in cluster: \n%s", stdout)
	}
	if err := waitForNActiveUnits(3); err != nil {
		t.Fatalf(err.Error())
	}

	// Add two more machines to the cluster and ensure the remaining
	// unscheduled services are picked up.
	if err := cluster.Create(2); err != nil {
		t.Fatalf(err.Error())
	}
	machines, err = waitForNMachines(5)
	if err != nil {
		t.Fatalf(err.Error())
	}
	if err := waitForNActiveUnits(5); err != nil {
		t.Fatalf(err.Error())
	}
}

// Ensure units can be scheduled directly to a given machine using the
// MachineID unit option.
func TestScheduleConditionMachineID(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	// Start with a simple three-node cluster
	members, err := platform.CreateNClusterMembers(cluster, 3)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	machines, err := cluster.WaitForNMachines(m0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Start 3 units that are each scheduled to one of our machines
	schedule := make(map[string]string)
	for _, machine := range machines {
		contents := `
[Service]
ExecStart=/bin/bash -c "while true; do echo Hello, World!; sleep 1; done"

[X-Fleet]
MachineID=%s
`
		unitFile, err := util.TempUnit(fmt.Sprintf(contents, machine))
		if err != nil {
			t.Fatalf("Failed creating temporary unit: %v", err)
		}
		defer os.Remove(unitFile)

		stdout, stderr, err := cluster.Fleetctl(m0, "start", unitFile)
		if err != nil {
			t.Fatalf("Failed starting unit file %s: \nstdout: %s\nstderr: %s\nerr: %v", unitFile, stdout, stderr, err)
		}

		unit := filepath.Base(unitFile)
		schedule[unit] = machine
	}

	// Block until our three units have been started
	active, err := cluster.WaitForNActiveUnits(m0, 3)
	if err != nil {
		t.Fatal(err)
	}
	states, err := util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	for unit, unitState := range states {
		if unitState.Machine != schedule[unit] {
			t.Errorf("Unit %s was scheduled to %s, expected %s", unit, unitState.Machine, schedule[unit])
		}
	}
}

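// util.TempUnit is a harness helper not shown in this section. Judging from
// its use above, it writes the given unit contents to a temporary *.service
// file and returns its path (fleetctl derives the unit name from the file's
// base name). A minimal sketch of such a helper follows, purely for
// illustration and assuming only "io/ioutil" and "os" from the standard
// library; the real implementation in the fleet repository may differ.
func TempUnit(contents string) (string, error) {
	tmp, err := ioutil.TempFile(os.TempDir(), "fleet-functional-")
	if err != nil {
		return "", err
	}
	tmp.Close()

	// Rename to a .service suffix so fleet treats the file as a service unit.
	unitFile := tmp.Name() + ".service"
	if err := os.Rename(tmp.Name(), unitFile); err != nil {
		return "", err
	}
	if err := ioutil.WriteFile(unitFile, []byte(contents), 0644); err != nil {
		os.Remove(unitFile)
		return "", err
	}
	return unitFile, nil
}
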
func TestKnownHostsVerification(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	members, err := platform.CreateNClusterMembers(cluster, 2)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	machines, err := cluster.WaitForNMachines(m0, 2)
	if err != nil {
		t.Fatal(err)
	}
	machine := machines[0]

	tmp, err := ioutil.TempFile(os.TempDir(), "known-hosts")
	if err != nil {
		t.Fatal(err)
	}
	tmp.Close()
	defer syscall.Unlink(tmp.Name())
	khFile := tmp.Name()

	if stdout, stderr, err := cluster.FleetctlWithInput(m0, "yes", "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", machine, "uptime"); err != nil {
		t.Errorf("Unable to SSH into fleet machine: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	_, err = cluster.ReplaceMember(members[1])
	if err != nil {
		t.Fatalf("Failed replacing machine: %v", err)
	}

	machines, err = cluster.WaitForNMachines(m0, 2)
	if err != nil {
		t.Fatal(err)
	}
	machine = machines[0]

	// SSH'ing to the cluster member should now fail with a host key mismatch
	if _, _, err := cluster.Fleetctl(m0, "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", machine, "uptime"); err == nil {
		t.Errorf("Expected error while SSH'ing to fleet machine")
	}

	// Overwrite the known-hosts file to simulate removing the old host key
	if err := ioutil.WriteFile(khFile, []byte{}, os.FileMode(0644)); err != nil {
		t.Fatalf("Unable to overwrite known-hosts file: %v", err)
	}

	// And SSH should work again
	if stdout, stderr, err := cluster.FleetctlWithInput(m0, "yes", "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", machine, "uptime"); err != nil {
		t.Errorf("Unable to SSH into fleet machine: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}
}

func TestUnitSSHActions(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	if _, _, err := cluster.Fleetctl(m, "start", "--no-block", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Unable to start fleet unit: %v", err)
	}

	units, err := cluster.WaitForNActiveUnits(m, 1)
	if err != nil {
		t.Fatal(err)
	}
	_, found := units["hello.service"]
	if len(units) != 1 || !found {
		t.Fatalf("Expected hello.service to be sole active unit, got %v", units)
	}

	stdout, _, err := cluster.Fleetctl(m, "--strict-host-key-checking=false", "ssh", "hello.service", "echo", "foo")
	if err != nil {
		t.Errorf("Failure occurred while calling fleetctl ssh: %v", err)
	}
	if !strings.Contains(stdout, "foo") {
		t.Errorf("Could not find expected string in command output:\n%s", stdout)
	}

	stdout, _, err = cluster.Fleetctl(m, "--strict-host-key-checking=false", "status", "hello.service")
	if err != nil {
		t.Errorf("Failure occurred while calling fleetctl status: %v", err)
	}
	if !strings.Contains(stdout, "Active: active") {
		t.Errorf("Could not find expected string in status output:\n%s", stdout)
	}

	stdout, _, err = cluster.Fleetctl(m, "--strict-host-key-checking=false", "journal", "hello.service")
	if err != nil {
		t.Errorf("Failure occurred while calling fleetctl journal: %v", err)
	}
	if !strings.Contains(stdout, "Hello, World!") {
		t.Errorf("Could not find expected string in journal output:\n%s", stdout)
	}
}

// TestListUnitFilesOrder simply checks that "fleetctl list-unit-files" returns
// an ordered list of units
func TestListUnitFilesOrder(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Submit a batch of units
	var units []string
	for i := 1; i <= 20; i++ {
		unit := fmt.Sprintf("fixtures/units/hello@%02d.service", i)
		stdout, stderr, err := cluster.Fleetctl(m, "submit", unit)
		if err != nil {
			t.Fatalf("Failed to submit a batch of units: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
		}
		units = append(units, unit)
	}

	// make sure that all unit files will show up
	_, err = cluster.WaitForNUnitFiles(m, 20)
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}

	stdout, _, err := cluster.Fleetctl(m, "list-unit-files", "--no-legend", "--fields", "unit")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	outUnits := strings.Split(strings.TrimSpace(stdout), "\n")

	var sortable sort.StringSlice
	for _, name := range units {
		n := path.Base(name)
		sortable = append(sortable, n)
	}
	sortable.Sort()

	var inUnits []string
	for _, name := range sortable {
		inUnits = append(inUnits, name)
	}

	if !reflect.DeepEqual(inUnits, outUnits) {
		t.Fatalf("Failed to get a sorted list of units from list-unit-files")
	}
}

func TestKnownHostsVerification(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	if err := cluster.CreateMember("1", platform.MachineConfig{}); err != nil {
		t.Fatal(err)
	}
	machines, err := cluster.WaitForNMachines(1)
	if err != nil {
		t.Fatal(err)
	}
	machine := machines[0]

	tmp, err := ioutil.TempFile(os.TempDir(), "known-hosts")
	if err != nil {
		t.Fatal(err)
	}
	tmp.Close()
	defer syscall.Unlink(tmp.Name())
	khFile := tmp.Name()

	if _, _, err := cluster.FleetctlWithInput("yes", "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", machine, "uptime"); err != nil {
		t.Errorf("Unable to SSH into fleet machine: %v", err)
	}

	// Recreation of the cluster simulates a change in the server's host key
	cluster.DestroyMember("1")
	cluster.CreateMember("1", platform.MachineConfig{})
	machines, err = cluster.WaitForNMachines(1)
	if err != nil {
		t.Fatal(err)
	}
	machine = machines[0]

	// SSH'ing to the cluster member should now fail with a host key mismatch
	if _, _, err := cluster.Fleetctl("--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", machine, "uptime"); err == nil {
		t.Errorf("Expected error while SSH'ing to fleet machine")
	}

	// Overwrite the known-hosts file to simulate removing the old host key
	if err := ioutil.WriteFile(khFile, []byte{}, os.FileMode(0644)); err != nil {
		t.Fatalf("Unable to overwrite known-hosts file: %v", err)
	}

	// And SSH should work again
	if _, _, err := cluster.FleetctlWithInput("yes", "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", machine, "uptime"); err != nil {
		t.Errorf("Unable to SSH into fleet machine: %v", err)
	}
}

func TestUnitRestart(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	if err := platform.CreateNClusterMembers(cluster, 1, platform.MachineConfig{}); err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(1)
	if err != nil {
		t.Fatal(err)
	}

	if stdout, stderr, err := cluster.Fleetctl("start", "fixtures/units/hello.service"); err != nil {
		t.Fatalf("Unable to start fleet unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	units, err := cluster.WaitForNActiveUnits(1)
	if err != nil {
		t.Fatal(err)
	}
	_, found := units["hello.service"]
	if len(units) != 1 || !found {
		t.Fatalf("Expected hello.service to be sole active unit, got %v", units)
	}

	if _, _, err := cluster.Fleetctl("stop", "hello.service"); err != nil {
		t.Fatal(err)
	}
	units, err = cluster.WaitForNActiveUnits(0)
	if err != nil {
		t.Fatal(err)
	}
	if len(units) != 0 {
		t.Fatalf("Zero units should be running, found %v", units)
	}

	if stdout, stderr, err := cluster.Fleetctl("start", "hello.service"); err != nil {
		t.Fatalf("Unable to start fleet unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	units, err = cluster.WaitForNActiveUnits(1)
	if err != nil {
		t.Fatal(err)
	}
	_, found = units["hello.service"]
	if len(units) != 1 || !found {
		t.Fatalf("Expected hello.service to be sole active unit, got %v", units)
	}
}

// Simulate the shutdown of a single fleet node
func TestNodeShutdown(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	// Start with a single-node cluster
	if err := cluster.CreateMember("1", platform.MachineConfig{}); err != nil {
		t.Fatal(err)
	}
	if _, err = cluster.WaitForNMachines(1); err != nil {
		t.Fatal(err)
	}

	// Start a unit and ensure it comes up quickly
	if _, _, err := cluster.Fleetctl("start", "fixtures/units/hello.service"); err != nil {
		t.Errorf("Failed starting unit: %v", err)
	}
	_, err = cluster.WaitForNActiveUnits(1)
	if err != nil {
		t.Fatal(err)
	}

	// Stop the fleet process on our sole member
	if _, err = cluster.MemberCommand("1", "sudo", "systemctl", "stop", "fleet"); err != nil {
		t.Fatal(err)
	}

	// The member should immediately remove itself from the published
	// list of cluster members
	if _, err = cluster.WaitForNMachines(0); err != nil {
		t.Fatal(err)
	}

	// State for the member's units should be purged from the Registry
	if _, err = cluster.WaitForNActiveUnits(0); err != nil {
		t.Fatal(err)
	}

	// The member's units should actually stop running, too
	stdout, err := cluster.MemberCommand("1", "sudo", "systemctl", "status", "hello.service")
	if err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(stdout, "Active: inactive") {
		t.Fatalf("Unit hello.service not reported as inactive:\n%s\n", stdout)
	}
}

// Check clean shutdown of fleetd while automatic restart (after failed health check) is in progress
func TestShutdownVsMonitor(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m0, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Cut connection to etcd.
	//
	// This will result in a failed health check, and consequently the monitor will attempt a restart.
	stdout, err := cluster.MemberCommand(m0, "sudo", "iptables", "-I", "OUTPUT", "-p", "tcp", "-m", "multiport", "--dports=2379,4001", "-j", "DROP")
	if err != nil {
		t.Fatalf("Failed inserting iptables rule:\nstdout: %s\nerr: %v", stdout, err)
	}

	// Wait for the monitor to trigger the restart.
	//
	// This will never complete, as long as there is no connectivity.
	stdout, err = cluster.MemberCommand(m0, "sudo", "sh", "-c", `'until journalctl -u fleet | grep -q "Server monitor triggered: Monitor timed out before successful heartbeat"; do sleep 1; done'`)
	if err != nil {
		t.Fatalf("Failed checking journal message:\nstdout: %s\nerr: %v", stdout, err)
	}

	// Stop fleetd while the restart is still in progress.
	stdout, err = cluster.MemberCommand(m0, "sudo", "systemctl", "stop", "fleet")
	if err != nil {
		t.Fatalf("Failed stopping fleet service:\nstdout: %s\nerr: %v", stdout, err)
	}

	// Verify that fleetd was shut down cleanly in spite of the concurrent restart.
	stdout, _ = cluster.MemberCommand(m0, "systemctl", "show", "--property=ActiveState", "fleet")
	if strings.TrimSpace(stdout) != "ActiveState=inactive" {
		t.Fatalf("Fleet unit not reported as inactive: %s", stdout)
	}

	stdout, _ = cluster.MemberCommand(m0, "systemctl", "show", "--property=Result", "fleet")
	if strings.TrimSpace(stdout) != "Result=success" {
		t.Fatalf("Result for fleet unit not reported as success: %s", stdout)
	}
}

func TestScheduleOneWayConflict(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	// Start with a single-node cluster
	if err := platform.CreateNClusterMembers(cluster, 1, platform.MachineConfig{}); err != nil {
		t.Fatal(err)
	}
	if _, err := cluster.WaitForNMachines(1); err != nil {
		t.Fatal(err)
	}

	// Start a unit that conflicts with a yet-to-be-scheduled unit
	name := "fixtures/units/conflicts-with-hello.service"
	if _, _, err := cluster.Fleetctl("start", name); err != nil {
		t.Fatalf("Failed starting unit %s: %v", name, err)
	}

	// Start a unit that does not declare any conflicts
	name = "fixtures/units/hello.service"
	if _, _, err := cluster.Fleetctl("start", name); err == nil {
		t.Fatalf("Unit %s unexpectedly started", name)
	}

	// Both units should show up, but only conflicts-with-hello.service
	// should report ACTIVE
	stdout, _, err := cluster.Fleetctl("list-units", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 2 {
		t.Fatalf("Did not find two units in cluster: \n%s", stdout)
	}

	states, err := cluster.WaitForNActiveUnits(1)
	if err != nil {
		t.Fatal(err)
	}
	for unit := range states {
		if unit != "conflicts-with-hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}
}

// TestUnitCat simply compares the body of a unit file with that of a unit fetched
// from the remote cluster using "fleetctl cat".
func TestUnitCat(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	// read a sample unit file into a buffer
	unitFile := "fixtures/units/hello.service"
	fileBuf, err := ioutil.ReadFile(unitFile)
	if err != nil {
		t.Fatal(err)
	}
	fileBody := strings.TrimSpace(string(fileBuf))

	// submit a unit and assert it shows up
	_, _, err = cluster.Fleetctl(m, "submit", unitFile)
	if err != nil {
		t.Fatalf("Unable to submit fleet unit: %v", err)
	}
	// wait (up to 15 seconds) until the unit gets submitted
	_, err = cluster.WaitForNUnitFiles(m, 1)
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}

	// cat the unit file and compare it with the original unit body
	stdout, _, err := cluster.Fleetctl(m, "cat", path.Base(unitFile))
	if err != nil {
		t.Fatalf("Unable to cat fleet unit: %v", err)
	}
	catBody := strings.TrimSpace(stdout)

	if catBody != fileBody {
		t.Fatalf("unit body changed across fleetctl cat: \noriginal:%s\nnew:%s", fileBody, catBody)
	}
}

func TestSignedRequests(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy()

	cfg := platform.MachineConfig{VerifyUnits: true}
	if err := cluster.CreateMember("1", cfg); err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(1)
	if err != nil {
		t.Fatal(err)
	}

	_, _, err = cluster.Fleetctl("submit", "--sign=false", "fixtures/units/hello.service")
	if err != nil {
		t.Fatalf("Failed submitting hello.service: %v", err)
	}

	// The load command should succeed, but the unit should not actually get
	// scheduled and started on an agent since it is not signed.
	_, _, err = cluster.Fleetctl("load", "--no-block", "fixtures/units/hello.service")
	if err != nil {
		t.Fatalf("Failed calling load on hello.service: %v", err)
	}

	_, _, err = cluster.Fleetctl("start", "--no-block", "--sign=true", "fixtures/units/goodbye.service")
	if err != nil {
		t.Fatalf("Failed starting goodbye.service: %v", err)
	}

	units, err := cluster.WaitForNActiveUnits(1)
	if err != nil {
		t.Fatal(err)
	}

	_, ok := units["goodbye.service"]
	if len(units) != 1 || !ok {
		t.Fatalf("Expected goodbye.service to be sole active unit, got %v", units)
	}
}

// TestUnitSubmit checks that a unit can be submitted and destroyed successfully.
// First it submits a unit, then destroys it and verifies the destruction,
// and finally submits the unit again.
func TestUnitSubmit(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	if err := unitStartCommon(cluster, m, "submit", 9); err != nil {
		t.Fatal(err)
	}
}

func TestMachineList(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatalf(err.Error())
	}
	defer cluster.DestroyAll()

	if err := cluster.Create(3); err != nil {
		t.Fatalf(err.Error())
	}

	stdout, _, err := fleetctl("list-machines", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-machines: %v", err)
	}

	stdout = strings.TrimSpace(stdout)
	machines := strings.Split(stdout, "\n")
	if len(machines) != 3 {
		t.Errorf("Did not find three machines running: \n%s", stdout)
	}
}

func TestMachineSSH(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatalf(err.Error())
	}
	defer cluster.DestroyAll()

	if err := cluster.Create(3); err != nil {
		t.Fatalf(err.Error())
	}

	stdout, _, err := fleetctl("list-machines", "--no-legend", "-l")
	if err != nil {
		t.Fatalf("Failed to run list-machines: %v", err)
	}

	bootID := strings.SplitN(stdout, "\t", 2)[0]
	stdout, _, err = fleetctl("ssh", bootID, "uptime")
	if err != nil {
		t.Fatalf("Unable to SSH into fleet machine: %v", err)
	}
}

// TestUnitStatus simply checks that "fleetctl status hello.service" actually works.
func TestUnitStatus(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	unitFile := "fixtures/units/hello.service"

	// Load a unit and print out status.
	// Without loading a unit, it's impossible to run fleetctl status
	_, _, err = cluster.Fleetctl(m, "load", unitFile)
	if err != nil {
		t.Fatalf("Unable to load a fleet unit: %v", err)
	}

	// wait up to 15 seconds until the unit gets loaded
	_, err = cluster.WaitForNUnits(m, 1)
	if err != nil {
		t.Fatalf("Failed to run list-units: %v", err)
	}

	stdout, stderr, err := cluster.Fleetctl(m, "--strict-host-key-checking=false", "status", path.Base(unitFile))
	if !strings.Contains(stdout, "Loaded: loaded") {
		t.Errorf("Could not find expected string in status output:\n%s\nstderr:\n%s", stdout, stderr)
	}
}

// TestFleetctlWithEnv simply runs fleetctl list-machines, but with the
// environment variable FLEETCTL_ENDPOINT set instead of the command-line
// option '--endpoint'.
func TestFleetctlWithEnv(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m, 1)
	if err != nil {
		t.Fatal(err)
	}

	stdout, stderr, err := cluster.FleetctlWithEnv(m, "list-machines")
	if err != nil {
		t.Fatalf("Failed to run with env var FLEETCTL_ENDPOINT:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}
}

// TestReconfigureServer checks whether fleetd manages to keep its listeners
// across a reconfiguration of fleetd after receiving SIGHUP.
func TestReconfigureServer(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	m0, err := cluster.CreateMember()
	if err != nil {
		t.Fatal(err)
	}
	_, err = cluster.WaitForNMachines(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	err = waitForFleetdSocket(cluster, m0)
	if err != nil {
		t.Fatalf("Failed to get a list of fleetd sockets: %v", err)
	}

	unit := "fixtures/units/hello.service"
	stdout, stderr, err := cluster.Fleetctl(m0, "start", unit)
	if err != nil {
		t.Fatalf("Failed starting unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}
	_, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Trigger the AgentReconciler
	stdout, stderr, err = cluster.Fleetctl(m0, "unload", unit)
	if err != nil {
		t.Fatalf("Failed unloading unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Send a SIGHUP to fleetd, and periodically check whether the message
	// "Reloading configuration" appears in fleet's journal, up to a timeout
	// of 15 seconds.
	stdout, stderr, err = cluster.MemberCommand(m0, "sudo", "systemctl", "kill", "-s", "SIGHUP", "fleet")
	if strings.TrimSpace(stdout) != "" {
		t.Fatalf("Sending SIGHUP to fleetd returned output.\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Watch the logs to see whether fleet was correctly reloaded
	errSigHup := waitForReloadConfig(cluster, m0)
	if errSigHup != nil {
		t.Logf("Failed to ensure that fleet was correctly reloaded: %v", errSigHup)
	}

	// Check whether fleetd is still running correctly by listing units.
	// Even if the log message does not show up, this check may catch the error.
	stdout, stderr, err = cluster.Fleetctl(m0, "list-units")
	if err != nil {
		t.Fatalf("Unable to check list-units. Please check for fleetd socket\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Ensure that fleet received SIGHUP; if not, just skip this test,
	// most likely due to journald or other delays.
	if errSigHup != nil {
		err = waitForReloadConfig(cluster, m0)
		if err != nil {
			// Just mark the test skipped since it did not fail; the previous
			// list-units command did succeed. Missing logs can be caused
			// by journald delays or any other race.
			t.Skipf("Skipping Test: Failed to ensure that fleet was correctly reloaded: %v", err)
		}
	}

	// Check for HTTP listener errors by looking at the fleetd journal
	stdout, stderr, err = cluster.MemberCommand(m0, "journalctl _PID=$(pidof fleetd)")
	if strings.Contains(strings.TrimSpace(stdout), "Failed serving HTTP on listener:") {
		t.Fatalf("Fleetd log returned error on HTTP listeners.\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Check expected state after reconfiguring fleetd
	stdout, stderr, err = cluster.MemberCommand(m0, "systemctl", "show", "--property=ActiveState", "fleet")
	if strings.TrimSpace(stdout) != "ActiveState=active" {
		t.Fatalf("Fleet unit not reported as active.\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	stdout, stderr, err = cluster.MemberCommand(m0, "systemctl", "show", "--property=Result", "fleet")
	if strings.TrimSpace(stdout) != "Result=success" {
		t.Fatalf("Result for fleet unit not reported as success.\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}
}

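// waitForReloadConfig and waitForFleetdSocket are harness helpers not shown in
// this section. As the comments above describe, waitForReloadConfig polls
// fleet's journal for the "Reloading configuration" message. A minimal sketch
// of such a helper follows, purely for illustration; it assumes the
// util.WaitForState polling helper and the cluster/member types used in the
// surrounding tests, and calls MemberCommand with the same signature used in
// TestReconfigureServer above. The real implementation may differ.
func waitForReloadConfig(cluster platform.Cluster, m0 platform.Member) error {
	_, err := util.WaitForState(
		func() bool {
			// Grep the fleet journal for the reload message.
			stdout, _, _ := cluster.MemberCommand(m0, "journalctl", "-u", "fleet", "--no-pager")
			return strings.Contains(stdout, "Reloading configuration")
		},
	)
	return err
}
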
// Start three pairs of services, asserting that each pair lands on the same
// machine due to the MachineOf options in the unit files.
func TestScheduleMachineOf(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	// Start with a simple three-node cluster
	members, err := platform.CreateNClusterMembers(cluster, 3)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	machines, err := cluster.WaitForNMachines(m0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Ensure we can SSH into each machine using fleetctl
	for _, machine := range machines {
		if stdout, stderr, err := cluster.Fleetctl(m0, "--strict-host-key-checking=false", "ssh", machine, "uptime"); err != nil {
			t.Errorf("Unable to SSH into fleet machine: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
		}
	}

	// Start the 3 pairs of services
	for i := 0; i < 3; i++ {
		ping := fmt.Sprintf("fixtures/units/ping.%d.service", i)
		pong := fmt.Sprintf("fixtures/units/pong.%d.service", i)
		stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", ping, pong)
		if err != nil {
			t.Errorf("Failed starting units: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
		}
	}

	// All 6 services should be visible immediately and become ACTIVE
	// shortly thereafter
	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 6 {
		t.Fatalf("Did not find six units in cluster: \n%s", stdout)
	}
	active, err := cluster.WaitForNActiveUnits(m0, 6)
	if err != nil {
		t.Fatal(err)
	}
	states, err := util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 3; i++ {
		ping := fmt.Sprintf("ping.%d.service", i)
		pingState, ok := states[ping]
		if !ok {
			t.Errorf("Failed to find state for %s", ping)
			continue
		}

		pong := fmt.Sprintf("pong.%d.service", i)
		pongState, ok := states[pong]
		if !ok {
			t.Errorf("Failed to find state for %s", pong)
			continue
		}

		if len(pingState.Machine) == 0 {
			t.Errorf("Unit %s is not reporting machine", ping)
		}
		if len(pongState.Machine) == 0 {
			t.Errorf("Unit %s is not reporting machine", pong)
		}
		if pingState.Machine != pongState.Machine {
			t.Errorf("Units %s and %s are not on same machine", ping, pong)
		}
	}

	// Ensure a pair of units migrate together when their host goes down
	mach := states["ping.1.service"].Machine
	if _, _, err = cluster.Fleetctl(m0, "--strict-host-key-checking=false", "ssh", mach, "sudo", "systemctl", "stop", "fleet"); err != nil {
		t.Fatal(err)
	}

	var mN platform.Member
	if m0.ID() == states["ping.1.service"].Machine {
		mN = members[1]
	} else {
		mN = m0
	}

	if _, err := cluster.WaitForNMachines(mN, 2); err != nil {
		t.Fatal(err)
	}
	active, err = cluster.WaitForNActiveUnits(mN, 6)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	newPingMach := states["ping.1.service"].Machine
	if mach == newPingMach {
		t.Fatalf("Unit ping.1.service did not appear to migrate")
	}

	newPongMach := states["pong.1.service"].Machine
	if newPingMach != newPongMach {
		t.Errorf("Unit pong.1.service did not migrate with ping.1.service")
	}
}

func TestScheduleOneWayConflict(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	// Start with a single-node cluster
	members, err := platform.CreateNClusterMembers(cluster, 1)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	if _, err := cluster.WaitForNMachines(m0, 1); err != nil {
		t.Fatal(err)
	}

	// Start a unit that conflicts with a yet-to-be-scheduled unit
	name := "fixtures/units/conflicts-with-hello.service"
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", name); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
	}

	active, err := cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err := util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	// Start a unit that does not declare any conflicts
	name = "fixtures/units/hello.service"
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", name); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
	}

	// Both units should show up, but only conflicts-with-hello.service
	// should report ACTIVE
	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 2 {
		t.Fatalf("Did not find two units in cluster: \n%s", stdout)
	}

	active, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}
	for unit := range states {
		if unit != "conflicts-with-hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}

	// Destroying the conflicting unit should allow the other to start
	name = "conflicts-with-hello.service"
	if _, _, err := cluster.Fleetctl(m0, "destroy", name); err != nil {
		t.Fatalf("Failed destroying %s", name)
	}

	// NOTE: we need to sleep here shortly to avoid occasional errors of
	// conflicts-with-hello.service being rescheduled even after being destroyed.
	// In that case, the conflicts unit remains active, while the original
	// hello.service remains inactive. Then the test TestScheduleOneWayConflict
	// fails at the end with a message "Incorrect unit started".
	// This error seems to occur frequently when enable_grpc is turned on.
	// - dpark 20160615
	time.Sleep(1 * time.Second)

	// Wait for the destroyed unit to actually disappear
	timeout, err := util.WaitForState(
		func() bool {
			stdout, _, err := cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
			if err != nil {
				return false
			}
			lines := strings.Split(strings.TrimSpace(stdout), "\n")
			states := util.ParseUnitStates(lines)
			for _, state := range states {
				if state.Name == name {
					return false
				}
			}
			return true
		},
	)
	if err != nil {
		t.Fatalf("Destroyed unit %s not gone within %v", name, timeout)
	}

	active, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}
	for unit := range states {
		if unit != "hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}
}

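// util.WaitForState is a polling helper used by several of these tests. A
// minimal sketch of how such a helper might look follows, purely for
// illustration: it repeatedly evaluates the given predicate until it returns
// true or a fixed timeout expires, and returns the timeout so callers can
// report it in failure messages. The default timeout and poll interval below
// are assumptions; the real implementation in the fleet repository may
// differ. It uses only "time" and "fmt" from the standard library.
func WaitForState(stateChecker func() bool) (time.Duration, error) {
	timeout := 15 * time.Second        // assumed default timeout
	interval := 250 * time.Millisecond // assumed poll interval
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if stateChecker() {
			return timeout, nil
		}
		time.Sleep(interval)
	}
	return timeout, fmt.Errorf("timed out after %v waiting for state", timeout)
}
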
// TestScheduleReplace starts 1 unit, followed by starting another unit
// that replaces the 1st unit. Then it verifies that the 2 units are
// started on different machines.
func TestScheduleReplace(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	// Start with a simple two-node cluster
	members, err := platform.CreateNClusterMembers(cluster, 2)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	if _, err := cluster.WaitForNMachines(m0, 2); err != nil {
		t.Fatal(err)
	}

	// Start a unit without Replaces
	uNames := []string{
		"fixtures/units/replace.0.service",
		"fixtures/units/replace.1.service",
	}
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", uNames[0]); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", uNames[0], stdout, stderr, err)
	}

	active, err := cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	_, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	// Start a unit that replaces the former one, replace.0.service
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", uNames[1]); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", uNames[1], stdout, stderr, err)
	}

	// Check that both units show up
	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 2 {
		t.Fatalf("Did not find two units in cluster: \n%s", stdout)
	}
	active, err = cluster.WaitForNActiveUnits(m0, 2)
	if err != nil {
		t.Fatal(err)
	}
	states, err := util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	// Check that unit 1 is located on a different machine from that of unit 0
	nUnits := 2
	uNameBase := make([]string, nUnits)
	machs := make([]string, nUnits)
	for i, uName := range uNames {
		uNameBase[i] = path.Base(uName)
		machs[i] = states[uNameBase[i]].Machine
	}
	if machs[0] == machs[1] {
		t.Fatalf("machine for %s is %s, the same as that of %s.", uNameBase[0], machs[0], uNameBase[1])
	}

	// Check that circular replaces end up with 1 launched unit.
	// First of all, destroy the existing unit replace.0.service.
	if stdout, stderr, err := cluster.Fleetctl(m0, "destroy", uNameBase[0]); err != nil {
		t.Fatalf("Failed to destroy unit %s: \nstdout: %s\nstderr: %s\nerr: %v", uNameBase[0], stdout, stderr, err)
	}

	// Generate a new service 0 derived from a fixture, make the new service
	// replace service 1, and store it under /tmp.
	uName0tmp := path.Join("/tmp", uNameBase[0])
	err = util.GenNewFleetService(uName0tmp, uNames[1], "Replaces=replace.1.service", "Replaces=replace.0.service")
	if err != nil {
		t.Fatalf("Failed to generate a temp fleet service: %v", err)
	}

	// Start the replace.0 unit that replaces replace.1.service; afterwards
	// fleetctl list-unit-files should return only 1 launched unit.
	// Note that we still need to run list-units once, before doing
	// list-unit-files, for reliable tests.
	stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", uName0tmp)
	if err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", uName0tmp, stdout, stderr, err)
	}
	stdout, _, err = cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	units = strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != nUnits {
		t.Fatalf("Did not find two units in cluster: \n%s", stdout)
	}
	_, err = cluster.WaitForNActiveUnits(m0, nUnits)
	if err != nil {
		t.Fatal(err)
	}
	ufs, err := cluster.WaitForNUnitFiles(m0, nUnits)
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}

	uStates := make([][]util.UnitFileState, nUnits)
	var found bool
	for i, unb := range uNameBase {
		uStates[i], found = ufs[unb]
		if len(ufs) != nUnits || !found {
			t.Fatalf("Did not find %d launched unit as expected: got %d\n", nUnits, len(ufs))
		}
	}
	nLaunched := 0
	for _, us := range uStates {
		for _, state := range us {
			if strings.Contains(state.State, "launched") {
				nLaunched += 1
			}
		}
	}
	if nLaunched != 1 {
		t.Fatalf("Did not find 1 launched unit as expected: got %d", nLaunched)
	}

	os.Remove(uName0tmp)
}

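// util.GenNewFleetService is another harness helper not shown here. Judging
// from the call above, it derives a new unit file from a base fixture by
// swapping one option line for another and writing the result to a new path.
// A minimal sketch of such a helper follows, purely for illustration; the
// real signature and behavior in the fleet repository may differ. It assumes
// only "io/ioutil" and "strings" from the standard library.
func GenNewFleetService(newFile, baseFile, newValue, oldValue string) error {
	// Read the base fixture, replace the old option line with the new one,
	// and write the result to the destination path.
	input, err := ioutil.ReadFile(baseFile)
	if err != nil {
		return err
	}
	output := strings.Replace(string(input), oldValue, newValue, -1)
	return ioutil.WriteFile(newFile, []byte(output), 0644)
}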