Example #1
// WaitForNActiveUnits polls `fleetctl list-units` until exactly count units
// report active, or until the 15-second timeout expires.
func (nc *nspawnCluster) WaitForNActiveUnits(m Member, count int) (map[string][]util.UnitState, error) {
	var nactive int
	states := make(map[string][]util.UnitState)

	timeout := 15 * time.Second
	alarm := time.After(timeout)

	ticker := time.Tick(250 * time.Millisecond)
loop:
	for {
		select {
		case <-alarm:
			return nil, fmt.Errorf("failed to find %d active units within %v (last found: %d)", count, timeout, nactive)
		case <-ticker:
			stdout, _, err := nc.Fleetctl(m, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
			if err != nil {
				continue
			}
			stdout = strings.TrimSpace(stdout)

			lines := strings.Split(stdout, "\n")
			allStates := util.ParseUnitStates(lines)
			active := util.FilterActiveUnits(allStates)
			nactive = len(active)
			if nactive != count {
				continue
			}

			for _, state := range active {
				name := state.Name
				if _, ok := states[name]; !ok {
					states[name] = []util.UnitState{}
				}
				states[name] = append(states[name], state)
			}
			break loop
		}
	}

	return states, nil
}
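
The select-based loop above is a standard Go polling pattern: race a time.After deadline against a ticker and re-run the check on every tick. A minimal standalone sketch of that pattern, decoupled from fleet (the pollUntil name and its parameters are hypothetical, not part of the fleet codebase):

package main

import (
	"fmt"
	"time"
)

// pollUntil re-runs check every interval until it returns true or until
// timeout elapses, mirroring the alarm/ticker select loop above.
func pollUntil(check func() bool, interval, timeout time.Duration) error {
	alarm := time.After(timeout)
	ticker := time.NewTicker(interval)
	// Unlike time.Tick in Example #1, NewTicker can be stopped, so the
	// ticker does not outlive this function.
	defer ticker.Stop()
	for {
		select {
		case <-alarm:
			return fmt.Errorf("condition not met within %v", timeout)
		case <-ticker.C:
			if check() {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	err := pollUntil(func() bool { return time.Since(start) > time.Second },
		250*time.Millisecond, 15*time.Second)
	fmt.Println(err) // <nil>: the condition is met after about a second
}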
Example #2
// WaitForNUnits periodically runs `fleetctl list-units` until the number of
// units it reports matches the expected number, then returns their states.
func (nc *nspawnCluster) WaitForNUnits(m Member, expectedUnits int) (map[string][]util.UnitState, error) {
	var nUnits int
	retStates := make(map[string][]util.UnitState)
	checkListUnits := func() bool {
		outListUnits, _, err := nc.Fleetctl(m, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
		if err != nil {
			return false
		}
		// NOTE: There is no need to check whether outListUnits is empty,
		// because ParseUnitStates() implicitly filters out empty lines.
		// If ParseUnitStates() ever goes away, this special case must be
		// handled here again.
		units := strings.Split(strings.TrimSpace(outListUnits), "\n")
		allStates := util.ParseUnitStates(units)
		nUnits = len(allStates)
		if nUnits != expectedUnits {
			return false
		}

		for _, state := range allStates {
			name := state.Name
			if _, ok := retStates[name]; !ok {
				retStates[name] = []util.UnitState{}
			}
			retStates[name] = append(retStates[name], state)
		}
		return true
	}

	timeout, err := util.WaitForState(checkListUnits)
	if err != nil {
		return nil, fmt.Errorf("failed to find %d units within %v (last found: %d)",
			expectedUnits, timeout, nUnits)
	}

	return retStates, nil
}
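
Example #2 above (and Example #3 below) delegates the timeout plumbing of Example #1 to util.WaitForState, which takes a check function and returns how long it waited together with an error on timeout. A plausible sketch of such a helper, assuming the same 15-second timeout and 250ms poll interval as Example #1 (the actual fleet implementation may differ):

package util

import (
	"fmt"
	"time"
)

// WaitForState polls stateChecker every 250ms until it returns true or a
// 15-second deadline passes. The returned duration lets callers report how
// long they waited in their own error messages.
func WaitForState(stateChecker func() bool) (time.Duration, error) {
	timeout := 15 * time.Second // assumed default; not taken from the fleet source
	alarm := time.After(timeout)
	ticker := time.NewTicker(250 * time.Millisecond)
	defer ticker.Stop()
	start := time.Now()
	for {
		select {
		case <-alarm:
			return timeout, fmt.Errorf("failed to reach expected state within %v", timeout)
		case <-ticker.C:
			if stateChecker() {
				return time.Since(start), nil
			}
		}
	}
}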
Example #3
// WaitForNActiveUnits is the util.WaitForState-based rewrite of the poll loop
// in Example #1: it retries list-units until count active units are found or
// the wait times out.
func (nc *nspawnCluster) WaitForNActiveUnits(m Member, count int) (map[string][]util.UnitState, error) {
	var nactive int
	states := make(map[string][]util.UnitState)

	timeout, err := util.WaitForState(
		func() bool {
			stdout, _, err := nc.Fleetctl(m, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
			if err != nil {
				return false
			}
			stdout = strings.TrimSpace(stdout)

			lines := strings.Split(stdout, "\n")
			allStates := util.ParseUnitStates(lines)
			active := util.FilterActiveUnits(allStates)
			nactive = len(active)
			if nactive != count {
				return false
			}

			for _, state := range active {
				name := state.Name
				if _, ok := states[name]; !ok {
					states[name] = []util.UnitState{}
				}
				states[name] = append(states[name], state)
			}
			return true
		},
	)
	if err != nil {
		return nil, fmt.Errorf("failed to find %d active units within %v (last found: %d)", count, timeout, nactive)
	}

	return states, nil
}
Example #4
func TestScheduleOneWayConflict(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	// Start with a simple single-node cluster
	members, err := platform.CreateNClusterMembers(cluster, 1)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	if _, err := cluster.WaitForNMachines(m0, 1); err != nil {
		t.Fatal(err)
	}

	// Start a unit that conflicts with a yet-to-be-scheduled unit
	name := "fixtures/units/conflicts-with-hello.service"
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", name); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
	}

	active, err := cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err := util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	// Start a unit that does not declare any conflicts
	name = "fixtures/units/hello.service"
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", name); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
	}

	// Both units should show up, but only conflicts-with-hello.service
	// should report ACTIVE
	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 2 {
		t.Fatalf("Did not find two units in cluster: \n%s", stdout)
	}
	active, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	for unit := range states {
		if unit != "conflicts-with-hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}

	// Destroying the conflicting unit should allow the other to start
	name = "conflicts-with-hello.service"
	if _, _, err := cluster.Fleetctl(m0, "destroy", name); err != nil {
		t.Fatalf("Failed destroying %s", name)
	}

	// NOTE: we need to sleep briefly here to avoid occasional errors where
	// conflicts-with-hello.service gets rescheduled even after being destroyed.
	// In that case the conflicting unit stays active while the original
	// hello.service stays inactive, and the test fails at the end with
	// "Incorrect unit started". This error seems to occur frequently when
	// enable_grpc is turned on.
	// - dpark 20160615
	time.Sleep(1 * time.Second)

	// Wait for the destroyed unit to actually disappear
	timeout, err := util.WaitForState(
		func() bool {
			stdout, _, err := cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
			if err != nil {
				return false
			}
			lines := strings.Split(strings.TrimSpace(stdout), "\n")
			states := util.ParseUnitStates(lines)
			for _, state := range states {
				if state.Name == name {
					return false
				}
			}
			return true
		},
	)
	if err != nil {
		t.Fatalf("Destroyed unit %s not gone within %v", name, timeout)
	}

	active, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}
	for unit := range states {
		if unit != "hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}
}
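
The test above leans on util.ActiveToSingleStates to flatten the map of per-unit state lists returned by WaitForNActiveUnits into a single state per unit. A minimal sketch of what that helper presumably does, failing when a unit reports more than one state (the error wording is an assumption; UnitState is the type used throughout these examples):

package util

import "fmt"

// ActiveToSingleStates flattens map[name][]UnitState into map[name]UnitState,
// erroring if any unit is reported in more than one location.
func ActiveToSingleStates(active map[string][]UnitState) (map[string]UnitState, error) {
	states := make(map[string]UnitState, len(active))
	for name, us := range active {
		if len(us) != 1 {
			// Assumed behavior: a unit active on several machines is a failure.
			return nil, fmt.Errorf("unit %s has %d states, expected exactly 1", name, len(us))
		}
		states[name] = us[0]
	}
	return states, nil
}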
Example #5
// Start three machines and test template units based on machine metadata
func TestTemplatesWithSpecifiersInMetadata(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	members, err := platform.CreateNClusterMembers(cluster, 3)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	_, err = cluster.WaitForNMachines(m0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Submit one template
	if stdout, stderr, err := cluster.Fleetctl(m0, "submit", "fixtures/units/metadata@.service"); err != nil {
		t.Fatalf("Unable to submit metadata@.service template: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	// Start units based on the template, in reverse member order
	for i := len(members) - 1; i >= 0; i-- {
		if stdout, stderr, err := cluster.Fleetctl(m0, "start", fmt.Sprintf("fixtures/units/metadata@smoke%s.service", members[i].ID())); err != nil {
			t.Fatalf("Unable to start template based unit: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
		}
	}

	_, err = cluster.WaitForNActiveUnits(m0, 3)
	if err != nil {
		t.Fatal(err)
	}

	stdout, stderr, err := cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
	if err != nil {
		t.Fatalf("Unable to get submitted units: \nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
	}

	ndesired := 3
	stdout = strings.TrimSpace(stdout)
	lines := strings.Split(stdout, "\n")
	allStates := util.ParseUnitStates(lines)
	active := util.FilterActiveUnits(allStates)
	nactive := len(active)
	if nactive != ndesired {
		t.Fatalf("Failed to get %d active units: \nstdout: %s\nstderr: %s", ndesired, stdout, stderr)
	}

	// Compile the specifier pattern once, outside the loop: it captures the
	// instance name between '@' and the first '.'.
	re := regexp.MustCompile(`@([^.]*)`)
	for _, state := range active {
		desiredMachine := re.FindStringSubmatch(state.Name)
		if len(desiredMachine) < 2 {
			t.Fatalf("Cannot parse state.Name (%v): \nstdout: %s\nstderr: %s", state.Name, stdout, stderr)
		}
		currentMachine := fmt.Sprintf("smoke%s", state.Machine)
		if desiredMachine[1] != currentMachine {
			t.Fatalf("Template (%s) has been scheduled on wrong machine (%s): \nstdout: %s\nstderr: %s", state.Name, currentMachine, stdout, stderr)
		}
	}

	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--block-attempts=20", "fixtures/units/metadata@invalid.service"); err == nil {
		t.Fatalf("metadata@invalid unit should not be scheduled: \nstdout: %s\nstderr: %s", stdout, stderr)
	}
}
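
The per-unit check in Example #5 hinges on the regexp @([^.]*), which captures the template instance name between '@' and the first '.'. A small standalone demonstration of that extraction (the unit names below are made up for illustration):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`@([^.]*)`)
	for _, name := range []string{"metadata@smoke1a2b3c.service", "plain.service"} {
		m := re.FindStringSubmatch(name)
		if len(m) < 2 {
			// Non-template units produce no match, like the t.Fatalf branch above.
			fmt.Printf("%s: no template instance\n", name)
			continue
		}
		fmt.Printf("%s -> instance %q\n", name, m[1]) // e.g. instance "smoke1a2b3c"
	}
}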
Example #6
func TestScheduleOneWayConflict(t *testing.T) {
	cluster, err := platform.NewNspawnCluster("smoke")
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Destroy(t)

	// Start with a simple single-node cluster
	members, err := platform.CreateNClusterMembers(cluster, 1)
	if err != nil {
		t.Fatal(err)
	}
	m0 := members[0]
	if _, err := cluster.WaitForNMachines(m0, 1); err != nil {
		t.Fatal(err)
	}

	// Start a unit that conflicts with a yet-to-be-scheduled unit
	name := "fixtures/units/conflicts-with-hello.service"
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", name); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
	}

	active, err := cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err := util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	// Start a unit that does not declare any conflicts
	name = "fixtures/units/hello.service"
	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", name); err != nil {
		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
	}

	// Both units should show up, but only conflicts-with-hello.service
	// should report ACTIVE
	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
	if err != nil {
		t.Fatalf("Failed to run list-unit-files: %v", err)
	}
	units := strings.Split(strings.TrimSpace(stdout), "\n")
	if len(units) != 2 {
		t.Fatalf("Did not find two units in cluster: \n%s", stdout)
	}
	active, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}

	for unit := range states {
		if unit != "conflicts-with-hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}

	// Destroying the conflicting unit should allow the other to start
	name = "conflicts-with-hello.service"
	if _, _, err := cluster.Fleetctl(m0, "destroy", name); err != nil {
		t.Fatalf("Failed destroying %s", name)
	}

	// Wait for the destroyed unit to actually disappear
	timeout, err := util.WaitForState(
		func() bool {
			stdout, _, err := cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
			if err != nil {
				return false
			}
			lines := strings.Split(strings.TrimSpace(stdout), "\n")
			states := util.ParseUnitStates(lines)
			for _, state := range states {
				if state.Name == name {
					return false
				}
			}
			return true
		},
	)
	if err != nil {
		t.Fatalf("Destroyed unit %s not gone within %v", name, timeout)
	}

	active, err = cluster.WaitForNActiveUnits(m0, 1)
	if err != nil {
		t.Fatal(err)
	}
	states, err = util.ActiveToSingleStates(active)
	if err != nil {
		t.Fatal(err)
	}
	for unit := range states {
		if unit != "hello.service" {
			t.Error("Incorrect unit started:", unit)
		}
	}
}