Example #1
// spawnHosts calls out to the embedded CloudManager to spawn hosts. It takes
// a map of distro name -> number of hosts to spawn for that distro, and
// returns a map of distro name -> hosts spawned, plus an error if one occurs.
func (s *Scheduler) spawnHosts(newHostsNeeded map[string]int) (
	map[string][]host.Host, error) {

	// loop over the distros, spawning up the appropriate number of hosts
	// for each distro
	hostsSpawnedPerDistro := make(map[string][]host.Host)
	for distroId, numHostsToSpawn := range newHostsNeeded {

		if numHostsToSpawn == 0 {
			continue
		}

		hostsSpawnedPerDistro[distroId] = make([]host.Host, 0, numHostsToSpawn)
		for i := 0; i < numHostsToSpawn; i++ {
			d, err := distro.FindOne(distro.ById(distroId))
			if err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "Failed to find distro '%v': %v", distroId, err)
				continue
			}

			allDistroHosts, err := host.Find(host.ByDistroId(distroId))
			if err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "Error getting hosts for distro %v: %v", distroId, err)
				continue
			}

			if len(allDistroHosts) >= d.PoolSize {
				evergreen.Logger.Logf(slogger.ERROR, "Already at max (%v) hosts for distro '%v'",
					d.PoolSize,
					distroId)
				continue
			}

			cloudManager, err := providers.GetCloudManager(d.Provider, s.Settings)
			if err != nil {
				evergreen.Logger.Errorf(slogger.ERROR, "Error getting cloud manager for distro %v: %v", distroId, err)
				continue
			}

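			// spawn the instance as an Evergreen-owned host rather than a user-requested spawn host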
			hostOptions := cloud.HostOptions{
				UserName: evergreen.User,
				UserHost: false,
			}
			newHost, err := cloudManager.SpawnInstance(d, hostOptions)
			if err != nil {
				evergreen.Logger.Errorf(slogger.ERROR, "Error spawning instance for distro %v: %v",
					distroId, err)
				continue
			}
			hostsSpawnedPerDistro[distroId] =
				append(hostsSpawnedPerDistro[distroId], *newHost)

		}
		// if no hosts were spawned successfully for this distro, drop its entry
		if len(hostsSpawnedPerDistro[distroId]) == 0 {
			delete(hostsSpawnedPerDistro, distroId)
		}
	}
	return hostsSpawnedPerDistro, nil
}
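As a usage illustration, here is a minimal, self-contained sketch of the accumulation pattern used by spawnHosts: walk the distro -> count map, cap each distro at its pool size, and drop any distro for which nothing was spawned. The fakeDistro and fakeHost types and the spawnSketch function are hypothetical stand-ins, not part of the Evergreen codebase; the real method goes through providers.GetCloudManager and SpawnInstance instead.

package main

import "fmt"

// fakeDistro and fakeHost are hypothetical stand-ins for distro.Distro and host.Host.
type fakeDistro struct {
	Id       string
	PoolSize int
}

type fakeHost struct{ Id string }

// spawnSketch mirrors the shape of (*Scheduler).spawnHosts above, with the
// cloud-manager call replaced by a local stub.
func spawnSketch(newHostsNeeded map[string]int, distros map[string]fakeDistro,
	existing map[string][]fakeHost) map[string][]fakeHost {

	spawned := make(map[string][]fakeHost)
	for distroId, numHostsToSpawn := range newHostsNeeded {
		if numHostsToSpawn == 0 {
			continue
		}
		d := distros[distroId]
		spawned[distroId] = make([]fakeHost, 0, numHostsToSpawn)
		for i := 0; i < numHostsToSpawn; i++ {
			// respect the distro's pool size, counting hosts that already exist
			if len(existing[distroId])+len(spawned[distroId]) >= d.PoolSize {
				break
			}
			spawned[distroId] = append(spawned[distroId], fakeHost{Id: fmt.Sprintf("%v-%v", distroId, i)})
		}
		// drop the entry if nothing was spawned for the distro
		if len(spawned[distroId]) == 0 {
			delete(spawned, distroId)
		}
	}
	return spawned
}

func main() {
	distros := map[string]fakeDistro{"ubuntu1604": {Id: "ubuntu1604", PoolSize: 3}}
	existing := map[string][]fakeHost{"ubuntu1604": {{Id: "ubuntu1604-existing"}}}
	// asks for 5 hosts, but only 2 fit under the pool size of 3
	fmt.Println(spawnSketch(map[string]int{"ubuntu1604": 5}, distros, existing))
}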
Example #2
// flagExcessHosts is a hostFlaggingFunc that returns all hosts pushing their
// distros over the configured maximum pool size
func flagExcessHosts(distros []distro.Distro, s *evergreen.Settings) ([]host.Host, error) {
	// will ultimately contain all the hosts that can be terminated
	excessHosts := []host.Host{}

	// figure out the excess hosts for each distro
	for _, d := range distros {

		// fetch any hosts for the distro that count towards max hosts
		allHostsForDistro, err := host.Find(host.ByDistroId(d.Id))
		if err != nil {
			return nil, fmt.Errorf("error fetching hosts for distro %v: %v", d.Id, err)
		}

		// if there are more than the specified max hosts, then terminate
		// some, if they are not running tasks
		numExcessHosts := len(allHostsForDistro) - d.PoolSize
		if numExcessHosts > 0 {

			// track how many hosts for the distro are terminated
			counter := 0

			for _, h := range allHostsForDistro {

				// if the host cannot be terminated (e.g. it was not
				// dynamically spun up), skip it
				canTerminate, err := hostCanBeTerminated(h, s)
				if err != nil {
					return nil, fmt.Errorf("error checking if host %v can be terminated: %v", h.Id, err)
				}
				if !canTerminate {
					continue
				}

				// if the host is not running a task, it can be
				// safely terminated
				if h.RunningTask == "" {
					excessHosts = append(excessHosts, h)
					counter++
				}

				// break if we've marked enough to be terminated
				if counter == numExcessHosts {
					break
				}
			}

			evergreen.Logger.Logf(slogger.INFO, "Found %v excess hosts for distro %v", counter, d.Id)

		}

	}
	return excessHosts, nil
}
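And a similar self-contained sketch of the flagging logic in flagExcessHosts: for each distro, count the hosts beyond the pool size and flag idle, terminable hosts until that excess is covered. The sketchHost type and the provider check are hypothetical stand-ins for host.Host and hostCanBeTerminated.

package main

import "fmt"

// sketchHost is a hypothetical stand-in for host.Host.
type sketchHost struct {
	Id          string
	Provider    string
	RunningTask string
}

// flagExcessSketch mirrors flagExcessHosts above: it returns up to
// (len(hosts) - poolSize) hosts that are dynamically provisioned and idle.
func flagExcessSketch(hosts []sketchHost, poolSize int) []sketchHost {
	excess := []sketchHost{}
	numExcess := len(hosts) - poolSize
	if numExcess <= 0 {
		return excess
	}
	counter := 0
	for _, h := range hosts {
		// only dynamically spun-up hosts can be terminated (stand-in check)
		if h.Provider == "static" {
			continue
		}
		// only idle hosts are safe to terminate
		if h.RunningTask == "" {
			excess = append(excess, h)
			counter++
		}
		// stop once enough hosts have been flagged
		if counter == numExcess {
			break
		}
	}
	return excess
}

func main() {
	hosts := []sketchHost{
		{Id: "h1", Provider: "ec2", RunningTask: "task-1"},
		{Id: "h2", Provider: "ec2"},
		{Id: "h3", Provider: "static"},
		{Id: "h4", Provider: "ec2"},
	}
	fmt.Println(flagExcessSketch(hosts, 2)) // expect h2 and h4 to be flagged
}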