Example #1
File: setup.go Project: tychoish/evergreen
// setupHost runs the specified setup script for an individual host. Returns
// the output from running the script remotely, as well as any error that
// occurs. If the script exits with a non-zero exit code, the error will be non-nil.
func (init *HostInit) setupHost(targetHost *host.Host) (string, error) {

	// fetch the appropriate cloud provider for the host
	cloudMgr, err := providers.GetCloudManager(targetHost.Provider, init.Settings)
	if err != nil {
		return "",
			fmt.Errorf("failed to get cloud manager for host %v with provider %v: %v",
				targetHost.Id, targetHost.Provider, err)
	}

	// mark the host as initializing
	if err := targetHost.SetInitializing(); err != nil {
		if err == mgo.ErrNotFound {
			return "", ErrHostAlreadyInitializing
		} else {
			return "", fmt.Errorf("database error: %v", err)
		}
	}

	/* TESTING ONLY
	setupDebugSSHTunnel(path_to_ssh_key, targetHost.User, targetHost.Host)
	*/

	// run the function scheduled for when the host is up
	err = cloudMgr.OnUp(targetHost)
	if err != nil {
		// if this fails it is probably due to an API hiccup, so we keep going.
		evergreen.Logger.Logf(slogger.WARN, "OnUp callback failed for host '%v': '%v'", targetHost.Id, err)
	}
	cloudHost, err := providers.GetCloudHost(targetHost, init.Settings)
	if err != nil {
		return "", fmt.Errorf("failed to get cloud host for %v: %v", targetHost.Id, err)
	}
	sshOptions, err := cloudHost.GetSSHOptions()
	if err != nil {
		return "", fmt.Errorf("error getting ssh options for host %v: %v", targetHost.Id, err)
	}

	if targetHost.Distro.Teardown != "" {
		err = init.copyScript(targetHost, teardownScriptName, targetHost.Distro.Teardown)
		if err != nil {
			return "", fmt.Errorf("error copying script %v to host %v: %v",
				teardownScriptName, targetHost.Id, err)
		}
	}

	if targetHost.Distro.Setup != "" {
		err = init.copyScript(targetHost, setupScriptName, targetHost.Distro.Setup)
		if err != nil {
			return "", fmt.Errorf("error copying script %v to host %v: %v",
				setupScriptName, targetHost.Id, err)
		}
		logs, err := hostutil.RunRemoteScript(targetHost, setupScriptName, sshOptions)
		if err != nil {
			return logs, fmt.Errorf("error running setup script over ssh: %v", err)
		}
		return logs, nil
	}
	return "", nil
}
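For orientation, here is a minimal sketch of how a caller might drive setupHost, assuming the HostInit receiver and ErrHostAlreadyInitializing sentinel shown above; the provisionPending helper is hypothetical and only illustrates the error contract (the remote output is returned even when the script fails):

// Hypothetical caller; HostInit, host.Host, and ErrHostAlreadyInitializing
// are the ones from the example above, provisionPending is invented.
func (init *HostInit) provisionPending(hosts []host.Host) {
	for i := range hosts {
		logs, err := init.setupHost(&hosts[i])
		if err == ErrHostAlreadyInitializing {
			// another runner won the race for this host; skip it
			continue
		}
		if err != nil {
			// setupHost returns the remote output even when the script fails
			evergreen.Logger.Logf(slogger.ERROR,
				"setup of host '%v' failed: %v (output: %v)", hosts[i].Id, err, logs)
		}
	}
}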
Example #2
// Call out to the embedded CloudManager to spawn hosts.  Takes in a map of
// distro -> number of hosts to spawn for the distro.
// Returns a map of distro -> hosts spawned, and an error if one occurs.
func (s *Scheduler) spawnHosts(newHostsNeeded map[string]int) (
	map[string][]host.Host, error) {

	// loop over the distros, spawning up the appropriate number of hosts
	// for each distro
	hostsSpawnedPerDistro := make(map[string][]host.Host)
	for distroId, numHostsToSpawn := range newHostsNeeded {

		if numHostsToSpawn == 0 {
			continue
		}

		hostsSpawnedPerDistro[distroId] = make([]host.Host, 0, numHostsToSpawn)
		for i := 0; i < numHostsToSpawn; i++ {
			d, err := distro.FindOne(distro.ById(distroId))
			if err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "Failed to find distro '%v': %v", distroId, err)
			}

			allDistroHosts, err := host.Find(host.ByDistroId(distroId))
			if err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "Error getting hosts for distro %v: %v", distroId, err)
				continue
			}

			if len(allDistroHosts) >= d.PoolSize {
				evergreen.Logger.Logf(slogger.ERROR, "Already at max (%v) hosts for distro '%v'",
					d.PoolSize,
					distroId)
				continue
			}

			cloudManager, err := providers.GetCloudManager(d.Provider, s.Settings)
			if err != nil {
				evergreen.Logger.Errorf(slogger.ERROR, "Error getting cloud manager for distro: %v", err)
				continue
			}

			hostOptions := cloud.HostOptions{
				UserName: evergreen.User,
				UserHost: false,
			}
			newHost, err := cloudManager.SpawnInstance(d, hostOptions)
			if err != nil {
				evergreen.Logger.Errorf(slogger.ERROR, "Error spawning instance: %v,",
					err)
				continue
			}
			hostsSpawnedPerDistro[distroId] =
				append(hostsSpawnedPerDistro[distroId], *newHost)

		}
		// if none were spawned successfully
		if len(hostsSpawnedPerDistro[distroId]) == 0 {
			delete(hostsSpawnedPerDistro, distroId)
		}
	}
	return hostsSpawnedPerDistro, nil
}
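A hedged sketch of a call site for spawnHosts, matching the doc comment's map shape (distro -> number of hosts to spawn); the distro IDs, counts, and runSchedulerPass wrapper are invented for illustration:

// Hypothetical call site; the distro IDs and counts are illustrative.
func runSchedulerPass(s *Scheduler) error {
	newHostsNeeded := map[string]int{
		"ubuntu1604": 3,
		"windows64":  1,
	}
	hostsSpawned, err := s.spawnHosts(newHostsNeeded)
	if err != nil {
		return err
	}
	for distroId, hosts := range hostsSpawned {
		evergreen.Logger.Logf(slogger.INFO,
			"spawned %v hosts for distro '%v'", len(hosts), distroId)
	}
	return nil
}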
Example #3
// IsHostReady returns whether or not the specified host is ready for its setup script
// to be run.
func (init *HostInit) IsHostReady(host *host.Host) (bool, error) {

	// fetch the appropriate cloud provider for the host
	cloudMgr, err := providers.GetCloudManager(host.Distro.Provider, init.Settings)
	if err != nil {
		return false,
			fmt.Errorf("failed to get cloud manager for provider %v: %v", host.Distro.Provider, err)
	}

	// ask for the instance's status
	hostStatus, err := cloudMgr.GetInstanceStatus(host)
	if err != nil {
		return false, fmt.Errorf("error checking instance status of host %v: %v", host.Id, err)
	}

	// if the host isn't up yet, we can't do anything
	if hostStatus != cloud.StatusRunning {
		return false, nil
	}

	// set the host's dns name, if it is not set
	if host.Host == "" {

		// get the DNS name for the host
		hostDNS, err := cloudMgr.GetDNSName(host)
		if err != nil {
			return false, fmt.Errorf("error checking DNS name for host %v: %v", host.Id, err)
		}

		// sanity check for the host DNS name
		if hostDNS == "" {
			return false, fmt.Errorf("instance %v is running but not returning a DNS name",
				host.Id)
		}

		// update the host's DNS name
		if err := host.SetDNSName(hostDNS); err != nil {
			return false, fmt.Errorf("error setting DNS name for host %v: %v", host.Id, err)
		}

	}

	// check if the host is reachable via SSH
	cloudHost, err := providers.GetCloudHost(host, init.Settings)
	if err != nil {
		return false, fmt.Errorf("failed to get cloud host for %v: %v", host.Id, err)
	}
	reachable, err := cloudHost.IsSSHReachable()
	if err != nil {
		return false, fmt.Errorf("error checking if host %v is reachable: %v", host.Id, err)
	}

	// at this point, we can run the setup if the host is reachable
	return reachable, nil
}
Example #4
// flagIdleHosts is a hostFlaggingFunc to get all hosts which have spent too
// long without running a task
func flagIdleHosts(d []distro.Distro, s *evergreen.Settings) ([]host.Host, error) {
	// will ultimately contain all of the hosts determined to be idle
	idleHosts := []host.Host{}

	// fetch all hosts not currently running a task
	freeHosts, err := host.Find(host.IsFree)
	if err != nil {
		return nil, fmt.Errorf("error finding free hosts: %v", err)
	}

	// go through the hosts, and see if they have idled long enough to
	// be terminated
	for _, host := range freeHosts {

		// ask the host how long it has been idle
		idleTime := host.IdleTime()

		// get a cloud manager for the host
		cloudManager, err := providers.GetCloudManager(host.Provider, s)
		if err != nil {
			return nil, fmt.Errorf("error getting cloud manager for host %v: %v", host.Id, err)
		}

		// if the host is not dynamically spun up (and can thus be terminated),
		// skip it
		canTerminate, err := hostCanBeTerminated(host, s)
		if err != nil {
			return nil, fmt.Errorf("error checking if host %v can be terminated: %v", host.Id, err)
		}
		if !canTerminate {
			continue
		}

		// ask how long until the next payment for the host
		tilNextPayment := cloudManager.TimeTilNextPayment(&host)

		// current determinants for idle:
		//  idle for at least 15 minutes and
		//  less than 5 minutes til next payment
		if idleTime >= 15*time.Minute && tilNextPayment <= 5*time.Minute {
			idleHosts = append(idleHosts, host)
		}
	}

	return idleHosts, nil
}
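The comment calls flagIdleHosts a hostFlaggingFunc, but that type's declaration is not among these examples. A plausible definition consistent with the signature above (the project's actual declaration may differ) is:

// Plausible declaration matching the signature above; the project's
// actual type may differ.
type hostFlaggingFunc func([]distro.Distro, *evergreen.Settings) ([]host.Host, error)

// a monitor loop could then apply each flagger in turn:
var hostFlaggers = []hostFlaggingFunc{
	flagIdleHosts,
	// ... other flagging functions (hypothetical)
}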
Example #5
// helper to check if a host can be terminated
func hostCanBeTerminated(h host.Host, s *evergreen.Settings) (bool, error) {
	// get a cloud manager for the host
	cloudManager, err := providers.GetCloudManager(h.Provider, s)
	if err != nil {
		return false, fmt.Errorf("error getting cloud manager for host %v: %v", h.Id, err)
	}

	// if the host is not part of a spawnable distro, then it was not
	// dynamically spun up and as such cannot be terminated
	canSpawn, err := cloudManager.CanSpawn()
	if err != nil {
		return false, fmt.Errorf("error checking if cloud manager for host %v supports spawning: %v", h.Id, err)
	}

	return canSpawn, nil
}
Example #6
// numNewHostsForDistro determines how many new hosts should be spun up for an
// individual distro
func (self *DeficitBasedHostAllocator) numNewHostsForDistro(
	hostAllocatorData *HostAllocatorData, distro distro.Distro, settings *evergreen.Settings) int {

	cloudManager, err := providers.GetCloudManager(distro.Provider, settings)

	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Couldn't get cloud manager for distro %v with provider %v: %v",
			distro.Id, distro.Provider, err)
		return 0
	}

	can, err := cloudManager.CanSpawn()
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Couldn't check if cloud provider %v is spawnable: %v",
			distro.Provider, err)
		return 0
	}
	if !can {
		return 0
	}

	existingDistroHosts := hostAllocatorData.existingDistroHosts[distro.Id]
	runnableDistroTasks := hostAllocatorData.
		taskQueueItems[distro.Id]

	freeHosts := make([]host.Host, 0, len(existingDistroHosts))
	for _, existingDistroHost := range existingDistroHosts {
		if existingDistroHost.RunningTask == "" {
			freeHosts = append(freeHosts, existingDistroHost)
		}
	}

	numNewHosts := util.Min(
		// the deficit of available hosts vs. tasks to be run
		len(runnableDistroTasks)-len(freeHosts),
		// the maximum number of new hosts we're allowed to spin up
		distro.PoolSize-len(existingDistroHosts),
	)

	// cap to zero as lower bound
	if numNewHosts < 0 {
		numNewHosts = 0
	}
	return numNewHosts
}
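To make the deficit arithmetic concrete: with 10 runnable tasks, 4 free hosts, 9 existing hosts, and a pool size of 12, the allocator wants min(10-4, 12-9) = 3 new hosts. A standalone restatement of just that computation, with invented numbers:

// Standalone illustration of the deficit computation above; the
// numbers are invented for the example.
func exampleDeficit() int {
	runnableTasks, freeHosts := 10, 4
	poolSize, existingHosts := 12, 9

	numNewHosts := runnableTasks - freeHosts // deficit of tasks over free hosts: 6
	if headroom := poolSize - existingHosts; numNewHosts > headroom {
		numNewHosts = headroom // capped by remaining pool capacity: 3
	}
	if numNewHosts < 0 {
		numNewHosts = 0 // lower bound of zero
	}
	return numNewHosts // 3
}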
Example #7
File: api.go Project: tychoish/evergreen
// updateTaskCost determines a task's cost based on the host it ran on. Hosts that
// are unable to calculate their own costs will not set a task's Cost field. Errors
// are logged but not returned, since any number of API failures could happen and
// we shouldn't sacrifice a task's status for them.
func (as *APIServer) updateTaskCost(t *task.Task, h *host.Host, finishTime time.Time) {
	manager, err := providers.GetCloudManager(h.Provider, &as.Settings)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR,
			"Error loading provider for host %v cost calculation: %v ", t.HostId, err)
		return
	}
	if calc, ok := manager.(cloud.CloudCostCalculator); ok {
		evergreen.Logger.Logf(slogger.INFO, "Calculating cost for task %v", t.Id)
		cost, err := calc.CostForDuration(h, t.StartTime, finishTime)
		if err != nil {
			evergreen.Logger.Logf(slogger.ERROR,
				"Error calculating cost for task %v: %v ", t.Id, err)
			return
		}
		if err := t.SetCost(cost); err != nil {
			evergreen.Logger.Logf(slogger.ERROR,
				"Error updating cost for task %v: %v ", t.Id, err)
			return
		}
	}
}
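The pattern worth noting here is the optional interface: cost accounting only happens for managers that also implement cloud.CloudCostCalculator. A self-contained sketch of the same Go idiom, with hypothetical local types standing in for the real ones:

package main

import "fmt"

// CloudManager is the base capability; CostCalculator is optional,
// mirroring cloud.CloudCostCalculator in the example above. All of
// these types are invented for the illustration.
type CloudManager interface{ Name() string }

type CostCalculator interface{ CostForHours(hours float64) float64 }

type ec2Manager struct{}

func (ec2Manager) Name() string { return "ec2" }

func (ec2Manager) CostForHours(hours float64) float64 { return 0.10 * hours }

type staticManager struct{}

func (staticManager) Name() string { return "static" }

func main() {
	for _, m := range []CloudManager{ec2Manager{}, staticManager{}} {
		// type-assert to the optional interface, as updateTaskCost does
		if calc, ok := m.(CostCalculator); ok {
			fmt.Printf("%s: $%.2f for 3h\n", m.Name(), calc.CostForHours(3))
		} else {
			fmt.Printf("%s: no cost model\n", m.Name())
		}
	}
}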
Example #8
// numNewHostsForDistro determines how many new hosts should be spun up for an
// individual distro.
func (self *DurationBasedHostAllocator) numNewHostsForDistro(
	hostAllocatorData *HostAllocatorData, distro distro.Distro,
	tasksAccountedFor map[string]bool,
	distroScheduleData map[string]DistroScheduleData, settings *evergreen.Settings) (numNewHosts int,
	err error) {

	projectTaskDurations := hostAllocatorData.projectTaskDurations
	existingDistroHosts := hostAllocatorData.existingDistroHosts[distro.Id]
	taskQueueItems := hostAllocatorData.taskQueueItems[distro.Id]
	taskRunDistros := hostAllocatorData.taskRunDistros

	// determine how many free hosts we have
	numFreeHosts := 0
	for _, existingDistroHost := range existingDistroHosts {
		if existingDistroHost.RunningTask == "" {
			numFreeHosts += 1
		}
	}

	// determine the total remaining running time of all
	// tasks currently running on the hosts for this distro
	runningTasksDuration, err := computeRunningTasksDuration(
		existingDistroHosts, projectTaskDurations)

	if err != nil {
		return numNewHosts, err
	}

	// construct the data needed by computeScheduledTasksDuration
	scheduledDistroTasksData := &ScheduledDistroTasksData{
		taskQueueItems:    taskQueueItems,
		tasksAccountedFor: tasksAccountedFor,
		taskRunDistros:    taskRunDistros,
		currentDistroId:   distro.Id,
	}

	// determine the total expected running time of all scheduled
	// tasks for this distro
	scheduledTasksDuration, sharedTasksDuration :=
		computeScheduledTasksDuration(scheduledDistroTasksData)

	// find the number of new hosts needed based on the total estimated
	// duration for all outstanding and in-flight tasks for this distro
	durationBasedNumNewHosts := computeDurationBasedNumNewHosts(
		scheduledTasksDuration, runningTasksDuration,
		float64(len(existingDistroHosts)), MaxDurationPerDistroHost)

	// revise the new host estimate based on the cap of the number of new hosts
	// and the number of free hosts
	numNewHosts = numNewDistroHosts(distro.PoolSize, len(existingDistroHosts),
		numFreeHosts, durationBasedNumNewHosts, len(taskQueueItems))

	// create an entry for this distro in the scheduling map
	distroScheduleData[distro.Id] = DistroScheduleData{
		nominalNumNewHosts:   numNewHosts,
		numFreeHosts:         numFreeHosts,
		poolSize:             distro.PoolSize,
		taskQueueLength:      len(taskQueueItems),
		sharedTasksDuration:  sharedTasksDuration,
		runningTasksDuration: runningTasksDuration,
		numExistingHosts:     len(existingDistroHosts),
		totalTasksDuration:   scheduledTasksDuration + runningTasksDuration,
	}

	cloudManager, err := providers.GetCloudManager(distro.Provider, settings)
	if err != nil {
		return 0, evergreen.Logger.Errorf(slogger.ERROR, "Couldn't get cloud manager for %v (%v): %v",
			distro.Provider, distro.Id, err)
	}

	can, err := cloudManager.CanSpawn()
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR,
			"Error checking if '%v' provider can spawn hosts: %v", distro.Provider, err)
		return 0, nil
	}
	if !can {
		return 0, nil
	}

	// revise the nominal number of new hosts if needed
	numNewHosts = orderedScheduleNumNewHosts(distroScheduleData, distro.Id,
		MaxDurationPerDistroHost, SharedTasksAllocationProportion)

	evergreen.Logger.Logf(slogger.INFO, "Spawning %v additional hosts for %v - "+
		"currently at %v existing hosts (%v free)", numNewHosts, distro.Id,
		len(existingDistroHosts), numFreeHosts)

	evergreen.Logger.Logf(slogger.INFO, "Total estimated time to process all '%v' "+
		"scheduled tasks is %v; %v running tasks at %v, %v pending tasks at "+
		"%v (shared tasks duration map: %v)",
		distro.Id,
		time.Duration(scheduledTasksDuration+runningTasksDuration)*time.Second,
		len(existingDistroHosts)-numFreeHosts,
		time.Duration(runningTasksDuration)*time.Second,
		len(taskQueueItems),
		time.Duration(scheduledTasksDuration)*time.Second,
		sharedTasksDuration)

	return numNewHosts, nil
}
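computeDurationBasedNumNewHosts is not reproduced among these examples. A hedged reading of the heuristic, given the MaxDurationPerDistroHost cap passed above, is "enough hosts that the total outstanding work divided across all hosts stays under the per-host maximum"; the sketch below (using the standard library's math.Ceil) illustrates that idea only and may differ from the project's actual formula:

// Hedged sketch only; the project's computeDurationBasedNumNewHosts
// may differ. Spread the total outstanding work across existing plus
// new hosts so each host carries at most maxDurationPerHost of work.
func durationBasedNumNewHosts(scheduledTasksDuration, runningTasksDuration,
	numExistingHosts, maxDurationPerHost float64) int {
	hostsNeeded := math.Ceil(
		(scheduledTasksDuration + runningTasksDuration) / maxDurationPerHost)
	numNewHosts := int(hostsNeeded) - int(numExistingHosts)
	if numNewHosts < 0 {
		numNewHosts = 0
	}
	return numNewHosts
}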
Example #9
// setupHost runs the specified setup script for an individual host. Returns
// the output from running the script remotely, as well as any error that
// occurs. If the script exits with a non-zero exit code, the error will be non-nil.
func (init *HostInit) setupHost(targetHost *host.Host) ([]byte, error) {

	// fetch the appropriate cloud provider for the host
	cloudMgr, err := providers.GetCloudManager(targetHost.Provider, init.Settings)
	if err != nil {
		return nil,
			fmt.Errorf("failed to get cloud manager for host %v with provider %v: %v",
				targetHost.Id, targetHost.Provider, err)
	}

	// mark the host as initializing
	if err := targetHost.SetInitializing(); err != nil {
		if err == mgo.ErrNotFound {
			return nil, ErrHostAlreadyInitializing
		} else {
			return nil, fmt.Errorf("database error: %v", err)
		}
	}

	// run the function scheduled for when the host is up
	err = cloudMgr.OnUp(targetHost)
	if err != nil {
		// if this fails it is probably due to an API hiccup, so we keep going.
		evergreen.Logger.Logf(slogger.WARN, "OnUp callback failed for host '%v': '%v'", targetHost.Id, err)
	}

	// run the remote setup script as sudo, if appropriate
	sudoStr := ""
	if targetHost.Distro.SetupAsSudo {
		sudoStr = "sudo "
	}

	// parse the hostname into the user, host and port
	hostInfo, err := util.ParseSSHInfo(targetHost.Host)
	if err != nil {
		return nil, err
	}
	user := targetHost.Distro.User
	if hostInfo.User != "" {
		user = hostInfo.User
	}

	// create a temp file for the setup script
	fileName := "setup.sh"
	file, err := ioutil.TempFile("", fileName)
	if err != nil {
		return nil, fmt.Errorf("error creating setup script: %v", err)
	}
	defer func() {
		file.Close()
		os.Remove(file.Name())
	}()

	// build the setup script
	setup, err := init.buildSetupScript(targetHost)
	if err != nil {
		return nil, fmt.Errorf("error building setup script for host %v: %v", targetHost.Id, err)
	}

	// write the setup script to the file
	if _, err := file.Write([]byte(setup)); err != nil {
		return nil, fmt.Errorf("error writing remote setup script: %v", err)
	}

	cloudHost, err := providers.GetCloudHost(targetHost, init.Settings)
	if err != nil {
		return nil, fmt.Errorf("Failed to get cloud host for %v: %v", targetHost.Id, err)
	}
	sshOptions, err := cloudHost.GetSSHOptions()
	if err != nil {
		return nil, fmt.Errorf("Error getting ssh options for host %v: %v", targetHost.Id, err)
	}

	// copy setup script over to the remote machine
	var scpSetupCmdStderr bytes.Buffer
	scpSetupCmd := &command.ScpCommand{
		Source:         file.Name(),
		Dest:           fileName,
		Stdout:         &scpSetupCmdStderr,
		Stderr:         &scpSetupCmdStderr,
		RemoteHostName: hostInfo.Hostname,
		User:           user,
		Options:        append([]string{"-P", hostInfo.Port}, sshOptions...),
	}

	// run the command to scp the setup script with a timeout
	err = util.RunFunctionWithTimeout(
		scpSetupCmd.Run,
		SCPTimeout,
	)
	if err != nil {
		if err == util.ErrTimedOut {
			scpSetupCmd.Stop()
			return nil, fmt.Errorf("scp-ing setup script timed out")
		}
		return nil, fmt.Errorf("error (%v) copying setup script to remote "+
			"machine: %v", err, scpSetupCmdStderr.String())
	}

	// run command to ssh into remote machine and execute setup script
	var sshSetupCmdStderr bytes.Buffer
	runSetupCmd := &command.RemoteCommand{
		CmdString:      sudoStr + "sh " + fileName,
		Stdout:         &sshSetupCmdStderr,
		Stderr:         &sshSetupCmdStderr,
		RemoteHostName: hostInfo.Hostname,
		User:           user,
		Options:        []string{"-p", hostInfo.Port},
		Background:     false,
	}

	// only force creation of a tty if sudo
	if targetHost.Distro.SetupAsSudo {
		runSetupCmd.Options = []string{"-t", "-t", "-p", hostInfo.Port}
	}
	runSetupCmd.Options = append(runSetupCmd.Options, sshOptions...)

	// run the ssh command with given timeout
	err = util.RunFunctionWithTimeout(
		runSetupCmd.Run,
		time.Duration(SSHTimeoutSeconds)*time.Second,
	)

	return sshSetupCmdStderr.Bytes(), err
}
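util.RunFunctionWithTimeout is used above to bound both the scp and ssh steps, but its body is not shown. Assuming it follows the standard Go shape for running a blocking function with a deadline, it would look roughly like this sketch (errTimedOut stands in for util.ErrTimedOut; note that, as the scp branch above shows with scpSetupCmd.Stop(), the abandoned command keeps running and must be stopped separately):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimedOut = errors.New("function timed out")

// runWithTimeout runs f in a goroutine and abandons it after timeout.
// As in the scp case above, an abandoned command keeps running and
// must be stopped separately.
func runWithTimeout(f func() error, timeout time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- f() }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return errTimedOut
	}
}

func main() {
	err := runWithTimeout(func() error {
		time.Sleep(2 * time.Second) // stand-in for a slow scp
		return nil
	}, 1*time.Second)
	fmt.Println(err) // function timed out
}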
Example #10
// CreateHost spawns a host with the given options.
func (sm Spawn) CreateHost(so Options) (*host.Host, error) {

	// load in the appropriate distro
	d, err := distro.FindOne(distro.ById(so.Distro))
	if err != nil {
		return nil, err
	}

	// get the appropriate cloud manager
	cloudManager, err := providers.GetCloudManager(d.Provider, sm.settings)
	if err != nil {
		return nil, err
	}

	// spawn the host
	h, err := cloudManager.SpawnInstance(d, so.UserName, true)
	if err != nil {
		return nil, err
	}

	// set the expiration time for the host
	expireTime := h.CreationTime.Add(DefaultExpiration)
	err = h.SetExpirationTime(expireTime)
	if err != nil {
		return h, evergreen.Logger.Errorf(slogger.ERROR,
			"error setting expiration on host %v: %v", h.Id, err)
	}

	// set the user data, if applicable
	if so.UserData != "" {
		err = h.SetUserData(so.UserData)
		if err != nil {
			return h, evergreen.Logger.Errorf(slogger.ERROR,
				"Failed setting userData on host %v: %v", h.Id, err)
		}
	}

	// create a hostinit to take care of setting up the host
	init := &hostinit.HostInit{
		Settings: sm.settings,
	}

	// for making sure the host doesn't take too long to spawn
	startTime := time.Now()

	// spin until the host is ready for its setup script to be run
	for {

		// make sure we haven't been spinning for too long
		if time.Since(startTime) > 15*time.Minute {
			if err := h.SetDecommissioned(); err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "error decommissioning host %v: %v", h.Id, err)
			}
			return nil, fmt.Errorf("host took too long to come up")
		}

		time.Sleep(5 * time.Second)

		evergreen.Logger.Logf(slogger.INFO, "Checking if host %v is up and ready", h.Id)

		// see if the host is ready for its setup script to be run
		ready, err := init.IsHostReady(h)
		if err != nil {
			if err := h.SetDecommissioned(); err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "error decommissioning host %v: %v", h.Id, err)
			}
			return nil, fmt.Errorf("error checking on host %v; decommissioning to save resources: %v",
				h.Id, err)
		}

		// if the host is ready, move on to running the setup script
		if ready {
			break
		}

	}

	evergreen.Logger.Logf(slogger.INFO, "Host %v is ready for its setup script to be run", h.Id)

	// add any extra user-specified data into the setup script
	if h.Distro.UserData.File != "" {
		userDataCmd := fmt.Sprintf("echo \"%v\" > %v\n",
			strings.Replace(so.UserData, "\"", "\\\"", -1), h.Distro.UserData.File)
		// prepend the setup script to add the userdata file
		if strings.HasPrefix(h.Distro.Setup, "#!") {
			firstLF := strings.Index(h.Distro.Setup, "\n")
			h.Distro.Setup = h.Distro.Setup[0:firstLF+1] + userDataCmd + h.Distro.Setup[firstLF+1:]
		} else {
			h.Distro.Setup = userDataCmd + h.Distro.Setup
		}
	}

	// modify the setup script to add the user's public key
	h.Distro.Setup += fmt.Sprintf("\necho \"\n%v\" >> ~%v/.ssh/authorized_keys\n",
		so.PublicKey, h.Distro.User)

	// replace expansions in the script
	exp := command.NewExpansions(init.Settings.Expansions)
	h.Distro.Setup, err = exp.ExpandString(h.Distro.Setup)
	if err != nil {
		return nil, fmt.Errorf("expansions error: %v", err)
	}

	// provision the host
	err = init.ProvisionHost(h)
	if err != nil {
		return nil, fmt.Errorf("error provisioning host %v: %v", h.Id, err)
	}

	return h, nil
}
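The userdata splice above is careful to keep a "#!" interpreter line first. A self-contained illustration of just that splice, with a made-up script and userdata command:

package main

import (
	"fmt"
	"strings"
)

func main() {
	setup := "#!/bin/bash\necho provisioning\n"
	userDataCmd := "echo \"mydata\" > /tmp/userdata\n"

	// insert after the shebang line so the interpreter line stays
	// first, mirroring the splice in CreateHost above
	if strings.HasPrefix(setup, "#!") {
		firstLF := strings.Index(setup, "\n")
		setup = setup[:firstLF+1] + userDataCmd + setup[firstLF+1:]
	} else {
		setup = userDataCmd + setup
	}
	fmt.Print(setup)
}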
Example #11
func (as *APIServer) hostReady(w http.ResponseWriter, r *http.Request) {
	hostObj, err := getHostFromRequest(r)
	if err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, err.Error())
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// if the host failed
	setupSuccess := mux.Vars(r)["status"]
	if setupSuccess == evergreen.HostStatusFailed {
		evergreen.Logger.Logf(slogger.INFO, "Initializing host %v failed", hostObj.Id)
		// send notification to the Evergreen team about this provisioning failure
		subject := fmt.Sprintf("%v Evergreen provisioning failure on %v", notify.ProvisionFailurePreface, hostObj.Distro.Id)

		hostLink := fmt.Sprintf("%v/host/%v", as.Settings.Ui.Url, hostObj.Id)
		message := fmt.Sprintf("Provisioning failed on %v host -- %v (%v). %v",
			hostObj.Distro.Id, hostObj.Id, hostObj.Host, hostLink)
		if err = notify.NotifyAdmins(subject, message, &as.Settings); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Error sending email: %v", err)
		}

		// get/store setup logs
		setupLog, err := ioutil.ReadAll(r.Body)
		if err != nil {
			as.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}

		event.LogProvisionFailed(hostObj.Id, string(setupLog))

		err = hostObj.SetUnprovisioned()
		if err != nil {
			as.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}

		as.WriteJSON(w, http.StatusOK, fmt.Sprintf("Initializing host %v failed", hostObj.Id))
		return
	}

	cloudManager, err := providers.GetCloudManager(hostObj.Provider, &as.Settings)
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		subject := fmt.Sprintf("%v Evergreen provisioning completion failure on %v",
			notify.ProvisionFailurePreface, hostObj.Distro.Id)
		message := fmt.Sprintf("Failed to get cloud manager for host %v with provider %v: %v",
			hostObj.Id, hostObj.Provider, err)
		if err = notify.NotifyAdmins(subject, message, &as.Settings); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Error sending email: %v", err)
		}
		return
	}

	dns, err := cloudManager.GetDNSName(hostObj)
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	// mark host as provisioned
	if err := hostObj.MarkAsProvisioned(); err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	evergreen.Logger.Logf(slogger.INFO, "Successfully marked host “%v” with dns “%v” as provisioned", hostObj.Id, dns)
}
Example #12
// ensureHasRequiredFields checks that the distro configuration has all the required fields
func ensureHasRequiredFields(d *distro.Distro, s *evergreen.Settings) []ValidationError {
	errs := []ValidationError{}

	if d.Id == "" {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' cannot be blank", distro.IdKey),
			Level:   Error,
		})
	}

	if d.Arch == "" {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' cannot be blank", distro.ArchKey),
			Level:   Error,
		})
	}

	if d.User == "" {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' cannot be blank", distro.UserKey),
			Level:   Error,
		})
	}

	if d.WorkDir == "" {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' cannot be blank", distro.WorkDirKey),
			Level:   Error,
		})
	}

	if d.SSHKey == "" && d.Provider != static.ProviderName {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' cannot be blank", distro.SSHKeyKey),
			Level:   Error,
		})
	}

	if d.Provider == "" {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' cannot be blank", distro.ProviderKey),
			Level:   Error,
		})
		return errs
	}

	mgr, err := providers.GetCloudManager(d.Provider, s)
	if err != nil {
		errs = append(errs, ValidationError{
			Message: err.Error(),
			Level:   Error,
		})
		return errs
	}

	settings := mgr.GetSettings()

	if err = mapstructure.Decode(d.ProviderSettings, settings); err != nil {
		errs = append(errs, ValidationError{
			Message: fmt.Sprintf("distro '%v' decode error: %v", distro.ProviderSettingsKey, err),
			Level:   Error,
		})
		return errs
	}

	if err := settings.Validate(); err != nil {
		errs = append(errs, ValidationError{Error, err.Error()})
	}

	return errs
}
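The provider check relies on mapstructure.Decode (from github.com/mitchellh/mapstructure) to map the distro's free-form ProviderSettings document onto the provider's typed settings struct before validating it. A minimal, runnable illustration of that call; the ec2Settings struct and its fields are invented for the example:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Hypothetical provider settings struct, standing in for whatever
// mgr.GetSettings() returns for a real provider.
type ec2Settings struct {
	AMI          string `mapstructure:"ami"`
	InstanceType string `mapstructure:"instance_type"`
}

func main() {
	providerSettings := map[string]interface{}{
		"ami":           "ami-12345678",
		"instance_type": "m3.medium",
	}
	settings := &ec2Settings{}
	if err := mapstructure.Decode(providerSettings, settings); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", settings)
}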
Example #13
File: spawn.go Project: tychoish/evergreen
// CreateHost spawns a host with the given options.
func (sm Spawn) CreateHost(so Options, owner *user.DBUser) error {

	// load in the appropriate distro
	d, err := distro.FindOne(distro.ById(so.Distro))
	if err != nil {
		return err
	}
	// add any extra user-specified data into the setup script
	if d.UserData.File != "" {
		userDataCmd := fmt.Sprintf("echo \"%v\" > %v\n",
			strings.Replace(so.UserData, "\"", "\\\"", -1), d.UserData.File)
		// prepend the setup script to add the userdata file
		if strings.HasPrefix(d.Setup, "#!") {
			firstLF := strings.Index(d.Setup, "\n")
			d.Setup = d.Setup[0:firstLF+1] + userDataCmd + d.Setup[firstLF+1:]
		} else {
			d.Setup = userDataCmd + d.Setup
		}
	}

	// modify the setup script to add the user's public key
	d.Setup += fmt.Sprintf("\necho \"\n%v\" >> ~%v/.ssh/authorized_keys\n", so.PublicKey, d.User)

	// replace expansions in the script
	exp := command.NewExpansions(sm.settings.Expansions)
	d.Setup, err = exp.ExpandString(d.Setup)
	if err != nil {
		return fmt.Errorf("expansions error: %v", err)
	}

	// fake out replacing spot instances with on-demand equivalents
	if d.Provider == ec2.SpotProviderName {
		d.Provider = ec2.OnDemandProviderName
	}

	// get the appropriate cloud manager
	cloudManager, err := providers.GetCloudManager(d.Provider, sm.settings)
	if err != nil {
		return err
	}

	// spawn the host
	provisionOptions := &host.ProvisionOptions{
		LoadCLI: true,
		TaskId:  so.TaskId,
		OwnerId: owner.Id,
	}
	expiration := DefaultExpiration
	hostOptions := cloud.HostOptions{
		ProvisionOptions:   provisionOptions,
		UserName:           so.UserName,
		ExpirationDuration: &expiration,
		UserData:           so.UserData,
		UserHost:           true,
	}

	_, err = cloudManager.SpawnInstance(d, hostOptions)
	if err != nil {
		return err
	}

	return nil
}