Example #1
func (uis *UIServer) hostPage(w http.ResponseWriter, r *http.Request) {
	projCtx := MustHaveProjectContext(r)

	vars := mux.Vars(r)
	id := vars["host_id"]

	h, err := host.FindOne(host.ById(id))
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	if h == nil {
		http.Error(w, "Host not found", http.StatusNotFound)
		return
	}

	events, err := event.Find(event.MostRecentHostEvents(id, 50))
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	flashes := PopFlashes(uis.CookieStore, r, w)
	uis.WriteHTML(w, http.StatusOK, struct {
		Flashes     []interface{}
		Events      []event.Event
		Host        *host.Host
		User        *user.DBUser
		ProjectData projectContext
	}{flashes, events, h, GetUser(r), projCtx},
		"base", "host.html", "base_angular.html", "menu.html")
}
Example #2
// GetDistro loads the task's distro and sends it to the requester.
func (as *APIServer) GetDistro(w http.ResponseWriter, r *http.Request) {
	task := MustHaveTask(r)

	// Get the distro for this task
	h, err := host.FindOne(host.ByRunningTaskId(task.Id))
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	// Fall back to checking host field on task doc
	if h == nil && len(task.HostId) > 0 {
		h, err = host.FindOne(host.ById(task.HostId))
		if err != nil {
			as.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		// guard against a nil host before touching its fields
		if h != nil {
			h.SetRunningTask(task.Id, h.AgentRevision, h.TaskDispatchTime)
		}
	}

	if h == nil {
		message := fmt.Errorf("No host found running task %v", task.Id)
		as.LoggedError(w, r, http.StatusInternalServerError, message)
		return
	}

	// agent can't properly unmarshal provider settings map
	h.Distro.ProviderSettings = nil
	as.WriteJSON(w, http.StatusOK, h.Distro)
}
Example #3
func (as *APIServer) StartTask(w http.ResponseWriter, r *http.Request) {
	task := MustHaveTask(r)

	if !getGlobalLock(r.RemoteAddr, task.Id) {
		as.LoggedError(w, r, http.StatusInternalServerError, ErrLockTimeout)
		return
	}
	defer releaseGlobalLock(r.RemoteAddr, task.Id)

	evergreen.Logger.Logf(slogger.INFO, "Marking task started: %v", task.Id)

	taskStartInfo := &apimodels.TaskStartRequest{}
	if err := util.ReadJSONInto(r.Body, taskStartInfo); err != nil {
		http.Error(w, fmt.Sprintf("Error reading task start request for %v: %v", task.Id, err), http.StatusBadRequest)
		return
	}

	if err := task.MarkStart(); err != nil {
		message := fmt.Errorf("Error marking task '%v' started: %v", task.Id, err)
		as.LoggedError(w, r, http.StatusInternalServerError, message)
		return
	}

	h, err := host.FindOne(host.ByRunningTaskId(task.Id))
	if err != nil {
		message := fmt.Errorf("Error finding host running task %v: %v", task.Id, err)
		as.LoggedError(w, r, http.StatusInternalServerError, message)
		return
	}

	// Fall back to checking host field on task doc
	if h == nil && len(task.HostId) > 0 {
		evergreen.Logger.Logf(slogger.DEBUG, "Falling back to host field of task: %v", task.Id)
		h, err = host.FindOne(host.ById(task.HostId))
		if err != nil {
			as.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		// guard against a nil host before touching its fields
		if h != nil {
			h.SetRunningTask(task.Id, h.AgentRevision, h.TaskDispatchTime)
		}
	}

	if h == nil {
		message := fmt.Errorf("No host found running task %v", task.Id)
		as.LoggedError(w, r, http.StatusInternalServerError, message)
		return
	}

	if err := h.SetTaskPid(taskStartInfo.Pid); err != nil {
		message := fmt.Errorf("Error calling set pid on task %v : %v", task.Id, err)
		as.LoggedError(w, r, http.StatusInternalServerError, message)
		return
	}
	as.WriteJSON(w, http.StatusOK, fmt.Sprintf("Task %v started on host %v", task.Id, h.Id))
}
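Examples #2 and #3 repeat the same two-step lookup: prefer the hosts collection's record of which host is running the task, then fall back to the host id stored on the task document. A minimal sketch of a shared helper, assuming MustHaveTask returns a *model.Task as the surrounding code suggests (the name findHostRunningTask is hypothetical, not part of the source):

// findHostRunningTask factors out the lookup pattern used by GetDistro and
// StartTask: query by running task id first, then fall back to the host
// field on the task document.
func findHostRunningTask(t *model.Task) (*host.Host, error) {
	h, err := host.FindOne(host.ByRunningTaskId(t.Id))
	if err != nil {
		return nil, err
	}
	if h == nil && len(t.HostId) > 0 {
		h, err = host.FindOne(host.ById(t.HostId))
		if err != nil {
			return nil, err
		}
		if h != nil {
			// keep the host doc consistent with what the task doc claims
			h.SetRunningTask(t.Id, h.AgentRevision, h.TaskDispatchTime)
		}
	}
	return h, nil
}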
Example #4
func getHostData(hostId string) (*uiHost, error) {
	hostAsUI := &uiHost{}
	dbHost, err := host.FindOne(host.ById(hostId))
	if err != nil {
		return nil, err
	}
	if dbHost == nil {
		return nil, fmt.Errorf("Could not find host")
	}
	hostAsUI.Host = *dbHost
	return hostAsUI, nil
}
Example #5
// Get the host with the id specified in the request
func getHostFromRequest(r *http.Request) (*host.Host, error) {
	// get the host tag from the request.
	vars := mux.Vars(r)
	tag := vars["tag"]
	if len(tag) == 0 {
		return nil, fmt.Errorf("no host tag supplied")
	}
	// find the host
	host, err := host.FindOne(host.ById(tag))
	if err != nil {
		return nil, err
	}
	if host == nil {
		return nil, fmt.Errorf("no host with tag: %v", tag)
	}
	return host, nil
}
Example #6
// hostInfo returns info on the specified host
func (as *APIServer) hostInfo(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	instanceId := vars["instance_id"]

	host, err := host.FindOne(host.ById(instanceId))
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	if host == nil {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}

	as.WriteJSON(w, http.StatusOK, spawnResponse{HostInfo: *host})
}
Example #7
func (as *APIServer) modifyHost(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	instanceId := vars["instance_id"]
	hostAction := r.FormValue("action")

	host, err := host.FindOne(host.ById(instanceId))
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}
	if host == nil {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}

	user := GetUser(r)
	if user == nil || user.Id != host.StartedBy {
		message := fmt.Sprintf("Only %v is authorized to terminate this host", host.StartedBy)
		http.Error(w, message, http.StatusUnauthorized)
		return
	}

	switch hostAction {
	case "terminate":
		if host.Status == evergreen.HostTerminated {
			message := fmt.Sprintf("Host %v is already terminated", host.Id)
			http.Error(w, message, http.StatusBadRequest)
			return
		}

		cloudHost, err := providers.GetCloudHost(host, &as.Settings)
		if err != nil {
			as.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		if err = cloudHost.TerminateInstance(); err != nil {
			as.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Failed to terminate spawn host: %v", err))
			return
		}
		as.WriteJSON(w, http.StatusOK, spawnResponse{HostInfo: *host})
	default:
		http.Error(w, fmt.Sprintf("Unrecognized action %v", hostAction), http.StatusBadRequest)
	}

}
Example #8
func (uis *UIServer) modifyHost(w http.ResponseWriter, r *http.Request) {
	_ = MustHaveUser(r)

	vars := mux.Vars(r)
	id := vars["host_id"]

	host, err := host.FindOne(host.ById(id))
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	if host == nil {
		http.Error(w, "Host not found", http.StatusNotFound)
		return
	}

	opts := &uiParams{}
	err = util.ReadJSONInto(r.Body, opts)
	if err != nil {
		uis.LoggedError(w, r, http.StatusBadRequest, err)
		return
	}

	// determine what action needs to be taken
	switch opts.Action {
	case "updateStatus":
		currentStatus := host.Status
		newStatus := opts.Status
		if !util.SliceContains(validUpdateToStatuses, newStatus) {
			http.Error(w, fmt.Sprintf("'%v' is not a valid status", newStatus), http.StatusBadRequest)
			return
		}
		err := host.SetStatus(newStatus)
		if err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Error updating host: %v", err))
			return
		}
		msg := NewSuccessFlash(fmt.Sprintf("Host status successfully updated from '%v' to '%v'", currentStatus, host.Status))
		PushFlash(uis.CookieStore, r, w, msg)
		uis.WriteJSON(w, http.StatusOK, "Successfully updated host status")
	default:
		uis.WriteJSON(w, http.StatusBadRequest, fmt.Sprintf("Unrecognized action: %v", opts.Action))
	}
}
Example #9
// cleanUpTask cleans up a single doomed task
func cleanUpTask(wrapper doomedTaskWrapper, projects map[string]model.Project) error {

	// find the appropriate project for the task
	project, ok := projects[wrapper.task.Project]
	if !ok {
		return fmt.Errorf("could not find project %v for task %v",
			wrapper.task.Project, wrapper.task.Id)
	}

	// get the host for the task
	host, err := host.FindOne(host.ById(wrapper.task.HostId))
	if err != nil {
		return fmt.Errorf("error finding host %v for task %v: %v",
			wrapper.task.HostId, wrapper.task.Id, err)
	}

	// if there's no relevant host, something went wrong
	if host == nil {
		evergreen.Logger.Logf(slogger.ERROR, "no entry found for host %v", wrapper.task.HostId)
		return wrapper.task.MarkUnscheduled()
	}

	// sanity check that the host is actually running the task
	if host.RunningTask != wrapper.task.Id {
		return fmt.Errorf("task %v says it is running on host %v, but the"+
			" host thinks it is running task %v", wrapper.task.Id, host.Id,
			host.RunningTask)
	}

	// take different action, depending on the type of task death
	switch wrapper.reason {
	case HeartbeatTimeout:
		err = cleanUpTimedOutHeartbeat(wrapper.task, project, host)
	default:
		return fmt.Errorf("unknown reason for cleaning up task: %v", wrapper.reason)
	}

	if err != nil {
		return fmt.Errorf("error cleaning up task %v: %v", wrapper.task.Id, err)
	}

	return nil

}
Example #10
func TestTaskEndEndpoint(t *testing.T) {
	setupTlsConfigs(t)
	for tlsString, tlsConfig := range tlsConfigs {
		testTask, _, err := setupAPITestData(testConfig, "random", "linux-64", NoPatch, t)
		testutil.HandleTestingErr(err, t, "Couldn't make test data: %v", err)

		Convey("With a live api server, agent, and test task over "+tlsString, t, func() {
			testServer, err := apiserver.CreateTestServer(testConfig, tlsConfig, plugin.APIPlugins, Verbose)
			testutil.HandleTestingErr(err, t, "Couldn't create apiserver: %v", err)
			testAgent, err := createAgent(testServer, testTask)
			testutil.HandleTestingErr(err, t, "failed to create agent: %v")
			testAgent.heartbeater.Interval = 10 * time.Second
			testAgent.StartBackgroundActions(&NoopSignalHandler{})

			Convey("calling end() should update task's/host's status properly "+
				"and start running the next task", func() {
				subsequentTaskId := testTask.Id + "Two"
				details := &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}
				taskEndResp, err := testAgent.End(details)
				time.Sleep(1 * time.Second)
				So(err, ShouldBeNil)

				taskUpdate, err := model.FindTask(testTask.Id)
				So(err, ShouldBeNil)
				So(taskUpdate.Status, ShouldEqual, evergreen.TaskSucceeded)

				testHost, err := host.FindOne(host.ById(testTask.HostId))
				So(err, ShouldBeNil)
				So(testHost.RunningTask, ShouldEqual, subsequentTaskId)

				taskUpdate, err = model.FindTask(subsequentTaskId)
				So(err, ShouldBeNil)
				So(taskUpdate.Status, ShouldEqual, evergreen.TaskDispatched)

				So(taskEndResp, ShouldNotBeNil)
				So(taskEndResp.RunNext, ShouldBeTrue)
				So(taskEndResp.TaskId, ShouldEqual, subsequentTaskId)
			})
		})
	}
}
Example #11
// taskFinished constructs the appropriate response for each markEnd
// request the API server receives from an agent. The two possible responses are:
// 1. Inform the agent of another task to run
// 2. Inform the agent that it should terminate immediately
// The first case is the usual expected flow. The second case however, could
// occur for a number of reasons including:
// a. The version of the agent running on the remote machine is stale
// b. The host the agent is running on has been decommissioned
// c. There is no currently queued dispatchable and activated task
// In any of these aforementioned cases, the agent in question should terminate
// immediately and cease running any tasks on its host.
func (as *APIServer) taskFinished(w http.ResponseWriter, task *model.Task, finishTime time.Time) {
	taskEndResponse := &apimodels.TaskEndResponse{}

	// a. fetch the host this task just completed on to see if it's
	// now decommissioned
	host, err := host.FindOne(host.ById(task.HostId))
	if err != nil {
		message := fmt.Sprintf("Error locating host for task %v - set to %v: %v", task.Id,
			task.HostId, err)
		evergreen.Logger.Logf(slogger.ERROR, message)
		taskEndResponse.Message = message
		as.WriteJSON(w, http.StatusInternalServerError, taskEndResponse)
		return
	}
	if host == nil {
		message := fmt.Sprintf("Error finding host running for task %v - set to %v", task.Id,
			task.HostId)
		evergreen.Logger.Logf(slogger.ERROR, message)
		taskEndResponse.Message = message
		as.WriteJSON(w, http.StatusInternalServerError, taskEndResponse)
		return
	}
	if host.Status == evergreen.HostDecommissioned || host.Status == evergreen.HostQuarantined {
		markHostRunningTaskFinished(host, task, "")
		message := fmt.Sprintf("Host %v - running %v - is in state '%v'. Agent will terminate",
			task.HostId, task.Id, host.Status)
		evergreen.Logger.Logf(slogger.INFO, message)
		taskEndResponse.Message = message
		as.WriteJSON(w, http.StatusOK, taskEndResponse)
		return
	}

	// b. check if the agent needs to be rebuilt
	taskRunnerInstance := taskrunner.NewTaskRunner(&as.Settings)
	agentRevision, err := taskRunnerInstance.HostGateway.GetAgentRevision()
	if err != nil {
		markHostRunningTaskFinished(host, task, "")
		evergreen.Logger.Logf(slogger.ERROR, "failed to get agent revision: %v", err)
		taskEndResponse.Message = err.Error()
		as.WriteJSON(w, http.StatusInternalServerError, taskEndResponse)
		return
	}
	if host.AgentRevision != agentRevision {
		markHostRunningTaskFinished(host, task, "")
		message := fmt.Sprintf("Remote agent needs to be rebuilt")
		evergreen.Logger.Logf(slogger.INFO, message)
		taskEndResponse.Message = message
		as.WriteJSON(w, http.StatusOK, taskEndResponse)
		return
	}

	// c. fetch the task's distro queue to dispatch the next pending task
	nextTask, err := getNextDistroTask(task, host)
	if err != nil {
		markHostRunningTaskFinished(host, task, "")
		evergreen.Logger.Logf(slogger.ERROR, err.Error())
		taskEndResponse.Message = err.Error()
		as.WriteJSON(w, http.StatusOK, taskEndResponse)
		return
	}
	if nextTask == nil {
		markHostRunningTaskFinished(host, task, "")
		taskEndResponse.Message = "No next task on queue"
	} else {
		taskEndResponse.Message = "Proceed with next task"
		taskEndResponse.RunNext = true
		taskEndResponse.TaskId = nextTask.Id
		taskEndResponse.TaskSecret = nextTask.Secret
		markHostRunningTaskFinished(host, task, nextTask.Id)
	}

	// give the agent the green light to keep churning
	as.WriteJSON(w, http.StatusOK, taskEndResponse)
}
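The doc comment above describes a small protocol between the API server and the agent. A rough sketch of the agent-side half, using only the TaskEndResponse fields this handler sets (handleTaskEnd and runTask are illustrative names, not the actual agent code):

// handleTaskEnd sketches how an agent might act on the markEnd response.
func handleTaskEnd(resp *apimodels.TaskEndResponse) {
	if resp == nil || !resp.RunNext {
		// stale agent revision, decommissioned host, or an empty queue:
		// the server is telling the agent to terminate rather than poll
		return
	}
	// the usual flow: run the next task the server dispatched
	runTask(resp.TaskId, resp.TaskSecret) // hypothetical agent entry point
}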
Example #12
func (uis *UIServer) taskPage(w http.ResponseWriter, r *http.Request) {
	projCtx := MustHaveProjectContext(r)

	if projCtx.Task == nil {
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}

	if projCtx.Build == nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("build not found"))
		return
	}

	if projCtx.Version == nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("version not found"))
		return
	}

	if projCtx.ProjectRef == nil {
		evergreen.Logger.Logf(slogger.ERROR, "Project ref is nil")
		uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("version not found"))
		return
	}

	executionStr := mux.Vars(r)["execution"]
	archived := false
	if executionStr != "" {
		// otherwise we can look in either tasks or old_tasks
		// where tasks are looked up in the old_tasks collection with key made up of
		// the original key and the execution number joined by an "_"
		// and the tasks are looked up in the tasks collection by key and execution
		// number, so that we avoid finding the wrong execution in the tasks
		// collection
		execution, err := strconv.Atoi(executionStr)
		if err != nil {
			http.Error(w, fmt.Sprintf("Bad execution number: %v", executionStr), http.StatusBadRequest)
			return
		}
		oldTaskId := fmt.Sprintf("%v_%v", projCtx.Task.Id, executionStr)
		taskFromDb, err := model.FindOneOldTask(bson.M{"_id": oldTaskId}, db.NoProjection, db.NoSort)
		if err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		archived = true
		if taskFromDb == nil {
			// for backwards compatibility with tasks without an execution
			if execution == 0 {
				taskFromDb, err = model.FindOneTask(bson.M{
					"$and": []bson.M{
						bson.M{"_id": projCtx.Task.Id},
						bson.M{"$or": []bson.M{bson.M{"execution": 0}, bson.M{"execution": nil}}}}},
					db.NoProjection,
					db.NoSort)
			} else {
				taskFromDb, err = model.FindOneTask(bson.M{"_id": projCtx.Task.Id, "execution": execution},
					db.NoProjection, db.NoSort)
			}
			if err != nil {
				uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Error finding old task: %v", err))
				return
			}
		}
		if taskFromDb == nil {
			http.Error(w, "Task not found", http.StatusNotFound)
			return
		}
		projCtx.Task = taskFromDb
	}

	// Build a struct containing the subset of task data needed for display in the UI

	task := uiTaskData{
		Id:                  projCtx.Task.Id,
		DisplayName:         projCtx.Task.DisplayName,
		Revision:            projCtx.Task.Revision,
		Status:              projCtx.Task.Status,
		TaskEndDetails:      projCtx.Task.Details,
		Distro:              projCtx.Task.DistroId,
		BuildVariant:        projCtx.Task.BuildVariant,
		BuildId:             projCtx.Task.BuildId,
		Activated:           projCtx.Task.Activated,
		Restarts:            projCtx.Task.Restarts,
		Execution:           projCtx.Task.Execution,
		Requester:           projCtx.Task.Requester,
		StartTime:           projCtx.Task.StartTime.UnixNano(),
		DispatchTime:        projCtx.Task.DispatchTime.UnixNano(),
		FinishTime:          projCtx.Task.FinishTime.UnixNano(),
		ExpectedDuration:    projCtx.Task.ExpectedDuration,
		PushTime:            projCtx.Task.PushTime,
		TimeTaken:           projCtx.Task.TimeTaken,
		Priority:            projCtx.Task.Priority,
		TestResults:         projCtx.Task.TestResults,
		Aborted:             projCtx.Task.Aborted,
		CurrentTime:         time.Now().UnixNano(),
		BuildVariantDisplay: projCtx.Build.DisplayName,
		Message:             projCtx.Version.Message,
		Project:             projCtx.Version.Identifier,
		Author:              projCtx.Version.Author,
		AuthorEmail:         projCtx.Version.AuthorEmail,
		VersionId:           projCtx.Version.Id,
		RepoOwner:           projCtx.ProjectRef.Owner,
		Repo:                projCtx.ProjectRef.Repo,
		Archived:            archived,
	}

	deps, taskWaiting, err := getTaskDependencies(projCtx.Task)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	task.DependsOn = deps
	task.TaskWaiting = taskWaiting

	// Activating and deactivating tasks should clear out the
	// MinQueuePos but just in case, lets not show it if we shouldn't
	if projCtx.Task.Status == evergreen.TaskUndispatched && projCtx.Task.Activated {
		task.MinQueuePos = projCtx.Task.MinQueuePos
	}

	if projCtx.Task.HostId != "" {
		task.HostDNS = projCtx.Task.HostId
		task.HostId = projCtx.Task.HostId
		taskHost, err := host.FindOne(host.ById(projCtx.Task.HostId))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if taskHost != nil {
			task.HostDNS = taskHost.Host
		}
	}

	if projCtx.Patch != nil {
		taskOnBaseCommit, err := projCtx.Task.FindTaskOnBaseCommit()
		if err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		taskPatch := &uiPatch{Patch: *projCtx.Patch}
		if taskOnBaseCommit != nil {
			taskPatch.BaseTaskId = taskOnBaseCommit.Id
		}
		taskPatch.StatusDiffs = model.StatusDiffTasks(taskOnBaseCommit, projCtx.Task).Tests
		task.PatchInfo = taskPatch
	}

	flashes := PopFlashes(uis.CookieStore, r, w)

	pluginContext := projCtx.ToPluginContext(uis.Settings, GetUser(r))
	pluginContent := getPluginDataAndHTML(uis, plugin.TaskPage, pluginContext)

	uis.WriteHTML(w, http.StatusOK, struct {
		ProjectData   projectContext
		User          *user.DBUser
		Flashes       []interface{}
		Task          uiTaskData
		PluginContent pluginData
	}{projCtx, GetUser(r), flashes, task, pluginContent}, "base",
		"task.html", "base_angular.html", "menu.html")
}
Example #13
func startEC2Instance(ec2Handle *ec2.EC2, options *ec2.RunInstancesOptions,
	intentHost *host.Host) (*host.Host, *ec2.RunInstancesResp, error) {
	// start the instance
	resp, err := ec2Handle.RunInstances(options)

	if err != nil {
		// remove the intent host document
		rmErr := intentHost.Remove()
		if rmErr != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Could not remove intent host "+
				"“%v”: %v", intentHost.Id, rmErr)
		}
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR,
			"EC2 RunInstances API call returned error: %v", err)
	}

	evergreen.Logger.Logf(slogger.DEBUG, "Spawned %v instance", len(resp.Instances))

	// the instance should have been successfully spawned
	instance := resp.Instances[0]
	evergreen.Logger.Logf(slogger.DEBUG, "Started %v", instance.InstanceId)
	evergreen.Logger.Logf(slogger.DEBUG, "Key name: %v", string(options.KeyName))

	// find old intent host; check the error before the nil host so a lookup
	// failure is reported as such
	host, err := host.FindOne(host.ById(intentHost.Id))
	if err != nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host “%v” due to error: %v",
			intentHost.Id, err)
	}
	if host == nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host “%v”", intentHost.Id)
	}

	// we found the old document now we can insert the new one
	host.Id = instance.InstanceId
	err = host.Insert()
	if err != nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert "+
			"updated host information for “%v” with “%v”: %v", intentHost.Id,
			host.Id, err)
	}

	// remove the intent host document
	err = intentHost.Remove()
	if err != nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not remove "+
			"insert host “%v” (replaced by “%v”): %v", intentHost.Id, host.Id,
			err)
	}

	var infoResp *ec2.DescribeInstancesResp
	instanceInfoRetryCount := 0
	instanceInfoMaxRetries := 5
	for {
		infoResp, err = ec2Handle.DescribeInstances([]string{instance.InstanceId}, nil)
		if err != nil {
			instanceInfoRetryCount++
			if instanceInfoRetryCount == instanceInfoMaxRetries {
				evergreen.Logger.Errorf(slogger.ERROR, "There was an error querying for the "+
					"instance's information and retries are exhausted. The insance may "+
					"be up.")
				return nil, resp, err
			}

			evergreen.Logger.Errorf(slogger.DEBUG, "There was an error querying for the "+
				"instance's information. Retrying in 30 seconds. Error: %v", err)
			time.Sleep(30 * time.Second)
			continue
		}
		break
	}

	reservations := infoResp.Reservations
	if len(reservations) < 1 {
		return nil, resp, fmt.Errorf("Reservation was returned as nil, you " +
			"may have to check manually")
	}

	instancesInfo := reservations[0].Instances
	if len(instancesInfo) < 1 {
		return nil, resp, fmt.Errorf("Reservation appears to have no " +
			"associated instances")
	}
	return host, resp, nil
}
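Examples #13, #14, and #20 all follow the same "intent host" bookkeeping: insert a placeholder host document before calling the cloud provider, and only replace it with a document keyed by the real instance id once the provider call succeeds, cleaning up the placeholder on both the success and failure paths. A condensed sketch of that lifecycle, assuming the host.Host API shown in these examples (spawnInstance stands in for the provider-specific call):

func spawnWithIntentHost(intentHost *host.Host) (*host.Host, error) {
	// 1. record the spawn intent before touching the cloud API
	if err := intentHost.Insert(); err != nil {
		return nil, err
	}
	// 2. make the provider call; on failure, remove the intent doc
	instanceId, err := spawnInstance(intentHost) // hypothetical provider call
	if err != nil {
		if rmErr := intentHost.Remove(); rmErr != nil {
			evergreen.Logger.Logf(slogger.ERROR, "could not remove intent host %v: %v",
				intentHost.Id, rmErr)
		}
		return nil, err
	}
	// 3. re-load the intent doc, store it under the real instance id, and
	// remove the placeholder now that the real record exists
	h, err := host.FindOne(host.ById(intentHost.Id))
	if err != nil {
		return nil, err
	}
	if h == nil {
		return nil, fmt.Errorf("can't locate record inserted for intent host %v", intentHost.Id)
	}
	h.Id = instanceId
	if err := h.Insert(); err != nil {
		return nil, err
	}
	if err := intentHost.Remove(); err != nil {
		return nil, err
	}
	return h, nil
}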
Example #14
// SpawnInstance creates a new droplet for the given distro.
func (digoMgr *DigitalOceanManager) SpawnInstance(d *distro.Distro, owner string, userHost bool) (*host.Host, error) {
	if d.Provider != ProviderName {
		return nil, fmt.Errorf("Can't spawn instance of %v for distro %v: provider is %v", ProviderName, d.Id, d.Provider)
	}

	digoSettings := &Settings{}
	if err := mapstructure.Decode(d.ProviderSettings, digoSettings); err != nil {
		return nil, fmt.Errorf("Error decoding params for distro %v: %v", d.Id, err)
	}

	if err := digoSettings.Validate(); err != nil {
		return nil, fmt.Errorf("Invalid DigitalOcean settings in distro %v: %v", d.Id, err)
	}

	instanceName := "droplet-" +
		fmt.Sprintf("%v", rand.New(rand.NewSource(time.Now().UnixNano())).Int())
	intentHost := &host.Host{
		Id:               instanceName,
		User:             d.User,
		Distro:           *d,
		Tag:              instanceName,
		CreationTime:     time.Now(),
		Status:           evergreen.HostUninitialized,
		TerminationTime:  util.ZeroTime,
		TaskDispatchTime: util.ZeroTime,
		Provider:         ProviderName,
		StartedBy:        owner,
	}

	dropletReq := &digo.Droplet{
		SizeId:   digoSettings.SizeId,
		ImageId:  digoSettings.ImageId,
		RegionId: digoSettings.RegionId,
		Name:     instanceName,
		SshKey:   digoSettings.SSHKeyId,
	}

	if err := intentHost.Insert(); err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert intent "+
			"host '%v': %v", intentHost.Id, err)
	}

	evergreen.Logger.Logf(slogger.DEBUG, "Successfully inserted intent host '%v' "+
		"for distro '%v' to signal cloud instance spawn intent", instanceName,
		d.Id)

	newDroplet, err := digoMgr.account.CreateDroplet(dropletReq)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "DigitalOcean create droplet API call failed"+
			" for intent host '%v': %v", intentHost.Id, err)

		// remove the intent host document
		rmErr := intentHost.Remove()
		if rmErr != nil {
			return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not remove intent host "+
				"'%v': %v", intentHost.Id, rmErr)
		}
		return nil, err
	}

	// find old intent host; check the error before the nil host so a lookup
	// failure is reported as such
	host, err := host.FindOne(host.ById(intentHost.Id))
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed to look up intent host %v: %v",
			intentHost.Id, err)
	}
	if host == nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host “%v”", intentHost.Id)
	}

	host.Id = fmt.Sprintf("%v", newDroplet.Id)
	host.Host = newDroplet.IpAddress
	err = host.Insert()
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed to insert new host %v"+
			"for intent host %v: %v", host.Id, intentHost.Id, err)
	}

	// remove the intent host document
	err = intentHost.Remove()
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not remove "+
			"insert host “%v” (replaced by “%v”): %v", intentHost.Id, host.Id,
			err)
	}
	return host, nil

}
Example #15
func TestWarnSlowProvisioningHosts(t *testing.T) {

	testConfig := evergreen.TestConfig()

	db.SetGlobalSessionProvider(db.SessionFactoryFromConfig(testConfig))

	Convey("When building warnings for hosts that are taking a long time to"+
		" provision", t, func() {

		// reset the db
		testutil.HandleTestingErr(db.ClearCollections(host.Collection),
			t, "error clearing hosts collection")

		Convey("hosts that have not hit the threshold should not trigger a"+
			" warning", func() {

			host1 := &host.Host{
				Id:           "h1",
				StartedBy:    evergreen.User,
				CreationTime: time.Now().Add(-10 * time.Minute),
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := slowProvisioningWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 0)

		})

		Convey("hosts that have already triggered a notification should not"+
			" trigger another", func() {

			host1 := &host.Host{
				Id:           "h1",
				StartedBy:    evergreen.User,
				CreationTime: time.Now().Add(-1 * time.Hour),
				Notifications: map[string]bool{
					slowProvisioningWarning: true,
				},
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := slowProvisioningWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 0)

		})

		Convey("terminated hosts should not trigger a warning", func() {

			host1 := &host.Host{
				Id:           "h1",
				StartedBy:    evergreen.User,
				Status:       evergreen.HostTerminated,
				CreationTime: time.Now().Add(-1 * time.Hour),
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := slowProvisioningWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 0)

		})

		Convey("hosts that are at the threshold and have not previously"+
			" triggered a warning should trigger one", func() {

			host1 := &host.Host{
				Id:           "h1",
				StartedBy:    evergreen.User,
				CreationTime: time.Now().Add(-1 * time.Hour),
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := slowProvisioningWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 1)

			// make sure running the callback sets the notification key
			So(warnings[0].callback(warnings[0].host, warnings[0].threshold), ShouldBeNil)
			host1, err = host.FindOne(host.ById("h1"))
			So(err, ShouldBeNil)
			So(host1.Notifications[slowProvisioningWarning], ShouldBeTrue)

		})

	})
}
Example #16
func TestWarnExpiringSpawnedHosts(t *testing.T) {

	testConfig := evergreen.TestConfig()

	db.SetGlobalSessionProvider(db.SessionFactoryFromConfig(testConfig))

	Convey("When building warnings for spawned hosts that will be expiring"+
		" soon", t, func() {

		// reset the db
		testutil.HandleTestingErr(db.ClearCollections(host.Collection),
			t, "error clearing hosts collection")

		Convey("any hosts not expiring within a threshold should not trigger"+
			" warnings", func() {

			// this host does not expire within the first notification
			// threshold
			host1 := &host.Host{
				Id:             "h1",
				ExpirationTime: time.Now().Add(time.Hour * 15),
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := spawnHostExpirationWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 0)

		})

		Convey("any thresholds for which warnings have already been sent"+
			" should be ignored", func() {

			// this host meets the first notification warning threshold
			host1 := &host.Host{
				Id:             "h1",
				ExpirationTime: time.Now().Add(time.Hour * 10),
				Notifications: map[string]bool{
					"720": true, // the first threshold in minutes
				},
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := spawnHostExpirationWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 0)

		})

		Convey("the most recent threshold crossed should be used to create"+
			" the warning", func() {

			// this host meets both notification warning thresholds
			host1 := &host.Host{
				Id:             "h1",
				ExpirationTime: time.Now().Add(time.Minute * 10),
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			warnings, err := spawnHostExpirationWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 1)

			// execute the callback, make sure the correct threshold is set
			So(warnings[0].callback(warnings[0].host, warnings[0].threshold), ShouldBeNil)
			host1, err = host.FindOne(host.ById("h1"))
			So(err, ShouldBeNil)
			So(host1.Notifications["120"], ShouldBeTrue)

		})

		Convey("hosts that are quarantined or have already expired should not"+
			" merit warnings", func() {

			// quarantined host
			host1 := &host.Host{
				Id:             "h1",
				Status:         evergreen.HostQuarantined,
				ExpirationTime: time.Now().Add(time.Minute * 10),
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			// terminated host
			host2 := &host.Host{
				Id:             "h2",
				Status:         evergreen.HostTerminated,
				ExpirationTime: time.Now().Add(time.Minute * 10),
			}
			testutil.HandleTestingErr(host2.Insert(), t, "error inserting host")

			// past the expiration. no warning needs to be sent since this host
			// is theoretically about to be terminated, at which time a
			// notification will be sent
			host3 := &host.Host{
				Id:             "h3",
				ExpirationTime: time.Now().Add(-time.Minute * 10),
			}
			testutil.HandleTestingErr(host3.Insert(), t, "error inserting host")

			warnings, err := spawnHostExpirationWarnings(testConfig)
			So(err, ShouldBeNil)
			So(len(warnings), ShouldEqual, 0)

		})

	})

}
Example #17
func TestMonitorReachability(t *testing.T) {

	testConfig := evergreen.TestConfig()

	db.SetGlobalSessionProvider(db.SessionFactoryFromConfig(testConfig))

	Convey("When checking the reachability of hosts", t, func() {

		// reset the db
		testutil.HandleTestingErr(db.ClearCollections(host.Collection),
			t, "error clearing hosts collection")

		Convey("hosts that have been checked up on recently should"+
			" not be checked", func() {

			// mock provider hosts are always returned as reachable. therefore,
			// if this host were picked up by the monitoring check then it
			// would be updated to be reachable
			h := &host.Host{
				Id: "h1",
				LastReachabilityCheck: time.Now().Add(-time.Minute),
				Status:                evergreen.HostUnreachable,
				Provider:              mock.ProviderName,
			}
			testutil.HandleTestingErr(h.Insert(), t, "error inserting host")

			So(monitorReachability(nil), ShouldBeNil)

			// refresh the host - its status should not have been updated
			h, err := host.FindOne(host.ById("h1"))
			So(err, ShouldBeNil)
			So(h.Status, ShouldEqual, evergreen.HostUnreachable)

		})

		Convey("hosts eligible for a check should have their statuses"+
			" updated appropriately", func() {

			// this host should be picked up and updated to running
			host1 := &host.Host{
				Id: "h1",
				LastReachabilityCheck: time.Now().Add(-15 * time.Minute),
				Status:                evergreen.HostUnreachable,
				Provider:              mock.ProviderName,
			}
			testutil.HandleTestingErr(host1.Insert(), t, "error inserting host")

			// this host should not be picked up, since it is quarantined
			host2 := &host.Host{
				Id: "h2",
				LastReachabilityCheck: time.Now().Add(-15 * time.Minute),
				Status:                evergreen.HostQuarantined,
				Provider:              mock.ProviderName,
			}
			testutil.HandleTestingErr(host2.Insert(), t, "error inserting host")

			So(monitorReachability(nil), ShouldBeNil)

			// refresh the first host - its status should have been updated
			host1, err := host.FindOne(host.ById("h1"))
			So(err, ShouldBeNil)
			So(host1.Status, ShouldEqual, evergreen.HostRunning)

			// refresh the second host - its status should not have been updated
			host2, err = host.FindOne(host.ById("h2"))
			So(err, ShouldBeNil)
			So(host2.Status, ShouldEqual, evergreen.HostQuarantined)

		})

	})

}
Example #18
// loadAlertContext fetches details from the database for all documents that are associated with the
// AlertRequest. For example, it populates the task/build/version/project using the
// task/build/version/project ids in the alert request document.
func (qp *QueueProcessor) loadAlertContext(a *alert.AlertRequest) (*AlertContext, error) {
	aCtx := &AlertContext{AlertRequest: a}
	aCtx.Settings = qp.config
	taskId, projectId, buildId, versionId := a.TaskId, a.ProjectId, a.BuildId, a.VersionId
	patchId := a.PatchId
	var err error
	if len(a.HostId) > 0 {
		aCtx.Host, err = host.FindOne(host.ById(a.HostId))
		if err != nil {
			return nil, err
		}
	}
	// Fetch task if there's a task ID present; if we find one, populate build/version IDs from it
	if len(taskId) > 0 {
		aCtx.Task, err = model.FindTask(taskId)
		if err != nil {
			return nil, err
		}
		if aCtx.Task != nil && aCtx.Task.Execution != a.Execution {
			oldTaskId := fmt.Sprintf("%s_%v", taskId, a.Execution)
			aCtx.Task, err = model.FindOneOldTask(bson.M{"_id": oldTaskId}, db.NoProjection, db.NoSort)
			if err != nil {
				return nil, err
			}
		}

		if aCtx.Task != nil {
			// override build and version ID with the ones this task belongs to
			buildId = aCtx.Task.BuildId
			versionId = aCtx.Task.Version
			projectId = aCtx.Task.Project
			aCtx.FailedTests = []model.TestResult{}
			for _, test := range aCtx.Task.TestResults {
				if test.Status == "fail" {
					aCtx.FailedTests = append(aCtx.FailedTests, test)
				}
			}
		}
	}

	// Fetch build if there's a build ID present; if we find one, populate version ID from it
	if len(buildId) > 0 {
		aCtx.Build, err = build.FindOne(build.ById(buildId))
		if err != nil {
			return nil, err
		}
		if aCtx.Build != nil {
			versionId = aCtx.Build.Version
			projectId = aCtx.Build.Project
		}
	}
	if len(versionId) > 0 {
		aCtx.Version, err = version.FindOne(version.ById(versionId))
		if err != nil {
			return nil, err
		}
		if aCtx.Version != nil {
			projectId = aCtx.Version.Identifier
		}
	}

	if len(patchId) > 0 {
		if !patch.IsValidId(patchId) {
			return nil, fmt.Errorf("patch id '%v' is not an object id", patchId)
		}
		aCtx.Patch, err = patch.FindOne(patch.ById(patch.NewId(patchId)).Project(patch.ExcludePatchDiff))
	} else if aCtx.Version != nil {
		// no patch id was given, but the version in context has one; get it
		aCtx.Patch, err = patch.FindOne(patch.ByVersion(aCtx.Version.Id).Project(patch.ExcludePatchDiff))
	}
	if err != nil {
		return nil, err
	}

	// If there's a finalized patch loaded into context but not a version, load the version
	// associated with the patch as the context's version.
	if aCtx.Version == nil && aCtx.Patch != nil && aCtx.Patch.Version != "" {
		aCtx.Version, err = version.FindOne(version.ById(aCtx.Patch.Version).WithoutFields(version.ConfigKey))
		if err != nil {
			return nil, err
		}
	}

	if len(projectId) > 0 {
		aCtx.ProjectRef, err = qp.findProject(projectId)
		if err != nil {
			return nil, err
		}
	}
	return aCtx, nil
}
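A brief usage sketch for the cascade above, assuming an alert request that carries only a task id (the QueueProcessor setup and surrounding function are elided):

// given only a task id, loadAlertContext walks task -> build -> version ->
// project and fills in each document it can find
aCtx, err := qp.loadAlertContext(&alert.AlertRequest{TaskId: "some_task_id"})
if err != nil {
	return nil, err
}
// aCtx.Task, aCtx.Build, aCtx.Version, and aCtx.ProjectRef are now populated
// (each may still be nil if the corresponding document no longer exists)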
Example #19
func (uis *UIServer) modifySpawnHost(w http.ResponseWriter, r *http.Request) {
	_ = MustHaveUser(r)
	updateParams := struct {
		Action   string `json:"action"`
		HostId   string `json:"host_id"`
		RDPPwd   string `json:"rdp_pwd"`
		AddHours string `json:"add_hours"`
	}{}

	if err := util.ReadJSONInto(r.Body, &updateParams); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	hostId := updateParams.HostId
	host, err := host.FindOne(host.ById(hostId))
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("error finding host with id %v: %v", hostId, err))
		return
	}
	if host == nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("No host with id %v found", hostId))
		return
	}
	// determine what action needs to be taken
	switch updateParams.Action {
	case HostTerminate:
		if host.Status == evergreen.HostTerminated {
			uis.WriteJSON(w, http.StatusBadRequest, fmt.Sprintf("Host %v is already terminated", host.Id))
			return
		}
		cloudHost, err := providers.GetCloudHost(host, &uis.Settings)
		if err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		if err = cloudHost.TerminateInstance(); err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		uis.WriteJSON(w, http.StatusOK, "host terminated")
		return
	case HostPasswordUpdate:
		pwdUpdateCmd, err := constructPwdUpdateCommand(&uis.Settings, host, updateParams.RDPPwd)
		if err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Error constructing host RDP password: %v", err))
			return
		}
		// update RDP and sshd password
		if err = pwdUpdateCmd.Run(); err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Error updating host RDP password: %v", err))
			return
		}
		PushFlash(uis.CookieStore, r, w, NewSuccessFlash("Host RDP password successfully updated."))
		uis.WriteJSON(w, http.StatusOK, "Successfully updated host password")
	case HostExpirationExtension:
		addtHours, err := strconv.Atoi(updateParams.AddHours)
		if err != nil {
			http.Error(w, "bad hours param", http.StatusBadRequest)
			return
		}
		// ensure this request is valid
		addtHourDuration := time.Duration(addtHours) * time.Hour
		futureExpiration := host.ExpirationTime.Add(addtHourDuration)
		expirationExtensionDuration := futureExpiration.Sub(time.Now()).Hours()
		if expirationExtensionDuration > MaxExpirationDurationHours {
			http.Error(w, fmt.Sprintf("Can not extend %v expiration by %v hours. "+
				"Maximum extension is limited to %v hours", hostId,
				int(expirationExtensionDuration), MaxExpirationDurationHours), http.StatusBadRequest)
			return

		}
		if err = host.SetExpirationTime(futureExpiration); err != nil {
			uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Error extending host expiration time: %v", err))
			return
		}
		PushFlash(uis.CookieStore, r, w, NewSuccessFlash(fmt.Sprintf("Host expiration "+
			"extension successful; %v will expire on %v", hostId,
			futureExpiration.Format(time.RFC850))))
		uis.WriteJSON(w, http.StatusOK, "Successfully extended host expiration time")
		return
	default:
		http.Error(w, fmt.Sprintf("Unrecognized action: %v", updateParams.Action), http.StatusBadRequest)
		return
	}
}
Example #20
func (cloudManager *EC2SpotManager) SpawnInstance(d *distro.Distro, owner string, userHost bool) (*host.Host, error) {
	if d.Provider != SpotProviderName {
		return nil, fmt.Errorf("Can't spawn instance of %v for distro %v: provider is %v", SpotProviderName, d.Id, d.Provider)
	}
	ec2Handle := getUSEast(*cloudManager.awsCredentials)

	// Decode and validate the ProviderSettings into the ec2-specific ones.
	ec2Settings := &EC2SpotSettings{}
	if err := mapstructure.Decode(d.ProviderSettings, ec2Settings); err != nil {
		return nil, fmt.Errorf("Error decoding params for distro %v: %v", d.Id, err)
	}

	if err := ec2Settings.Validate(); err != nil {
		return nil, fmt.Errorf("Invalid EC2 spot settings in distro %v: %v", d.Id, err)
	}

	blockDevices, err := makeBlockDeviceMappings(ec2Settings.MountPoints)
	if err != nil {
		return nil, err
	}

	instanceName := generateName(d.Id)
	intentHost := &host.Host{
		Id:               instanceName,
		User:             d.User,
		Distro:           *d,
		Tag:              instanceName,
		CreationTime:     time.Now(),
		Status:           evergreen.HostUninitialized,
		TerminationTime:  model.ZeroTime,
		TaskDispatchTime: model.ZeroTime,
		Provider:         SpotProviderName,
		InstanceType:     ec2Settings.InstanceType,
		RunningTask:      "",
		Provisioned:      false,
		StartedBy:        owner,
		UserHost:         userHost,
	}

	// record this 'intent host'
	if err := intentHost.Insert(); err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert intent "+
			"host “%v”: %v", intentHost.Id, err)
	}

	evergreen.Logger.Logf(slogger.DEBUG, "Successfully inserted intent host “%v” "+
		"for distro “%v” to signal cloud instance spawn intent", instanceName,
		d.Id)

	spotRequest := &ec2.RequestSpotInstances{
		SpotPrice:      fmt.Sprintf("%v", ec2Settings.BidPrice),
		InstanceCount:  1,
		ImageId:        ec2Settings.AMI,
		KeyName:        ec2Settings.KeyName,
		InstanceType:   ec2Settings.InstanceType,
		SecurityGroups: ec2.SecurityGroupNames(ec2Settings.SecurityGroup),
		BlockDevices:   blockDevices,
	}

	spotResp, err := ec2Handle.RequestSpotInstances(spotRequest)
	if err != nil {
		// Remove the intent host if the API call failed
		if err := intentHost.Remove(); err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "Failed to remove intent host %v: %v", intentHost.Id, err)
		}
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed starting spot instance "+
			" for distro '%v' on intent host %v: %v", d.Id, intentHost.Id, err)
	}

	if len(spotResp.SpotRequestResults) == 0 {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Spot request returned no "+
			"results on intent host %v", intentHost.Id)
	}
	spotReqRes := spotResp.SpotRequestResults[0]
	if spotReqRes.State != SpotStatusOpen && spotReqRes.State != SpotStatusActive {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Spot request %v was found in "+
			" state %v on intent host %v", spotReqRes.SpotRequestId, spotReqRes.State, intentHost.Id)
	}

	intentHost.Id = spotReqRes.SpotRequestId
	err = intentHost.Insert()
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert updated host info with id %v"+
			" for intent host %v: %v", intentHost.Id, instanceName, err)
	}

	// find the old intent host and remove it, since we now have the real
	// host doc successfully stored.
	oldIntentHost, err := host.FindOne(host.ById(instanceName))
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host '%v' due to error: %v",
			instanceName, err)
	}
	if oldIntentHost == nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host '%v'", instanceName)
	}

	err = oldIntentHost.Remove()
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Could not remove intent host "+
			"“%v”: %v", oldIntentHost.Id, err)
		return nil, err
	}

	// create some tags based on user, hostname, owner, time, etc.
	tags := makeTags(intentHost)

	// attach the tags to this instance
	err = attachTags(ec2Handle, tags, intentHost.Id)

	if err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Unable to attach tags for %v: %v",
			intentHost.Id, err)
	} else {
		evergreen.Logger.Logf(slogger.DEBUG, "Attached tag name “%v” for “%v”",
			instanceName, intentHost.Id)
	}
	return intentHost, nil
}
Example #21
func (as *APIServer) spawnHostReady(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	instanceId := vars["instance_id"]
	status := vars["status"]

	// mark the host itself as provisioned
	host, err := host.FindOne(host.ById(instanceId))
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}

	if host == nil {
		http.Error(w, "host not found", http.StatusNotFound)
		return
	}

	if status == evergreen.HostStatusSuccess {
		if err := host.SetRunning(); err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "Error marking host id %v as %v: %v", instanceId, evergreen.HostStatusSuccess, err)
		}
	} else {
		alerts.RunHostProvisionFailTriggers(host)
		if err = host.SetDecommissioned(); err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "Error marking host %v for user %v as decommissioned: %v", host.Host, host.StartedBy, err)
		}
		evergreen.Logger.Logf(slogger.INFO, "Decommissioned %v for user %v because provisioning failed", host.Host, host.StartedBy)

		// send notification to the Evergreen team about this provisioning failure
		subject := fmt.Sprintf("%v Spawn provisioning failure on %v", notify.ProvisionFailurePreface, host.Distro.Id)
		message := fmt.Sprintf("Provisioning failed on %v host %v for user %v", host.Distro.Id, host.Host, host.StartedBy)
		if err = notify.NotifyAdmins(subject, message, &as.Settings); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Error sending email: %v", err)
		}

		// get/store setup logs
		setupLog, err := ioutil.ReadAll(r.Body)
		if err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "error reading request: %v", err)
			as.LoggedError(w, r, http.StatusInternalServerError, err)
			return
		}
		event.LogProvisionFailed(instanceId, string(setupLog))
	}

	message := fmt.Sprintf(`
		Host with id %v spawned.
		The host's dns name is %v.
		To ssh in: ssh -i <your private key> %v@%v`,
		host.Id, host.Host, host.User, host.Host)

	if status == evergreen.HostStatusFailed {
		message += "\nUnfortunately, the host's setup script did not run fully - check the setup.log " +
			"file in the machine's home directory to see more details"
	}
	err = notify.TrySendNotificationToUser(host.StartedBy, "Your host is ready", message, notify.ConstructMailer(as.Settings.Notify))
	if err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Error sending email: %v", err)
	}

	as.WriteJSON(w, http.StatusOK, spawnResponse{HostInfo: *host})
}
Example #22
func TestCleanupTask(t *testing.T) {

	testConfig := evergreen.TestConfig()

	db.SetGlobalSessionProvider(db.SessionFactoryFromConfig(testConfig))

	Convey("When cleaning up a task", t, func() {

		// reset the db
		testutil.HandleTestingErr(db.ClearCollections(task.Collection),
			t, "error clearing tasks collection")
		testutil.HandleTestingErr(db.ClearCollections(host.Collection),
			t, "error clearing hosts collection")

		Convey("an error should be thrown if the passed-in projects slice"+
			" does not contain the task's project", func() {

			wrapper := doomedTaskWrapper{
				task: task.Task{
					Project: "proj",
				},
			}
			projects := map[string]model.Project{}
			err := cleanUpTask(wrapper, projects)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "could not find project")

		})

		Convey("an error should be thrown if the task's host is marked with"+
			" the wrong running task id", func() {

			wrapper := doomedTaskWrapper{
				task: task.Task{
					Id:      "t1",
					HostId:  "h1",
					Project: "proj",
				},
			}
			projects := map[string]model.Project{
				"proj": {
					Identifier: "proj",
				},
			}
			host := &host.Host{
				Id:          "h1",
				RunningTask: "nott1",
			}
			So(host.Insert(), ShouldBeNil)

			err := cleanUpTask(wrapper, projects)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "but the host thinks it"+
				" is running task")

		})

		Convey("if the task's heartbeat timed out", func() {

			// reset the db
			testutil.HandleTestingErr(db.ClearCollections(task.Collection),
				t, "error clearing tasks collection")
			testutil.HandleTestingErr(db.ClearCollections(host.Collection),
				t, "error clearing hosts collection")
			testutil.HandleTestingErr(db.ClearCollections(build.Collection),
				t, "error clearing builds collection")
			testutil.HandleTestingErr(db.ClearCollections(task.OldCollection),
				t, "error clearing old tasks collection")
			testutil.HandleTestingErr(db.ClearCollections(version.Collection),
				t, "error clearing versions collection")

			Convey("the task should be reset", func() {

				newTask := &task.Task{
					Id:       "t1",
					Status:   "started",
					HostId:   "h1",
					BuildId:  "b1",
					Project:  "proj",
					Restarts: 1,
				}
				testutil.HandleTestingErr(newTask.Insert(), t, "error inserting task")

				wrapper := doomedTaskWrapper{
					reason: HeartbeatTimeout,
					task:   *newTask,
				}

				projects := map[string]model.Project{
					"proj": {
						Identifier: "proj",
						Stepback:   false,
					},
				}

				host := &host.Host{
					Id:          "h1",
					RunningTask: "t1",
				}
				So(host.Insert(), ShouldBeNil)

				build := &build.Build{
					Id:      "b1",
					Tasks:   []build.TaskCache{{Id: "t1"}},
					Version: "v1",
				}
				So(build.Insert(), ShouldBeNil)

				v := &version.Version{
					Id: "v1",
				}
				So(v.Insert(), ShouldBeNil)

				// cleaning up the task should work
				So(cleanUpTask(wrapper, projects), ShouldBeNil)

				// refresh the task - it should be reset
				newTask, err := task.FindOne(task.ById("t1"))
				So(err, ShouldBeNil)
				So(newTask.Status, ShouldEqual, evergreen.TaskUndispatched)
				So(newTask.Restarts, ShouldEqual, 2)

			})

			Convey("the running task field on the task's host should be"+
				" reset", func() {

				newTask := &task.Task{
					Id:       "t1",
					Status:   "started",
					HostId:   "h1",
					BuildId:  "b1",
					Project:  "proj",
					Restarts: 1,
				}
				testutil.HandleTestingErr(newTask.Insert(), t, "error inserting task")

				wrapper := doomedTaskWrapper{
					reason: HeartbeatTimeout,
					task:   *newTask,
				}

				projects := map[string]model.Project{
					"proj": {
						Identifier: "proj",
						Stepback:   false,
					},
				}

				h := &host.Host{
					Id:          "h1",
					RunningTask: "t1",
				}
				So(h.Insert(), ShouldBeNil)

				build := &build.Build{
					Id:      "b1",
					Tasks:   []build.TaskCache{{Id: "t1"}},
					Version: "v1",
				}
				So(build.Insert(), ShouldBeNil)

				v := &version.Version{Id: "v1"}
				So(v.Insert(), ShouldBeNil)

				// cleaning up the task should work
				So(cleanUpTask(wrapper, projects), ShouldBeNil)

				// refresh the host, make sure its running task field has
				// been reset
				h, err := host.FindOne(host.ById("h1"))
				So(err, ShouldBeNil)
				So(h.RunningTask, ShouldEqual, "")

			})

		})

	})

}