func setupAPITestData(testConfig *evergreen.Settings, taskDisplayName string,
	variant string, patchMode patchTestMode, t *testing.T) (*model.Task, *build.Build, error) {
	// Ignore errs here because the ns might just not exist.
	clearDataMsg := "Failed to clear test data collection"
	testCollections := []string{
		model.TasksCollection, build.Collection, host.Collection,
		distro.Collection, version.Collection, patch.Collection,
		model.PushlogCollection, model.ProjectVarsCollection, model.TaskQueuesCollection,
		manifest.Collection, model.ProjectRefCollection}
	testutil.HandleTestingErr(dbutil.ClearCollections(testCollections...), t, clearDataMsg)
	projectVars := &model.ProjectVars{
		Id: "evergreen-ci-render",
		Vars: map[string]string{
			"aws_key":    testConfig.Providers.AWS.Id,
			"aws_secret": testConfig.Providers.AWS.Secret,
			"fetch_key":  "fetch_expansion_value",
		},
	}
	_, err := projectVars.Upsert()
	testutil.HandleTestingErr(err, t, "failed to upsert project variables")

	taskOne := &model.Task{
		Id:           "testTaskId",
		BuildId:      "testBuildId",
		DistroId:     "test-distro-one",
		BuildVariant: variant,
		Project:      "evergreen-ci-render",
		DisplayName:  taskDisplayName,
		HostId:       "testHost",
		Secret:       "testTaskSecret",
		Version:      "testVersionId",
		Status:       evergreen.TaskDispatched,
		Requester:    evergreen.RepotrackerVersionRequester,
	}

	taskTwo := &model.Task{
		Id:           "testTaskIdTwo",
		BuildId:      "testBuildId",
		DistroId:     "test-distro-one",
		BuildVariant: variant,
		Project:      "evergreen-ci-render",
		DisplayName:  taskDisplayName,
		HostId:       "",
		Secret:       "testTaskSecret",
		Activated:    true,
		Version:      "testVersionId",
		Status:       evergreen.TaskUndispatched,
		Requester:    evergreen.RepotrackerVersionRequester,
	}

	if patchMode != NoPatch {
		taskOne.Requester = evergreen.PatchVersionRequester
	}

	testutil.HandleTestingErr(taskOne.Insert(), t, "failed to insert taskOne")
	testutil.HandleTestingErr(taskTwo.Insert(), t, "failed to insert taskTwo")

	// set up task queue for task end tests
	taskQueue := &model.TaskQueue{
		Distro: "test-distro-one",
		Queue: []model.TaskQueueItem{
			{
				Id:          "testTaskIdTwo",
				DisplayName: taskDisplayName,
			},
		},
	}
	testutil.HandleTestingErr(taskQueue.Save(), t, "failed to insert taskqueue")
	workDir, err := ioutil.TempDir("", "agent_test_")
	testutil.HandleTestingErr(err, t, "failed to create working directory")

	host := &host.Host{
		Id:   "testHost",
		Host: "testHost",
		Distro: distro.Distro{
			Id:         "test-distro-one",
			WorkDir:    workDir,
			Expansions: []distro.Expansion{{"distro_exp", "DISTRO_EXP"}},
		},
		RunningTask:   "testTaskId",
		StartedBy:     evergreen.User,
		AgentRevision: agentRevision,
	}
	testutil.HandleTestingErr(host.Insert(), t, "failed to insert host")

	// read in the project configuration
	projectFile := "testdata/config_test_plugin/project/evergreen-ci-render.yml"
	projectConfig, err := ioutil.ReadFile(projectFile)
	testutil.HandleTestingErr(err, t, "failed to read project config")

	projectRef := &model.ProjectRef{
		Identifier: "evergreen-ci-render",
		Owner:      "evergreen-ci",
		Repo:       "render",
		RepoKind:   "github",
		Branch:     "master",
		Enabled:    true,
		BatchTime:  180,
	}

	testutil.HandleTestingErr(projectRef.Insert(), t, "failed to insert projectRef")

	err = testutil.CreateTestLocalConfig(testConfig, "evergreen-ci-render", projectFile)
	testutil.HandleTestingErr(err, t, "failed to create local config")

	// unmarshal the project configuration into a struct
	project := &model.Project{}
	testutil.HandleTestingErr(yaml.Unmarshal(projectConfig, project), t, "failed to unmarshal project config")

	// now marshal the project YAML for storage
	projectYamlBytes, err := yaml.Marshal(project)
	testutil.HandleTestingErr(err, t, "failed to marshal project config")

	// insert the version document
	v := &version.Version{
		Id:       "testVersionId",
		BuildIds: []string{taskOne.BuildId},
		Config:   string(projectYamlBytes),
	}

	testutil.HandleTestingErr(v.Insert(), t, "failed to insert version")
	if patchMode != NoPatch {
		mainPatchContent, err := ioutil.ReadFile("testdata/test.patch")
		testutil.HandleTestingErr(err, t, "failed to read test patch file")
		modulePatchContent, err := ioutil.ReadFile("testdata/testmodule.patch")
		testutil.HandleTestingErr(err, t, "failed to read test module patch file")

		ptch := &patch.Patch{
			Status:  evergreen.PatchCreated,
			Version: v.Id,
		}
		if patchMode == InlinePatch {
			ptch.Patches = []patch.ModulePatch{
				{
					ModuleName: "",
					Githash:    "1e5232709595db427893826ce19289461cba3f75",
					PatchSet:   patch.PatchSet{Patch: string(mainPatchContent)},
				},
				{
					ModuleName: "recursive",
					Githash:    "1e5232709595db427893826ce19289461cba3f75",
					PatchSet:   patch.PatchSet{Patch: string(modulePatchContent)},
				},
			}
		} else {
			p1Id, p2Id := bson.NewObjectId().Hex(), bson.NewObjectId().Hex()
			So(dbutil.WriteGridFile(patch.GridFSPrefix, p1Id, strings.NewReader(string(mainPatchContent))), ShouldBeNil)
			So(dbutil.WriteGridFile(patch.GridFSPrefix, p2Id, strings.NewReader(string(modulePatchContent))), ShouldBeNil)
			ptch.Patches = []patch.ModulePatch{
				{
					ModuleName: "",
					Githash:    "1e5232709595db427893826ce19289461cba3f75",
					PatchSet:   patch.PatchSet{PatchFileId: p1Id},
				},
				{
					ModuleName: "recursive",
					Githash:    "1e5232709595db427893826ce19289461cba3f75",
					PatchSet:   patch.PatchSet{PatchFileId: p2Id},
				},
			}
		}
		testutil.HandleTestingErr(ptch.Insert(), t, "failed to insert patch")
	}

	session, _, err := dbutil.GetGlobalSessionFactory().GetSession()
	testutil.HandleTestingErr(err, t, "couldn't get db session!")

	// Remove any logs for our test task from previous runs.
	_, err = session.DB(model.TaskLogDB).C(model.TaskLogCollection).
		RemoveAll(bson.M{"t_id": bson.M{"$in": []string{taskOne.Id, taskTwo.Id}}})
	testutil.HandleTestingErr(err, t, "failed to remove logs")

	build := &build.Build{
		Id: "testBuildId",
		Tasks: []build.TaskCache{
			build.NewTaskCache(taskOne.Id, taskOne.DisplayName, true),
			build.NewTaskCache(taskTwo.Id, taskTwo.DisplayName, true),
		},
		Version: "testVersionId",
	}

	testutil.HandleTestingErr(build.Insert(), t, "failed to insert build")
	return taskOne, build, nil
}
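
A minimal sketch of how a test might drive the fixture helper above; the task display name, variant, and assertions here are hypothetical placeholders, not taken from the real test suite:

func TestSetupAPITestDataSketch(t *testing.T) {
	testConfig := evergreen.TestConfig()
	Convey("with API test data in place", t, func() {
		// "compile" and "linux-64" are placeholder names; any task and
		// variant defined in the loaded project config would do.
		testTask, testBuild, err := setupAPITestData(testConfig, "compile", "linux-64", NoPatch, t)
		So(err, ShouldBeNil)
		So(testTask.Id, ShouldEqual, "testTaskId")
		So(testBuild.Id, ShouldEqual, "testBuildId")
	})
}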
Example #2
//SpawnInstance creates a new droplet for the given distro.
func (digoMgr *DigitalOceanManager) SpawnInstance(d *distro.Distro, owner string, userHost bool) (*host.Host, error) {
	if d.Provider != ProviderName {
		return nil, fmt.Errorf("Can't spawn instance of %v for distro %v: provider is %v", ProviderName, d.Id, d.Provider)
	}

	digoSettings := &Settings{}
	if err := mapstructure.Decode(d.ProviderSettings, digoSettings); err != nil {
		return nil, fmt.Errorf("Error decoding params for distro %v: %v", d.Id, err)
	}

	if err := digoSettings.Validate(); err != nil {
		return nil, fmt.Errorf("Invalid DigitalOcean settings in distro %v: %v", d.Id, err)
	}

	instanceName := "droplet-" +
		fmt.Sprintf("%v", rand.New(rand.NewSource(time.Now().UnixNano())).Int())
	intentHost := &host.Host{
		Id:               instanceName,
		User:             d.User,
		Distro:           *d,
		Tag:              instanceName,
		CreationTime:     time.Now(),
		Status:           evergreen.HostUninitialized,
		TerminationTime:  util.ZeroTime,
		TaskDispatchTime: util.ZeroTime,
		Provider:         ProviderName,
		StartedBy:        owner,
	}

	dropletReq := &digo.Droplet{
		SizeId:   digoSettings.SizeId,
		ImageId:  digoSettings.ImageId,
		RegionId: digoSettings.RegionId,
		Name:     instanceName,
		SshKey:   digoSettings.SSHKeyId,
	}

	if err := intentHost.Insert(); err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert intent "+
			"host '%v': %v", intentHost.Id, err)
	}

	evergreen.Logger.Logf(slogger.DEBUG, "Successfully inserted intent host '%v' "+
		"for distro '%v' to signal cloud instance spawn intent", instanceName,
		d.Id)

	newDroplet, err := digoMgr.account.CreateDroplet(dropletReq)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "DigitalOcean create droplet API call failed"+
			" for intent host '%v': %v", intentHost.Id, err)

		// remove the intent host document
		rmErr := intentHost.Remove()
		if rmErr != nil {
			return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not remove intent host "+
				"'%v': %v", intentHost.Id, rmErr)
		}
		return nil, err
	}

	// find old intent host
	host, err := host.FindOne(host.ById(intentHost.Id))
	if host == nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host “%v”", intentHost.Id)
	}
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed to look up intent host %v: %v",
			intentHost.Id, err)
	}

	host.Id = fmt.Sprintf("%v", newDroplet.Id)
	host.Host = newDroplet.IpAddress
	err = host.Insert()
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed to insert new host %v"+
			"for intent host %v: %v", host.Id, intentHost.Id, err)
	}

	// remove the intent host document
	err = intentHost.Remove()
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not remove "+
			"insert host “%v” (replaced by “%v”): %v", intentHost.Id, host.Id,
			err)
	}
	return host, nil

}
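
The lookup, re-insert, remove sequence at the end of SpawnInstance is the core of the intent-host pattern, which the EC2 code in the next example repeats. A distilled sketch, using only calls that appear above but with an assumed helper name (swapIntentHost does not exist in the codebase):

// swapIntentHost is hypothetical: find the intent document, re-insert it
// under the cloud provider's real ID and address, then remove the intent
// document.
func swapIntentHost(intentHost *host.Host, realId, realAddress string) (*host.Host, error) {
	h, err := host.FindOne(host.ById(intentHost.Id))
	if err != nil {
		return nil, fmt.Errorf("failed to look up intent host %v: %v", intentHost.Id, err)
	}
	if h == nil {
		return nil, fmt.Errorf("can't locate record inserted for intent host %v", intentHost.Id)
	}
	h.Id = realId
	h.Host = realAddress
	if err := h.Insert(); err != nil {
		return nil, fmt.Errorf("failed to insert new host %v: %v", h.Id, err)
	}
	return h, intentHost.Remove()
}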
Example #3
func startEC2Instance(ec2Handle *ec2.EC2, options *ec2.RunInstancesOptions,
	intentHost *host.Host) (*host.Host, *ec2.RunInstancesResp, error) {
	// start the instance
	resp, err := ec2Handle.RunInstances(options)

	if err != nil {
		// remove the intent host document
		rmErr := intentHost.Remove()
		if rmErr != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Could not remove intent host "+
				"“%v”: %v", intentHost.Id, rmErr)
		}
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR,
			"EC2 RunInstances API call returned error: %v", err)
	}

	evergreen.Logger.Logf(slogger.DEBUG, "Spawned %v instance", len(resp.Instances))

	// the instance should have been successfully spawned
	instance := resp.Instances[0]
	evergreen.Logger.Logf(slogger.DEBUG, "Started %v", instance.InstanceId)
	evergreen.Logger.Logf(slogger.DEBUG, "Key name: %v", string(options.KeyName))

	// find old intent host
	host, err := host.FindOne(host.ById(intentHost.Id))
	if host == nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host “%v”", intentHost.Id)
	}
	if err != nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Can't locate "+
			"record inserted for intended host “%v” due to error: %v",
			intentHost.Id, err)
	}

	// we found the old document now we can insert the new one
	host.Id = instance.InstanceId
	err = host.Insert()
	if err != nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not insert "+
			"updated host information for “%v” with “%v”: %v", intentHost.Id,
			host.Id, err)
	}

	// remove the intent host document
	err = intentHost.Remove()
	if err != nil {
		return nil, nil, evergreen.Logger.Errorf(slogger.ERROR, "Could not remove "+
			"insert host “%v” (replaced by “%v”): %v", intentHost.Id, host.Id,
			err)
	}

	var infoResp *ec2.DescribeInstancesResp
	instanceInfoRetryCount := 0
	instanceInfoMaxRetries := 5
	for {
		infoResp, err = ec2Handle.DescribeInstances([]string{instance.InstanceId}, nil)
		if err != nil {
			instanceInfoRetryCount++
			if instanceInfoRetryCount == instanceInfoMaxRetries {
				evergreen.Logger.Errorf(slogger.ERROR, "There was an error querying for the "+
					"instance's information and retries are exhausted. The insance may "+
					"be up.")
				return nil, resp, err
			}

			evergreen.Logger.Errorf(slogger.DEBUG, "There was an error querying for the "+
				"instance's information. Retrying in 30 seconds. Error: %v", err)
			time.Sleep(30 * time.Second)
			continue
		}
		break
	}

	reservations := infoResp.Reservations
	if len(reservations) < 1 {
		return nil, resp, fmt.Errorf("Reservation was returned as nil, you " +
			"may have to check manually")
	}

	instancesInfo := reservations[0].Instances
	if len(instancesInfo) < 1 {
		return nil, resp, fmt.Errorf("Reservation appears to have no " +
			"associated instances")
	}
	return host, resp, nil
}
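
The DescribeInstances loop above retries a fixed number of times with a fixed delay between attempts. A generic sketch of the same pattern, with an assumed helper name that is not part of the codebase:

// retryWithFixedDelay is hypothetical: run op up to maxAttempts times,
// sleeping delay between failed attempts, and return the last error.
func retryWithFixedDelay(op func() error, maxAttempts int, delay time.Duration) error {
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if attempt < maxAttempts {
			time.Sleep(delay)
		}
	}
	return err
}

// Usage mirroring the loop above:
//
//	err := retryWithFixedDelay(func() error {
//		infoResp, err = ec2Handle.DescribeInstances([]string{instance.InstanceId}, nil)
//		return err
//	}, instanceInfoMaxRetries, 30*time.Second)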
func TestDBHostFinder(t *testing.T) {

	var users []string
	var taskIds []string
	var hostIds []string
	var hosts []*host.Host
	var hostFinder *DBHostFinder

	Convey("When querying for available hosts", t, func() {

		hostFinder = &DBHostFinder{}

		users = []string{"u1"}
		taskIds = []string{"t1"}
		hostIds = []string{"h1", "h2", "h3"}
		hosts = []*host.Host{
			{Id: hostIds[0], StartedBy: evergreen.User,
				Status: evergreen.HostRunning},
			{Id: hostIds[1], StartedBy: evergreen.User,
				Status: evergreen.HostRunning},
			{Id: hostIds[2], StartedBy: evergreen.User,
				Status: evergreen.HostRunning},
		}

		So(db.Clear(host.Collection), ShouldBeNil)

		Convey("hosts started by users other than the MCI user should not"+
			" be returned", func() {
			hosts[2].StartedBy = users[0]
			for _, host := range hosts {
				testutil.HandleTestingErr(host.Insert(), t, "Error inserting"+
					" host into database")
			}

			availableHosts, err := hostFinder.FindAvailableHosts()
			testutil.HandleTestingErr(err, t, "Error finding available hosts")
			So(len(availableHosts), ShouldEqual, 2)
			So(availableHosts[0].Id, ShouldEqual, hosts[0].Id)
			So(availableHosts[1].Id, ShouldEqual, hosts[1].Id)
		})

		Convey("hosts with currently running tasks should not be returned",
			func() {
				hosts[2].RunningTask = taskIds[0]
				for _, host := range hosts {
					testutil.HandleTestingErr(host.Insert(), t, "Error inserting"+
						" host into database")
				}

				availableHosts, err := hostFinder.FindAvailableHosts()
				testutil.HandleTestingErr(err, t, "Error finding available hosts")
				So(len(availableHosts), ShouldEqual, 2)
				So(availableHosts[0].Id, ShouldEqual, hosts[0].Id)
				So(availableHosts[1].Id, ShouldEqual, hosts[1].Id)
			})

		Convey("hosts that are not in the 'running' state should not be"+
			" returned", func() {
			hosts[2].Status = evergreen.HostUninitialized
			for _, host := range hosts {
				testutil.HandleTestingErr(host.Insert(), t, "Error inserting host"+
					" into database")
			}

			availableHosts, err := hostFinder.FindAvailableHosts()
			testutil.HandleTestingErr(err, t, "Error finding available hosts")
			So(len(availableHosts), ShouldEqual, 2)
			So(availableHosts[0].Id, ShouldEqual, hosts[0].Id)
			So(availableHosts[1].Id, ShouldEqual, hosts[1].Id)
		})

	})

}
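
The three Convey blocks above pin down the finder's semantics: an available host is started by the Evergreen user, has no running task, and is in the running state. A plausible sketch of the query this implies, assuming mgo-style BSON field names and the package's host.Find/db.Query helpers (an illustration, not the verified implementation):

// findAvailableHostsSketch is hypothetical; the field names are assumed.
func (hf *DBHostFinder) findAvailableHostsSketch() ([]host.Host, error) {
	return host.Find(db.Query(bson.M{
		"started_by":   evergreen.User,
		"running_task": "",
		"status":       evergreen.HostRunning,
	}))
}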
func setupAPITestData(testConfig *evergreen.Settings, taskDisplayName string,
	variant string, projectFile string, patchMode patchTestMode, t *testing.T) (*model.Task, *build.Build, error) {
	// Ignore errs here because the ns might just not exist.
	clearDataMsg := "Failed to clear test data collection"
	testCollections := []string{
		model.TasksCollection, build.Collection, host.Collection,
		distro.Collection, version.Collection, patch.Collection,
		model.PushlogCollection, model.ProjectVarsCollection, model.TaskQueuesCollection,
		manifest.Collection, model.ProjectRefCollection}
	testutil.HandleTestingErr(dbutil.ClearCollections(testCollections...), t, clearDataMsg)

	// Read in the project configuration
	projectConfig, err := ioutil.ReadFile(projectFile)
	testutil.HandleTestingErr(err, t, "failed to read project config")

	// Unmarshal the project configuration into a struct
	project := &model.Project{}
	testutil.HandleTestingErr(yaml.Unmarshal(projectConfig, project), t, "failed to unmarshal project config")

	// Marshal the project YAML for storage
	projectYamlBytes, err := yaml.Marshal(project)
	testutil.HandleTestingErr(err, t, "failed to marshal project config")

	// Create the ref for the project
	projectRef := &model.ProjectRef{
		Identifier:  project.DisplayName,
		Owner:       project.Owner,
		Repo:        project.Repo,
		RepoKind:    project.RepoKind,
		Branch:      project.Branch,
		Enabled:     project.Enabled,
		BatchTime:   project.BatchTime,
		LocalConfig: string(projectConfig),
	}
	testutil.HandleTestingErr(projectRef.Insert(), t, "failed to insert projectRef")

	// Save the project variables
	projectVars := &model.ProjectVars{
		Id: project.DisplayName,
		Vars: map[string]string{
			"aws_key":    testConfig.Providers.AWS.Id,
			"aws_secret": testConfig.Providers.AWS.Secret,
			"fetch_key":  "fetch_expansion_value",
		},
	}
	_, err = projectVars.Upsert()
	testutil.HandleTestingErr(err, t, "failed to upsert project variables")

	// Create and insert two tasks
	taskOne := &model.Task{
		Id:           "testTaskId",
		BuildId:      "testBuildId",
		DistroId:     "test-distro-one",
		BuildVariant: variant,
		Project:      project.DisplayName,
		DisplayName:  taskDisplayName,
		HostId:       "testHost",
		Secret:       "testTaskSecret",
		Version:      "testVersionId",
		Status:       evergreen.TaskDispatched,
		Requester:    evergreen.RepotrackerVersionRequester,
	}
	if patchMode != NoPatch {
		taskOne.Requester = evergreen.PatchVersionRequester
	}
	testutil.HandleTestingErr(taskOne.Insert(), t, "failed to insert taskOne")

	taskTwo := &model.Task{
		Id:           "testTaskIdTwo",
		BuildId:      "testBuildId",
		DistroId:     "test-distro-one",
		BuildVariant: variant,
		Project:      project.DisplayName,
		DisplayName:  taskDisplayName,
		HostId:       "",
		Secret:       "testTaskSecret",
		Version:      "testVersionId",
		Status:       evergreen.TaskUndispatched,
		Requester:    evergreen.RepotrackerVersionRequester,
		Activated:    true,
	}
	testutil.HandleTestingErr(taskTwo.Insert(), t, "failed to insert taskTwo")

	// Set up a task queue for task end tests
	taskQueue := &model.TaskQueue{
		Distro: "test-distro-one",
		Queue: []model.TaskQueueItem{
			{
				Id:          "testTaskIdTwo",
				DisplayName: taskDisplayName,
			},
		},
	}
	testutil.HandleTestingErr(taskQueue.Save(), t, "failed to insert taskqueue")

	// Insert the version document
	v := &version.Version{
		Id:       "testVersionId",
		BuildIds: []string{taskOne.BuildId},
		Config:   string(projectYamlBytes),
	}
	testutil.HandleTestingErr(v.Insert(), t, "failed to insert version")

	// Insert the build that contains the tasks
	build := &build.Build{
		Id: "testBuildId",
		Tasks: []build.TaskCache{
			build.NewTaskCache(taskOne.Id, taskOne.DisplayName, true),
			build.NewTaskCache(taskTwo.Id, taskTwo.DisplayName, true),
		},
		Version: v.Id,
	}
	testutil.HandleTestingErr(build.Insert(), t, "failed to insert build")

	workDir, err := ioutil.TempDir("", "agent_test_")
	testutil.HandleTestingErr(err, t, "failed to create working directory")

	// Insert the host info for running the tests
	host := &host.Host{
		Id:   "testHost",
		Host: "testHost",
		Distro: distro.Distro{
			Id:         "test-distro-one",
			WorkDir:    workDir,
			Expansions: []distro.Expansion{{"distro_exp", "DISTRO_EXP"}},
		},
		RunningTask:   taskOne.Id,
		StartedBy:     evergreen.User,
		AgentRevision: agentRevision,
	}
	testutil.HandleTestingErr(host.Insert(), t, "failed to insert host")

	session, _, err := dbutil.GetGlobalSessionFactory().GetSession()
	testutil.HandleTestingErr(err, t, "couldn't get db session!")

	// Remove any logs for our test task from previous runs.
	_, err = session.DB(model.TaskLogDB).C(model.TaskLogCollection).
		RemoveAll(bson.M{"t_id": bson.M{"$in": []string{taskOne.Id, taskTwo.Id}}})
	testutil.HandleTestingErr(err, t, "failed to remove logs")

	return taskOne, build, nil
}
Example #6
// SpawnInstance creates and starts a new Docker container
func (dockerMgr *DockerManager) SpawnInstance(d *distro.Distro, owner string, userHost bool) (*host.Host, error) {
	var err error

	if d.Provider != ProviderName {
		return nil, fmt.Errorf("Can't spawn instance of %v for distro %v: provider is %v", ProviderName, d.Id, d.Provider)
	}

	// Initialize client
	dockerClient, settings, err := generateClient(d)
	if err != nil {
		return nil, err
	}

	// Create HostConfig structure
	hostConfig := &docker.HostConfig{}
	err = populateHostConfig(hostConfig, d)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Unable to populate docker host config for host '%s': %v", settings.HostIp, err)
		return nil, err
	}

	// Build container
	containerName := "docker-" + bson.NewObjectId().Hex()
	newContainer, err := dockerClient.CreateContainer(
		docker.CreateContainerOptions{
			Name: containerName,
			Config: &docker.Config{
				Cmd: []string{"/usr/sbin/sshd", "-D"},
				ExposedPorts: map[docker.Port]struct{}{
					SSHDPort: {},
				},
				Image: settings.ImageId,
			},
			HostConfig: hostConfig,
		},
	)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Docker create container API call failed for host '%s': %v", settings.HostIp, err)
		return nil, err
	}

	// Start container
	err = dockerClient.StartContainer(newContainer.ID, nil)
	if err != nil {
		// Clean up
		err2 := dockerClient.RemoveContainer(
			docker.RemoveContainerOptions{
				ID:    newContainer.ID,
				Force: true,
			},
		)
		if err2 != nil {
			err = fmt.Errorf("%v. And was unable to clean up container %v: %v", err, newContainer.ID, err2)
		}
		evergreen.Logger.Logf(slogger.ERROR, "Docker start container API call failed for host '%s': %v", settings.HostIp, err)
		return nil, err
	}

	// Retrieve container details
	newContainer, err = dockerClient.InspectContainer(newContainer.ID)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Docker inspect container API call failed for host '%s': %v", settings.HostIp, err)
		return nil, err
	}

	hostPort, err := retrieveOpenPortBinding(newContainer)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR, "Error with docker container '%v': %v", newContainer.ID, err)
		return nil, err
	}
	hostStr := fmt.Sprintf("%s:%s", settings.BindIp, hostPort)

	// Add host info to db
	instanceName := "container-" +
		fmt.Sprintf("%d", rand.New(rand.NewSource(time.Now().UnixNano())).Int())
	host := &host.Host{
		Id:               newContainer.ID,
		Host:             hostStr,
		User:             d.User,
		Tag:              instanceName,
		Distro:           *d,
		CreationTime:     newContainer.Created,
		Status:           evergreen.HostUninitialized,
		TerminationTime:  model.ZeroTime,
		TaskDispatchTime: model.ZeroTime,
		Provider:         ProviderName,
		StartedBy:        owner,
	}

	err = host.Insert()
	if err != nil {
		return nil, evergreen.Logger.Errorf(slogger.ERROR, "Failed to insert new host '%s': %v", host.Id, err)
	}

	evergreen.Logger.Logf(slogger.DEBUG, "Successfully inserted new host '%v' for distro '%v'", host.Id, d.Id)

	return host, nil
}
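
retrieveOpenPortBinding is not shown in this example; a plausible sketch of what it could look like with the go-dockerclient types used here (an assumption, not the actual helper):

// retrieveOpenPortBindingSketch is hypothetical: read the host port that
// Docker mapped to the container's SSHD port from the inspected container.
func retrieveOpenPortBindingSketch(container *docker.Container) (string, error) {
	if container.NetworkSettings != nil {
		if bindings, ok := container.NetworkSettings.Ports[SSHDPort]; ok && len(bindings) > 0 {
			return bindings[0].HostPort, nil
		}
	}
	return "", fmt.Errorf("no open port binding found for container %v", container.ID)
}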
Example #7
func TestCleanupTask(t *testing.T) {

	testConfig := evergreen.TestConfig()

	db.SetGlobalSessionProvider(db.SessionFactoryFromConfig(testConfig))

	Convey("When cleaning up a task", t, func() {

		// reset the db
		testutil.HandleTestingErr(db.ClearCollections(task.Collection),
			t, "error clearing tasks collection")
		testutil.HandleTestingErr(db.ClearCollections(host.Collection),
			t, "error clearing hosts collection")

		Convey("an error should be thrown if the passed-in projects slice"+
			" does not contain the task's project", func() {

			wrapper := doomedTaskWrapper{
				task: task.Task{
					Project: "proj",
				},
			}
			projects := map[string]model.Project{}
			err := cleanUpTask(wrapper, projects)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "could not find project")

		})

		Convey("an error should be thrown if the task's host is marked with"+
			" the wrong running task id", func() {

			wrapper := doomedTaskWrapper{
				task: task.Task{
					Id:      "t1",
					HostId:  "h1",
					Project: "proj",
				},
			}
			projects := map[string]model.Project{
				"proj": {
					Identifier: "proj",
				},
			}
			host := &host.Host{
				Id:          "h1",
				RunningTask: "nott1",
			}
			So(host.Insert(), ShouldBeNil)

			err := cleanUpTask(wrapper, projects)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "but the host thinks it"+
				" is running task")

		})

		Convey("if the task's heartbeat timed out", func() {

			// reset the db
			testutil.HandleTestingErr(db.ClearCollections(task.Collection),
				t, "error clearing tasks collection")
			testutil.HandleTestingErr(db.ClearCollections(host.Collection),
				t, "error clearing hosts collection")
			testutil.HandleTestingErr(db.ClearCollections(build.Collection),
				t, "error clearing builds collection")
			testutil.HandleTestingErr(db.ClearCollections(task.OldCollection),
				t, "error clearing old tasks collection")
			testutil.HandleTestingErr(db.ClearCollections(version.Collection),
				t, "error clearing versions collection")

			Convey("the task should be reset", func() {

				newTask := &task.Task{
					Id:       "t1",
					Status:   "started",
					HostId:   "h1",
					BuildId:  "b1",
					Project:  "proj",
					Restarts: 1,
				}
				testutil.HandleTestingErr(newTask.Insert(), t, "error inserting task")

				wrapper := doomedTaskWrapper{
					reason: HeartbeatTimeout,
					task:   *newTask,
				}

				projects := map[string]model.Project{
					"proj": {
						Identifier: "proj",
						Stepback:   false,
					},
				}

				host := &host.Host{
					Id:          "h1",
					RunningTask: "t1",
				}
				So(host.Insert(), ShouldBeNil)

				build := &build.Build{
					Id:      "b1",
					Tasks:   []build.TaskCache{{Id: "t1"}},
					Version: "v1",
				}
				So(build.Insert(), ShouldBeNil)

				v := &version.Version{
					Id: "v1",
				}
				So(v.Insert(), ShouldBeNil)

				// cleaning up the task should work
				So(cleanUpTask(wrapper, projects), ShouldBeNil)

				// refresh the task - it should be reset
				newTask, err := task.FindOne(task.ById("t1"))
				So(err, ShouldBeNil)
				So(newTask.Status, ShouldEqual, evergreen.TaskUndispatched)
				So(newTask.Restarts, ShouldEqual, 2)

			})

			Convey("the running task field on the task's host should be"+
				" reset", func() {

				newTask := &task.Task{
					Id:       "t1",
					Status:   "started",
					HostId:   "h1",
					BuildId:  "b1",
					Project:  "proj",
					Restarts: 1,
				}
				testutil.HandleTestingErr(newTask.Insert(), t, "error inserting task")

				wrapper := doomedTaskWrapper{
					reason: HeartbeatTimeout,
					task:   *newTask,
				}

				projects := map[string]model.Project{
					"proj": {
						Identifier: "proj",
						Stepback:   false,
					},
				}

				h := &host.Host{
					Id:          "h1",
					RunningTask: "t1",
				}
				So(h.Insert(), ShouldBeNil)

				build := &build.Build{
					Id:      "b1",
					Tasks:   []build.TaskCache{{Id: "t1"}},
					Version: "v1",
				}
				So(build.Insert(), ShouldBeNil)

				v := &version.Version{Id: "v1"}
				So(v.Insert(), ShouldBeNil)

				// cleaning up the task should work
				So(cleanUpTask(wrapper, projects), ShouldBeNil)

				// refresh the host, make sure its running task field has
				// been reset
				h, err := host.FindOne(host.ById("h1"))
				So(err, ShouldBeNil)
				So(h.RunningTask, ShouldEqual, "")

			})

		})

	})

}