func TestPatchPluginAPI(t *testing.T) {
	testConfig := evergreen.TestConfig()
	Convey("With a running api server and installed plugin", t, func() {
		registry := plugin.NewSimpleRegistry()
		gitPlugin := &GitPlugin{}
		err := registry.Register(gitPlugin)
		testutil.HandleTestingErr(err, t, "Couldn't register patch plugin")
		server, err := apiserver.CreateTestServer(testConfig, nil, plugin.APIPlugins, false)
		testutil.HandleTestingErr(err, t, "Couldn't set up testing server")
		taskConfig, _ := plugintest.CreateTestConfig("testdata/plugin_patch.yml", t)
		testCommand := GitApplyPatchCommand{"dir"}
		_, _, err = plugintest.SetupAPITestData("testTask", true, t)
		testutil.HandleTestingErr(err, t, "Couldn't set up test documents")
		testTask, err := model.FindTask("testTaskId")
		testutil.HandleTestingErr(err, t, "Couldn't set up test patch task")

		sliceAppender := &evergreen.SliceAppender{[]*slogger.Log{}}
		logger := agent.NewTestLogger(sliceAppender)

		Convey("calls to existing tasks with patches should succeed", func() {
			httpCom := plugintest.TestAgentCommunicator(testTask.Id, testTask.Secret, server.URL)
			pluginCom := &agent.TaskJSONCommunicator{gitPlugin.Name(), httpCom}
			patch, err := testCommand.GetPatch(taskConfig, pluginCom, logger)
			So(err, ShouldBeNil)
			So(patch, ShouldNotBeNil)
			testutil.HandleTestingErr(db.Clear(version.Collection), t,
				"unable to clear versions collection")
		})
		Convey("calls to non-existing tasks should fail", func() {
			v := version.Version{Id: ""}
			testutil.HandleTestingErr(v.Insert(), t, "Couldn't insert dummy version")
			httpCom := plugintest.TestAgentCommunicator("BAD_TASK_ID", "", server.URL)
			pluginCom := &agent.TaskJSONCommunicator{gitPlugin.Name(), httpCom}
			patch, err := testCommand.GetPatch(taskConfig, pluginCom, logger)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "not found")
			So(patch, ShouldBeNil)
			testutil.HandleTestingErr(db.Clear(version.Collection), t,
				"unable to clear versions collection")
		})
		Convey("calls to existing tasks without patches should fail", func() {
			noPatchTask := model.Task{Id: "noPatchTask", BuildId: "a"}
			testutil.HandleTestingErr(noPatchTask.Insert(), t, "Couldn't insert patch task")
			noPatchVersion := version.Version{Id: "noPatchVersion", BuildIds: []string{"a"}}
			testutil.HandleTestingErr(noPatchVersion.Insert(), t, "Couldn't insert patch version")
			v := version.Version{Id: ""}
			testutil.HandleTestingErr(v.Insert(), t, "Couldn't insert dummy version")
			httpCom := plugintest.TestAgentCommunicator(noPatchTask.Id, "", server.URL)
			pluginCom := &agent.TaskJSONCommunicator{gitPlugin.Name(), httpCom}
			patch, err := testCommand.GetPatch(taskConfig, pluginCom, logger)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "no patch found for task")
			So(patch, ShouldBeNil)
			testutil.HandleTestingErr(db.Clear(version.Collection), t,
				"unable to clear versions collection")
		})

	})
}
// clean up a task whose heartbeat has timed out
func cleanUpTimedOutHeartbeat(task model.Task, project model.Project, host *host.Host) error {
	// mock up the failure details of the task
	detail := &apimodels.TaskEndDetail{
		Description: model.AgentHeartbeat,
		TimedOut:    true,
	}

	// try to reset the task
	if err := task.TryReset("", RunnerName, &project, detail); err != nil {
		return fmt.Errorf("error trying to reset task %v: %v", task.Id, err)
	}

	// clear out the host's running task
	if err := host.UpdateRunningTask(task.Id, "", time.Now()); err != nil {
		return fmt.Errorf("error clearing running task %v from host %v: %v",
			task.Id, host.Id, err)
	}

	// success
	return nil
}
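// A minimal usage sketch (not part of the original monitor): it drives
// cleanUpTimedOutHeartbeat over a batch of stale tasks whose projects and
// hosts have already been looked up, collecting errors rather than stopping
// at the first failure. The maps are keyed by task id and are hypothetical
// stand-ins for whatever queries the real monitor performs.
func cleanUpStaleTasks(tasks []model.Task, projects map[string]model.Project,
	hosts map[string]*host.Host) []error {
	var errs []error
	for _, task := range tasks {
		if err := cleanUpTimedOutHeartbeat(task, projects[task.Id], hosts[task.Id]); err != nil {
			errs = append(errs, fmt.Errorf("cleanup failed for task %v: %v", task.Id, err))
		}
	}
	return errs
}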
func TestTaskToJQL(t *testing.T) {
	Convey("Given a task with with two failed tests and one successful test, "+
		"the jql should contian only the failed test names", t, func() {
		task1 := model.Task{}
		task1.TestResults = []model.TestResult{
			{Status: "fail", TestFile: "foo.js"},
			{Status: "success", TestFile: "bar.js"},
			{Status: "fail", TestFile: "baz.js"},
		}
		task1.DisplayName = "foobar"
		jQL1 := taskToJQL(&task1)
		referenceJQL1 := fmt.Sprintf(JQLBFQuery, "summary~\"foo.js\" or summary~\"baz.js\"")
		So(jQL1, ShouldEqual, referenceJQL1)
	})

	Convey("Given a task with with oo failed tests, "+
		"the jql should contian only the failed task name", t, func() {
		task2 := model.Task{}
		task2.TestResults = []model.TestResult{}
		task2.DisplayName = "foobar"
		jQL2 := taskToJQL(&task2)
		referenceJQL2 := fmt.Sprintf(JQLBFQuery, "summary~\"foobar\"")
		So(jQL2, ShouldEqual, referenceJQL2)
	})
}
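// A sketch of the taskToJQL behavior that the test above pins down: failed
// test files are ORed into a summary query, and a task with no failures
// falls back to its display name. This is inferred from the assertions, not
// the actual implementation; it assumes JQLBFQuery takes a single format
// argument.
func taskToJQLSketch(task *model.Task) string {
	var clauses []string
	for _, result := range task.TestResults {
		if result.Status == "fail" {
			clauses = append(clauses, fmt.Sprintf("summary~\"%v\"", result.TestFile))
		}
	}
	if len(clauses) == 0 {
		clauses = append(clauses, fmt.Sprintf("summary~\"%v\"", task.DisplayName))
	}
	return fmt.Sprintf(JQLBFQuery, strings.Join(clauses, " or "))
}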
func TestAttachFilesApi(t *testing.T) {
	Convey("With a running api server and installed api hook", t, func() {
		reset(t)
		taskConfig, _ := plugintest.CreateTestConfig("testdata/plugin_attach_files.yml", t)
		registry := plugin.NewSimpleRegistry()
		attachPlugin := &AttachPlugin{}
		err := registry.Register(attachPlugin)
		testutil.HandleTestingErr(err, t, "Couldn't register patch plugin")
		server, err := apiserver.CreateTestServer(evergreen.TestConfig(), nil, plugin.Published, true)
		testutil.HandleTestingErr(err, t, "Couldn't set up testing server")
		sliceAppender := &evergreen.SliceAppender{[]*slogger.Log{}}
		logger := agent.NewTestLogger(sliceAppender)

		testTask := model.Task{Id: "test1", DisplayName: "TASK!!!", BuildId: "build1"}
		testutil.HandleTestingErr(testTask.Insert(), t, "couldn't insert test task")
		taskConfig.Task = &testTask

		httpCom := plugintest.TestAgentCommunicator(testTask.Id, testTask.Secret, server.URL)
		pluginCom := &agent.TaskJSONCommunicator{attachPlugin.Name(), httpCom}

		Convey("using a well-formed api call", func() {
			testCommand := AttachTaskFilesCommand{
				artifact.Params{
					"upload":   "gopher://evergreen.equipment",
					"coverage": "http://www.blankets.com",
				},
			}
			err := testCommand.SendTaskFiles(taskConfig, logger, pluginCom)
			So(err, ShouldBeNil)

			Convey("the given values should be written to the db", func() {
				entry, err := artifact.FindOne(artifact.ByTaskId(testTask.Id))
				So(err, ShouldBeNil)
				So(entry, ShouldNotBeNil)
				So(entry.TaskId, ShouldEqual, testTask.Id)
				So(entry.TaskDisplayName, ShouldEqual, testTask.DisplayName)
				So(entry.BuildId, ShouldEqual, testTask.BuildId)
				So(len(entry.Files), ShouldEqual, 2)
			})

			Convey("with a second api call", func() {
				testCommand := AttachTaskFilesCommand{
					artifact.Params{
						"3x5":      "15",
						"$b.o.o.l": "{\"json\":false}",
						"coverage": "http://tumblr.com/tagged/tarp",
					},
				}
				err := testCommand.SendTaskFiles(taskConfig, logger, pluginCom)
				So(err, ShouldBeNil)
				entry, err := artifact.FindOne(artifact.ByTaskId(testTask.Id))
				So(err, ShouldBeNil)
				So(entry, ShouldNotBeNil)

				Convey("new values should be added", func() {
					Convey("and old values should still remain", func() {
						So(len(entry.Files), ShouldEqual, 5)
					})
				})
			})
		})

		Convey("but the following malformed calls should fail:", func() {
			Convey("- calls with garbage content", func() {
				resp, err := pluginCom.TaskPostJSON(
					AttachTaskFilesAPIEndpoint,
					"I am not a proper post request for this endpoint",
				)
				So(err, ShouldBeNil)
				So(resp, ShouldNotBeNil)
				So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
			})

			Convey("- calls with nested subdocs", func() {
				resp, err := pluginCom.TaskPostJSON(
					AttachTaskFilesAPIEndpoint,
					map[string]interface{}{
						"cool": map[string]interface{}{
							"this_is": "a",
							"broken":  "test",
						},
					})
				So(err, ShouldBeNil)
				So(resp, ShouldNotBeNil)
				So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
			})
		})
	})
}
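// The malformed-call cases above imply that the attach-files endpoint only
// accepts a flat map of file name -> link string. Below is a hedged sketch
// of that validation, assuming the handler first decodes the POST body into
// a map[string]interface{}; the real handler's decoding and error handling
// may differ.
func validateTaskFilesParams(body map[string]interface{}) (artifact.Params, error) {
	params := artifact.Params{}
	for name, link := range body {
		linkStr, ok := link.(string)
		if !ok {
			// nested subdocs (or any non-string value) are rejected;
			// the handler surfaces this as http.StatusBadRequest
			return nil, fmt.Errorf("value for file %v is not a string", name)
		}
		params[name] = linkStr
	}
	return params, nil
}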
func TestDurationBasedHostAllocator(t *testing.T) {
	var taskIds []string
	var runningTaskIds []string
	var hostIds []string
	var dist distro.Distro
	var testTaskDuration time.Duration
	var taskDurations model.ProjectTaskDurations
	var durationBasedHostAllocator *DurationBasedHostAllocator

	Convey("With a duration based host allocator,"+
		" determining the number of new hosts to spin up", t, func() {

		durationBasedHostAllocator = &DurationBasedHostAllocator{}
		taskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		runningTaskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		hostIds = []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"}
		dist = distro.Distro{Provider: "ec2"}
		testTaskDuration = time.Duration(2) * time.Minute
		taskDurations = model.ProjectTaskDurations{
			TaskDurationByProject: map[string]*model.BuildVariantTaskDurations{
				"": &model.BuildVariantTaskDurations{
					TaskDurationByBuildVariant: map[string]*model.TaskDurations{
						"": &model.TaskDurations{
							TaskDurationByDisplayName: map[string]time.Duration{
								"": testTaskDuration,
							},
						},
					},
				},
			},
		}

		So(db.Clear(model.TasksCollection), ShouldBeNil)

		Convey("if there are no tasks to run, no new hosts should be needed",
			func() {
				hosts := []host.Host{
					host.Host{Id: hostIds[0]},
					host.Host{Id: hostIds[1]},
					host.Host{Id: hostIds[2]},
				}
				dist.PoolSize = len(hosts) + 5

				hostAllocatorData := &HostAllocatorData{
					existingDistroHosts: map[string][]host.Host{
						"": hosts,
					},
					distros: map[string]distro.Distro{
						"": dist,
					},
				}

				tasksAccountedFor := make(map[string]bool)
				distroScheduleData := make(map[string]DistroScheduleData)

				newHosts, err := durationBasedHostAllocator.
					numNewHostsForDistro(hostAllocatorData, dist,
						tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
				So(err, ShouldBeNil)
				So(newHosts, ShouldEqual, 0)
			})

		Convey("if the number of existing hosts equals the max hosts, no new"+
			" hosts can be spawned", func() {
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
				model.TaskQueueItem{Id: taskIds[2]},
				model.TaskQueueItem{Id: taskIds[3]},
			}
			dist.PoolSize = 0

			hostAllocatorData := &HostAllocatorData{
				existingDistroHosts: map[string][]host.Host{},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
			}
			dist.PoolSize = len(hosts)

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of existing hosts exceeds the max hosts, no new"+
			" hosts can be spawned", func() {

			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
				model.TaskQueueItem{Id: taskIds[2]},
				model.TaskQueueItem{Id: taskIds[3]},
			}
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1]},
			}
			dist.PoolSize = 1

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run is less than the number of free"+
			" hosts, no new hosts are needed", func() {
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
			}
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1]},
				host.Host{Id: hostIds[2]},
			}
			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)

		})

		Convey("if the number of tasks to run is equal to the number of free"+
			" hosts, no new hosts are needed", func() {
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
			}
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
			}

			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := model.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run exceeds the number of free"+
			" hosts, new hosts are needed up to the maximum allowed for the"+
			" dist", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 9

			// In this test:
			//
			// 1. Total distro duration is:
			//		(len(taskQueueItems) * expDur ) +
			//		time left on hosts with running tasks
			// which comes out to:
			//		(5 * 200 * 60) + (60 * 3) ~ 60180 (in seconds)
			//
			// 2. MAX_DURATION_PER_DISTRO = 7200 (2 hours)
			//
			// 3. We have 5 existing hosts
			//
			// Thus, our duration based host allocator will always return 8 -
			// which is greater than what distro.PoolSize-len(existingDistroHosts)
			// will ever return in this situation.
			//
			// Hence, we should always expect to use that minimum.
			//
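			// Spelled out, the assertions that follow are consistent
			// with a three-way minimum (inferred from this test, not
			// quoted from the implementation):
			//   duration-based estimate: 60180 / 7200 ~ 8
			//   pool room:               PoolSize - 5 existing hosts
			//   task-based cap:          5 queued tasks - 2 free hosts = 3
			// e.g. PoolSize = 9 gives min(8, 4, 3) = 3.
			//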
			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := model.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// the total running duration (~60180s) exceeds
			// MAX_DURATION_PER_DISTRO, so new hosts are needed
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)

			dist.PoolSize = 8
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
			dist.PoolSize = 7
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)

			dist.PoolSize = 6

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 1)
		})

		Convey("if the distro cannot be used to spawn hosts, then no new "+
			"hosts can be spawned", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// none of the existing hosts are running a task
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1]},
				host.Host{Id: hostIds[2]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4]},
			}

			dist.PoolSize = 20
			dist.Provider = "static"

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, the estimate of new "+
			"\nhosts should be used", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := model.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, but greater than "+
			"\nthe difference between the number of runnable tasks and the "+
			"\nnumber of free hosts, that difference should be used", func() {
			expDur := time.Duration(400) * time.Minute
			// all runnable tasks have an expected duration of expDur (400mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := model.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 11
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than both the maximum "+
			"\nnumber of new hosts allowed for this distro, and the "+
			"\ndifference between the number of runnable tasks and the "+
			"\nnumber of free hosts, then the duration based estimate should "+
			"be used", func() {
			expDur := time.Duration(180) * time.Minute
			// all runnable tasks have an expected duration of expDur (180mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
				host.Host{Id: hostIds[5]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := model.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 2
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)
		})
	})

}
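// A compact sketch of the decision rule the allocator test above exercises.
// It is inferred from the assertions, not copied from numNewHostsForDistro:
// the number of new hosts is the minimum of the duration-based estimate,
// the remaining pool room, and one host per scheduled task not already
// covered by a free host, floored at zero. For example,
// sketchNumNewHosts(8, 9, 5, 5, 2) == 3, matching the PoolSize=9 case.
func sketchNumNewHosts(durationEstimate, poolSize, existingHosts,
	queuedTasks, freeHosts int) int {
	newHosts := durationEstimate
	if room := poolSize - existingHosts; room < newHosts {
		newHosts = room
	}
	if taskCap := queuedTasks - freeHosts; taskCap < newHosts {
		newHosts = taskCap
	}
	if newHosts < 0 {
		return 0
	}
	return newHosts
}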