Example #1
func (uis *UIServer) addDistro(w http.ResponseWriter, r *http.Request) {
	id, hasId := mux.Vars(r)["distro_id"]

	u := MustHaveUser(r)

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		message := fmt.Sprintf("error adding distro: %v", err)
		PushFlash(uis.CookieStore, r, w, NewErrorFlash(message))
		http.Error(w, message, http.StatusBadRequest)
		return
	}
	defer r.Body.Close()

	var d distro.Distro

	if err = json.Unmarshal(b, &d); err != nil {
		message := fmt.Sprintf("error adding distro: %v", err)
		PushFlash(uis.CookieStore, r, w, NewErrorFlash(message))
		http.Error(w, message, http.StatusBadRequest)
		return
	}

	if hasId {
		d.Id = id
	}

	vErrs, err := validator.CheckDistro(&d, &uis.Settings, true)
	if err != nil {
		message := fmt.Sprintf("error retrieving distroIds: %v", err)
		PushFlash(uis.CookieStore, r, w, NewErrorFlash(message))
		http.Error(w, message, http.StatusInternalServerError)
		return
	}

	if len(vErrs) != 0 {
		for _, e := range vErrs {
			PushFlash(uis.CookieStore, r, w, NewErrorFlash(e.Error()))
		}
		uis.WriteJSON(w, http.StatusBadRequest, vErrs)
		return
	}

	if err = d.Insert(); err != nil {
		message := fmt.Sprintf("error inserting distro '%v': %v", d.Id, err)
		PushFlash(uis.CookieStore, r, w, NewErrorFlash(message))
		uis.WriteJSON(w, http.StatusInternalServerError, err)
		return
	}

	event.LogDistroAdded(d.Id, u.Username(), d)

	PushFlash(uis.CookieStore, r, w, NewSuccessFlash(fmt.Sprintf("Distro %v successfully added.", d.Id)))
	uis.WriteJSON(w, http.StatusOK, "distro successfully added")
}
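For orientation, here is a minimal sketch of how this handler might be mounted with gorilla/mux. The paths and HTTP method are illustrative assumptions, not taken from the source; only the "distro_id" variable name is fixed, since it must match the mux.Vars lookup at the top of the handler.

func registerDistroRoutes(uis *UIServer) *mux.Router {
	// Hypothetical route registration for addDistro (paths and method are assumptions).
	r := mux.NewRouter()
	// "{distro_id}" must match the key the handler reads via mux.Vars(r)["distro_id"].
	r.HandleFunc("/distro/{distro_id}", uis.addDistro).Methods("PUT")
	// Without the path variable, hasId is false and the JSON body supplies the id.
	r.HandleFunc("/distro", uis.addDistro).Methods("PUT")
	return r
}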
Example #2
func TestAverageStatistics(t *testing.T) {
	testutil.HandleTestingErr(db.ClearCollections(task.Collection), t, "couldn't reset task collection")
	Convey("With a distro sampleDistro inserted", t, func() {
		d := distro.Distro{
			Id: "sampleDistro",
		}
		err := d.Insert()
		So(err, ShouldBeNil)
		distroId := d.Id
		Convey("With a set of tasks that have different scheduled -> start times over a given time period", func() {
			now := time.Now()
			bucketSize := 10 * time.Second
			numberBuckets := 3

			task1 := task.Task{Id: "task1", ScheduledTime: now,
				StartTime: now.Add(5 * time.Second), Status: evergreen.TaskStarted, DistroId: distroId}

			So(task1.Insert(), ShouldBeNil)

			task2 := task.Task{Id: "task2", ScheduledTime: now,
				StartTime: now.Add(20 * time.Second), Status: evergreen.TaskStarted, DistroId: distroId}

			So(task2.Insert(), ShouldBeNil)

			task3 := task.Task{Id: "task3", ScheduledTime: now.Add(10 * time.Second),
				StartTime: now.Add(20 * time.Second), Status: evergreen.TaskStarted, DistroId: distroId}
			So(task3.Insert(), ShouldBeNil)

			frameBounds := FrameBounds{
				StartTime:     now,
				EndTime:       now.Add(time.Duration(numberBuckets) * bucketSize),
				NumberBuckets: numberBuckets,
				BucketSize:    bucketSize,
			}
			avgBuckets, err := AverageStatistics(distroId, frameBounds)
			So(err, ShouldBeNil)

			So(avgBuckets[0].AverageTime, ShouldEqual, 5*time.Second)
			So(avgBuckets[1].AverageTime, ShouldEqual, 0)
			So(avgBuckets[2].AverageTime, ShouldEqual, 15*time.Second)

			Convey("if the distro id given does not exist, it shoud return an empty list", func() {
				_, err := AverageStatistics("noId", frameBounds)
				So(err, ShouldNotBeNil)
			})
		})
	})

}
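A quick sanity check on the expected averages, assuming AverageStatistics buckets tasks by StartTime: task1 starts 5s into the frame (bucket 0) after a 5s wait, so bucket 0 averages 5s; task2 and task3 both start 20s in (bucket 2) after waits of 20s and 10s, so bucket 2 averages 15s; nothing starts inside bucket 1, so its average stays 0.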
Example #3
func TestStoreRepositoryRevisions(t *testing.T) {
	dropTestDB(t)
	testutil.ConfigureIntegrationTest(t, testConfig, "TestStoreRepositoryRevisions")
	Convey("When storing revisions gotten from a repository...", t, func() {
		err := testutil.CreateTestLocalConfig(testConfig, "mci-test", "")
		So(err, ShouldBeNil)
		repoTracker := RepoTracker{testConfig, projectRef, NewGithubRepositoryPoller(projectRef,
			testConfig.Credentials["github"])}

		// insert distros used in testing.
		d := distro.Distro{Id: "test-distro-one"}
		So(d.Insert(), ShouldBeNil)
		d.Id = "test-distro-two"
		So(d.Insert(), ShouldBeNil)

		Convey("On storing a single repo revision, we expect a version to be created"+
			" in the database for this project, which should be retrieved when we search"+
			" for this project's most recent version", func() {
			createTime := time.Now()
			revisionOne := *createTestRevision("firstRevision", createTime)
			revisions := []model.Revision{revisionOne}

			resultVersion, err := repoTracker.StoreRevisions(revisions)
			testutil.HandleTestingErr(err, t, "Error storing repository revisions %v")

			newestVersion, err := version.FindOne(version.ByMostRecentForRequester(projectRef.String(), evergreen.RepotrackerVersionRequester))
			testutil.HandleTestingErr(err, t, "Error retreiving newest version %v")

			So(resultVersion, ShouldResemble, newestVersion)
		})

		Convey("On storing several repo revisions, we expect a version to be created "+
			"for each revision", func() {
			createTime := time.Now()
			laterCreateTime := createTime.Add(4 * time.Hour)

			revisionOne := *createTestRevision("one", laterCreateTime)
			revisionTwo := *createTestRevision("two", createTime)

			revisions := []model.Revision{revisionOne, revisionTwo}

			_, err := repoTracker.StoreRevisions(revisions)
			testutil.HandleTestingErr(err, t, "Error storing repository revisions %v")

			versionOne, err := version.FindOne(version.ByProjectIdAndRevision(projectRef.Identifier, revisionOne.Revision))
			testutil.HandleTestingErr(err, t, "Error retrieving first stored version %v")
			versionTwo, err := version.FindOne(version.ByProjectIdAndRevision(projectRef.Identifier, revisionTwo.Revision))
			testutil.HandleTestingErr(err, t, "Error retreiving second stored version %v")

			So(versionOne.Revision, ShouldEqual, revisionOne.Revision)
			So(versionTwo.Revision, ShouldEqual, revisionTwo.Revision)
		})

		Reset(func() {
			dropTestDB(t)
		})
	})

	Convey("When storing versions from repositories with remote configuration files...", t, func() {

		project := createTestProject(nil, nil)

		revisions := []model.Revision{
			*createTestRevision("foo", time.Now().Add(1*time.Minute)),
		}

		poller := NewMockRepoPoller(project, revisions)

		repoTracker := RepoTracker{
			testConfig,
			&model.ProjectRef{
				Identifier: "testproject",
				BatchTime:  10,
			},
			poller,
		}

		// insert distros used in testing.
		d := distro.Distro{Id: "test-distro-one"}
		So(d.Insert(), ShouldBeNil)
		d.Id = "test-distro-two"
		So(d.Insert(), ShouldBeNil)

		Convey("We should not fetch configs for versions we already have stored.",
			func() {
				So(poller.ConfigGets, ShouldBeZeroValue)
				// Store revisions the first time
				_, err := repoTracker.StoreRevisions(revisions)
				So(err, ShouldBeNil)
				// We should have fetched the config once for each revision
				So(poller.ConfigGets, ShouldEqual, len(revisions))

				// Store them again
				_, err = repoTracker.StoreRevisions(revisions)
				So(err, ShouldBeNil)
				// We shouldn't have fetched the config any additional times
				// since we have already stored these versions
				So(poller.ConfigGets, ShouldEqual, len(revisions))
			},
		)

		Convey("We should handle invalid configuration files gracefully by storing a stub version",
			func() {
				errStrs := []string{"Someone dun' goof'd"}
				poller.setNextError(projectConfigError{errStrs})
				stubVersion, err := repoTracker.StoreRevisions(revisions)
				// We want this error to get swallowed so a config error
				// doesn't stop additional versions from getting created
				So(err, ShouldBeNil)
				So(stubVersion.Errors, ShouldResemble, errStrs)
			},
		)

		Convey("If there is an error other than a config error while fetching a config, we should fail hard",
			func() {
				unexpectedError := errors.New("Something terrible has happened!!")
				poller.setNextError(unexpectedError)
				v, err := repoTracker.StoreRevisions(revisions)
				So(v, ShouldBeNil)
				So(err, ShouldEqual, unexpectedError)
			},
		)

		Reset(func() {
			dropTestDB(t)
		})

	})
}
Example #4
func TestBatchTimes(t *testing.T) {
	dropTestDB(t)
	Convey("When deciding whether or not to activate variants for the most recently stored version", t, func() {
		// We create a version with an activation time of now so that all the bvs have a last activation time of now.
		previouslyActivatedVersion := version.Version{
			Id:         "previously activated",
			Identifier: "testproject",
			BuildVariants: []version.BuildStatus{
				{
					BuildVariant: "bv1",
					Activated:    true,
					ActivateAt:   time.Now(),
				},
				{
					BuildVariant: "bv2",
					Activated:    true,
					ActivateAt:   time.Now(),
				},
			},
			RevisionOrderNumber: 0,
			Requester:           evergreen.RepotrackerVersionRequester,
		}

		So(previouslyActivatedVersion.Insert(), ShouldBeNil)

		// insert distros used in testing.
		d := distro.Distro{Id: "test-distro-one"}
		So(d.Insert(), ShouldBeNil)
		d.Id = "test-distro-two"
		So(d.Insert(), ShouldBeNil)

		Convey("If the project's batch time has not elapsed, and no buildvariants "+
			"have overriden their batch times, no variants should be activated", func() {
			project := createTestProject(nil, nil)
			revisions := []model.Revision{
				*createTestRevision("foo", time.Now()),
			}

			repoTracker := RepoTracker{
				testConfig,
				&model.ProjectRef{
					Identifier: "testproject",
					BatchTime:  1,
				},
				NewMockRepoPoller(project, revisions),
			}
			v, err := repoTracker.StoreRevisions(revisions)
			So(v, ShouldNotBeNil)
			So(err, ShouldBeNil)
			So(len(v.BuildVariants), ShouldEqual, 2)
			So(repoTracker.activateElapsedBuilds(v), ShouldBeNil)
			So(v.BuildVariants[0].Activated, ShouldBeFalse)
			So(v.BuildVariants[1].Activated, ShouldBeFalse)
		})

		Convey("If the project's batch time has elapsed, and no buildvariants "+
			"have overridden their batch times, all variants should be activated", func() {
			project := createTestProject(nil, nil)
			revisions := []model.Revision{
				*createTestRevision("bar", time.Now().Add(time.Duration(-6*time.Minute))),
			}
			repoTracker := RepoTracker{
				testConfig,
				&model.ProjectRef{
					Identifier: "testproject",
					BatchTime:  0,
				},
				NewMockRepoPoller(project, revisions),
			}
			version, err := repoTracker.StoreRevisions(revisions)
			So(version, ShouldNotBeNil)
			So(err, ShouldBeNil)
			So(repoTracker.activateElapsedBuilds(version), ShouldBeNil)
			bv1, found := findStatus(version, "bv1")
			So(found, ShouldBeTrue)
			So(bv1.Activated, ShouldBeTrue)
			bv2, found := findStatus(version, "bv2")
			So(found, ShouldBeTrue)
			So(bv2.Activated, ShouldBeTrue)
		})

		Convey("If the project's batch time has elapsed, but both variants "+
			"have overridden their batch times (which have not elapsed)"+
			", no variants should be activated", func() {
			// need to assign pointer vals
			twoforty := 240
			onetwenty := 120

			project := createTestProject(&twoforty, &onetwenty)

			revisions := []model.Revision{
				*createTestRevision("baz", time.Now()),
			}

			repoTracker := RepoTracker{
				testConfig,
				&model.ProjectRef{
					Identifier: "testproject",
					BatchTime:  60,
				},
				NewMockRepoPoller(project, revisions),
			}
			version, err := repoTracker.StoreRevisions(revisions)
			So(version, ShouldNotBeNil)
			So(err, ShouldBeNil)
			So(repoTracker.activateElapsedBuilds(version), ShouldBeNil)
			bv1, found := findStatus(version, "bv1")
			So(found, ShouldBeTrue)
			So(bv1.Activated, ShouldBeFalse)
			bv2, found := findStatus(version, "bv2")
			So(found, ShouldBeTrue)
			So(bv2.Activated, ShouldBeFalse)
		})

		Convey("If the project's batch time has not elapsed, but one variant "+
			"has overridden their batch times to be shorter"+
			", that variant should be activated", func() {
			zero := 0

			project := createTestProject(&zero, nil)

			revisions := []model.Revision{
				*createTestRevision("garply", time.Now()),
			}

			repoTracker := RepoTracker{
				testConfig,
				&model.ProjectRef{
					Identifier: "testproject",
					BatchTime:  60,
				},
				NewMockRepoPoller(project, revisions),
			}
			version, err := repoTracker.StoreRevisions(revisions)
			So(version, ShouldNotBeNil)
			So(err, ShouldBeNil)
			So(repoTracker.activateElapsedBuilds(version), ShouldBeNil)
			bv1, found := findStatus(version, "bv1")
			So(found, ShouldBeTrue)
			So(bv1.Activated, ShouldBeTrue)
			bv2, found := findStatus(version, "bv2")
			So(found, ShouldBeTrue)
			So(bv2, ShouldNotBeNil)
			So(bv2.Activated, ShouldBeFalse)
		})

		Reset(func() {
			dropTestDB(t)
		})
	})
}
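Reading the four cases together: BatchTime is a number of minutes, activateElapsedBuilds activates a variant only once that interval has passed since the variant's last recorded activation (the ActivateAt values seeded in previouslyActivatedVersion), and a variant-level batchtime — the pointers handed to createTestProject — takes precedence over the project-level value. The precedence rule is an inference from these expectations rather than a quote from the scheduler source.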
Example #5
func TestSpawnHosts(t *testing.T) {

	Convey("When spawning hosts", t, func() {

		distroIds := []string{"d1", "d2", "d3"}

		schedulerInstance := &Scheduler{
			schedulerTestConf,
			&MockTaskFinder{},
			&MockTaskPrioritizer{},
			&MockTaskDurationEstimator{},
			&MockTaskQueuePersister{},
			&MockHostAllocator{},
		}

		Convey("if there are no hosts to be spawned, the Scheduler should not"+
			" make any calls to the CloudManager", func() {
			newHostsNeeded := map[string]int{
				distroIds[0]: 0,
				distroIds[1]: 0,
				distroIds[2]: 0,
			}

			newHostsSpawned, err := schedulerInstance.spawnHosts(newHostsNeeded)
			So(err, ShouldBeNil)
			So(len(newHostsSpawned[distroIds[0]]), ShouldEqual, 0)
			So(len(newHostsSpawned[distroIds[1]]), ShouldEqual, 0)
			So(len(newHostsSpawned[distroIds[2]]), ShouldEqual, 0)
		})

		Convey("if there are hosts to be spawned, the Scheduler should make"+
			" one call to the CloudManager for each host, and return the"+
			" results bucketed by distro", func() {

			newHostsNeeded := map[string]int{
				distroIds[0]: 3,
				distroIds[1]: 0,
				distroIds[2]: 1,
			}

			for _, id := range distroIds {
				d := distro.Distro{Id: id, PoolSize: 1, Provider: mock.ProviderName}
				So(d.Insert(), ShouldBeNil)
			}

			newHostsSpawned, err := schedulerInstance.spawnHosts(newHostsNeeded)
			So(err, ShouldBeNil)
			distroZeroHosts := newHostsSpawned[distroIds[0]]
			distroOneHosts := newHostsSpawned[distroIds[1]]
			distroTwoHosts := newHostsSpawned[distroIds[2]]
			So(len(distroZeroHosts), ShouldEqual, 3)
			So(distroZeroHosts[0].Distro.Id, ShouldEqual, distroIds[0])
			So(distroZeroHosts[1].Distro.Id, ShouldEqual, distroIds[0])
			So(distroZeroHosts[2].Distro.Id, ShouldEqual, distroIds[0])
			So(len(distroOneHosts), ShouldEqual, 0)
			So(len(distroTwoHosts), ShouldEqual, 1)
			So(distroTwoHosts[0].Distro.Id, ShouldEqual, distroIds[2])
		})

		Reset(func() {
			db.Clear(distro.Collection)
		})

	})

}
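Note that spawnHosts honors the requested counts exactly: each test distro is inserted with PoolSize 1, yet three hosts come back for d1. Enforcing pool limits evidently belongs to the host allocator that produced newHostsNeeded, not to this spawning step.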
Example #6
func TestCheckDistro(t *testing.T) {
	Convey("When validating a distro", t, func() {

		Convey("if a new distro passes all of the validation tests, no errors should be returned", func() {
			d := &distro.Distro{Id: "a", Arch: "a", User: "******", SSHKey: "a", WorkDir: "a",
				Provider: ec2.OnDemandProviderName,
				ProviderSettings: &map[string]interface{}{
					"ami":            "a",
					"key_name":       "a",
					"instance_type":  "a",
					"security_group": "a",
					"mount_points":   nil,
				},
			}

			So(CheckDistro(d, conf, true), ShouldResemble, []ValidationError{})
		})

		Convey("if a new distro fails a validation test, an error should be returned", func() {
			d := &distro.Distro{Id: "a", Arch: "a", User: "******", SSHKey: "a", WorkDir: "a",
				Provider: ec2.OnDemandProviderName,
				ProviderSettings: &map[string]interface{}{
					"ami":            "a",
					"key_name":       "a",
					"instance_type":  "a",
					"security_group": "a",
					"mount_points":   nil,
				},
			}
			// simulate duplicate id
			dupe := distro.Distro{Id: "a"}
			So(dupe.Insert(), ShouldBeNil)
			So(CheckDistro(d, conf, true), ShouldNotResemble, []ValidationError{})
		})

		Convey("if an existing distro passes all of the validation tests, no errors should be returned", func() {
			d := &distro.Distro{Id: "a", Arch: "a", User: "******", SSHKey: "a", WorkDir: "a",
				Provider: ec2.OnDemandProviderName,
				ProviderSettings: &map[string]interface{}{
					"ami":            "a",
					"key_name":       "a",
					"instance_type":  "a",
					"security_group": "a",
					"mount_points":   nil,
				},
			}

			So(CheckDistro(d, conf, false), ShouldResemble, []ValidationError{})
		})

		Convey("if an existing distro fails a validation test, an error should be returned", func() {
			d := &distro.Distro{Id: "a", Arch: "a", User: "******", SSHKey: "a", WorkDir: "a",
				Provider: ec2.OnDemandProviderName,
				ProviderSettings: &map[string]interface{}{
					"ami":            "",
					"key_name":       "a",
					"instance_type":  "a",
					"security_group": "a",
					"mount_points":   nil,
				},
			}
			// empty ami for provider
			So(CheckDistro(d, conf, false), ShouldNotResemble, []ValidationError{})
		})

		Reset(func() {
			db.Clear(distro.Collection)
		})
	})
}
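The trailing boolean to CheckDistro evidently marks a brand-new distro: with true, a duplicate id already in the database fails validation, while with false only the distro's own settings are checked — here the empty ami is what trips the error.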
Example #7
func TestDeficitBasedHostAllocator(t *testing.T) {
	var taskIds []string
	var runningTaskIds []string
	var hostIds []string
	var dist distro.Distro
	var hostAllocator *DeficitBasedHostAllocator

	Convey("With a deficit based host allocator,"+
		" determining the number of new hosts to spin up...", t, func() {

		hostAllocator = &DeficitBasedHostAllocator{}
		taskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		runningTaskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		hostIds = []string{"h1", "h2", "h3", "h4", "h5"}
		dist = distro.Distro{Provider: "ec2"}

		Convey("if there are no tasks to run, no new hosts should be needed",
			func() {
				hosts := []host.Host{
					{Id: hostIds[0]},
					{Id: hostIds[1]},
					{Id: hostIds[2]},
				}
				dist.PoolSize = len(hosts) + 5

				hostAllocatorData := &HostAllocatorData{
					existingDistroHosts: map[string][]host.Host{
						"": hosts,
					},
					distros: map[string]distro.Distro{
						"": dist,
					},
				}

				So(hostAllocator.numNewHostsForDistro(hostAllocatorData,
					dist, hostAllocatorTestConf), ShouldEqual, 0)
			})

		Convey("if the number of existing hosts equals the max hosts, no new"+
			" hosts can be spawned", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
			}
			dist.PoolSize = 0

			hostAllocatorData := &HostAllocatorData{
				existingDistroHosts: map[string][]host.Host{},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 0)
			hosts := []host.Host{
				{Id: hostIds[0]},
			}
			dist.PoolSize = len(hosts)

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 0)
		})

		Convey("if the number of existing hosts exceeds the max hosts, no new"+
			" hosts can be spawned", func() {

			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
			}
			dist.PoolSize = 1

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 0)
		})

		Convey("if the number of tasks to run is less than the number of free"+
			" hosts, no new hosts are needed", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2]},
				{Id: hostIds[3]},
			}
			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 0)

		})

		Convey("if the number of tasks to run is equal to the number of free"+
			" hosts, no new hosts are needed", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
			}
			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 0)
		})

		Convey("if the number of tasks to run exceeds the number of free"+
			" hosts, new hosts are needed up to the maximum allowed for the"+
			" distro", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
				{Id: taskIds[4]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 9

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 3)

			dist.PoolSize = 8
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}
			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 3)
			dist.PoolSize = 7
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}
			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 2)
			dist.PoolSize = 6
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}
			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 1)
		})

		Convey("if the distro cannot be used to spawn hosts, then no new hosts"+
			" can be spawned", func() {
			hosts := []host.Host{
				{Id: hostIds[0]},
			}
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
			}
			dist.PoolSize = 20
			dist.Provider = "static"
			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}
			So(hostAllocator.numNewHostsForDistro(hostAllocatorData, dist, hostAllocatorTestConf),
				ShouldEqual, 0)
		})

	})

}
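All of the expectations above fit a simple deficit rule. Below is a minimal sketch of that rule, assuming this is essentially what numNewHostsForDistro computes for a spawnable provider; the function name deficitHostsSketch and the free-host count are ours, and the real allocator additionally returns 0 outright for providers (such as "static") that cannot spawn hosts.

// deficitHostsSketch mirrors the behavior the test expects: spawn one host per
// queued task that has no free host to take it, without growing past PoolSize.
func deficitHostsSketch(queued []model.TaskQueueItem, hosts []host.Host, d distro.Distro) int {
	// Hosts with no running task can absorb queued work immediately.
	free := 0
	for _, h := range hosts {
		if h.RunningTask == "" {
			free++
		}
	}
	needed := len(queued) - free
	// Never exceed the remaining pool slack.
	if slack := d.PoolSize - len(hosts); needed > slack {
		needed = slack
	}
	if needed < 0 {
		needed = 0
	}
	return needed
}

Checked against the cases above: five queued tasks and two free hosts give a deficit of three, which PoolSize values of 9, 8, 7, and 6 then cap at 3, 3, 2, and 1 — exactly the expected counts.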
Example #8
func TestDurationBasedHostAllocator(t *testing.T) {
	var taskIds []string
	var runningTaskIds []string
	var hostIds []string
	var dist distro.Distro
	var testTaskDuration time.Duration
	var taskDurations model.ProjectTaskDurations
	var durationBasedHostAllocator *DurationBasedHostAllocator

	Convey("With a duration based host allocator,"+
		" determining the number of new hosts to spin up", t, func() {

		durationBasedHostAllocator = &DurationBasedHostAllocator{}
		taskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		runningTaskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		hostIds = []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"}
		dist = distro.Distro{Provider: "ec2"}
		testTaskDuration = 2 * time.Minute
		taskDurations = model.ProjectTaskDurations{
			TaskDurationByProject: map[string]*model.BuildVariantTaskDurations{
				"": {
					TaskDurationByBuildVariant: map[string]*model.TaskDurations{
						"": {
							TaskDurationByDisplayName: map[string]time.Duration{
								"": testTaskDuration,
							},
						},
					},
				},
			},
		}

		So(db.Clear(task.Collection), ShouldBeNil)

		Convey("if there are no tasks to run, no new hosts should be needed",
			func() {
				hosts := []host.Host{
					{Id: hostIds[0]},
					{Id: hostIds[1]},
					{Id: hostIds[2]},
				}
				dist.PoolSize = len(hosts) + 5

				hostAllocatorData := &HostAllocatorData{
					existingDistroHosts: map[string][]host.Host{
						"": hosts,
					},
					distros: map[string]distro.Distro{
						"": dist,
					},
				}

				tasksAccountedFor := make(map[string]bool)
				distroScheduleData := make(map[string]DistroScheduleData)

				newHosts, err := durationBasedHostAllocator.
					numNewHostsForDistro(hostAllocatorData, dist,
						tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
				So(err, ShouldBeNil)
				So(newHosts, ShouldEqual, 0)
			})

		Convey("if the number of existing hosts equals the max hosts, no new"+
			" hosts can be spawned", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
			}
			dist.PoolSize = 0

			hostAllocatorData := &HostAllocatorData{
				existingDistroHosts: map[string][]host.Host{},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
			hosts := []host.Host{
				{Id: hostIds[0]},
			}
			dist.PoolSize = len(hosts)

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of existing hosts exceeds the max hosts, no new"+
			" hosts can be spawned", func() {

			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
			}
			dist.PoolSize = 1

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run is less than the number of free"+
			" hosts, no new hosts are needed", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
				{Id: hostIds[2]},
			}
			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)

		})

		Convey("if the number of tasks to run is equal to the number of free"+
			" hosts, no new hosts are needed", func() {
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
			}
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
			}

			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				runningTask := task.Task{Id: runningTaskId}
				So(runningTask.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run exceeds the number of free"+
			" hosts, new hosts are needed up to the maximum allowed for the"+
			" dist", func() {
			expDur := 200 * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 9

			// In this test:
			//
			// 1. Total distro duration is:
			//		(len(taskQueueItems) * expDur) +
			//		time left on hosts with running tasks
			// which comes out to:
			//		(5 * 200 * 60) + (60 * 3) ~ 60180 (in seconds)
			//
			// 2. MAX_DURATION_PER_DISTRO = 7200 (2 hours), so spreading
			// that work across the 5 existing hosts leaves a duration
			// based estimate of roughly 3 new hosts
			//
			// 3. The result is also capped at the pool slack:
			// distro.PoolSize - len(existingDistroHosts)
			//
			// Hence, as PoolSize drops from 9 to 6 below, we expect the
			// smaller of the duration based estimate and the pool slack.
			//
			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				runningTask := task.Task{Id: runningTaskId}
				So(runningTask.Insert(), ShouldBeNil)
			}

			// total scheduled + running duration here is ~60180s (see the breakdown above)
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)

			dist.PoolSize = 8
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
			dist.PoolSize = 7
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)

			dist.PoolSize = 6

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor = make(map[string]bool)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 1)
		})

		Convey("if the distro cannot be used to spawn hosts, then no new "+
			"hosts can be spawned", func() {
			expDur := 200 * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// none of the existing hosts are running tasks
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
				{Id: hostIds[2]},
				{Id: hostIds[3]},
				{Id: hostIds[4]},
			}

			dist.PoolSize = 20
			dist.Provider = "static"

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, the estimate of new "+
			"\nhosts should be used", func() {
			expDur := 200 * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				runningTask := task.Task{Id: runningTaskId}
				So(runningTask.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, but greater than "+
			"\nthe difference between the number of runnable tasks and the "+
			"\nnumber of free hosts, that difference should be used", func() {
			expDur := 400 * time.Minute
			// all runnable tasks have an expected duration of expDur (400mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				runningTask := task.Task{Id: runningTaskId}
				So(runningTask.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 11
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than both the maximum "+
			"\nnumber of new hosts allowed for this distro, and the "+
			"\ndifference between the number of runnable tasks and the "+
			"\nnumber of free hosts, then the duration based estimate should "+
			"be used", func() {
			expDur := 180 * time.Minute
			// all runnable tasks have an expected duration of expDur (180mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
				{Id: hostIds[5]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				runningTask := task.Task{Id: runningTaskId}
				So(runningTask.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 2
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)
		})
	})

}
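Putting the inline estimates together (e.g. "duration estimate: 11, max new hosts allowed: 15, 'one-host-per-scheduled-task': 3"), the duration based allocator appears to return the minimum of three quantities: the duration based estimate derived from total scheduled plus remaining running work against MAX_DURATION_PER_DISTRO, the pool slack (PoolSize minus existing hosts), and the one-host-per-scheduled-task cap (queued tasks minus free hosts). Every expectation in this test is consistent with that minimum, though the exact formula is inferred from the expectations rather than quoted from the allocator.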