Example #1
func TestPatchPluginAPI(t *testing.T) {
	testConfig := evergreen.TestConfig()
	cwd := testutil.GetDirectoryOfFile()
	Convey("With a running api server and installed plugin", t, func() {
		registry := plugin.NewSimpleRegistry()
		gitPlugin := &GitPlugin{}
		err := registry.Register(gitPlugin)
		testutil.HandleTestingErr(err, t, "Couldn't register patch plugin")
		server, err := service.CreateTestServer(testConfig, nil, plugin.APIPlugins, false)
		testutil.HandleTestingErr(err, t, "Couldn't set up testing server")
		taskConfig, _ := plugintest.CreateTestConfig(filepath.Join(cwd, "testdata", "plugin_patch.yml"), t)
		testCommand := GitGetProjectCommand{Directory: "dir"}
		_, _, err = plugintest.SetupAPITestData("testTask", filepath.Join(cwd, "testdata", "testmodule.patch"), t)
		testutil.HandleTestingErr(err, t, "Couldn't set up test documents")
		testTask, err := task.FindOne(task.ById("testTaskId"))
		testutil.HandleTestingErr(err, t, "Couldn't set up test patch task")

		sliceAppender := &evergreen.SliceAppender{[]*slogger.Log{}}
		logger := agentutil.NewTestLogger(sliceAppender)

		Convey("calls to existing tasks with patches should succeed", func() {
			httpCom := plugintest.TestAgentCommunicator(testTask.Id, testTask.Secret, server.URL)
			pluginCom := &comm.TaskJSONCommunicator{gitPlugin.Name(), httpCom}
			patch, err := testCommand.GetPatch(taskConfig, pluginCom, logger)
			So(err, ShouldBeNil)
			So(patch, ShouldNotBeNil)
			testutil.HandleTestingErr(db.Clear(version.Collection), t,
				"unable to clear versions collection")
		})
		Convey("calls to non-existing tasks should fail", func() {
			v := version.Version{Id: ""}
			testutil.HandleTestingErr(v.Insert(), t, "Couldn't insert dummy version")
			httpCom := plugintest.TestAgentCommunicator("BAD_TASK_ID", "", server.URL)
			pluginCom := &comm.TaskJSONCommunicator{gitPlugin.Name(), httpCom}
			patch, err := testCommand.GetPatch(taskConfig, pluginCom, logger)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "not found")
			So(patch, ShouldBeNil)
			testutil.HandleTestingErr(db.Clear(version.Collection), t,
				"unable to clear versions collection")
		})
		Convey("calls to existing tasks without patches should fail", func() {
			noPatchTask := task.Task{Id: "noPatchTask", BuildId: "a"}
			testutil.HandleTestingErr(noPatchTask.Insert(), t, "Couldn't insert patch task")
			noPatchVersion := version.Version{Id: "noPatchVersion", BuildIds: []string{"a"}}
			testutil.HandleTestingErr(noPatchVersion.Insert(), t, "Couldn't insert patch version")
			v := version.Version{Id: ""}
			testutil.HandleTestingErr(v.Insert(), t, "Couldn't insert dummy version")
			httpCom := plugintest.TestAgentCommunicator(noPatchTask.Id, "", server.URL)
			pluginCom := &comm.TaskJSONCommunicator{gitPlugin.Name(), httpCom}
			patch, err := testCommand.GetPatch(taskConfig, pluginCom, logger)
			So(err, ShouldNotBeNil)
			So(err.Error(), ShouldContainSubstring, "no patch found for task")
			So(patch, ShouldBeNil)
			testutil.HandleTestingErr(db.Clear(version.Collection), t,
				"unable to clear versions collection")
		})

	})
}
Example #2
func getTaskHistory(t *task.Task, w http.ResponseWriter, r *http.Request) {
	var t2 *task.Task = t
	var err error
	if t.Requester == evergreen.PatchVersionRequester {
		t2, err = t.FindTaskOnBaseCommit()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		t.RevisionOrderNumber = t2.RevisionOrderNumber
	}

	before := []TaskJSON{}
	jsonQuery := db.Query(bson.M{
		ProjectIdKey:           t.Project,
		VariantKey:             t.BuildVariant,
		RevisionOrderNumberKey: bson.M{"$lte": t.RevisionOrderNumber},
		TaskNameKey:            t.DisplayName,
		IsPatchKey:             false,
		NameKey:                mux.Vars(r)["name"]})
	jsonQuery = jsonQuery.Sort([]string{"-order"}).Limit(100)
	err = db.FindAllQ(collection, jsonQuery, &before)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	//reverse order of "before" because we had to sort it backwards to apply the limit correctly:
	for i, j := 0, len(before)-1; i < j; i, j = i+1, j-1 {
		before[i], before[j] = before[j], before[i]
	}

	after := []TaskJSON{}
	jsonAfterQuery := db.Query(bson.M{
		ProjectIdKey:           t.Project,
		VariantKey:             t.BuildVariant,
		RevisionOrderNumberKey: bson.M{"$gt": t.RevisionOrderNumber},
		TaskNameKey:            t.DisplayName,
		IsPatchKey:             false,
		NameKey:                mux.Vars(r)["name"]}).Sort([]string{"order"}).Limit(100)
	err = db.FindAllQ(collection, jsonAfterQuery, &after)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	//concatenate before + after
	before = append(before, after...)

	// if our task was a patch, replace the base commit's info in the history with the patch
	if t.Requester == evergreen.PatchVersionRequester {
		before, err = fixPatchInHistory(t.Id, t2, before)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	plugin.WriteJSON(w, http.StatusOK, before)
}
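getTaskHistory reads the "name" route variable through mux.Vars, so it is evidently registered on a gorilla/mux route and wrapped by code that loads the task first. Below is a minimal wiring sketch reusing the imports of the example above; the withTask helper and the route path are hypothetical stand-ins, not Evergreen's actual registration code:

// Hypothetical wiring for getTaskHistory; withTask and the path are assumptions.
func withTask(h func(*task.Task, http.ResponseWriter, *http.Request)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		t, err := task.FindOne(task.ById(mux.Vars(r)["task_id"]))
		if err != nil || t == nil {
			http.Error(w, "task not found", http.StatusNotFound)
			return
		}
		h(t, w, r)
	}
}

func registerTaskHistoryRoute(r *mux.Router) {
	// {name} is the variable getTaskHistory reads via mux.Vars(r)["name"].
	r.HandleFunc("/task_json/{task_id}/history/{name}", withTask(getTaskHistory))
}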
Example #3
func MarkTaskUndispatched(t *task.Task) error {
	// record the task as undispatched on the host
	if err := t.MarkAsUndispatched(); err != nil {
		return err
	}
	// the task was successfully marked undispatched, log the event
	event.LogTaskUndispatched(t.Id, t.Id)

	// update the cached version of the task in its related build document
	if err := build.SetCachedTaskUndispatched(t.BuildId, t.Id); err != nil {
		return err
	}
	return nil
}
Example #4
func TestMarkStart(t *testing.T) {
	Convey("With a task, build and version", t, func() {
		testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection, version.Collection), t,
			"Error clearing task and build collections")
		displayName := "testName"
		b := &build.Build{
			Id:      "buildtest",
			Status:  evergreen.BuildCreated,
			Version: "abc",
		}
		v := &version.Version{
			Id:     b.Version,
			Status: evergreen.VersionCreated,
		}
		testTask := task.Task{
			Id:          "testTask",
			DisplayName: displayName,
			Activated:   true,
			BuildId:     b.Id,
			Project:     "sample",
			Status:      evergreen.TaskUndispatched,
			Version:     b.Version,
		}

		b.Tasks = []build.TaskCache{
			{
				Id:     testTask.Id,
				Status: evergreen.TaskUndispatched,
			},
		}
		So(b.Insert(), ShouldBeNil)
		So(testTask.Insert(), ShouldBeNil)
		So(v.Insert(), ShouldBeNil)
		Convey("when calling MarkStart, the task, version and build should be updated", func() {
			So(MarkStart(testTask.Id), ShouldBeNil)
			testTask, err := task.FindOne(task.ById(testTask.Id))
			So(err, ShouldBeNil)
			So(testTask.Status, ShouldEqual, evergreen.TaskStarted)
			b, err := build.FindOne(build.ById(b.Id))
			So(err, ShouldBeNil)
			So(b.Status, ShouldEqual, evergreen.BuildStarted)
			So(b.Tasks, ShouldNotBeNil)
			So(len(b.Tasks), ShouldEqual, 1)
			So(b.Tasks[0].Status, ShouldEqual, evergreen.TaskStarted)
			v, err := version.FindOne(version.ById(v.Id))
			So(err, ShouldBeNil)
			So(v.Status, ShouldEqual, evergreen.VersionStarted)
		})
	})
}
Example #5
func MarkTaskDispatched(t *task.Task, hostId, distroId string) error {
	// record that the task was dispatched on the host
	if err := t.MarkAsDispatched(hostId, distroId, time.Now()); err != nil {
		return fmt.Errorf("error marking task %v as dispatched "+
			"on host %v: %v", t.Id, hostId, err)
	}
	// the task was successfully dispatched, log the event
	event.LogTaskDispatched(t.Id, hostId)

	// update the cached version of the task in its related build document
	if err := build.SetCachedTaskDispatched(t.BuildId, t.Id); err != nil {
		return fmt.Errorf("error updating task cache in build %v: %v", t.BuildId, err)
	}
	return nil
}
Example #6
// doStepback activates the task's previous task to pinpoint a regression. If the task
// has no prior successful run, it does nothing and returns nil.
func doStepback(t *task.Task, caller string, detail *apimodels.TaskEndDetail, deactivatePrevious bool) error {
	//See if there is a prior success for this particular task.
	//If there isn't, we should not activate the previous task because
	//it could trigger stepping backwards ad infinitum.
	prevTask, err := t.PreviousCompletedTask(t.Project, []string{evergreen.TaskSucceeded})
	if err != nil {
		return fmt.Errorf("Error locating previous successful task: %v", err)
	}
	if prevTask == nil {
		return nil
	}

	// activate the previous task to pinpoint regression
	return ActivatePreviousTask(t.Id, caller)
}
Example #7
func TestMarkEnd(t *testing.T) {
	Convey("With a task and a build", t, func() {
		testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection, version.Collection), t,
			"Error clearing task and build collections")
		displayName := "testName"
		userName := "******"
		b := &build.Build{
			Id:      "buildtest",
			Status:  evergreen.BuildStarted,
			Version: "abc",
		}
		p := &Project{
			Identifier: "sample",
		}
		v := &version.Version{
			Id:     b.Version,
			Status: evergreen.VersionStarted,
		}
		testTask := task.Task{
			Id:          "testone",
			DisplayName: displayName,
			Activated:   true,
			BuildId:     b.Id,
			Project:     "sample",
			Status:      evergreen.TaskStarted,
		}

		b.Tasks = []build.TaskCache{
			{
				Id:     testTask.Id,
				Status: evergreen.TaskStarted,
			},
		}
		So(b.Insert(), ShouldBeNil)
		So(testTask.Insert(), ShouldBeNil)
		So(v.Insert(), ShouldBeNil)
		Convey("task, build and version status will be updated properly", func() {
			details := apimodels.TaskEndDetail{
				Status: evergreen.TaskFailed,
			}
			So(MarkEnd(testTask.Id, userName, time.Now(), &details, p, false), ShouldBeNil)

		})
	})
}
Example #8
func TestAverageStatistics(t *testing.T) {
	testutil.HandleTestingErr(db.ClearCollections(task.Collection), t, "couldn't reset task collection")
	Convey("With a distro sampleDistro inserted", t, func() {
		d := distro.Distro{
			Id: "sampleDistro",
		}
		err := d.Insert()
		So(err, ShouldBeNil)
		distroId := d.Id
		Convey("With a set of tasks that have different scheduled -> start times over a given time period", func() {
			now := time.Now()
			bucketSize := 10 * time.Second
			numberBuckets := 3

			task1 := task.Task{Id: "task1", ScheduledTime: now,
				StartTime: now.Add(time.Duration(5) * time.Second), Status: evergreen.TaskStarted, DistroId: distroId}

			So(task1.Insert(), ShouldBeNil)

			task2 := task.Task{Id: "task2", ScheduledTime: now,
				StartTime: now.Add(time.Duration(20) * time.Second), Status: evergreen.TaskStarted, DistroId: distroId}

			So(task2.Insert(), ShouldBeNil)

			task3 := task.Task{Id: "task3", ScheduledTime: now.Add(time.Duration(10) * time.Second),
				StartTime: now.Add(time.Duration(20) * time.Second), Status: evergreen.TaskStarted, DistroId: distroId}
			So(task3.Insert(), ShouldBeNil)

			frameBounds := FrameBounds{
				StartTime:     now,
				EndTime:       now.Add(time.Duration(numberBuckets) * bucketSize),
				NumberBuckets: numberBuckets,
				BucketSize:    bucketSize,
			}
			avgBuckets, err := AverageStatistics(distroId, frameBounds)
			So(err, ShouldBeNil)
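			// Expected wait times (StartTime - ScheduledTime), presumably bucketed by start time:
			//   task1 waits 5s and starts in bucket 0  -> bucket 0 average = 5s
			//   task2 waits 20s and starts in bucket 2
			//   task3 waits 10s and starts in bucket 2 -> bucket 2 average = (20s+10s)/2 = 15s
			//   no task starts in bucket 1             -> bucket 1 average = 0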

			So(avgBuckets[0].AverageTime, ShouldEqual, 5*time.Second)
			So(avgBuckets[1].AverageTime, ShouldEqual, 0)
			So(avgBuckets[2].AverageTime, ShouldEqual, 15*time.Second)

			Convey("if the distro id given does not exist, it shoud return an empty list", func() {
				_, err := AverageStatistics("noId", frameBounds)
				So(err, ShouldNotBeNil)
			})
		})
	})

}
Example #9
// updateTaskCost determines a task's cost based on the host it ran on. Hosts that
// are unable to calculate their own costs will not set a task's Cost field. Errors
// are logged but not returned, since any number of API failures could happen and
// we shouldn't sacrifice a task's status for them.
func (as *APIServer) updateTaskCost(t *task.Task, h *host.Host, finishTime time.Time) {
	manager, err := providers.GetCloudManager(h.Provider, &as.Settings)
	if err != nil {
		evergreen.Logger.Logf(slogger.ERROR,
			"Error loading provider for host %v cost calculation: %v ", t.HostId, err)
		return
	}
	if calc, ok := manager.(cloud.CloudCostCalculator); ok {
		evergreen.Logger.Logf(slogger.INFO, "Calculating cost for task %v", t.Id)
		cost, err := calc.CostForDuration(h, t.StartTime, finishTime)
		if err != nil {
			evergreen.Logger.Logf(slogger.ERROR,
				"Error calculating cost for task %v: %v ", t.Id, err)
			return
		}
		if err := t.SetCost(cost); err != nil {
			evergreen.Logger.Logf(slogger.ERROR,
				"Error updating cost for task %v: %v ", t.Id, err)
			return
		}
	}
}
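The type assertion on manager treats cost calculation as an optional capability that only some cloud managers implement. Below is a self-contained sketch of the same pattern with toy types (not Evergreen's actual cloud package; the CostForDuration signature is an assumption modeled on the call above):

package main

import (
	"fmt"
	"time"
)

// CloudManager is the capability every provider exposes.
type CloudManager interface {
	Name() string
}

// CostCalculator is the optional extra capability, loosely mirroring
// cloud.CloudCostCalculator's CostForDuration (signature assumed).
type CostCalculator interface {
	CostForDuration(start, end time.Time) (float64, error)
}

type staticManager struct{} // cannot price its hosts

func (staticManager) Name() string { return "static" }

type ec2Manager struct{ hourlyRate float64 } // can price its hosts

func (ec2Manager) Name() string { return "ec2" }

func (m ec2Manager) CostForDuration(start, end time.Time) (float64, error) {
	return end.Sub(start).Hours() * m.hourlyRate, nil
}

// reportCost mirrors updateTaskCost's shape: only managers that also implement
// the optional interface produce a cost; everyone else is skipped.
func reportCost(m CloudManager, start, end time.Time) {
	if calc, ok := m.(CostCalculator); ok {
		if cost, err := calc.CostForDuration(start, end); err == nil {
			fmt.Printf("%s: $%.2f\n", m.Name(), cost)
		}
		return
	}
	fmt.Printf("%s: no cost calculator\n", m.Name())
}

func main() {
	end := time.Now()
	start := end.Add(-2 * time.Hour)
	reportCost(ec2Manager{hourlyRate: 0.25}, start, end) // ec2: $0.50
	reportCost(staticManager{}, start, end)              // static: no cost calculator
}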
Example #10
func TestUpdateBuildStatusForTask(t *testing.T) {
	Convey("With two tasks and a build", t, func() {
		testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection, version.Collection), t,
			"Error clearing task and build collections")
		displayName := "testName"
		b := &build.Build{
			Id:      "buildtest",
			Status:  evergreen.BuildStarted,
			Version: "abc",
		}
		v := &version.Version{
			Id:     b.Version,
			Status: evergreen.VersionStarted,
		}
		testTask := task.Task{
			Id:          "testone",
			DisplayName: displayName,
			Activated:   false,
			BuildId:     b.Id,
			Project:     "sample",
			Status:      evergreen.TaskFailed,
		}
		anotherTask := task.Task{
			Id:          "two",
			DisplayName: displayName,
			Activated:   true,
			BuildId:     b.Id,
			Project:     "sample",
			Status:      evergreen.TaskFailed,
		}

		b.Tasks = []build.TaskCache{
			{
				Id:     testTask.Id,
				Status: evergreen.TaskSucceeded,
			},
			{
				Id:     anotherTask.Id,
				Status: evergreen.TaskFailed,
			},
		}
		So(b.Insert(), ShouldBeNil)
		So(testTask.Insert(), ShouldBeNil)
		So(anotherTask.Insert(), ShouldBeNil)
		So(v.Insert(), ShouldBeNil)
		Convey("updating the build for a task should update the build's status and the version's status", func() {
			So(UpdateBuildAndVersionStatusForTask(testTask.Id), ShouldBeNil)
			b, err := build.FindOne(build.ById(b.Id))
			So(err, ShouldBeNil)
			So(b.Status, ShouldEqual, evergreen.BuildFailed)
			v, err := version.FindOne(version.ById(v.Id))
			So(err, ShouldBeNil)
			So(v.Status, ShouldEqual, evergreen.VersionFailed)

		})

	})
}
Example #11
func TestAbortTask(t *testing.T) {
	Convey("With a task and a build", t, func() {
		testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection, version.Collection), t,
			"Error clearing task, build, and version collections")
		displayName := "testName"
		userName := "******"
		b := &build.Build{
			Id: "buildtest",
		}
		testTask := task.Task{
			Id:          "testone",
			DisplayName: displayName,
			Activated:   false,
			BuildId:     b.Id,
			Status:      evergreen.TaskStarted,
		}
		finishedTask := task.Task{
			Id:          "another",
			DisplayName: displayName,
			Activated:   false,
			BuildId:     b.Id,
			Status:      evergreen.TaskFailed,
		}
		b.Tasks = []build.TaskCache{
			{
				Id: testTask.Id,
			},
			{
				Id: finishedTask.Id,
			},
		}
		So(b.Insert(), ShouldBeNil)
		So(testTask.Insert(), ShouldBeNil)
		So(finishedTask.Insert(), ShouldBeNil)
		Convey("with a task that has started, aborting a task should work", func() {
			So(AbortTask(testTask.Id, userName), ShouldBeNil)
			testTask, err := task.FindOne(task.ById(testTask.Id))
			So(err, ShouldBeNil)
			So(testTask.Activated, ShouldEqual, false)
			So(testTask.Aborted, ShouldEqual, true)
		})
		Convey("a task that is finished should error when aborting", func() {
			So(AbortTask(finishedTask.Id, userName), ShouldNotBeNil)
		})
	})

}
Example #12
func TestDurationBasedHostAllocator(t *testing.T) {
	var taskIds []string
	var runningTaskIds []string
	var hostIds []string
	var dist distro.Distro
	var testTaskDuration time.Duration
	var taskDurations model.ProjectTaskDurations
	var durationBasedHostAllocator *DurationBasedHostAllocator

	Convey("With a duration based host allocator,"+
		" determining the number of new hosts to spin up", t, func() {

		durationBasedHostAllocator = &DurationBasedHostAllocator{}
		taskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		runningTaskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		hostIds = []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"}
		dist = distro.Distro{Provider: "ec2"}
		testTaskDuration = time.Duration(2) * time.Minute
		taskDurations = model.ProjectTaskDurations{
			TaskDurationByProject: map[string]*model.BuildVariantTaskDurations{
				"": &model.BuildVariantTaskDurations{
					TaskDurationByBuildVariant: map[string]*model.TaskDurations{
						"": &model.TaskDurations{
							TaskDurationByDisplayName: map[string]time.Duration{
								"": testTaskDuration,
							},
						},
					},
				},
			},
		}

		So(db.Clear(task.Collection), ShouldBeNil)

		Convey("if there are no tasks to run, no new hosts should be needed",
			func() {
				hosts := []host.Host{
					host.Host{Id: hostIds[0]},
					host.Host{Id: hostIds[1]},
					host.Host{Id: hostIds[2]},
				}
				dist.PoolSize = len(hosts) + 5

				hostAllocatorData := &HostAllocatorData{
					existingDistroHosts: map[string][]host.Host{
						"": hosts,
					},
					distros: map[string]distro.Distro{
						"": dist,
					},
				}

				tasksAccountedFor := make(map[string]bool)
				distroScheduleData := make(map[string]DistroScheduleData)

				newHosts, err := durationBasedHostAllocator.
					numNewHostsForDistro(hostAllocatorData, dist,
						tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
				So(err, ShouldBeNil)
				So(newHosts, ShouldEqual, 0)
			})

		Convey("if the number of existing hosts equals the max hosts, no new"+
			" hosts can be spawned", func() {
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
				model.TaskQueueItem{Id: taskIds[2]},
				model.TaskQueueItem{Id: taskIds[3]},
			}
			dist.PoolSize = 0

			hostAllocatorData := &HostAllocatorData{
				existingDistroHosts: map[string][]host.Host{},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
			}
			dist.PoolSize = len(hosts)

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of existing hosts exceeds the max hosts, no new"+
			" hosts can be spawned", func() {

			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
				model.TaskQueueItem{Id: taskIds[2]},
				model.TaskQueueItem{Id: taskIds[3]},
			}
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1]},
			}
			dist.PoolSize = 1

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run is less than the number of free"+
			" hosts, no new hosts are needed", func() {
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
			}
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1]},
				host.Host{Id: hostIds[2]},
			}
			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)

		})

		Convey("if the number of tasks to run is equal to the number of free"+
			" hosts, no new hosts are needed", func() {
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
			}
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0]},
				model.TaskQueueItem{Id: taskIds[1]},
			}

			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run exceeds the number of free"+
			" hosts, new hosts are needed up to the maximum allowed for the"+
			" dist", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 9

			// In this test:
			//
			// 1. Total distro duration is:
			//		(len(taskQueueItems) * expDur ) +
			//		time left on hosts with running tasks
			// which comes out to:
			//		(5 * 200 * 60) + (60 * 3) ~ 60180 (in seconds)
			//
			// 2. MAX_DURATION_PER_DISTRO = 7200 (2 hours)
			//
			// 3. We have 5 existing hosts
			//
			// Thus, our duration based host allocator will always return 8 -
			// which is greater than what distro.PoolSize-len(existingDistroHosts)
			// will ever return in this situation.
			//
			// Hence, we should always expect to use that minimum.
			//
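			// (Note: the assertion below expects 3 new hosts rather than
			// distro.PoolSize-len(existingDistroHosts) = 9-5 = 4, presumably
			// because the allocator is also capped by the "one host per
			// scheduled task" bound spelled out in the later cases:
			// 5 queued tasks - 2 free hosts = 3.)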
			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// total running duration here is ~60180 seconds, per the breakdown above
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)

			dist.PoolSize = 8
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
			dist.PoolSize = 7
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)

			dist.PoolSize = 6

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor = make(map[string]bool)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 1)
		})

		Convey("if the distro cannot be used to spawn hosts, then no new "+
			"hosts can be spawned", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1]},
				host.Host{Id: hostIds[2]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4]},
			}

			dist.PoolSize = 20
			dist.Provider = "static"

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, the estimate of new "+
			"\nhosts should be used", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, but greater than "+
			"\nthe difference between the number of runnable tasks and the "+
			"\nnumber of free hosts, that difference should be used", func() {
			expDur := time.Duration(400) * time.Minute
			// all runnable tasks have an expected duration of expDur (400mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 11
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than both the maximum "+
			"\nnumber of new hosts allowed for this distro, and the "+
			"\ndifference between the number of runnable tasks and the "+
			"\nnumber of free hosts, then the duration based estimate should "+
			"be used", func() {
			expDur := time.Duration(180) * time.Minute
			// all runnable tasks have an expected duration of expDur (180mins)
			taskQueueItems := []model.TaskQueueItem{
				model.TaskQueueItem{Id: taskIds[0], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[1], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[2], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[3], ExpectedDuration: expDur},
				model.TaskQueueItem{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				host.Host{Id: hostIds[0]},
				host.Host{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				host.Host{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				host.Host{Id: hostIds[3]},
				host.Host{Id: hostIds[4], RunningTask: runningTaskIds[2]},
				host.Host{Id: hostIds[5]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 2
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)
		})
	})

}
Example #13
func TestAttachFilesApi(t *testing.T) {
	testConfig := evergreen.TestConfig()
	Convey("With a running api server and installed api hook", t, func() {
		reset(t)
		taskConfig, _ := plugintest.CreateTestConfig("testdata/plugin_attach_files.yml", t)
		registry := plugin.NewSimpleRegistry()
		attachPlugin := &AttachPlugin{}
		err := registry.Register(attachPlugin)
		testutil.HandleTestingErr(err, t, "Couldn't register patch plugin")
		server, err := apiserver.CreateTestServer(testConfig, nil, plugin.APIPlugins, true)
		testutil.HandleTestingErr(err, t, "Couldn't set up testing server")
		sliceAppender := &evergreen.SliceAppender{[]*slogger.Log{}}
		logger := agent.NewTestLogger(sliceAppender)

		testTask := task.Task{Id: "test1", DisplayName: "TASK!!!", BuildId: "build1"}
		testutil.HandleTestingErr(testTask.Insert(), t, "couldn't insert test task")
		taskConfig.Task = &testTask

		httpCom := plugintest.TestAgentCommunicator(testTask.Id, testTask.Secret, server.URL)
		pluginCom := &agent.TaskJSONCommunicator{attachPlugin.Name(), httpCom}

		Convey("using a well-formed api call", func() {
			testCommand := AttachTaskFilesCommand{
				artifact.Params{
					"upload":   "gopher://evergreen.equipment",
					"coverage": "http://www.blankets.com",
				},
			}
			err := testCommand.SendTaskFiles(taskConfig, logger, pluginCom)
			So(err, ShouldBeNil)

			Convey("the given values should be written to the db", func() {
				entry, err := artifact.FindOne(artifact.ByTaskId(testTask.Id))
				So(err, ShouldBeNil)
				So(entry, ShouldNotBeNil)
				So(entry.TaskId, ShouldEqual, testTask.Id)
				So(entry.TaskDisplayName, ShouldEqual, testTask.DisplayName)
				So(entry.BuildId, ShouldEqual, testTask.BuildId)
				So(len(entry.Files), ShouldEqual, 2)
			})

			Convey("with a second api call", func() {
				testCommand := AttachTaskFilesCommand{
					artifact.Params{
						"3x5":      "15",
						"$b.o.o.l": "{\"json\":false}",
						"coverage": "http://tumblr.com/tagged/tarp",
					},
				}
				err := testCommand.SendTaskFiles(taskConfig, logger, pluginCom)
				So(err, ShouldBeNil)
				entry, err := artifact.FindOne(artifact.ByTaskId(testTask.Id))
				So(err, ShouldBeNil)
				So(entry, ShouldNotBeNil)

				Convey("new values should be added", func() {
					Convey("and old values should still remain", func() {
						So(len(entry.Files), ShouldEqual, 5)
					})
				})
			})
		})

		Convey("but the following malformed calls should fail:", func() {
			Convey("- calls with garbage content", func() {
				resp, err := pluginCom.TaskPostJSON(
					AttachTaskFilesAPIEndpoint,
					"I am not a proper post request for this endpoint",
				)
				So(err, ShouldBeNil)
				So(resp, ShouldNotBeNil)
				So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
			})

			Convey("- calls with nested subdocs", func() {
				resp, err := pluginCom.TaskPostJSON(
					AttachTaskFilesAPIEndpoint,
					map[string]interface{}{
						"cool": map[string]interface{}{
							"this_is": "a",
							"broken":  "test",
						},
					})
				So(err, ShouldBeNil)
				So(resp, ShouldNotBeNil)
				So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
			})
		})
	})
}
Example #14
func TestTryResetTask(t *testing.T) {
	Convey("With a task, a build, version and a project", t, func() {
		Convey("resetting a task without a max number of executions", func() {
			testutil.HandleTestingErr(db.ClearCollections(task.Collection, task.OldCollection, build.Collection, version.Collection), t,
				"Error clearing task and build collections")
			displayName := "testName"
			userName := "******"
			b := &build.Build{
				Id:      "buildtest",
				Status:  evergreen.BuildStarted,
				Version: "abc",
			}
			v := &version.Version{
				Id:     b.Version,
				Status: evergreen.VersionStarted,
			}
			testTask := task.Task{
				Id:          "testone",
				DisplayName: displayName,
				Activated:   false,
				BuildId:     b.Id,
				Execution:   1,
				Project:     "sample",
				Status:      evergreen.TaskSucceeded,
			}
			p := &Project{
				Identifier: "sample",
			}
			detail := &apimodels.TaskEndDetail{
				Status: evergreen.TaskFailed,
			}

			b.Tasks = []build.TaskCache{
				{
					Id: testTask.Id,
				},
			}
			So(b.Insert(), ShouldBeNil)
			So(testTask.Insert(), ShouldBeNil)
			So(v.Insert(), ShouldBeNil)
			Convey("should reset and add a task to the old tasks collection", func() {
				So(TryResetTask(testTask.Id, userName, "", p, detail), ShouldBeNil)
				testTask, err := task.FindOne(task.ById(testTask.Id))
				So(err, ShouldBeNil)
				So(testTask.Details, ShouldResemble, apimodels.TaskEndDetail{})
				So(testTask.Status, ShouldEqual, evergreen.TaskUndispatched)
				So(testTask.FinishTime, ShouldResemble, util.ZeroTime)
				oldTaskId := fmt.Sprintf("%v_%v", testTask.Id, 1)
				fmt.Println(oldTaskId)
				oldTask, err := task.FindOneOld(task.ById(oldTaskId))
				So(err, ShouldBeNil)
				So(oldTask, ShouldNotBeNil)
				So(oldTask.Execution, ShouldEqual, 1)
				So(oldTask.Details, ShouldResemble, *detail)
				So(oldTask.FinishTime, ShouldNotResemble, util.ZeroTime)
			})

		})
		Convey("resetting a task with a max number of excutions", func() {
			testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection, version.Collection), t,
				"Error clearing task and build collections")
			displayName := "testName"
			userName := "******"
			b := &build.Build{
				Id:      "buildtest",
				Status:  evergreen.BuildStarted,
				Version: "abc",
			}
			v := &version.Version{
				Id:     b.Version,
				Status: evergreen.VersionStarted,
			}
			testTask := task.Task{
				Id:          "testone",
				DisplayName: displayName,
				Activated:   false,
				BuildId:     b.Id,
				Execution:   evergreen.MaxTaskExecution,
				Project:     "sample",
				Status:      evergreen.TaskSucceeded,
			}
			p := &Project{
				Identifier: "sample",
			}
			detail := &apimodels.TaskEndDetail{
				Status: evergreen.TaskFailed,
			}
			anotherTask := task.Task{
				Id:          "two",
				DisplayName: displayName,
				Activated:   false,
				BuildId:     b.Id,
				Execution:   evergreen.MaxTaskExecution,
				Project:     "sample",
				Status:      evergreen.TaskSucceeded,
			}
			b.Tasks = []build.TaskCache{
				{
					Id: testTask.Id,
				},
				{
					Id: anotherTask.Id,
				},
			}
			So(b.Insert(), ShouldBeNil)
			So(testTask.Insert(), ShouldBeNil)
			So(v.Insert(), ShouldBeNil)
			So(anotherTask.Insert(), ShouldBeNil)

			Convey("should not reset if an origin other than the ui package tries to reset", func() {
				So(TryResetTask(testTask.Id, userName, "", p, detail), ShouldBeNil)
				testTask, err := task.FindOne(task.ById(testTask.Id))
				So(err, ShouldBeNil)
				So(testTask.Details, ShouldResemble, *detail)
				So(testTask.Status, ShouldEqual, detail.Status)
				So(testTask.FinishTime, ShouldNotResemble, util.ZeroTime)
			})
			Convey("should reset and use detail information if the UI package passes in a detail ", func() {
				So(TryResetTask(anotherTask.Id, userName, evergreen.UIPackage, p, detail), ShouldBeNil)
				a, err := task.FindOne(task.ById(anotherTask.Id))
				So(err, ShouldBeNil)
				So(a.Details, ShouldResemble, apimodels.TaskEndDetail{})
				So(a.Status, ShouldEqual, evergreen.TaskUndispatched)
				So(a.FinishTime, ShouldResemble, util.ZeroTime)
			})
		})
	})
}
Example #15
func TestSetActiveState(t *testing.T) {
	Convey("With one task with no dependencies", t, func() {
		testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection), t,
			"Error clearing task and build collections")
		displayName := "testName"
		userName := "******"
		testTime := time.Now()
		b := &build.Build{
			Id: "buildtest",
		}
		testTask := task.Task{
			Id:            "testone",
			DisplayName:   displayName,
			ScheduledTime: testTime,
			Activated:     false,
			BuildId:       b.Id,
		}
		b.Tasks = []build.TaskCache{{Id: testTask.Id}}
		So(b.Insert(), ShouldBeNil)
		So(testTask.Insert(), ShouldBeNil)

		Convey("activating the task should set the task state to active", func() {
			So(SetActiveState(testTask.Id, "randomUser", true), ShouldBeNil)
			testTask, err := task.FindOne(task.ById(testTask.Id))
			So(err, ShouldBeNil)
			So(testTask.Activated, ShouldBeTrue)
			So(testTask.ScheduledTime, ShouldHappenWithin, oneMs, testTime)

			Convey("deactivating an active task as a normal user should deactivate the task", func() {
				So(SetActiveState(testTask.Id, userName, false), ShouldBeNil)
				testTask, err = task.FindOne(task.ById(testTask.Id))
				So(testTask.Activated, ShouldBeFalse)
			})
		})
		Convey("when deactivating an active task as evergreen", func() {
			Convey("if the task is activated by evergreen, the task should deactivate", func() {
				So(SetActiveState(testTask.Id, evergreen.DefaultTaskActivator, true), ShouldBeNil)
				testTask, err := task.FindOne(task.ById(testTask.Id))
				So(err, ShouldBeNil)
				So(testTask.ActivatedBy, ShouldEqual, evergreen.DefaultTaskActivator)
				So(SetActiveState(testTask.Id, evergreen.DefaultTaskActivator, false), ShouldBeNil)
				testTask, err = task.FindOne(task.ById(testTask.Id))
				So(err, ShouldBeNil)
				So(testTask.Activated, ShouldEqual, false)
			})
			Convey("if the task is not activated by evergreen, the task should not deactivate", func() {
				So(SetActiveState(testTask.Id, userName, true), ShouldBeNil)
				testTask, err := task.FindOne(task.ById(testTask.Id))
				So(err, ShouldBeNil)
				So(testTask.ActivatedBy, ShouldEqual, userName)
				So(SetActiveState(testTask.Id, evergreen.DefaultTaskActivator, false), ShouldBeNil)
				testTask, err = task.FindOne(task.ById(testTask.Id))
				So(err, ShouldBeNil)
				So(testTask.Activated, ShouldEqual, true)
			})

		})
	})
	Convey("With one task has tasks it depends on", t, func() {
		testutil.HandleTestingErr(db.ClearCollections(task.Collection, build.Collection), t,
			"Error clearing task and build collections")
		displayName := "testName"
		userName := "******"
		testTime := time.Now()
		taskId := "t1"
		buildId := "b1"

		dep1 := &task.Task{
			Id:            "t2",
			ScheduledTime: testTime,
			BuildId:       buildId,
		}
		dep2 := &task.Task{
			Id:            "t3",
			ScheduledTime: testTime,
			BuildId:       buildId,
		}
		So(dep1.Insert(), ShouldBeNil)
		So(dep2.Insert(), ShouldBeNil)

		testTask := task.Task{
			Id:          taskId,
			DisplayName: displayName,
			Activated:   false,
			BuildId:     buildId,
			DependsOn: []task.Dependency{
				{"t2", evergreen.TaskSucceeded},
				{"t3", evergreen.TaskSucceeded},
			},
		}

		b := &build.Build{
			Id:    buildId,
			Tasks: []build.TaskCache{{Id: taskId}, {Id: "t2"}, {Id: "t3"}},
		}
		So(b.Insert(), ShouldBeNil)
		So(testTask.Insert(), ShouldBeNil)

		Convey("activating the task should activate the tasks it depends on", func() {
			So(SetActiveState(testTask.Id, userName, true), ShouldBeNil)
			depTask, err := task.FindOne(task.ById(dep1.Id))
			So(err, ShouldBeNil)
			So(depTask.Activated, ShouldBeTrue)

			depTask, err = task.FindOne(task.ById(dep2.Id))
			So(err, ShouldBeNil)
			So(depTask.Activated, ShouldBeTrue)

			Convey("deactivating the task should not deactive the tasks it depends on", func() {
				So(SetActiveState(testTask.Id, userName, false), ShouldBeNil)
				depTask, err := task.FindOne(task.ById(depTask.Id))
				So(err, ShouldBeNil)
				So(depTask.Activated, ShouldBeTrue)
			})
		})
	})
}
Example #16
func TestCreateTaskBuckets(t *testing.T) {
	testutil.HandleTestingErr(db.ClearCollections(task.Collection), t, "couldn't reset task collection")
	Convey("With a starting time and a minute bucket size and inserting tasks with different start and finish", t, func() {
		now := time.Now()
		bucketSize := time.Duration(10) * time.Second

		// -20 -> 20
		beforeStartHost := task.Task{Id: "beforeStartTask", StartTime: now.Add(time.Duration(-20) * time.Second), FinishTime: now.Add(time.Duration(20) * time.Second), Status: evergreen.TaskSucceeded}
		So(beforeStartHost.Insert(), ShouldBeNil)

		// 80 -> 120
		afterEndHost := task.Task{Id: "afterStartTask", StartTime: now.Add(time.Duration(80) * time.Second), FinishTime: now.Add(time.Duration(120) * time.Second), Status: evergreen.TaskFailed}
		So(afterEndHost.Insert(), ShouldBeNil)

		// 20 -> 40: shouldn't be added
		h1 := task.Task{Id: "h1", StartTime: now.Add(time.Duration(20) * time.Second), FinishTime: now.Add(time.Duration(40) * time.Second), Status: evergreen.TaskUndispatched}
		So(h1.Insert(), ShouldBeNil)

		// 10 -> 80
		h2 := task.Task{Id: "h2", StartTime: now.Add(time.Duration(10) * time.Second), FinishTime: now.Add(time.Duration(80) * time.Second), Status: evergreen.TaskSucceeded}
		So(h2.Insert(), ShouldBeNil)

		// 20 -> shouldn't be added
		neverEnding := task.Task{Id: "neverEnding", StartTime: now.Add(time.Duration(20) * time.Second), Status: evergreen.TaskSucceeded}
		So(neverEnding.Insert(), ShouldBeNil)

		// 5 -> 7
		sameBucket := task.Task{Id: "sameBucket", StartTime: now.Add(time.Duration(5) * time.Second), FinishTime: now.Add(time.Duration(7) * time.Second), Status: evergreen.TaskFailed}
		So(sameBucket.Insert(), ShouldBeNil)

		// 5 -> 30
		h4 := task.Task{Id: "h4", StartTime: now.Add(time.Duration(5) * time.Second), FinishTime: now.Add(time.Duration(30) * time.Second), Status: evergreen.TaskFailed}
		So(h4.Insert(), ShouldBeNil)

		endTime := now.Add(time.Duration(40) * time.Second)
		frameBounds := FrameBounds{
			StartTime:     now,
			EndTime:       endTime,
			NumberBuckets: 4,
			BucketSize:    bucketSize,
		}
		Convey("for four buckets of 10 seconds", func() {
			tasks, err := task.Find(task.ByTimeRun(now, endTime))
			So(err, ShouldBeNil)
			So(len(tasks), ShouldEqual, 4)

			buckets, errors := CreateTaskBuckets(tasks, []task.Task{}, frameBounds)
			So(errors, ShouldBeEmpty)
			So(len(buckets), ShouldEqual, 4)
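			// Each task contributes the overlap of its run time with each
			// 10-second bucket (presumably clipped to the frame):
			//   bucket 0 [0s,10s):  beforeStartTask 10s + sameBucket 2s + h4 5s = 17s
			//   bucket 1 [10s,20s): beforeStartTask 10s + h2 10s + h4 10s       = 30s
			//   bucket 2 [20s,30s): h2 10s + h4 10s                             = 20s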
			So(int(buckets[0].TotalTime.Seconds()), ShouldEqual, 17)
			So(int(math.Ceil(buckets[1].TotalTime.Seconds())), ShouldEqual, 30)
			So(int(math.Ceil(buckets[2].TotalTime.Seconds())), ShouldEqual, 20)
		})

	})
}