Example #1
// FindMinimumQueuePositionForTask finds the lowest position a task occupies
// across all of the task queues. It returns an error if the underlying aggregation fails.
func FindMinimumQueuePositionForTask(taskId string) (int, error) {
	var results []struct {
		Index int `bson:"index"`
	}
	queueItemIdKey := fmt.Sprintf("%v.%v", TaskQueueQueueKey, TaskQueueItemIdKey)

	// NOTE: this aggregation requires 3.2+ because of its use of
	// $unwind's 'path' and 'includeArrayIndex'
	pipeline := []bson.M{
		{"$match": bson.M{
			queueItemIdKey: taskId}},
		{"$unwind": bson.M{
			"path":              fmt.Sprintf("$%s", TaskQueueQueueKey),
			"includeArrayIndex": "index"}},
		{"$match": bson.M{
			queueItemIdKey: taskId}},
		{"$sort": bson.M{
			"index": 1}},
		{"$limit": 1},
	}

	err := db.Aggregate(TaskQueuesCollection, pipeline, &results)

	if len(results) == 0 {
		return -1, err
	}

	return (results[0].Index + 1), err
}
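A minimal caller sketch for the function above, assuming it is called from the same package and that the standard fmt and log packages are imported; the task id handling and log messages are illustrative only.

// checkQueuePosition is a hypothetical helper showing how the return values of
// FindMinimumQueuePositionForTask can be interpreted: positions are 1-based,
// and anything below 1 means the task was not found in any queue.
func checkQueuePosition(taskId string) error {
	pos, err := FindMinimumQueuePositionForTask(taskId)
	if err != nil {
		return fmt.Errorf("finding queue position for task %v: %v", taskId, err)
	}
	if pos < 1 {
		log.Printf("task %v is not in any task queue", taskId)
		return nil
	}
	log.Printf("task %v is at position %d in its best queue", taskId, pos)
	return nil
}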
Example #2
// ExpectedTaskDuration takes a given project and buildvariant and computes
// the average duration - grouped by task display name - for tasks that have
// completed within the given time window.
func ExpectedTaskDuration(project, buildvariant string, window time.Duration) (map[string]time.Duration, error) {
	pipeline := []bson.M{
		{
			"$match": bson.M{
				BuildVariantKey: buildvariant,
				ProjectKey:      project,
				StatusKey: bson.M{
					"$in": []string{evergreen.TaskSucceeded, evergreen.TaskFailed},
				},
				DetailsKey + "." + TaskEndDetailTimedOut: bson.M{
					"$ne": true,
				},
				FinishTimeKey: bson.M{
					"$gte": time.Now().Add(-window),
				},
				StartTimeKey: bson.M{
					// make sure all documents have a valid start time so we don't
					// return tasks with runtimes of multiple years
					"$gt": util.ZeroTime,
				},
			},
		},
		{
			"$project": bson.M{
				DisplayNameKey: 1,
				TimeTakenKey:   1,
				IdKey:          0,
			},
		},
		{
			"$group": bson.M{
				"_id": fmt.Sprintf("$%v", DisplayNameKey),
				"exp_dur": bson.M{
					"$avg": fmt.Sprintf("$%v", TimeTakenKey),
				},
			},
		},
	}

	// anonymous struct for unmarshalling result bson
	var results []struct {
		DisplayName      string `bson:"_id"`
		ExpectedDuration int64  `bson:"exp_dur"`
	}

	err := db.Aggregate(Collection, pipeline, &results)
	if err != nil {
		return nil, fmt.Errorf("error aggregating task average duration: %v", err)
	}

	expDurations := make(map[string]time.Duration)
	for _, result := range results {
		expDuration := time.Duration(result.ExpectedDuration) * time.Nanosecond
		expDurations[result.DisplayName] = expDuration
	}

	return expDurations, nil
}
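A hypothetical caller of ExpectedTaskDuration; the project name, variant, and one-day window below are placeholders, and the standard log and time packages are assumed.

// reportAverageDurations is an illustrative caller only: it prints the expected
// duration of each task display name over the last 24 hours.
func reportAverageDurations() error {
	durations, err := ExpectedTaskDuration("my-project", "linux-64", 24*time.Hour)
	if err != nil {
		return err
	}
	for displayName, avg := range durations {
		log.Printf("task %q has an expected duration of %s", displayName, avg)
	}
	return nil
}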
Example #3
// FetchCells returns a Grid of Cells - grouped by variant and display name.
// current is the most recent version, from which prior Cells are fetched,
// going back as far as depth versions.
func FetchCells(current version.Version, depth int) (Grid, error) {
	cells := Grid{}
	pipeline := []bson.M{
		// Stage 1: Get all builds from the current version going back
		// as far as depth versions.
		{"$match": bson.M{
			build.RequesterKey: evergreen.RepotrackerVersionRequester,
			build.RevisionOrderNumberKey: bson.M{
				"$lte": current.RevisionOrderNumber,
				"$gte": (current.RevisionOrderNumber - depth),
			},
			build.ProjectKey: current.Identifier,
		}},
		// Stage 2: Sort the builds by the most recently completed.
		{"$sort": bson.M{
			build.RevisionOrderNumberKey: -1,
		}},
		// Stage 3: Project only the relevant fields.
		{"$project": bson.M{
			build.TasksKey:    1,
			build.RevisionKey: 1,
			"v":               "$" + build.BuildVariantKey,
		}},
		// Stage 4: Flatten the task cache for easier grouping.
		{"$unwind": "$tasks"},
		// Stage 5: Rewrite and project out only the relevant fields.
		{"$project": bson.M{
			"_id": 0,
			"v":   1,
			"r":   "$" + build.RevisionKey,
			"d":   "$" + build.TasksKey + "." + build.TaskCacheDisplayNameKey,
			"st":  "$" + build.TasksKey + "." + build.TaskCacheStatusKey,
			"ed":  "$" + build.TasksKey + "." + build.TaskCacheStatusDetailsKey,
			"id":  "$" + build.TasksKey + "." + build.TaskCacheIdKey,
		}},
		// Stage 6: Group the tasks by variant and display name. For each group,
		// add the history - all prior versioned tasks along with their status,
		// id, and revision identifier.
		{"$group": bson.M{
			"_id": bson.M{
				"v": "$v",
				"d": "$d",
			},
			"h": bson.M{
				"$push": bson.M{
					"s": "$st",
					"e": "$ed",
					"d": "$id",
					"v": "$r",
				},
			},
		}},
	}
	return cells, db.Aggregate(build.Collection, pipeline, &cells)
}
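The final $group stage above emits documents keyed by single-letter fields; the sketch below shows a struct that would unmarshal one such document, using the same mgo bson package as the pipeline. The project's Grid/Cell types presumably encode this shape already, so the struct and field names here are illustrative, not the real definitions.

// cellSketch mirrors the output of the last $group stage: one document per
// (variant, display name) pair, with one history entry pushed per matching task.
// Note that the single-letter keys are reused with different meanings at each level.
type cellSketch struct {
	Id struct {
		Variant     string `bson:"v"` // build variant
		DisplayName string `bson:"d"` // task display name
	} `bson:"_id"`
	History []struct {
		Status        string `bson:"s"` // task cache status
		StatusDetails bson.M `bson:"e"` // task cache status details
		TaskId        string `bson:"d"` // task id
		Revision      string `bson:"v"` // revision the task ran against
	} `bson:"h"`
}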
Example #4
// AverageStatistics uses an aggregation pipeline that buckets the given time frame
// and finds the average scheduled -> start time for each bucket.
// Note that the average time is in milliseconds, not nanoseconds, and must be converted.
func AverageStatistics(distroId string, bounds FrameBounds) (AvgBuckets, error) {

	// error out if the distro does not exist
	_, err := distro.FindOne(distro.ById(distroId))
	if err != nil {
		return nil, err
	}
	intBucketSize := util.FromNanoseconds(bounds.BucketSize)
	buckets := AvgBuckets{}
	pipeline := []bson.M{
		// find all tasks that have started within the time frame for a given distro and only valid statuses.
		{"$match": bson.M{
			task.StartTimeKey: bson.M{
				"$gte": bounds.StartTime,
				"$lte": bounds.EndTime,
			},
			// only need tasks that have already started or those that have finished,
			// not looking for tasks that have been scheduled but not started.
			task.StatusKey: bson.M{
				"$in": []string{evergreen.TaskStarted,
					evergreen.TaskFailed, evergreen.TaskSucceeded},
			},
			task.DistroIdKey: distroId,
		}},
		// project the difference in scheduled -> start, as well as the bucket
		{"$project": bson.M{
			"diff": bson.M{
				"$subtract": []interface{}{"$" + task.StartTimeKey, "$" + task.ScheduledTimeKey},
			},
			"b": bson.M{
				"$floor": bson.M{
					"$divide": []interface{}{
						bson.M{"$subtract": []interface{}{"$" + task.StartTimeKey, bounds.StartTime}},
						intBucketSize},
				},
			},
		}},
		{"$group": bson.M{
			"_id": "$b",
			"a":   bson.M{"$avg": "$diff"},
			"n":   bson.M{"$sum": 1},
		}},

		{"$sort": bson.M{
			"_id": 1,
		}},
	}

	if err := db.Aggregate(task.Collection, pipeline, &buckets); err != nil {
		return nil, err
	}
	return convertBucketsToNanoseconds(buckets, bounds), nil
}
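A client-side sketch of the arithmetic the $project stage performs on the server, under two assumptions: MongoDB's $subtract on dates yields a millisecond count, and FrameBounds carries a time.Time start and a time.Duration bucket size (which util.FromNanoseconds appears to convert into matching units). The names below are illustrative.

// bucketForTask mirrors the "diff" and "b" expressions above: the scheduled-to-start
// wait for a task, and the index of the bucket its start time falls into.
func bucketForTask(scheduled, started, frameStart time.Time, bucketSize time.Duration) (waitMillis, bucket int64) {
	// "diff": $subtract of two dates in MongoDB is a millisecond count
	waitMillis = int64(started.Sub(scheduled) / time.Millisecond)
	// "b": $floor($divide(start - frameStart, bucketSize)); integer duration
	// division in Go truncates, matching $floor for non-negative values
	bucket = int64(started.Sub(frameStart) / bucketSize)
	return waitMillis, bucket
}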
Example #5
// FetchRevisionOrderFailures returns the most recent test failures
// grouped by revision - looking as far back as depth revisions
func FetchRevisionOrderFailures(current version.Version, depth int) (RevisionFailures, error) {
	pipeline := []bson.M{
		// Stage 1: Get the most recent completed tasks - looking back as far as
		// depth versions - on this project.
		{"$match": bson.M{
			task.RevisionOrderNumberKey: bson.M{
				"$lte": current.RevisionOrderNumber,
				"$gte": (current.RevisionOrderNumber - depth),
			},
			task.ProjectKey:   current.Identifier,
			task.RequesterKey: evergreen.RepotrackerVersionRequester,
		}},
		// Stage 2: Project only relevant fields.
		{"$project": bson.M{
			task.DisplayNameKey:  1,
			task.RevisionKey:     1,
			task.BuildVariantKey: 1,
			task.IdKey:           1,
			"l":                  "$" + task.TestResultsKey,
		}},
		// Stage 3: Flatten out the test results
		{"$unwind": "$l"},
		// Stage 4: Take only failed test results
		{"$match": bson.M{
			"l." + task.TestResultStatusKey: evergreen.TestFailedStatus,
		}},
		// Stage 5: Project only relevant fields including just the test file key
		{"$project": bson.M{
			task.RevisionKey:     1,
			task.BuildVariantKey: 1,
			task.DisplayNameKey:  1,
			task.IdKey:           1,
			"f":                  "$l." + task.TestResultTestFileKey,
		}},
		// Stage 6: Group by revision. For each one include the
		// variant name, task name, task id and test name
		{"$group": bson.M{
			"_id": "$" + task.RevisionKey,
			"a": bson.M{
				"$push": bson.M{
					"n":   "$" + task.BuildVariantKey,
					"i":   "$f",
					"t":   "$" + task.DisplayNameKey,
					"tid": "$" + task.IdKey,
				},
			},
		}},
	}
	taskFailures := RevisionFailures{}
	return taskFailures, db.Aggregate(task.Collection, pipeline, &taskFailures)
}
Example #6
// getTags finds TaskJSONs that have tags
func getTags(w http.ResponseWriter, r *http.Request) {
	t, err := task.FindOne(task.ById(mux.Vars(r)["task_id"]))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	tags := []struct {
		Tag string `bson:"_id" json:"tag"`
	}{}
	err = db.Aggregate(collection, []bson.M{
		{"$match": bson.M{ProjectIdKey: t.Project, TagKey: bson.M{"$exists": true, "$ne": ""}}},
		{"$project": bson.M{TagKey: 1}}, bson.M{"$group": bson.M{"_id": "$tag"}},
	}, &tags)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	plugin.WriteJSON(w, http.StatusOK, tags)
}
Example #7
// AverageTaskTimeDifference takes two field names (such that field2 happened
// after field1), a field to group on, and a cutoff time.
// It returns the average duration between fields 1 and 2, grouped by
// the groupBy field, including only task documents where both time
// fields happened after the given cutoff time. This information is returned
// as a map from groupBy_field -> avg_time_difference
//
// NOTE: THIS FUNCTION DOES NOT SANITIZE INPUT!
// BAD THINGS CAN HAPPEN IF NON-TIME FIELDNAMES ARE PASSED IN
// OR IF A FIELD OF NON-STRING TYPE IS SUPPLIED FOR groupBy!
func AverageTaskTimeDifference(field1 string, field2 string,
	groupByField string, cutoff time.Time) (map[string]time.Duration, error) {

	// This pipeline returns the average time difference between
	// two time fields, grouped by a given field of "string" type.
	// It assumes field2 happened later than field1.
	// Time difference returned in milliseconds.
	pipeline := []bson.M{
		{"$match": bson.M{
			field1: bson.M{"$gt": cutoff},
			field2: bson.M{"$gt": cutoff}}},
		{"$group": bson.M{
			"_id": "$" + groupByField,
			"avg_time": bson.M{
				"$avg": bson.M{
					"$subtract": []string{"$" + field2, "$" + field1},
				},
			},
		}},
	}

	// anonymous struct for unmarshalling result bson
	// NOTE: This means we can only group by string fields currently
	var results []struct {
		GroupId     string `bson:"_id"`
		AverageTime int64  `bson:"avg_time"`
	}

	err := db.Aggregate(TasksCollection, pipeline, &results)
	if err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Error aggregating task times by [%v, %v]: %v", field1, field2, err)
		return nil, err
	}

	avgTimes := make(map[string]time.Duration)
	for _, res := range results {
		avgTimes[res.GroupId] = time.Duration(res.AverageTime) * time.Millisecond
	}

	return avgTimes, nil
}
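A hypothetical caller, showing the intended argument order (field1 is the earlier timestamp) in light of the warning above about unsanitized input; the BSON field names below are placeholders for whatever keys actually hold the two timestamps and the grouping string.

// averageScheduledToStart is illustrative only; "scheduled_time", "start_time",
// and "distro" stand in for the real BSON field names in the tasks collection.
func averageScheduledToStart(cutoff time.Time) (map[string]time.Duration, error) {
	// field1 must be the earlier of the two time fields
	return AverageTaskTimeDifference("scheduled_time", "start_time", "distro", cutoff)
}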
Example #8
// FetchFailures returns the most recent test failures that have occurred at or
// before the current version - looking back as far as depth versions.
func FetchFailures(current version.Version, depth int) (Failures, error) {
	pipeline := []bson.M{
		// Stage 1: Get the most recent completed tasks - looking back as far as
		// depth versions - on this project.
		{"$match": bson.M{
			model.TaskRevisionOrderNumberKey: bson.M{
				"$lte": current.RevisionOrderNumber,
				"$gte": (current.RevisionOrderNumber - depth),
			},
			model.TaskProjectKey:   current.Project,
			model.TaskRequesterKey: evergreen.RepotrackerVersionRequester,
			model.TaskStatusKey: bson.M{
				"$in": []string{
					evergreen.TaskFailed,
					evergreen.TaskSucceeded,
				},
			},
		}},
		// Stage 2: Sort the tasks by the most recently completed.
		{"$sort": bson.M{
			model.TaskRevisionOrderNumberKey: -1,
		}},
		// Stage 3: Project only relevant fields.
		{"$project": bson.M{
			model.TaskDisplayNameKey:  1,
			model.TaskBuildVariantKey: 1,
			model.TaskTestResultsKey:  1,
			model.TaskIdKey:           1,
		}},
		// Stage 4: Group these tasks by display name and buildvariant -
		// this returns the most recently completed grouped by task display name
		// and by variant. We take only the first test results (adding its task
		// id) for each task/variant group.
		{"$group": bson.M{
			"_id": bson.M{
				"t": "$" + model.TaskDisplayNameKey,
				"v": "$" + model.TaskBuildVariantKey,
			},
			"l": bson.M{
				"$first": "$" + model.TaskTestResultsKey,
			},
			"tid": bson.M{
				"$first": "$" + model.TaskIdKey,
			},
		}},
		// Stage 5: For each group, filter out those task/variant combinations
		// that don't have at least one test failure in them.
		{"$match": bson.M{
			"l." + model.TestResultStatusKey: evergreen.TestFailedStatus,
		}},
		// Stage 6: Rewrite each task/variant combination from the _id into the
		// top-level. Project only the test name and status for all tests, and
		// add a 'status' literal string to each group. This sets up the
		// documents for redacting in the next stage.
		{"$project": bson.M{
			"tid": 1,
			"t":   "$_id.t",
			"v":   "$_id.v",
			"l." + model.TestResultStatusKey:   1,
			"l." + model.TestResultTestFileKey: 1,
			"status": bson.M{
				"$literal": evergreen.TestFailedStatus,
			},
		}},
		// Stage 7: While each test result contains at least one failed test,
		// some other tests may have passed. Prune individual tests that did
		// not fail.
		{"$redact": bson.M{
			"$cond": bson.M{
				"if": bson.M{
					"$eq": []string{
						"$status", evergreen.TestFailedStatus,
					},
				},
				"then": "$$DESCEND",
				"else": "$$PRUNE",
			},
		}},
		// Stage 8: We no longer need the status fields so project only fields
		// we want to return.
		{"$project": bson.M{
			"f":   "$l." + model.TestResultTestFileKey,
			"_id": 0,
			"tid": 1,
			"t":   1,
			"v":   1,
		}},
		// Stage 9: Flatten each failing test so we can group them by all the
		// variants on which they are failing.
		{"$unwind": "$f"},
		// Stage 10: Group the individual test failures. For each, add the variants
		// it's failing on (and the accompanying task id) and include the task's
		// display name.
		{"$group": bson.M{
			"_id": bson.M{
				"t": "$f",
				"d": "$t",
			},
			"a": bson.M{
				"$push": bson.M{
					"n": "$v",
					"i": "$tid",
				},
			},
		}},
	}
	failures := Failures{}
	return failures, db.Aggregate(model.TasksCollection, pipeline, &failures)
}
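The $redact in Stage 7 is the least obvious stage in this pipeline; below is a rough client-side equivalent of what it does to each group's embedded test results. The testResult type is a stand-in for illustration, not the project's actual model.

// testResult is a minimal stand-in for the project's test result document.
type testResult struct {
	Status   string
	TestFile string
}

// pruneToFailures keeps only the failed entries, which is what $redact achieves
// server-side: the injected top-level "status" literal matches the failed status
// (so the document itself is descended into), while each embedded test result
// without a matching status is pruned.
func pruneToFailures(results []testResult) []testResult {
	failed := make([]testResult, 0, len(results))
	for _, r := range results {
		if r.Status == evergreen.TestFailedStatus {
			failed = append(failed, r)
		}
	}
	return failed
}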
Example #9
// Returns a JSON response with the status of the specified version
// grouped on the tasks. The keys of the object are the task names,
// with each key in the nested object representing a particular build
// variant.
func (restapi *restAPI) getVersionStatusByTask(versionId string, w http.ResponseWriter, r *http.Request) {
	id := "_id"
	taskName := "task_name"
	statuses := "statuses"

	pipeline := []bson.M{
		// 1. Find only builds corresponding to the specified version
		{
			"$match": bson.M{
				build.VersionKey: versionId,
			},
		},
		// 2. Loop through each task run on a particular build variant
		{
			"$unwind": fmt.Sprintf("$%v", build.TasksKey),
		},
		// 3. Group on the task name and construct a new document containing
		//    all of the relevant info about the task status
		{
			"$group": bson.M{
				id: fmt.Sprintf("$%v.%v", build.TasksKey, build.TaskCacheDisplayNameKey),
				statuses: bson.M{
					"$push": bson.M{
						build.BuildVariantKey:       fmt.Sprintf("$%v", build.BuildVariantKey),
						build.TaskCacheIdKey:        fmt.Sprintf("$%v.%v", build.TasksKey, build.TaskCacheIdKey),
						build.TaskCacheStatusKey:    fmt.Sprintf("$%v.%v", build.TasksKey, build.TaskCacheStatusKey),
						build.TaskCacheStartTimeKey: fmt.Sprintf("$%v.%v", build.TasksKey, build.TaskCacheStartTimeKey),
						build.TaskCacheTimeTakenKey: fmt.Sprintf("$%v.%v", build.TasksKey, build.TaskCacheTimeTakenKey),
						build.TaskCacheActivatedKey: fmt.Sprintf("$%v.%v", build.TasksKey, build.TaskCacheActivatedKey),
					},
				},
			},
		},
		// 4. Rename the "_id" field to "task_name"
		{
			"$project": bson.M{
				id:       0,
				taskName: fmt.Sprintf("$%v", id),
				statuses: 1,
			},
		},
	}

	// Anonymous struct used to unmarshal output from the aggregation pipeline
	var tasks []struct {
		DisplayName string `bson:"task_name"`
		Statuses    []struct {
			BuildVariant string `bson:"build_variant"`
			// Use an anonymous field so the inlined TaskCache fields
			// unmarshal directly into this struct
			build.TaskCache `bson:",inline"`
		} `bson:"statuses"`
	}

	err := db.Aggregate(build.Collection, pipeline, &tasks)
	if err != nil {
		msg := fmt.Sprintf("Error finding status for version '%v'", versionId)
		evergreen.Logger.Logf(slogger.ERROR, "%v: %v", msg, err)
		restapi.WriteJSON(w, http.StatusInternalServerError, responseError{Message: msg})
		return
	}

	result := versionStatusByTaskContent{
		Id:    versionId,
		Tasks: make(map[string]versionStatusByTask, len(tasks)),
	}

	for _, task := range tasks {
		statuses := make(versionStatusByTask, len(task.Statuses))
		for _, taskStatus := range task.Statuses {
			status := versionStatus{
				Id:        taskStatus.Id,
				Status:    taskStatus.Status,
				TimeTaken: taskStatus.TimeTaken,
			}
			statuses[taskStatus.BuildVariant] = status
		}
		result.Tasks[task.DisplayName] = statuses
	}

	restapi.WriteJSON(w, http.StatusOK, result)
}
Example #10
// getTasksForLatestVersion sends back the TaskJSON data associated with the latest version.
func getTasksForLatestVersion(w http.ResponseWriter, r *http.Request) {
	project := mux.Vars(r)["project_id"]
	name := mux.Vars(r)["name"]
	skip, err := util.GetIntValue(r, "skip", 0)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if skip < 0 {
		http.Error(w, "negative skip", http.StatusBadRequest)
		return
	}
	pipeline := []bson.M{
		// match on name and project
		{"$match": bson.M{
			NameKey:      name,
			ProjectIdKey: project,
		}},

		// sort on the revision number
		{"$sort": bson.M{
			RevisionOrderNumberKey: -1,
		}},

		// group by version id
		{"$group": bson.M{
			"_id": bson.M{
				"r":   "$" + RevisionOrderNumberKey,
				"vid": "$" + VersionIdKey,
			},
			"t": bson.M{
				"$push": bson.M{
					"d":    "$" + DataKey,
					"t_id": "$" + TaskIdKey,
					"tn":   "$" + TaskNameKey,
					"var":  "$" + VariantKey,
				},
			},
		}},

		// sort on the _id
		{"$sort": bson.M{
			"_id.r": -1,
		}},

		{"$skip": skip},
		{"$limit": 2},
	}

	tasksForVersions := []TasksForVersion{}
	err = db.Aggregate(collection, pipeline, &tasksForVersions)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if len(tasksForVersions) == 0 {
		http.Error(w, "no tasks found", http.StatusNotFound)
		return
	}

	// by default, assume there is at least one more (older) revision
	lastRevision := false
	currentVersion := tasksForVersions[0]

	// if there is only one version, then we are at the last revision.
	if len(tasksForVersions) < 2 {
		lastRevision = true
	}

	// get the version commit info
	v, err := version.FindOne(version.ById(currentVersion.Id.VersionId))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if v == nil {
		http.Error(w, "{}", http.StatusNotFound)
		return
	}

	commitInfo := CommitInfo{
		Author:     v.Author,
		Message:    v.Message,
		CreateTime: v.CreateTime,
		Revision:   v.Revision,
		VersionId:  v.Id,
	}

	data := VersionData{currentVersion.Tasks, commitInfo, lastRevision}
	plugin.WriteJSON(w, http.StatusOK, data)
}
Example #11
File: json.go Project: sr527/json
func (hwp *JSONPlugin) GetUIHandler() http.Handler {
	r := mux.NewRouter()
	r.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
		plugin.WriteJSON(w, http.StatusOK, "1")
	})
	r.HandleFunc("/task/{task_id}/{name}/", func(w http.ResponseWriter, r *http.Request) {
		var jsonForTask TaskJSON
		err := db.FindOneQ(collection, db.Query(bson.M{"task_id": mux.Vars(r)["task_id"], "name": mux.Vars(r)["name"]}), &jsonForTask)
		if err != nil {
			if err != mgo.ErrNotFound {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			http.Error(w, "{}", http.StatusNotFound)
			return
		}
		plugin.WriteJSON(w, http.StatusOK, jsonForTask)
	})

	r.HandleFunc("/task/{task_id}/{name}/tags", func(w http.ResponseWriter, r *http.Request) {
		t, err := model.FindTask(mux.Vars(r)["task_id"])
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		tags := []struct {
			Tag string `bson:"_id" json:"tag"`
		}{}
		err = db.Aggregate(collection, []bson.M{
			{"$match": bson.M{"project_id": t.Project, "tag": bson.M{"$exists": true, "$ne": ""}}},
			{"$project": bson.M{"tag": 1}}, bson.M{"$group": bson.M{"_id": "$tag"}},
		}, &tags)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		plugin.WriteJSON(w, http.StatusOK, tags)
	})
	r.HandleFunc("/task/{task_id}/{name}/tag", func(w http.ResponseWriter, r *http.Request) {
		t, err := model.FindTask(mux.Vars(r)["task_id"])
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if t == nil {
			http.Error(w, "{}", http.StatusNotFound)
			return
		}
		if r.Method == "DELETE" {
			if _, err = db.UpdateAll(collection,
				bson.M{"version_id": t.Version, "name": mux.Vars(r)["name"]},
				bson.M{"$unset": bson.M{"tag": 1}}); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			plugin.WriteJSON(w, http.StatusOK, "")
			return
		}
		inTag := struct {
			Tag string `json:"tag"`
		}{}
		err = util.ReadJSONInto(r.Body, &inTag)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if len(inTag.Tag) == 0 {
			http.Error(w, "tag must not be blank", http.StatusBadRequest)
			return
		}

		_, err = db.UpdateAll(collection,
			bson.M{"version_id": t.Version, "name": mux.Vars(r)["name"]},
			bson.M{"$set": bson.M{"tag": inTag.Tag}})

		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		plugin.WriteJSON(w, http.StatusOK, "")
	})
	r.HandleFunc("/tag/{project_id}/{tag}/{variant}/{task_name}/{name}", func(w http.ResponseWriter, r *http.Request) {
		var jsonForTask TaskJSON
		err := db.FindOneQ(collection,
			db.Query(bson.M{"project_id": mux.Vars(r)["project_id"],
				"tag":       mux.Vars(r)["tag"],
				"variant":   mux.Vars(r)["variant"],
				"task_name": mux.Vars(r)["task_name"],
				"name":      mux.Vars(r)["name"],
			}), &jsonForTask)
		if err != nil {
			if err != mgo.ErrNotFound {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			http.Error(w, "{}", http.StatusNotFound)
			return
		}
		plugin.WriteJSON(w, http.StatusOK, jsonForTask)
	})
	r.HandleFunc("/commit/{project_id}/{revision}/{variant}/{task_name}/{name}", func(w http.ResponseWriter, r *http.Request) {
		var jsonForTask TaskJSON
		err := db.FindOneQ(collection,
			db.Query(bson.M{"project_id": mux.Vars(r)["project_id"],
				"revision":  bson.RegEx{"^" + regexp.QuoteMeta(mux.Vars(r)["revision"]), "i"},
				"variant":   mux.Vars(r)["variant"],
				"task_name": mux.Vars(r)["task_name"],
				"name":      mux.Vars(r)["name"],
				"is_patch":  false,
			}), &jsonForTask)
		if err != nil {
			if err != mgo.ErrNotFound {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			http.Error(w, "{}", http.StatusNotFound)
			return
		}
		plugin.WriteJSON(w, http.StatusOK, jsonForTask)
	})
	r.HandleFunc("/history/{task_id}/{name}", func(w http.ResponseWriter, r *http.Request) {
		t, err := model.FindTask(mux.Vars(r)["task_id"])
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if t == nil {
			http.Error(w, "{}", http.StatusNotFound)
			return
		}

		before := []TaskJSON{}
		jsonQuery := db.Query(bson.M{
			"project_id": t.Project,
			"variant":    t.BuildVariant,
			"order":      bson.M{"$lte": t.RevisionOrderNumber},
			"task_name":  t.DisplayName,
			"is_patch":   false,
			"name":       mux.Vars(r)["name"]})
		jsonQuery = jsonQuery.Sort([]string{"-order"}).Limit(100)
		err = db.FindAllQ(collection, jsonQuery, &before)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		//reverse order of "before" because we had to sort it backwards to apply the limit correctly:
		for i, j := 0, len(before)-1; i < j; i, j = i+1, j-1 {
			before[i], before[j] = before[j], before[i]
		}

		after := []TaskJSON{}
		jsonAfterQuery := db.Query(bson.M{
			"project_id": t.Project,
			"variant":    t.BuildVariant,
			"order":      bson.M{"$gt": t.RevisionOrderNumber},
			"task_name":  t.DisplayName,
			"is_patch":   false,
			"name":       mux.Vars(r)["name"]}).Sort([]string{"order"}).Limit(100)
		err = db.FindAllQ(collection, jsonAfterQuery, &after)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		//concatenate before + after
		before = append(before, after...)
		plugin.WriteJSON(w, http.StatusOK, before)
	})
	return r
}
Example #12
// Returns versions and tasks grouped by gitspec, newest first (sorted by order number desc)
func (self *buildVariantHistoryIterator) GetItems(beforeCommit *version.Version, numRevisions int) ([]bson.M, []version.Version, error) {
	var versionQuery db.Q
	if beforeCommit != nil {
		versionQuery = db.Query(bson.M{
			version.RequesterKey:           evergreen.RepotrackerVersionRequester,
			version.RevisionOrderNumberKey: bson.M{"$lt": beforeCommit.RevisionOrderNumber},
			version.IdentifierKey:          self.ProjectName,
			version.BuildVariantsKey: bson.M{
				"$elemMatch": bson.M{
					version.BuildStatusVariantKey: self.BuildVariantInVersion,
				},
			},
		})
	} else {
		versionQuery = db.Query(bson.M{
			version.RequesterKey:  evergreen.RepotrackerVersionRequester,
			version.IdentifierKey: self.ProjectName,
			version.BuildVariantsKey: bson.M{
				"$elemMatch": bson.M{
					version.BuildStatusVariantKey: self.BuildVariantInVersion,
				},
			},
		})
	}
	versionQuery = versionQuery.WithFields(
		version.IdKey,
		version.RevisionOrderNumberKey,
		version.RevisionKey,
		version.MessageKey,
		version.CreateTimeKey,
	).Sort([]string{"-" + version.RevisionOrderNumberKey}).Limit(numRevisions)

	// Get the next numRevisions versions
	versions, err := version.Find(versionQuery)

	if err != nil {
		return nil, nil, err
	}

	if len(versions) == 0 {
		return nil, []version.Version{}, nil
	}

	//versionEndBoundary is the *earliest* version which should be included in results
	versionEndBoundary := versions[len(versions)-1]

	matchFilter := bson.M{
		task.RequesterKey:    evergreen.RepotrackerVersionRequester,
		task.BuildVariantKey: self.BuildVariantInTask,
		task.ProjectKey:      self.ProjectName,
	}

	if beforeCommit != nil {
		matchFilter[task.RevisionOrderNumberKey] = bson.M{
			"$gte": versionEndBoundary.RevisionOrderNumber,
			"$lt":  beforeCommit.RevisionOrderNumber,
		}
	} else {
		matchFilter[task.RevisionOrderNumberKey] = bson.M{
			"$gte": versionEndBoundary.RevisionOrderNumber,
		}
	}

	pipeline := []bson.M{
		{"$match": matchFilter},
		{"$sort": bson.D{{task.RevisionOrderNumberKey, 1}}},
		{
			"$group": bson.M{
				"_id":   "$" + task.RevisionKey,
				"order": bson.M{"$first": "$" + task.RevisionOrderNumberKey},
				"tasks": bson.M{
					"$push": bson.M{
						"_id":              "$" + task.IdKey,
						"status":           "$" + task.StatusKey,
						"task_end_details": "$" + task.DetailsKey,
						"activated":        "$" + task.ActivatedKey,
						"time_taken":       "$" + task.TimeTakenKey,
						"display_name":     "$" + task.DisplayNameKey,
					},
				},
			},
		},
		{"$sort": bson.M{task.RevisionOrderNumberKey: -1, task.DisplayNameKey: 1}},
	}

	var output []bson.M
	if err = db.Aggregate(task.Collection, pipeline, &output); err != nil {
		return nil, nil, err
	}

	return output, versions, nil
}
Example #13
// Given a project name and a list of build variants, return the latest version
// on which all the given build variants succeeded. Gives up after 100 versions.
func FindLastPassingVersionForBuildVariants(project *Project, buildVariantNames []string) (*version.Version, error) {
	if len(buildVariantNames) == 0 {
		return nil, fmt.Errorf("No build variants specified!")
	}

	// Get latest commit order number for this project
	latestVersion, err := version.FindOne(db.Query(
		version.ByMostRecentForRequester(project.Identifier, evergreen.RepotrackerVersionRequester).
			WithFields(version.RevisionOrderNumberKey)))
	if err != nil {
		return nil, fmt.Errorf("Error getting latest version: %v", err)
	}
	if latestVersion == nil {
		return nil, nil
	}

	mostRecentRevisionOrderNumber := latestVersion.RevisionOrderNumber

	// Earliest commit order number to consider
	leastRecentRevisionOrderNumber := mostRecentRevisionOrderNumber - StaleVersionCutoff
	if leastRecentRevisionOrderNumber < 0 {
		leastRecentRevisionOrderNumber = 0
	}

	pipeline := []bson.M{
		// Limit ourselves to builds for non-stale versions and the given project
		// and build variants
		{
			"$match": bson.M{
				build.ProjectKey:             project.Identifier,
				build.RevisionOrderNumberKey: bson.M{"$gte": leastRecentRevisionOrderNumber},
				build.BuildVariantKey:        bson.M{"$in": buildVariantNames},
				build.StatusKey:              evergreen.BuildSucceeded,
			},
		},
		// Sum up the number of builds that succeeded for each commit order number
		{
			"$group": bson.M{
				"_id": fmt.Sprintf("$%v", build.RevisionOrderNumberKey),
				"numSucceeded": bson.M{
					"$sum": 1,
				},
			},
		},
		// Find builds that succeeded on all of the requested build variants
		{
			"$match": bson.M{"numSucceeded": len(buildVariantNames)},
		},
		// Order by commit order number, descending
		{
			"$sort": bson.M{"_id": -1},
		},
		// Get the highest commit order number where builds succeeded on all the
		// requested build variants
		{
			"$limit": 1,
		},
	}

	var result []bson.M
	err = db.Aggregate(build.Collection, pipeline, &result)

	if err != nil {
		return nil, fmt.Errorf("Aggregation failed: %v", err)
	}

	if len(result) == 0 {
		return nil, nil
	}

	// Get the version corresponding to the resulting commit order number
	v, err := version.FindOne(
		db.Query(bson.M{
			version.RequesterKey:           evergreen.RepotrackerVersionRequester,
			version.IdentifierKey:          project.Identifier,
			version.RevisionOrderNumberKey: result[0]["_id"],
		}))
	if err != nil {
		return nil, err
	}
	if v == nil {
		return nil, fmt.Errorf("Couldn't find version with id `%v` after "+
			"successful aggregation.", result[0]["_id"])
	}
	return v, nil
}
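A hypothetical caller showing how the (nil, nil) result - no version within the cutoff passed on every requested variant - is distinguished from a real error; the variant names are illustrative.

// lastGreenRevision is an illustrative helper wrapping the function above.
func lastGreenRevision(project *Project) (string, error) {
	v, err := FindLastPassingVersionForBuildVariants(project, []string{"linux-64", "windows-64"})
	if err != nil {
		return "", err
	}
	if v == nil {
		// either the project has no versions, or nothing recent passed on all the variants
		return "", nil
	}
	return v.Revision, nil
}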