func (uis *UIServer) averageSchedulerStats(w http.ResponseWriter, r *http.Request) { // get granularity (in seconds) granularity, err := util.GetIntValue(r, "granularity", 0) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } if granularity == 0 { uis.LoggedError(w, r, http.StatusBadRequest, fmt.Errorf("Invalid granularity")) return } // get number of days back daysBack, err := util.GetIntValue(r, "numberDays", 0) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } if daysBack == 0 { uis.LoggedError(w, r, http.StatusBadRequest, fmt.Errorf("Invalid days back")) return } distroId := mux.Vars(r)["distro_id"] bounds := model.CalculateBounds(daysBack, granularity) avgBuckets, err := model.AverageStatistics(distroId, bounds) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } uis.WriteJSON(w, http.StatusOK, avgBuckets) }
func (uis *UIServer) schedulerHostUtilization(w http.ResponseWriter, r *http.Request) { // get granularity (in seconds) granularity, err := util.GetIntValue(r, "granularity", 0) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } if granularity == 0 { uis.LoggedError(w, r, http.StatusBadRequest, fmt.Errorf("Invalid granularity")) return } // get number of days back daysBack, err := util.GetIntValue(r, "numberDays", 0) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } if daysBack == 0 { uis.LoggedError(w, r, http.StatusBadRequest, fmt.Errorf("Invalid days back")) return } bucketData, err := model.CreateAllHostUtilizationBuckets(daysBack, granularity) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } uis.WriteJSON(w, http.StatusOK, bucketData) return }
func (restapi *restAPI) getHostUtilizationStats(w http.ResponseWriter, r *http.Request) { // get granularity (in seconds) granularity, err := util.GetIntValue(r, "granularity", 0) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } if granularity == 0 { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: "invalid granularity"}) return } // get number of days back daysBack, err := util.GetIntValue(r, "numberDays", 0) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } if daysBack == 0 { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: "invalid days back"}) return } isCSV, err := util.GetBoolValue(r, "csv", true) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } buckets, err := model.CreateAllHostUtilizationBuckets(daysBack, granularity) if err != nil { restapi.WriteJSON(w, http.StatusInternalServerError, responseError{Message: fmt.Sprintf("error getting buckets: %v", err.Error())}) return } restBuckets := []restHostUtilizationBucket{} // convert the time.Durations into integers for _, b := range buckets { r := restHostUtilizationBucket{ StaticHost: int(b.StaticHost), DynamicHost: int(b.DynamicHost), Task: int(b.Task), StartTime: b.StartTime, EndTime: b.EndTime, } restBuckets = append(restBuckets, r) } if isCSV { util.WriteCSVResponse(w, http.StatusOK, restBuckets) return } restapi.WriteJSON(w, http.StatusOK, buckets) }
func (restapi *restAPI) getOptimalAndActualMakespans(w http.ResponseWriter, r *http.Request) { // get number of days back numberBuilds, err := util.GetIntValue(r, "number", 0) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } if numberBuilds == 0 { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: "invalid number builds"}) return } isCSV, err := util.GetBoolValue(r, "csv", true) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } makespanData, err := getMakespanRatios(numberBuilds) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } if isCSV { util.WriteCSVResponse(w, http.StatusOK, makespanData) return } restapi.WriteJSON(w, http.StatusOK, makespanData) }
// listPatches returns a user's "n" most recent patches. func (as *APIServer) listPatches(w http.ResponseWriter, r *http.Request) { dbUser := MustHaveUser(r) n, err := util.GetIntValue(r, "n", 0) if err != nil { as.LoggedError(w, r, http.StatusBadRequest, fmt.Errorf("cannot read value n: %v", err)) return } query := patch.ByUser(dbUser.Id).Sort([]string{"-" + patch.CreateTimeKey}) if n > 0 { query = query.Limit(n) } patches, err := patch.Find(query) if err != nil { as.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("error finding patches for user %v: %v", dbUser.Id, err)) return } as.WriteJSON(w, http.StatusOK, patches) }
// getTaskQueueSize returns a JSON response with a SUCCESS flag if all task queues have a size // less than the size indicated. If a distro's task queue has size greater than or equal to the size given, // there will be an ERROR flag along with a map of the distro name to the size of the task queue. // If the size is 0 or the size is not sent, the JSON response will be SUCCESS with a list of all distros and their // task queue sizes. func (as *APIServer) checkTaskQueueSize(w http.ResponseWriter, r *http.Request) { size, err := util.GetIntValue(r, "size", 0) if err != nil { as.LoggedError(w, r, http.StatusInternalServerError, err) return } distro := r.FormValue("distro") distroNames := make(map[string]int) status := "SUCCESS" if distro != "" { taskQueue, err := model.FindTaskQueueForDistro(distro) if err != nil { as.LoggedError(w, r, http.StatusBadRequest, err) return } if taskQueue.Length() >= size { distroNames[distro] = taskQueue.Length() status = "ERROR" } } else { taskQueues, err := model.FindAllTaskQueues() if err != nil { as.LoggedError(w, r, http.StatusInternalServerError, err) return } for _, queue := range taskQueues { if queue.Length() >= size { distroNames[queue.Distro] = queue.Length() status = "ERROR" } } } growthResponse := struct { Status string Distros map[string]int }{status, distroNames} as.WriteJSON(w, http.StatusOK, growthResponse) }
// getTasksForLatestVersion sends back the TaskJSON data associated with the latest version.
// It aggregates the stored task JSON documents for the given project/name,
// grouped by version in descending revision order, then serves the version at
// offset "skip" along with commit metadata for that version.
func getTasksForLatestVersion(w http.ResponseWriter, r *http.Request) {
	project := mux.Vars(r)["project_id"]
	name := mux.Vars(r)["name"]
	// "skip" selects how many versions back from the latest to look (0 = latest).
	skip, err := util.GetIntValue(r, "skip", 0)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if skip < 0 {
		http.Error(w, "negative skip", http.StatusBadRequest)
		return
	}
	pipeline := []bson.M{
		// match on name and project
		{"$match": bson.M{
			NameKey:      name,
			ProjectIdKey: project,
		}},
		// sort on the revision number
		{"$sort": bson.M{
			RevisionOrderNumberKey: -1,
		}},
		// group by version id
		{"$group": bson.M{
			"_id": bson.M{
				"r":   "$" + RevisionOrderNumberKey,
				"vid": "$" + VersionIdKey,
			},
			"t": bson.M{
				"$push": bson.M{
					"d":    "$" + DataKey,
					"t_id": "$" + TaskIdKey,
					"tn":   "$" + TaskNameKey,
					"var":  "$" + VariantKey,
				},
			},
		}},
		// sort on the _id
		{"$sort": bson.M{
			"_id.r": -1,
		}},
		{"$skip": skip},
		// limit 2 on purpose: the second result is only used to detect whether
		// an older revision exists beyond the one being served
		{"$limit": 2},
	}
	tasksForVersions := []TasksForVersion{}
	err = db.Aggregate(collection, pipeline, &tasksForVersions)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if len(tasksForVersions) == 0 {
		http.Error(w, "no tasks found", http.StatusNotFound)
		return
	}
	// we default have another revision
	lastRevision := false
	currentVersion := tasksForVersions[0]
	// if there is only one version, then we are at the last revision.
	if len(tasksForVersions) < 2 {
		lastRevision = true
	}
	// get the version commit info
	v, err := version.FindOne(version.ById(currentVersion.Id.VersionId))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if v == nil {
		// the aggregation found tasks but their version document is missing
		http.Error(w, "{}", http.StatusNotFound)
		return
	}
	commitInfo := CommitInfo{
		Author:     v.Author,
		Message:    v.Message,
		CreateTime: v.CreateTime,
		Revision:   v.Revision,
		VersionId:  v.Id,
	}
	data := VersionData{currentVersion.Tasks, commitInfo, lastRevision}
	plugin.WriteJSON(w, http.StatusOK, data)
}
func (restapi *restAPI) getAverageSchedulerStats(w http.ResponseWriter, r *http.Request) { // get granularity (in seconds) granularity, err := util.GetIntValue(r, "granularity", 0) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } if granularity == 0 { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: "invalid granularity"}) return } // get number of days back daysBack, err := util.GetIntValue(r, "numberDays", 0) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } if daysBack == 0 { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: "invalid days back"}) return } isCSV, err := util.GetBoolValue(r, "csv", true) if err != nil { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: err.Error()}) return } distroId := mux.Vars(r)["distro_id"] if distroId == "" { restapi.WriteJSON(w, http.StatusBadRequest, responseError{Message: "invalid distro id"}) return } bounds := model.CalculateBounds(daysBack, granularity) buckets, err := model.AverageStatistics(distroId, bounds) if err != nil { restapi.WriteJSON(w, http.StatusInternalServerError, responseError{Message: fmt.Sprintf("error getting buckets: %v", err.Error())}) return } restBuckets := []restAvgBucket{} // convert the time.Durations into integers for _, b := range buckets { r := restAvgBucket{ Id: b.Id, AverageTime: int(b.AverageTime), NumberTasks: b.NumberTasks, Start: b.Start, End: b.End, } restBuckets = append(restBuckets, r) } if isCSV { util.WriteCSVResponse(w, http.StatusOK, restBuckets) return } restapi.WriteJSON(w, http.StatusOK, buckets) }