func (uis *UIServer) projectsPage(w http.ResponseWriter, r *http.Request) { _ = MustHaveUser(r) projCtx := MustHaveProjectContext(r) allProjects, err := model.FindAllProjectRefs() if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } // construct a json-marshaling friendly representation of our supported triggers allTaskTriggers := []interface{}{} for _, taskTrigger := range alerts.AvailableTaskFailTriggers { allTaskTriggers = append(allTaskTriggers, struct { Id string `json:"id"` Display string `json:"display"` }{taskTrigger.Id(), taskTrigger.Display()}) } data := struct { ProjectData projectContext User *user.DBUser AllProjects []model.ProjectRef AvailableTriggers []interface{} }{projCtx, GetUser(r), allProjects, allTaskTriggers} uis.WriteHTML(w, http.StatusOK, data, "base", "projects.html", "base_angular.html", "menu.html") }
// filterAuthorizedProjects iterates through a list of projects and returns a list of all the projects that a user // is authorized to view and edit the settings of. func (uis *UIServer) filterAuthorizedProjects(u *user.DBUser) ([]model.ProjectRef, error) { allProjects, err := model.FindAllProjectRefs() if err != nil { return nil, err } authorizedProjects := []model.ProjectRef{} // only returns projects for which the user is authorized to see. for _, project := range allProjects { if uis.isSuperUser(u) || isAdmin(u, &project) { authorizedProjects = append(authorizedProjects, project) } } return authorizedProjects, nil }
func (uis *UIServer) addProject(w http.ResponseWriter, r *http.Request) { _ = MustHaveUser(r) vars := mux.Vars(r) id := vars["project_id"] projectRef, err := model.FindOneProjectRef(id) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } if projectRef != nil { http.Error(w, "Project already exists", http.StatusInternalServerError) return } newProject := model.ProjectRef{ Identifier: id, Enabled: true, Tracked: true, RepoKind: "github", } err = newProject.Insert() if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } allProjects, err := model.FindAllProjectRefs() if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } data := struct { Available bool ProjectId string AllProjects []model.ProjectRef }{true, id, allProjects} uis.WriteJSON(w, http.StatusOK, data) }
// getProjectsIds returns a JSON response of an array of active project Ids. // Users must use credentials to see private projects. func (restapi restAPI) getProjectIds(w http.ResponseWriter, r *http.Request) { u := GetUser(r) refs, err := model.FindAllProjectRefs() if err != nil { restapi.WriteJSON(w, http.StatusNotFound, responseError{ Message: fmt.Sprintf("error finding projects: %v", err), }) return } projects := []string{} for _, r := range refs { if r.Enabled && (!r.Private || u != nil) { projects = append(projects, r.Identifier) } } restapi.WriteJSON(w, http.StatusOK, struct { Projects []string `json:"projects"` }{projects}) return }
func (uis *UIServer) modifyProject(w http.ResponseWriter, r *http.Request) { _ = MustHaveUser(r) vars := mux.Vars(r) id := vars["project_id"] projectRef, err := model.FindOneProjectRef(id) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } if projectRef == nil { http.Error(w, "Project not found", http.StatusNotFound) return } responseRef := struct { Identifier string `json:"id"` DisplayName string `json:"display_name"` RemotePath string `json:"remote_path"` BatchTime int `json:"batch_time"` DeactivatePrevious bool `json:"deactivate_previous"` Branch string `json:"branch_name"` ProjVarsMap map[string]string `json:"project_vars"` Enabled bool `json:"enabled"` Private bool `json:"private"` Owner string `json:"owner_name"` Repo string `json:"repo_name"` AlertConfig map[string][]struct { Provider string `json:"provider"` Settings map[string]interface{} `json:"settings"` } `json:"alert_config"` }{} err = util.ReadJSONInto(r.Body, &responseRef) if err != nil { http.Error(w, fmt.Sprintf("Error parsing request body %v", err), http.StatusInternalServerError) return } projectRef.DisplayName = responseRef.DisplayName projectRef.RemotePath = responseRef.RemotePath projectRef.BatchTime = responseRef.BatchTime projectRef.Branch = responseRef.Branch projectRef.Enabled = responseRef.Enabled projectRef.Private = responseRef.Private projectRef.Owner = responseRef.Owner projectRef.DeactivatePrevious = responseRef.DeactivatePrevious projectRef.Repo = responseRef.Repo projectRef.Identifier = id projectRef.Alerts = map[string][]model.AlertConfig{} for triggerId, alerts := range responseRef.AlertConfig { //TODO validate the triggerID, provider, and settings. 
for _, alert := range alerts { projectRef.Alerts[triggerId] = append(projectRef.Alerts[triggerId], model.AlertConfig{ Provider: alert.Provider, Settings: bson.M(alert.Settings), }) } } err = projectRef.Upsert() if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } //modify project vars if necessary projectVars := model.ProjectVars{id, responseRef.ProjVarsMap} _, err = projectVars.Upsert() if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } allProjects, err := model.FindAllProjectRefs() if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return } data := struct { AllProjects []model.ProjectRef }{allProjects} uis.WriteJSON(w, http.StatusOK, data) }
// RunAllMonitoring performs one full monitoring pass: task cleanup, host
// cleanup, host monitoring checks, notifications, and spawn-host expiration
// warnings. Failures within an individual step are logged and skipped so one
// bad entry cannot stall the rest of the pass; only failures while loading
// the initial data (distros, project refs, expiring hosts) abort with an
// error.
func RunAllMonitoring(settings *evergreen.Settings) error {
	// load in all of the distros
	distros, err := distro.Find(db.Q{})
	if err != nil {
		return fmt.Errorf("error finding distros: %v", err)
	}

	// fetch the project refs, which we will use to get all of the projects
	projectRefs, err := model.FindAllProjectRefs()
	if err != nil {
		return fmt.Errorf("error loading in project refs: %v", err)
	}

	// turn the project refs into a map of the project id -> project
	projects := map[string]model.Project{}
	for _, ref := range projectRefs {
		// only monitor projects that are enabled
		if !ref.Enabled {
			continue
		}
		project, err := model.FindProject("", &ref)

		// continue on error to stop the whole monitoring process from
		// being held up
		if err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "error finding project %v: %v",
				ref.Identifier, err)
			continue
		}

		if project == nil {
			evergreen.Logger.Logf(slogger.ERROR, "no project entry found for"+
				" ref %v", ref.Identifier)
			continue
		}

		projects[project.Identifier] = *project
	}

	// initialize the task monitor
	taskMonitor := &TaskMonitor{
		flaggingFuncs: defaultTaskFlaggingFuncs,
	}

	// clean up any necessary tasks; errors are logged per-task so the pass
	// continues
	errs := taskMonitor.CleanupTasks(projects)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error cleaning up tasks: %v", err)
	}

	// initialize the host monitor
	hostMonitor := &HostMonitor{
		flaggingFuncs:   defaultHostFlaggingFuncs,
		monitoringFuncs: defaultHostMonitoringFuncs,
	}

	// clean up any necessary hosts
	errs = hostMonitor.CleanupHosts(distros, settings)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error cleaning up hosts: %v", err)
	}

	// run monitoring checks
	errs = hostMonitor.RunMonitoringChecks(settings)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error running host monitoring checks: %v", err)
	}

	// initialize the notifier
	notifier := &Notifier{
		notificationBuilders: defaultNotificationBuilders,
	}

	// send notifications
	errs = notifier.Notify(settings)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error sending notifications: %v", err)
	}

	// Do alerts for spawnhosts - collect all hosts expiring in the next 12 hours.
	// The trigger logic will filter out any hosts that aren't in a notification window, or have
	// already have alerts sent.
	now := time.Now()
	thresholdTime := now.Add(12 * time.Hour)
	expiringSoonHosts, err := host.Find(host.ByExpiringBetween(now, thresholdTime))
	if err != nil {
		return err
	}

	for _, h := range expiringSoonHosts {
		// h is only passed by pointer within this iteration, so taking its
		// address here is safe despite the loop-variable reuse
		err := alerts.RunSpawnWarningTriggers(&h)

		if err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "Error queueing alert: %v", err)
		}
	}

	return nil
}