// ListJobs returns a handler that renders the pipeline dashboard as a list of atc.Job values.
func (s *Server) ListJobs(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("list-jobs")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var jobs []atc.Job

		dashboard, groups, err := pipelineDB.GetDashboard()
		if err != nil {
			logger.Error("failed-to-get-dashboard", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		for _, job := range dashboard {
			jobs = append(
				jobs,
				present.Job(job.Job, job.JobConfig, groups, job.FinishedBuild, job.NextBuild),
			)
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(jobs)
	})
}
func (s *Server) RenamePipeline(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			s.logger.Error("call-to-update-pipeline-name-copy-failed", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		var value struct{ Name string }
		err = json.Unmarshal(data, &value)
		if err != nil {
			s.logger.Error("call-to-update-pipeline-name-unmarshal-failed", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		err = pipelineDB.UpdateName(value.Name)
		if err != nil {
			s.logger.Error("call-to-update-pipeline-name-failed", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusNoContent)
	})
}
func (s *Server) ListResources(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var resources []atc.Resource

		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		showCheckErr := s.validator.IsAuthenticated(r)

		for _, resource := range config.Resources {
			dbResource, err := pipelineDB.GetResource(resource.Name)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				return
			}

			resources = append(
				resources,
				present.Resource(
					resource,
					config.Groups,
					dbResource,
					showCheckErr,
				),
			)
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(resources)
	})
}
func (s *Server) UnpauseResource(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		resourceName := rata.Param(r, "resource_name")

		_, found, err := pipelineDB.GetResource(resourceName)
		if err != nil {
			s.logger.Error("failed-to-get-resource", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			s.logger.Debug("resource-not-found", lager.Data{"resource": resourceName})
			w.WriteHeader(http.StatusNotFound)
			return
		}

		err = pipelineDB.UnpauseResource(resourceName)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (rsf *radarSchedulerFactory) BuildScheduler(pipelineDB db.PipelineDB) *scheduler.Scheduler {
	radar := rsf.BuildRadar(pipelineDB)

	return &scheduler.Scheduler{
		PipelineDB: pipelineDB,
		BuildsDB:   rsf.db,
		Factory:    &factory.BuildFactory{PipelineName: pipelineDB.GetPipelineName()},
		Engine:     rsf.engine,
		Scanner:    radar,
	}
}
func (s *Server) GetVersionsDB(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("get-versions-db-pipeline")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		versionsDB, err := pipelineDB.LoadVersionsDB()
		if err != nil {
			logger.Error("failed-to-load-versions-db", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(versionsDB)
	})
}
func (s *Server) UnpausePipeline(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		err := pipelineDB.Unpause()
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (s *Server) UnpauseResource(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		resourceName := rata.Param(r, "resource_name")

		err := pipelineDB.UnpauseResource(resourceName)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (s *Server) PauseJob(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := rata.Param(r, "job_name")

		err := pipelineDB.PauseJob(jobName)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (rsf *radarSchedulerFactory) BuildScheduler(pipelineDB db.PipelineDB) scheduler.BuildScheduler {
	radar := rsf.BuildRadar(pipelineDB)

	return &scheduler.Scheduler{
		PipelineDB: pipelineDB,
		BuildsDB:   rsf.db,
		Factory: factory.NewBuildFactory(
			pipelineDB.GetPipelineName(),
			atc.NewPlanFactory(time.Now().Unix()),
		),
		Engine:  rsf.engine,
		Scanner: radar,
	}
}
func (s *Server) PausePipeline(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("pause-pipeline")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		err := pipelineDB.Pause()
		if err != nil {
			logger.Error("failed-to-pause-pipeline", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
// GetResource returns a handler that presents a single resource from the
// pipeline config, exposing check errors only to authenticated callers.
func (s *Server) GetResource(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("get-resource")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config, _, found, err := pipelineDB.GetConfig()
		if err != nil {
			logger.Error("failed-to-get-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			logger.Info("config-not-found")
			w.WriteHeader(http.StatusNotFound)
			return
		}

		resourceName := r.FormValue(":resource_name")

		resourceConfig, resourceFound := config.Resources.Lookup(resourceName)
		if !resourceFound {
			logger.Info("resource-not-in-config")
			w.WriteHeader(http.StatusNotFound)
			return
		}

		dbResource, found, err := pipelineDB.GetResource(resourceName)
		if err != nil {
			logger.Error("failed-to-get-resource", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			logger.Debug("resource-not-found", lager.Data{"resource": resourceName})
			w.WriteHeader(http.StatusNotFound)
			return
		}

		resource := present.Resource(
			resourceConfig,
			config.Groups,
			dbResource,
			auth.IsAuthenticated(r),
		)

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(resource)
	})
}
func (s *Server) ListResources(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("list-resources")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var resources []atc.Resource

		config, _, found, err := pipelineDB.GetConfig()
		if err != nil {
			logger.Error("failed-to-get-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		showCheckErr := auth.IsAuthenticated(r)

		for _, resource := range config.Resources {
			dbResource, found, err := pipelineDB.GetResource(resource.Name)
			if err != nil {
				logger.Error("failed-to-get-resource", err)
				w.WriteHeader(http.StatusInternalServerError)
				return
			}

			if !found {
				logger.Debug("resource-not-found", lager.Data{"resource": resource})
				w.WriteHeader(http.StatusNotFound)
				return
			}

			resources = append(
				resources,
				present.Resource(
					resource,
					config.Groups,
					dbResource,
					showCheckErr,
				),
			)
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(resources)
	})
}
func (s *Server) PauseResource(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("pause-resource")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		resourceName := rata.Param(r, "resource_name")

		err := pipelineDB.PauseResource(resourceName)
		if err != nil {
			logger.Error("failed-to-pause-resource", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (s *Server) UnpauseJob(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("unpause-job")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := rata.Param(r, "job_name")

		err := pipelineDB.UnpauseJob(jobName)
		if err != nil {
			logger.Error("failed-to-unpause-job", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (s *Server) EnableResourceVersion(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		resourceID, err := strconv.Atoi(rata.Param(r, "resource_version_id"))
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}

		err = pipelineDB.EnableVersionedResource(resourceID)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
func (s *Server) DisableResourceVersion(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("disable-resource-version")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		resourceID, err := strconv.Atoi(rata.Param(r, "resource_version_id"))
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}

		err = pipelineDB.DisableVersionedResource(resourceID)
		if err != nil {
			logger.Error("failed-to-disable-versioned-resource", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
	})
}
// CreateJobBuild returns a handler that triggers a new build of the requested
// job, unless manual triggering is disabled for that job.
func (s *Server) CreateJobBuild(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		logger := s.logger.Session("create-job-build")

		jobName := r.FormValue(":job_name")

		config, _, found, err := pipelineDB.GetConfig()
		if err != nil {
			logger.Error("could-not-get-pipeline-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		job, found := config.Jobs.Lookup(jobName)
		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		if job.DisableManualTrigger {
			w.WriteHeader(http.StatusConflict)
			return
		}

		scheduler := s.schedulerFactory.BuildScheduler(pipelineDB, s.externalURL)

		build, _, err := scheduler.TriggerImmediately(logger, job, config.Resources, config.ResourceTypes)
		if err != nil {
			logger.Error("failed-to-trigger", err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "failed to trigger: %s", err)
			return
		}

		json.NewEncoder(w).Encode(present.Build(build))
	})
}
func (s *Server) ListJobBuilds(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := r.FormValue(":job_name")

		builds, err := pipelineDB.GetAllJobBuilds(jobName)
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		w.WriteHeader(http.StatusOK)

		resources := make([]atc.Build, len(builds))
		for i := 0; i < len(builds); i++ {
			resources[i] = present.Build(builds[i])
		}

		json.NewEncoder(w).Encode(resources)
	})
}
func (server *server) GetPipeline(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			server.logger.Error("failed-to-load-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		groups := map[string]bool{}
		for _, group := range config.Groups {
			groups[group.Name] = false
		}

		enabledGroups, found := r.URL.Query()["groups"]
		if !found && len(config.Groups) > 0 {
			enabledGroups = []string{config.Groups[0].Name}
		}

		for _, name := range enabledGroups {
			groups[name] = true
		}

		data := TemplateData{
			Groups: groups,
			GroupStates: group.States(config.Groups, func(g atc.GroupConfig) bool {
				return groups[g.Name]
			}),
			PipelineName: pipelineDB.GetPipelineName(),
		}

		log := server.logger.Session("index")

		err = server.template.Execute(w, data)
		if err != nil {
			log.Fatal("failed-to-task-template", err, lager.Data{
				"template-data": data,
			})
		}
	})
}
func (s *Server) DeletePipeline(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		logger := s.logger.Session("destroying-pipeline", lager.Data{
			"name": pipelineDB.GetPipelineName(),
		})

		logger.Info("start")

		err := pipelineDB.Destroy()
		if err != nil {
			logger.Error("failed", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		logger.Info("done")

		w.WriteHeader(http.StatusNoContent)
	})
}
func (s *Server) ListJobs(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var jobs []atc.Job

		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		for _, job := range config.Jobs {
			finished, next, err := pipelineDB.GetJobFinishedAndNextBuild(job.Name)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				return
			}

			dbJob, err := pipelineDB.GetJob(job.Name)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				return
			}

			jobs = append(jobs, present.Job(dbJob, job, config.Groups, finished, next))
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(jobs)
	})
}
func (s *Server) GetJob(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := r.FormValue(":job_name")

		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		job, found := config.Jobs.Lookup(jobName)
		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		finished, next, err := pipelineDB.GetJobFinishedAndNextBuild(jobName)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		dbJob, err := pipelineDB.GetJob(job.Name)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(present.Job(dbJob, job, config.Groups, finished, next))
	})
}
func (s *Server) GetJobBuild(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("get-job-build")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := r.FormValue(":job_name")
		buildName := r.FormValue(":build_name")

		build, found, err := pipelineDB.GetJobBuild(jobName, buildName)
		if err != nil {
			logger.Error("failed-to-get-job-build", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(present.Build(build))
	})
}
func (s *Server) ListBuildsWithVersionAsOutput(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		versionIDString := r.FormValue(":resource_version_id")

		versionID, err := strconv.Atoi(versionIDString)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}

		builds, err := pipelineDB.GetBuildsWithVersionAsOutput(versionID)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		presentedBuilds := []atc.Build{}
		for _, build := range builds {
			presentedBuilds = append(presentedBuilds, present.Build(build))
		}

		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(presentedBuilds)
	})
}
// ListResourceVersions returns a handler that serves a paginated list of a
// resource's versions, adding next/previous pagination links when available.
func (s *Server) ListResourceVersions(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("list-resource-versions")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			err   error
			until int
			since int
			limit int
		)

		resourceName := r.FormValue(":resource_name")

		urlUntil := r.FormValue(atc.PaginationQueryUntil)
		until, _ = strconv.Atoi(urlUntil)

		urlSince := r.FormValue(atc.PaginationQuerySince)
		since, _ = strconv.Atoi(urlSince)

		urlLimit := r.FormValue(atc.PaginationQueryLimit)
		limit, _ = strconv.Atoi(urlLimit)
		if limit == 0 {
			limit = atc.PaginationAPIDefaultLimit
		}

		versions, pagination, found, err := pipelineDB.GetResourceVersions(resourceName, db.Page{Until: until, Since: since, Limit: limit})
		if err != nil {
			logger.Error("failed-to-get-resource-versions", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		if pagination.Next != nil {
			s.addNextLink(w, pipelineDB.GetPipelineName(), resourceName, *pagination.Next)
		}

		if pagination.Previous != nil {
			s.addPreviousLink(w, pipelineDB.GetPipelineName(), resourceName, *pagination.Previous)
		}

		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)

		resourceVersions := make([]atc.VersionedResource, len(versions))
		for i := 0; i < len(versions); i++ {
			resourceVersions[i] = present.SavedVersionedResource(versions[i])
		}

		json.NewEncoder(w).Encode(resourceVersions)
	})
}
func (s *Server) ListJobBuilds(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			builds []db.Build
			err    error
			until  int
			since  int
			limit  int
		)

		jobName := r.FormValue(":job_name")

		urlUntil := r.FormValue(atc.PaginationQueryUntil)
		until, _ = strconv.Atoi(urlUntil)

		urlSince := r.FormValue(atc.PaginationQuerySince)
		since, _ = strconv.Atoi(urlSince)

		urlLimit := r.FormValue(atc.PaginationQueryLimit)
		limit, _ = strconv.Atoi(urlLimit)
		if limit == 0 {
			limit = atc.PaginationAPIDefaultLimit
		}

		builds, pagination, err := pipelineDB.GetJobBuilds(jobName, db.Page{
			Since: since,
			Until: until,
			Limit: limit,
		})
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		if pagination.Next != nil {
			s.addNextLink(w, pipelineDB.GetPipelineName(), jobName, *pagination.Next)
		}

		if pagination.Previous != nil {
			s.addPreviousLink(w, pipelineDB.GetPipelineName(), jobName, *pagination.Previous)
		}

		w.WriteHeader(http.StatusOK)

		jobBuilds := make([]atc.Build, len(builds))
		for i := 0; i < len(builds); i++ {
			jobBuilds[i] = present.Build(builds[i])
		}

		json.NewEncoder(w).Encode(jobBuilds)
	})
}
// TriggerBuild immediately schedules a build of the requested job and
// redirects the browser to the new build's page.
func (server *server) TriggerBuild(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			server.logger.Error("failed-to-load-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		job, found := config.Jobs.Lookup(r.FormValue(":job"))
		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		log := server.logger.Session("trigger-build", lager.Data{
			"job": job.Name,
		})

		log.Debug("triggering")

		scheduler := server.radarSchedulerFactory.BuildScheduler(pipelineDB)

		build, err := scheduler.TriggerImmediately(log, job, config.Resources)
		if err != nil {
			log.Error("failed-to-trigger", err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "failed to trigger: %s", err)
			return
		}

		redirectPath, err := routes.Routes.CreatePathForRoute(routes.GetBuild, rata.Params{
			"pipeline_name": pipelineDB.GetPipelineName(),
			"job":           job.Name,
			"build":         build.Name,
		})
		if err != nil {
			log.Fatal("failed-to-construct-redirect-uri", err, lager.Data{
				"pipeline": pipelineDB.GetPipelineName(),
				"job":      job.Name,
				"build":    build.Name,
			})
		}

		http.Redirect(w, r, redirectPath, http.StatusFound)
	})
}
func (s *Server) GetJob(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("get-job")

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := r.FormValue(":job_name")

		config, _, found, err := pipelineDB.GetConfig()
		if err != nil {
			logger.Error("could-not-get-pipeline-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		job, found := config.Jobs.Lookup(jobName)
		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		finished, next, err := pipelineDB.GetJobFinishedAndNextBuild(jobName)
		if err != nil {
			logger.Error("could-not-get-job-finished-and-next-build", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		dbJob, err := pipelineDB.GetJob(job.Name)
		if err != nil {
			logger.Error("could-not-get-job-finished", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(present.Job(dbJob, job, config.Groups, finished, next))
	})
}
	sqlDB = db.NewSQL(dbLogger, dbConn, bus)
	pipelineDBFactory = db.NewPipelineDBFactory(dbLogger, dbConn, bus, sqlDB)

	atcProcess, atcPort = startATC(atcBin, 1)
})

AfterEach(func() {
	ginkgomon.Interrupt(atcProcess)

	Expect(dbConn.Close()).To(Succeed())
	Expect(dbListener.Close()).To(Succeed())
})

Describe("viewing a list of builds", func() {
	var page *agouti.Page
	var pipelineDB db.PipelineDB

	BeforeEach(func() {
		var err error
		page, err = agoutiDriver.NewPage()
		Expect(err).NotTo(HaveOccurred())
	})

	AfterEach(func() {
		Expect(page.Destroy()).To(Succeed())
	})

	homepage := func() string {
		return fmt.Sprintf("http://127.0.0.1:%d", atcPort)
	}