Example #1
func (s *Server) ListResourceVersions(pipelineDB db.PipelineDB) http.Handler {
	logger := s.logger.Session("list-resource-versions")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			err   error
			until int
			since int
			limit int
		)

		resourceName := r.FormValue(":resource_name")

		urlUntil := r.FormValue(atc.PaginationQueryUntil)
		until, _ = strconv.Atoi(urlUntil)

		urlSince := r.FormValue(atc.PaginationQuerySince)
		since, _ = strconv.Atoi(urlSince)

		urlLimit := r.FormValue(atc.PaginationQueryLimit)
		limit, _ = strconv.Atoi(urlLimit)
		if limit == 0 {
			limit = atc.PaginationAPIDefaultLimit
		}

		versions, pagination, found, err := pipelineDB.GetResourceVersions(resourceName, db.Page{Until: until, Since: since, Limit: limit})
		if err != nil {
			logger.Error("failed-to-get-resource-versions", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		if pagination.Next != nil {
			s.addNextLink(w, pipelineDB.GetPipelineName(), resourceName, *pagination.Next)
		}

		if pagination.Previous != nil {
			s.addPreviousLink(w, pipelineDB.GetPipelineName(), resourceName, *pagination.Previous)
		}

		w.Header().Set("Content-Type", "application/json")

		w.WriteHeader(http.StatusOK)

		resourceVersions := make([]atc.VersionedResource, len(versions))
		for i := 0; i < len(versions); i++ {
			resourceVersions[i] = present.SavedVersionedResource(versions[i])
		}
		json.NewEncoder(w).Encode(resourceVersions)
	})
}
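A minimal usage sketch for the handler above, assuming an already-constructed *Server (s) and a db.PipelineDB implementation such as a fake (pipelineDB); both names, the resource name, and the wrapper function are illustrative. Route parameters like ":resource_name" are normally injected by the rata router, so the sketch passes one through the query string instead (imports: fmt, net/url, net/http/httptest).

func exerciseListResourceVersions(s *Server, pipelineDB db.PipelineDB) {
	handler := s.ListResourceVersions(pipelineDB)

	// Pagination parameters use the same atc.PaginationQuery* constants
	// that the handler reads; the values here are arbitrary.
	q := url.Values{}
	q.Set(":resource_name", "some-resource")
	q.Set(atc.PaginationQuerySince, "5")
	q.Set(atc.PaginationQueryLimit, "10")

	req := httptest.NewRequest("GET", "/?"+q.Encode(), nil)
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	// 200 with a JSON array on success, 404 if the resource is unknown,
	// 500 if the lookup fails.
	fmt.Println(rec.Code, rec.Body.String())
}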
Example #2
func (rsf *radarSchedulerFactory) BuildScheduler(pipelineDB db.PipelineDB) *scheduler.Scheduler {
	radar := rsf.BuildRadar(pipelineDB)
	return &scheduler.Scheduler{
		PipelineDB: pipelineDB,
		BuildsDB:   rsf.db,
		Factory:    &factory.BuildFactory{PipelineName: pipelineDB.GetPipelineName()},
		Engine:     rsf.engine,
		Scanner:    radar,
	}
}
Example #3
func (rsf *radarSchedulerFactory) BuildScheduler(pipelineDB db.PipelineDB) scheduler.BuildScheduler {
	radar := rsf.BuildRadar(pipelineDB)
	return &scheduler.Scheduler{
		PipelineDB: pipelineDB,
		BuildsDB:   rsf.db,
		Factory: factory.NewBuildFactory(
			pipelineDB.GetPipelineName(),
			atc.NewPlanFactory(time.Now().Unix()),
		),
		Engine:  rsf.engine,
		Scanner: radar,
	}
}
Example #4
func (s *Server) ListJobBuilds(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			builds []db.Build
			err    error
			until  int
			since  int
			limit  int
		)

		jobName := r.FormValue(":job_name")

		urlUntil := r.FormValue(atc.PaginationQueryUntil)
		until, _ = strconv.Atoi(urlUntil)

		urlSince := r.FormValue(atc.PaginationQuerySince)
		since, _ = strconv.Atoi(urlSince)

		urlLimit := r.FormValue(atc.PaginationQueryLimit)
		limit, _ = strconv.Atoi(urlLimit)
		if limit == 0 {
			limit = atc.PaginationAPIDefaultLimit
		}

		builds, pagination, err := pipelineDB.GetJobBuilds(jobName, db.Page{
			Since: since,
			Until: until,
			Limit: limit,
		})
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		if pagination.Next != nil {
			s.addNextLink(w, pipelineDB.GetPipelineName(), jobName, *pagination.Next)
		}

		if pagination.Previous != nil {
			s.addPreviousLink(w, pipelineDB.GetPipelineName(), jobName, *pagination.Previous)
		}

		w.WriteHeader(http.StatusOK)

		jobBuilds := make([]atc.Build, len(builds))
		for i := 0; i < len(builds); i++ {
			jobBuilds[i] = present.Build(builds[i])
		}
		json.NewEncoder(w).Encode(jobBuilds)
	})
}
Example #5
func (server *server) TriggerBuild(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			server.logger.Error("failed-to-load-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		job, found := config.Jobs.Lookup(r.FormValue(":job"))
		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		log := server.logger.Session("trigger-build", lager.Data{
			"job": job.Name,
		})

		log.Debug("triggering")

		scheduler := server.radarSchedulerFactory.BuildScheduler(pipelineDB)

		build, err := scheduler.TriggerImmediately(log, job, config.Resources)
		if err != nil {
			log.Error("failed-to-trigger", err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "failed to trigger: %s", err)
			return
		}

		redirectPath, err := routes.Routes.CreatePathForRoute(routes.GetBuild, rata.Params{
			"pipeline_name": pipelineDB.GetPipelineName(),
			"job":           job.Name,
			"build":         build.Name,
		})
		if err != nil {
			log.Fatal("failed-to-construct-redirect-uri", err, lager.Data{
				"pipeline": pipelineDB.GetPipelineName(),
				"job":      job.Name,
				"build":    build.Name,
			})
		}

		http.Redirect(w, r, redirectPath, http.StatusFound)
	})
}
Example #6
func (server *server) GetPipeline(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			server.logger.Error("failed-to-load-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		groups := map[string]bool{}
		for _, group := range config.Groups {
			groups[group.Name] = false
		}

		enabledGroups, found := r.URL.Query()["groups"]
		if !found && len(config.Groups) > 0 {
			enabledGroups = []string{config.Groups[0].Name}
		}

		for _, name := range enabledGroups {
			groups[name] = true
		}

		data := TemplateData{
			Groups: groups,
			GroupStates: group.States(config.Groups, func(g atc.GroupConfig) bool {
				return groups[g.Name]
			}),
			PipelineName: pipelineDB.GetPipelineName(),
		}

		log := server.logger.Session("index")

		err = server.template.Execute(w, data)
		if err != nil {
			log.Fatal("failed-to-task-template", err, lager.Data{
				"template-data": data,
			})
		}
	})
}
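The group toggling above relies on r.URL.Query()["groups"] returning every repeated "groups" parameter from the URL. A small standalone illustration with hypothetical pipeline and group names (imports: fmt, net/url):

func groupsQueryExample() {
	u, err := url.Parse("/pipelines/main?groups=unit-tests&groups=deploy")
	if err != nil {
		panic(err)
	}

	// Every repeated "groups" parameter is collected into one slice,
	// so both groups would be enabled by the handler above.
	fmt.Println(u.Query()["groups"]) // [unit-tests deploy]
}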
Example #7
func (s *Server) DeletePipeline(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		logger := s.logger.Session("destroying-pipeline", lager.Data{
			"name": pipelineDB.GetPipelineName(),
		})

		logger.Info("start")

		err := pipelineDB.Destroy()
		if err != nil {
			s.logger.Error("failed", err)

			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		logger.Info("done")

		w.WriteHeader(http.StatusNoContent)
	})
}
Example #8
			versions = nil
			expectedVersions = nil
			for i := 0; i < 10; i++ {
				version := atc.Version{"version": fmt.Sprintf("%d", i+1)}
				versions = append(versions, version)
				expectedVersions = append(expectedVersions,
					db.SavedVersionedResource{
						ID:      i + 1,
						Enabled: true,
						VersionedResource: db.VersionedResource{
							Resource:     resource.Name,
							Type:         resource.Type,
							Version:      db.Version(version),
							Metadata:     nil,
							PipelineName: pipelineDB.GetPipelineName(),
						},
					})
			}

			err := pipelineDB.SaveResourceVersions(resource, versions)
			Expect(err).NotTo(HaveOccurred())
		})

		Context("when there are no versions to be found", func() {
			It("returns the versions, with previous/next pages", func() {
				historyPage, pagination, err := pipelineDB.GetResourceVersions("nope", db.Page{})
				Expect(err).ToNot(HaveOccurred())
				Expect(historyPage).To(Equal([]db.SavedVersionedResource{}))
				Expect(pagination).To(Equal(db.Pagination{}))
			})
Example #9
		var otherBuild db.Build

		BeforeEach(func() {
			var err error
			build, err = pipelineDB.CreateJobBuild("some-job")
			Expect(err).ToNot(HaveOccurred())
			otherBuild, err = pipelineDB.CreateJobBuild("some-job")
			Expect(err).ToNot(HaveOccurred())

			success, err := database.StartBuild(build.ID, "", "")
			Expect(err).ToNot(HaveOccurred())
			Expect(success).To(BeTrue())
		})

		It("only returns back build preps of pending builds", func() {
			buildPreps, err := database.GetBuildPrepsForPendingBuildsForPipeline(pipelineDB.GetPipelineName())
			Expect(err).ToNot(HaveOccurred())

			Expect(len(buildPreps)).To(Equal(1))
			Expect(buildPreps[0].BuildID).To(Equal(otherBuild.ID))
		})
	})

	Describe("GetAllStartedBuilds", func() {
		var build1 db.Build
		var build2 db.Build

		BeforeEach(func() {
			var err error

			build1, err = database.CreateOneOffBuild()
Example #10
func (server *server) GetBuild(pipelineDB db.PipelineDB) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		jobName := r.FormValue(":job")
		if len(jobName) == 0 {
			w.WriteHeader(http.StatusBadRequest)
			return
		}

		buildName := r.FormValue(":build")
		if len(buildName) == 0 {
			w.WriteHeader(http.StatusBadRequest)
			return
		}

		config, _, err := pipelineDB.GetConfig()
		if err != nil {
			server.logger.Error("failed-to-load-config", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		job, found := config.Jobs.Lookup(jobName)
		if !found {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		log := server.logger.Session("get-build", lager.Data{
			"job":   job.Name,
			"build": buildName,
		})

		build, err := pipelineDB.GetJobBuild(jobName, buildName)
		if err != nil {
			log.Error("get-build-failed", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		inputs, outputs, err := pipelineDB.GetBuildResources(build.ID)
		if err != nil {
			log.Error("failed-to-get-build-resources", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		bs, err := pipelineDB.GetAllJobBuilds(jobName)
		if err != nil {
			log.Error("get-all-builds-failed", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		templateData := TemplateData{
			GroupStates: group.States(config.Groups, func(g atc.GroupConfig) bool {
				for _, groupJob := range g.Jobs {
					if groupJob == job.Name {
						return true
					}
				}

				return false
			}),

			Job:    job,
			Builds: bs,

			Build:        build,
			Inputs:       inputs,
			Outputs:      outputs,
			PipelineName: pipelineDB.GetPipelineName(),
		}

		err = server.template.Execute(w, templateData)
		if err != nil {
			log.Fatal("failed-to-task-template", err, lager.Data{
				"template-data": templateData,
			})
		}
	})
}