func (MigrateSuite) TestMigrateRedisService(c *C) {
	db := setupTestDB(c, "controllertest_migrate_redis_service")
	m := &testMigrator{c: c, db: db}

	// start from ID 19
	m.migrateTo(19)

	type procType struct {
		Service string `json:"service"`
	}

	// add a Redis app
	appName := "redis-" + random.UUID()
	appMeta := map[string]string{"flynn-system-app": "true"}
	releaseID := random.UUID()
	procs := map[string]*procType{
		"redis": {Service: "redis"},
	}
	c.Assert(db.Exec(`INSERT INTO releases (release_id, processes) VALUES ($1, $2)`, releaseID, procs), IsNil)
	c.Assert(db.Exec(`INSERT INTO apps (app_id, name, release_id, meta) VALUES ($1, $2, $3, $4)`, random.UUID(), appName, releaseID, appMeta), IsNil)

	// migrate to 20 and check the service got updated
	m.migrateTo(20)
	var updatedProcs map[string]*procType
	c.Assert(db.QueryRow(`SELECT processes FROM releases WHERE release_id = $1`, releaseID).Scan(&updatedProcs), IsNil)
	proc, ok := updatedProcs["redis"]
	if !ok {
		c.Fatal("missing redis process type")
	}
	c.Assert(proc.Service, Equals, appName)
}
// TestMigrateJobStates checks that migrating to ID 9 does not break existing
// job records
func (MigrateSuite) TestMigrateJobStates(c *C) {
	db := setupTestDB(c, "controllertest_migrate_job_states")
	m := &testMigrator{c: c, db: db}

	// start from ID 7
	m.migrateTo(7)

	// insert a job
	hostID := "host1"
	uuid := random.UUID()
	jobID := cluster.GenerateJobID(hostID, uuid)
	appID := random.UUID()
	releaseID := random.UUID()
	c.Assert(db.Exec(`INSERT INTO apps (app_id, name) VALUES ($1, $2)`, appID, "migrate-app"), IsNil)
	c.Assert(db.Exec(`INSERT INTO releases (release_id) VALUES ($1)`, releaseID), IsNil)
	c.Assert(db.Exec(`INSERT INTO job_cache (job_id, app_id, release_id, state) VALUES ($1, $2, $3, $4)`, jobID, appID, releaseID, "up"), IsNil)

	// migrate to 8 and check job states are still constrained
	m.migrateTo(8)
	err := db.Exec(`UPDATE job_cache SET state = 'foo' WHERE job_id = $1`, jobID)
	c.Assert(err, NotNil)
	if !postgres.IsPostgresCode(err, postgres.CheckViolation) {
		c.Fatalf("expected postgres check violation, got %s", err)
	}

	// migrate to 9 and check job IDs are correct, pending state is valid
	m.migrateTo(9)
	var clusterID, dbUUID, dbHostID string
	c.Assert(db.QueryRow("SELECT cluster_id, job_id, host_id FROM job_cache WHERE cluster_id = $1", jobID).Scan(&clusterID, &dbUUID, &dbHostID), IsNil)
	c.Assert(clusterID, Equals, jobID)
	c.Assert(dbUUID, Equals, uuid)
	c.Assert(dbHostID, Equals, hostID)
	c.Assert(db.Exec(`UPDATE job_cache SET state = 'pending' WHERE job_id = $1`, uuid), IsNil)
}
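// The test above relies on cluster.GenerateJobID producing a composite
// "<hostID>-<uuid>" identifier. A minimal sketch of the inverse operation,
// assuming host IDs contain no dashes as in the test data (parseJobID is
// hypothetical, not part of the cluster package; uses strings and fmt):
func parseJobID(jobID string) (hostID, uuid string, err error) {
	// split on the first "-" only; the UUID part contains several dashes
	parts := strings.SplitN(jobID, "-", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("malformed job ID: %q", jobID)
	}
	return parts[0], parts[1], nil
}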
func (MigrateSuite) TestMigrateReleaseArtifactIndex(c *C) {
	db := setupTestDB(c, "controllertest_migrate_release_artifact_index")
	m := &testMigrator{c: c, db: db}

	// start from ID 16
	m.migrateTo(16)

	// create some releases and artifacts
	releaseIDs := []string{random.UUID(), random.UUID()}
	for _, releaseID := range releaseIDs {
		c.Assert(db.Exec(`INSERT INTO releases (release_id) VALUES ($1)`, releaseID), IsNil)
	}
	artifactIDs := []string{random.UUID(), random.UUID()}
	c.Assert(db.Exec(`INSERT INTO artifacts (artifact_id, type, uri) VALUES ($1, $2, $3)`, artifactIDs[0], "docker", "http://example.com"), IsNil)
	c.Assert(db.Exec(`INSERT INTO artifacts (artifact_id, type, uri) VALUES ($1, $2, $3)`, artifactIDs[1], "file", "http://example.com"), IsNil)

	// insert some rows into release_artifacts
	for _, releaseID := range releaseIDs {
		for _, artifactID := range artifactIDs {
			c.Assert(db.Exec(`INSERT INTO release_artifacts (release_id, artifact_id) VALUES ($1, $2)`, releaseID, artifactID), IsNil)
		}
	}

	// migrate to 17 and check the index column was set correctly
	m.migrateTo(17)
	for _, releaseID := range releaseIDs {
		for i, artifactID := range artifactIDs {
			var index int32
			c.Assert(db.QueryRow(`SELECT index FROM release_artifacts WHERE release_id = $1 AND artifact_id = $2`, releaseID, artifactID).Scan(&index), IsNil)
			c.Assert(index, Equals, int32(i))
		}
	}
}
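// The migration that populates the index column isn't shown here. A backfill
// of this shape can be expressed with a SQL window function; this is a
// sketch only, assuming release_artifacts has a created_at column to order
// rows per release:
const backfillArtifactIndexSQL = `
UPDATE release_artifacts AS ra
SET index = numbered.rn - 1
FROM (
	SELECT release_id, artifact_id,
	       row_number() OVER (PARTITION BY release_id ORDER BY created_at) AS rn
	FROM release_artifacts
) AS numbered
WHERE ra.release_id = numbered.release_id
  AND ra.artifact_id = numbered.artifact_id`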
func (s *S) TestJobLogWait(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "joblog-wait"})
	hostID, jobID := random.UUID(), random.UUID()
	hc := tu.NewFakeHostClient(hostID)
	hc.SetAttachFunc(jobID, func(req *host.AttachReq, wait bool) (cluster.AttachClient, error) {
		if !wait {
			return nil, cluster.ErrWouldWait
		}
		return cluster.NewAttachClient(newFakeLog(strings.NewReader("foo"))), nil
	})
	s.cc.SetHostClient(hostID, hc)

	// without wait=true the attach refuses to block, so the log is a 404
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/apps/%s/jobs/%s-%s/log", s.srv.URL, app.ID, hostID, jobID), nil)
	c.Assert(err, IsNil)
	req.SetBasicAuth("", authKey)
	res, err := http.DefaultClient.Do(req)
	c.Assert(err, IsNil)
	res.Body.Close()
	c.Assert(res.StatusCode, Equals, 404)

	// with wait=true the attach blocks until the log is available
	req, err = http.NewRequest("GET", fmt.Sprintf("%s/apps/%s/jobs/%s-%s/log?wait=true", s.srv.URL, app.ID, hostID, jobID), nil)
	c.Assert(err, IsNil)
	req.SetBasicAuth("", authKey)
	res, err = http.DefaultClient.Do(req)
	c.Assert(err, IsNil)
	var buf bytes.Buffer
	_, err = buf.ReadFrom(res.Body)
	res.Body.Close()
	c.Assert(err, IsNil)
	c.Assert(buf.String(), Equals, "foo")
}
// TestMigrateReleaseArtifacts checks that migrating to ID 15 correctly
// migrates releases by creating appropriate records in the release_artifacts
// table
func (MigrateSuite) TestMigrateReleaseArtifacts(c *C) {
	db := setupTestDB(c, "controllertest_migrate_release_artifacts")
	m := &testMigrator{c: c, db: db}

	// start from ID 14
	m.migrateTo(14)

	// add some artifacts and releases
	releaseArtifacts := map[string]string{
		random.UUID(): random.UUID(),
		random.UUID(): random.UUID(),
		random.UUID(): random.UUID(),
	}
	for releaseID, artifactID := range releaseArtifacts {
		c.Assert(db.Exec(`INSERT INTO artifacts (artifact_id, type, uri) VALUES ($1, $2, $3)`, artifactID, "docker", "http://example.com/"+artifactID), IsNil)
		c.Assert(db.Exec(`INSERT INTO releases (release_id, artifact_id) VALUES ($1, $2)`, releaseID, artifactID), IsNil)
	}
	c.Assert(db.Exec(`INSERT INTO releases (release_id) VALUES ($1)`, random.UUID()), IsNil)

	// insert multiple slug based releases with the same slug URI
	slugReleaseIDs := []string{random.UUID(), random.UUID()}
	imageArtifactID := random.UUID()
	slugEnv := map[string]string{"SLUG_URL": "http://example.com/slug.tgz"}
	c.Assert(db.Exec(`INSERT INTO artifacts (artifact_id, type, uri) VALUES ($1, $2, $3)`, imageArtifactID, "docker", "http://example.com/"+imageArtifactID), IsNil)
	for _, id := range slugReleaseIDs {
		c.Assert(db.Exec(`INSERT INTO releases (release_id, artifact_id, env) VALUES ($1, $2, $3)`, id, imageArtifactID, slugEnv), IsNil)
		releaseArtifacts[id] = imageArtifactID
	}

	// migrate to 15 and check release_artifacts was populated correctly
	m.migrateTo(15)
	rows, err := db.Query("SELECT release_id, artifact_id FROM release_artifacts INNER JOIN artifacts USING (artifact_id) WHERE type = 'docker'")
	c.Assert(err, IsNil)
	defer rows.Close()
	actual := make(map[string]string)
	for rows.Next() {
		var releaseID, artifactID string
		c.Assert(rows.Scan(&releaseID, &artifactID), IsNil)
		actual[releaseID] = artifactID
	}
	c.Assert(rows.Err(), IsNil)
	c.Assert(actual, DeepEquals, releaseArtifacts)

	for _, id := range slugReleaseIDs {
		// check the slug releases got "git=true" in metadata
		var releaseMeta map[string]string
		err = db.QueryRow("SELECT meta FROM releases WHERE release_id = $1", id).Scan(&releaseMeta)
		c.Assert(err, IsNil)
		c.Assert(releaseMeta, DeepEquals, map[string]string{"git": "true"})

		// check the slug releases got a file artifact with the correct URI and meta
		var slugURI string
		var artifactMeta map[string]string
		err = db.QueryRow("SELECT uri, meta FROM artifacts INNER JOIN release_artifacts USING (artifact_id) WHERE type = 'file' AND release_id = $1", id).Scan(&slugURI, &artifactMeta)
		c.Assert(err, IsNil)
		c.Assert(slugURI, Equals, slugEnv["SLUG_URL"])
		c.Assert(artifactMeta, DeepEquals, map[string]string{"blobstore": "true"})
	}
}
func (s *S) createLogTestApp(c *C, name string, stream io.Reader) (*ct.App, string, string) {
	app := s.createTestApp(c, &ct.App{Name: name})
	hostID, jobID := random.UUID(), random.UUID()
	hc := tu.NewFakeHostClient(hostID)
	hc.SetAttach(jobID, cluster.NewAttachClient(newFakeLog(stream)))
	s.cc.SetHostClient(hostID, hc)
	return app, hostID, jobID
}
func (s *S) TestKillJob(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "killjob"})
	hostID, jobID := random.UUID(), random.UUID()
	hc := tu.NewFakeHostClient(hostID)
	s.cc.AddHost(hc)

	c.Assert(s.c.DeleteJob(app.ID, hostID+"-"+jobID), IsNil)
	c.Assert(hc.IsStopped(jobID), Equals, true)
}
// TestMigrateArtifactMeta checks that migrating to ID 16 correctly
// sets artifact metadata for those stored in the blobstore
func (MigrateSuite) TestMigrateArtifactMeta(c *C) {
	db := setupTestDB(c, "controllertest_migrate_artifact_meta")
	m := &testMigrator{c: c, db: db}

	// start from ID 15
	m.migrateTo(15)

	type artifact struct {
		ID         string
		URI        string
		MetaBefore map[string]string
		MetaAfter  map[string]string
	}

	artifacts := []*artifact{
		{
			ID:         random.UUID(),
			URI:        "http://example.com/file1.tar",
			MetaBefore: nil,
			MetaAfter:  nil,
		},
		{
			ID:         random.UUID(),
			URI:        "http://example.com/file2.tar",
			MetaBefore: map[string]string{"foo": "bar"},
			MetaAfter:  map[string]string{"foo": "bar"},
		},
		{
			ID:         random.UUID(),
			URI:        "http://blobstore.discoverd/file1.tar",
			MetaBefore: nil,
			MetaAfter:  map[string]string{"blobstore": "true"},
		},
		{
			ID:         random.UUID(),
			URI:        "http://blobstore.discoverd/file2.tar",
			MetaBefore: map[string]string{"foo": "bar"},
			MetaAfter:  map[string]string{"foo": "bar", "blobstore": "true"},
		},
	}

	// create the artifacts
	for _, a := range artifacts {
		c.Assert(db.Exec(`INSERT INTO artifacts (artifact_id, type, uri, meta) VALUES ($1, $2, $3, $4)`, a.ID, "file", a.URI, a.MetaBefore), IsNil)
	}

	// migrate to 16 and check the artifacts have the appropriate metadata
	m.migrateTo(16)
	for _, a := range artifacts {
		var meta map[string]string
		c.Assert(db.QueryRow("SELECT meta FROM artifacts WHERE artifact_id = $1", a.ID).Scan(&meta), IsNil)
		c.Assert(meta, DeepEquals, a.MetaAfter)
	}
}
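// The migration logic exercised above amounts to tagging any artifact whose
// URI points at the blobstore service. A stand-alone sketch of that rule as
// implied by the test data (isBlobstoreURI is hypothetical; uses the
// standard library net/url):
func isBlobstoreURI(uri string) bool {
	u, err := url.Parse(uri)
	if err != nil {
		return false
	}
	return u.Host == "blobstore.discoverd"
}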
func (s *S) TestKillJob(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "killjob"})
	hostID, jobID := random.UUID(), random.UUID()
	hc := tu.NewFakeHostClient(hostID)
	s.cc.SetHostClient(hostID, hc)

	res, err := s.Delete("/apps/" + app.ID + "/jobs/" + hostID + "-" + jobID)
	c.Assert(err, IsNil)
	c.Assert(res.StatusCode, Equals, 200)
	c.Assert(hc.IsStopped(jobID), Equals, true)
}
func (TestSuite) TestFormationChange(c *C) {
	s := runTestScheduler(c, nil, true)
	defer s.Stop()
	s.waitJobStart()

	app, err := s.GetApp(testAppID)
	c.Assert(err, IsNil)
	release, err := s.GetRelease(testReleaseID)
	c.Assert(err, IsNil)
	artifact, err := s.GetArtifact(release.ImageArtifactID())
	c.Assert(err, IsNil)

	// Test scaling up an existing formation
	c.Log("Test scaling up an existing formation. Wait for formation change and job start")
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 4}})
	s.waitFormationChange()
	for i := 0; i < 3; i++ {
		job := s.waitJobStart()
		c.Assert(job.Type, Equals, testJobType)
		c.Assert(job.AppID, Equals, app.ID)
		c.Assert(job.ReleaseID, Equals, testReleaseID)
	}
	c.Assert(s.RunningJobs(), HasLen, 4)

	// Test scaling down an existing formation
	c.Log("Test scaling down an existing formation. Wait for formation change and job stop")
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}})
	s.waitFormationChange()
	for i := 0; i < 3; i++ {
		s.waitJobStop()
	}
	c.Assert(s.RunningJobs(), HasLen, 1)

	// Test creating a new formation
	c.Log("Test creating a new formation. Wait for formation change and job start")
	artifact = &ct.Artifact{ID: random.UUID()}
	processes := map[string]int{testJobType: testJobCount}
	release = NewRelease(random.UUID(), artifact, processes)
	s.CreateArtifact(artifact)
	s.CreateRelease(release)
	c.Assert(len(s.formations), Equals, 1)
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: processes})
	s.waitFormationChange()
	c.Assert(len(s.formations), Equals, 2)
	job := s.waitJobStart()
	c.Assert(job.Type, Equals, testJobType)
	c.Assert(job.AppID, Equals, app.ID)
	c.Assert(job.ReleaseID, Equals, release.ID)
}
func (p *Provider) ForkVolume(vol volume.Volume) (volume.Volume, error) {
	zvol, err := p.owns(vol)
	if err != nil {
		return nil, err
	}
	if !vol.IsSnapshot() {
		return nil, fmt.Errorf("can only fork a snapshot")
	}
	id := random.UUID()
	info := &volume.Info{ID: id, Type: vol.Info().Type}
	v2 := &zfsVolume{
		info:      info,
		provider:  zvol.provider,
		basemount: p.mountPath(info),
	}
	cloneID := fmt.Sprintf("%s/%s", zvol.provider.dataset.Name, id)
	v2.dataset, err = zvol.dataset.Clone(cloneID, map[string]string{
		"mountpoint": v2.basemount,
	})
	if err != nil {
		return nil, fmt.Errorf("could not fork volume: %s", err)
	}
	p.volumes[id] = v2
	return v2, nil
}
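// The Clone call above is backed by ZFS copy-on-write semantics: forking a
// snapshot creates a new writable dataset that shares blocks with its
// origin until they diverge. The shell equivalent of the call is roughly
// (a sketch with placeholder dataset names, not taken from this codebase):
//
//	zfs clone -o mountpoint=/mnt/<id> pool/vols/src@snap pool/vols/<id>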
func (s *S) TestCreateArtifact(c *C) {
	for i, id := range []string{"", random.UUID()} {
		in := &ct.Artifact{
			ID:   id,
			Type: ct.ArtifactTypeFlynn,
			RawManifest: ct.ImageManifest{
				Type: ct.ImageManifestTypeV1,
			}.RawManifest(),
			URI: fmt.Sprintf("https://example.com/manifest%d.json", i),
		}
		out := s.createTestArtifact(c, in)
		c.Assert(out.Type, Equals, in.Type)
		c.Assert(out.RawManifest, DeepEquals, in.RawManifest)
		c.Assert(out.URI, Equals, in.URI)
		c.Assert(out.ID, Not(Equals), "")
		if id != "" {
			c.Assert(out.ID, Equals, id)
		}

		gotArtifact, err := s.c.GetArtifact(out.ID)
		c.Assert(err, IsNil)
		c.Assert(gotArtifact, DeepEquals, out)

		_, err = s.c.GetArtifact("fail" + out.ID)
		c.Assert(err, Equals, controller.ErrNotFound)
	}
}
func (a *GenRandomAction) Run(s *State) error {
	if a.Length == 0 {
		a.Length = 16
	}
	data := interpolate(s, a.Data)
	if data == "" {
		switch a.Encoding {
		case "", "hex":
			data = random.Hex(a.Length)
		case "base64":
			data = base64.StdEncoding.EncodeToString(random.Bytes(a.Length))
		case "base64safe":
			data = random.Base64(a.Length)
		case "uuid":
			data = random.UUID()
		default:
			return fmt.Errorf("bootstrap: unknown random type: %q", a.Encoding)
		}
	}
	s.StepData[a.ID] = &RandomData{Data: data}
	if a.ControllerKey {
		s.SetControllerKey(data)
	}
	return nil
}
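// random.Hex, random.Bytes and random.Base64 are Flynn helpers. A
// stdlib-only sketch of the same family of encodings (randomToken is
// hypothetical; assumes crypto/rand, encoding/hex, encoding/base64 and fmt
// are imported):
func randomToken(n int, encoding string) (string, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	switch encoding {
	case "", "hex":
		return hex.EncodeToString(b), nil
	case "base64":
		return base64.StdEncoding.EncodeToString(b), nil
	case "base64safe":
		// URL-safe alphabet, no padding
		return base64.RawURLEncoding.EncodeToString(b), nil
	default:
		return "", fmt.Errorf("unknown encoding %q", encoding)
	}
}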
func main() {
	serviceName := os.Getenv("FLYNN_POSTGRES")
	if serviceName == "" {
		serviceName = "postgres"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("PGPASSWORD")

	const dataDir = "/data"
	idFile := filepath.Join(dataDir, "instance_id")
	idBytes, err := ioutil.ReadFile(idFile)
	if err != nil && !os.IsNotExist(err) {
		shutdown.Fatalf("error reading instance ID: %s", err)
	}
	id := string(idBytes)
	if len(id) == 0 {
		id = random.UUID()
		if err := ioutil.WriteFile(idFile, []byte(id), 0644); err != nil {
			shutdown.Fatalf("error writing instance ID: %s", err)
		}
	}

	err = discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{
		Addr: ":5432",
		Meta: map[string]string{pgIdKey: id},
	}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "postgres")

	pg := NewPostgres(Config{
		ID:           id,
		Singleton:    singleton,
		DataDir:      filepath.Join(dataDir, "db"),
		BinDir:       "/usr/lib/postgresql/9.5/bin/",
		Password:     password,
		Logger:       log.New("component", "postgres"),
		ExtWhitelist: true,
		WaitUpstream: true,
		SHMType:      "posix",
	})

	dd := sd.NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))
	peer := state.NewPeer(inst, id, pgIdKey, singleton, dd, pg, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })
	go peer.Run()

	shutdown.Fatal(ServeHTTP(pg.(*Postgres), peer, hb, log.New("component", "http")))
	// TODO(titanous): clean shutdown of postgres
}
func (r *ArtifactRepo) Add(data interface{}) error {
	a := data.(*ct.Artifact)
	// TODO: actually validate
	if a.ID == "" {
		a.ID = random.UUID()
	}
	if a.Type == "" {
		return ct.ValidationError{Field: "type", Message: "must not be empty"}
	}
	if a.URI == "" {
		return ct.ValidationError{Field: "uri", Message: "must not be empty"}
	}
	if a.Type == ct.ArtifactTypeFlynn && a.RawManifest == nil {
		if a.Size <= 0 {
			return ct.ValidationError{Field: "size", Message: "must be greater than zero"}
		}
		if err := downloadManifest(a); err != nil {
			return ct.ValidationError{Field: "manifest", Message: fmt.Sprintf("failed to download from %s: %s", a.URI, err)}
		}
	}
	tx, err := r.db.Begin()
	if err != nil {
		return err
	}
	err = tx.QueryRow("artifact_insert", a.ID, string(a.Type), a.URI, a.Meta, []byte(a.RawManifest), a.Hashes, a.Size, a.LayerURLTemplate).Scan(&a.CreatedAt)
	if postgres.IsUniquenessError(err, "") {
		tx.Rollback()
		tx, err = r.db.Begin()
		if err != nil {
			return err
		}
		var size *int64
		var layerURLTemplate *string
		err = tx.QueryRow("artifact_select_by_type_and_uri", string(a.Type), a.URI).Scan(&a.ID, &a.Meta, &a.RawManifest, &a.Hashes, &size, &layerURLTemplate, &a.CreatedAt)
		if err != nil {
			tx.Rollback()
			return err
		}
		if size != nil {
			a.Size = *size
		}
		if layerURLTemplate != nil {
			a.LayerURLTemplate = *layerURLTemplate
		}
	}
	if err != nil {
		tx.Rollback()
		return err
	}
	if err := createEvent(tx.Exec, &ct.Event{
		ObjectID:   a.ID,
		ObjectType: ct.EventTypeArtifact,
	}, a); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
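// The insert above races with concurrent requests for the same (type, URI)
// pair, so a uniqueness error is handled by re-reading the existing row in
// a fresh transaction. On PostgreSQL 9.5+ the same pattern can be collapsed
// into one statement; a sketch (table and column names assumed for
// illustration):
//
//	INSERT INTO artifacts (artifact_id, type, uri)
//	VALUES ($1, $2, $3)
//	ON CONFLICT (type, uri) DO NOTHING
//	RETURNING created_at
//
// Note that with DO NOTHING the RETURNING clause yields no row on conflict,
// so a follow-up SELECT of the existing row is still needed in that case.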
func (r *httpRoute) ServeHTTP(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	start, _ := ctxhelper.StartTimeFromContext(ctx)
	req.Header.Set("X-Request-Start", strconv.FormatInt(start.UnixNano()/int64(time.Millisecond), 10))
	req.Header.Set("X-Request-Id", random.UUID())
	r.rp.ServeHTTP(w, req)
}
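// X-Request-Start as set above is the proxy receive time in integer
// milliseconds since the Unix epoch, so a backend can recover queue time
// from it. A minimal sketch of the decoding side (parseRequestStart is
// hypothetical; assumes net/http, strconv and time are imported):
func parseRequestStart(req *http.Request) (time.Time, bool) {
	ms, err := strconv.ParseInt(req.Header.Get("X-Request-Start"), 10, 64)
	if err != nil {
		return time.Time{}, false
	}
	return time.Unix(0, ms*int64(time.Millisecond)), true
}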
func (TestSuite) TestScaleCriticalApp(c *C) {
	s := runTestScheduler(c, nil, true)
	defer s.Stop()
	s.waitJobStart()

	// scale a critical app up
	app := &ct.App{ID: "critical-app", Meta: map[string]string{"flynn-system-critical": "true"}}
	artifact := &ct.Artifact{ID: random.UUID()}
	processes := map[string]int{"critical": 1}
	release := NewRelease("critical-release-1", artifact, processes)
	s.CreateApp(app)
	s.CreateArtifact(artifact)
	s.CreateRelease(release)
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: processes})
	s.waitFormationChange()
	s.waitJobStart()

	// check we can't scale it down
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: nil})
	_, err := s.waitForEvent("refusing to scale down critical app")
	s.c.Assert(err, IsNil)
	s.waitFormationChange()

	// scale up another formation
	newRelease := NewRelease("critical-release-2", artifact, processes)
	s.CreateRelease(newRelease)
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: newRelease.ID, Processes: processes})
	s.waitFormationChange()
	s.waitJobStart()

	// check we can now scale the original down
	s.PutFormation(&ct.Formation{AppID: app.ID, ReleaseID: release.ID, Processes: nil})
	s.waitFormationChange()
	s.waitJobStop()
}
func (api *HTTPAPI) CreateProvider(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	pspec := &volume.ProviderSpec{}
	if err := httphelper.DecodeJSON(r, &pspec); err != nil {
		httphelper.Error(w, err)
		return
	}
	if pspec.ID == "" {
		pspec.ID = random.UUID()
	}
	if pspec.Kind == "" {
		httphelper.ValidationError(w, "kind", "must not be blank")
		return
	}
	var provider volume.Provider
	provider, err := volumemanager.NewProvider(pspec)
	if err == volume.UnknownProviderKind {
		httphelper.ValidationError(w, "kind", fmt.Sprintf("%q is not known", pspec.Kind))
		return
	}
	if err := api.vman.AddProvider(pspec.ID, provider); err != nil {
		switch err {
		case volumemanager.ErrProviderExists:
			httphelper.ObjectExistsError(w, fmt.Sprintf("provider %q already exists", pspec.ID))
			return
		default:
			httphelper.Error(w, err)
			return
		}
	}

	httphelper.JSON(w, 200, pspec)
}
func (s *S) TestJobGet(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "job-get"})
	release := s.createTestRelease(c, &ct.Release{})
	s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID})
	uuid := random.UUID()
	hostID := "host0"
	jobID := cluster.GenerateJobID(hostID, uuid)
	s.createTestJob(c, &ct.Job{
		ID:        jobID,
		UUID:      uuid,
		HostID:    hostID,
		AppID:     app.ID,
		ReleaseID: release.ID,
		Type:      "web",
		State:     ct.JobStateStarting,
		Meta:      map[string]string{"some": "info"},
	})

	// test getting the job with both the job ID and the UUID
	for _, id := range []string{jobID, uuid} {
		job, err := s.c.GetJob(app.ID, id)
		c.Assert(err, IsNil)
		c.Assert(job.ID, Equals, jobID)
		c.Assert(job.UUID, Equals, uuid)
		c.Assert(job.HostID, Equals, hostID)
		c.Assert(job.AppID, Equals, app.ID)
		c.Assert(job.ReleaseID, Equals, release.ID)
		c.Assert(job.Meta, DeepEquals, map[string]string{"some": "info"})
	}
}
func (s *S) TestCreateArtifact(c *C) {
	for i, id := range []string{"", random.UUID()} {
		in := &ct.Artifact{
			ID:   id,
			Type: "docker-image",
			URI:  fmt.Sprintf("docker://flynn/host?id=adsf%d", i),
		}
		out := s.createTestArtifact(c, in)
		c.Assert(out.Type, Equals, in.Type)
		c.Assert(out.URI, Equals, in.URI)
		c.Assert(out.ID, Not(Equals), "")
		if id != "" {
			c.Assert(out.ID, Equals, id)
		}

		gotArtifact := &ct.Artifact{}
		res, err := s.Get("/artifacts/"+out.ID, gotArtifact)
		c.Assert(err, IsNil)
		c.Assert(gotArtifact, DeepEquals, out)

		res, err = s.Get("/artifacts/fail"+out.ID, gotArtifact)
		c.Assert(res.StatusCode, Equals, 404)
	}
}
func (r *ReleaseRepo) Add(data interface{}) error {
	release := data.(*ct.Release)
	releaseCopy := *release

	releaseCopy.ID = ""
	releaseCopy.ArtifactID = ""
	releaseCopy.CreatedAt = nil

	for typ, proc := range releaseCopy.Processes {
		resource.SetDefaults(&proc.Resources)
		releaseCopy.Processes[typ] = proc
	}

	data, err := json.Marshal(&releaseCopy)
	if err != nil {
		return err
	}
	if release.ID == "" {
		release.ID = random.UUID()
	}

	var artifactID *string
	if release.ArtifactID != "" {
		artifactID = &release.ArtifactID
	}

	err = r.db.QueryRow("INSERT INTO releases (release_id, artifact_id, data) VALUES ($1, $2, $3) RETURNING created_at",
		release.ID, artifactID, data).Scan(&release.CreatedAt)
	release.ID = postgres.CleanUUID(release.ID)
	if release.ArtifactID != "" {
		release.ArtifactID = postgres.CleanUUID(release.ArtifactID)
	}
	return err
}
func (r *DeploymentRepo) Add(data interface{}) (*ct.Deployment, error) {
	d := data.(*ct.Deployment)
	if d.ID == "" {
		d.ID = random.UUID()
	}
	var oldReleaseID *string
	if d.OldReleaseID != "" {
		oldReleaseID = &d.OldReleaseID
	}
	tx, err := r.db.Begin()
	if err != nil {
		return nil, err
	}
	if err := tx.QueryRow("deployment_insert", d.ID, d.AppID, oldReleaseID, d.NewReleaseID, d.Strategy, d.Processes, d.DeployTimeout).Scan(&d.CreatedAt); err != nil {
		tx.Rollback()
		return nil, err
	}

	// fake initial deployment
	if d.FinishedAt != nil {
		if err := tx.Exec("deployment_update_finished_at", d.ID, d.FinishedAt); err != nil {
			tx.Rollback()
			return nil, err
		}
		if err = createDeploymentEvent(tx.Exec, d, "complete"); err != nil {
			tx.Rollback()
			return nil, err
		}
		d.Status = "complete"
		return d, tx.Commit()
	}

	if err := tx.Commit(); err != nil {
		return nil, err
	}

	args, err := json.Marshal(ct.DeployID{ID: d.ID})
	if err != nil {
		return nil, err
	}

	tx, err = r.db.Begin()
	if err != nil {
		return nil, err
	}
	if err = createDeploymentEvent(tx.Exec, d, "pending"); err != nil {
		tx.Rollback()
		return nil, err
	}
	d.Status = "pending"

	job := &que.Job{Type: "deployment", Args: args}
	if err := r.q.EnqueueInTx(job, tx.Tx); err != nil {
		tx.Rollback()
		return nil, err
	}
	if err = tx.Commit(); err != nil {
		return nil, err
	}
	return d, nil
}
func (b *s3Backend) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, append bool) error {
	if append {
		// This is a hack, the next easiest thing to do if we need to handle
		// upload resumption is to finalize the multipart upload when the client
		// disconnects and when the rest of the data arrives, start a new
		// multi-part upload copying the existing object as the first part
		// (which is supported by S3 as a specific API call). This requires
		// replacing the simple uploader, so it was not done in the first pass.
		existing, err := b.Open(tx, info, false)
		if err != nil {
			return err
		}
		r = io.MultiReader(existing, r)
	}

	info.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", info.ID, info.ExternalID); err != nil {
		return err
	}

	u := s3manager.NewUploaderWithClient(b.client)
	_, err := u.Upload(&s3manager.UploadInput{
		Bucket:      &b.bucket,
		Key:         &info.ExternalID,
		ContentType: &info.Type,
		Body:        r,
	})
	return err
}
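// The append path above re-streams the whole existing object and
// concatenates the new bytes onto it via io.MultiReader, trading bandwidth
// for simplicity. A self-contained sketch of the concatenation behaviour
// (standard library only; exampleMultiReader is illustrative):
func exampleMultiReader() {
	existing := strings.NewReader("hello, ")
	incoming := strings.NewReader("world")
	combined := io.MultiReader(existing, incoming)
	data, _ := ioutil.ReadAll(combined)
	fmt.Println(string(data)) // prints "hello, world"
}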
func (s *Scheduler) handleFormationDiff(f *Formation, diff Processes) {
	log := logger.New("fn", "handleFormationDiff", "app.id", f.App.ID, "release.id", f.Release.ID)
	log.Info("formation in incorrect state", "diff", diff)
	for typ, n := range diff {
		if n > 0 {
			log.Info(fmt.Sprintf("starting %d new %s jobs", n, typ))
			for i := 0; i < n; i++ {
				job := &Job{
					ID:        random.UUID(),
					Type:      typ,
					AppID:     f.App.ID,
					ReleaseID: f.Release.ID,
					Formation: f,
					startedAt: time.Now(),
					state:     JobStateNew,
				}
				s.jobs.Add(job)
				go s.StartJob(job)
			}
		} else if n < 0 {
			log.Info(fmt.Sprintf("stopping %d %s jobs", -n, typ))
			for i := 0; i < -n; i++ {
				s.stopJob(f, typ)
			}
		}
	}
}
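// The diff handled above is the per-type difference between desired and
// actual process counts: positive entries start jobs, negative entries stop
// them. A minimal sketch of how such a diff can be computed (processDiff is
// hypothetical; Processes is a map[string]int as used above):
func processDiff(desired, actual map[string]int) map[string]int {
	diff := make(map[string]int)
	for typ, want := range desired {
		if d := want - actual[typ]; d != 0 {
			diff[typ] = d
		}
	}
	// process types that are running but no longer desired must be stopped
	for typ, have := range actual {
		if _, ok := desired[typ]; !ok && have > 0 {
			diff[typ] = -have
		}
	}
	return diff
}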
func (s *Scheduler) restartJob(job *Job) {
	restarts := job.restarts
	// reset the restart count if it has been running for longer than the
	// back off period
	if job.startedAt.Before(time.Now().Add(-s.backoffPeriod)) {
		restarts = 0
	}
	backoff := s.getBackoffDuration(restarts)

	// create a new job so its state is tracked separately from the job
	// it is replacing
	newJob := &Job{
		ID:        random.UUID(),
		Type:      job.Type,
		AppID:     job.AppID,
		ReleaseID: job.ReleaseID,
		Formation: job.Formation,
		startedAt: time.Now(),
		state:     JobStateScheduled,
		restarts:  restarts + 1,
	}
	s.jobs.Add(newJob)

	logger.Info("scheduling job restart", "fn", "restartJob", "attempts", newJob.restarts, "delay", backoff)
	newJob.restartTimer = time.AfterFunc(backoff, func() { s.StartJob(newJob) })
}
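// getBackoffDuration isn't shown here. A typical capped exponential backoff
// that fits the restart-count bookkeeping above might look like this (a
// sketch only; the base delay and cap are assumptions for illustration):
func getBackoffDuration(restarts uint) time.Duration {
	const (
		base = 1 * time.Second
		max  = 10 * time.Minute
	)
	d := base << restarts // double the delay for each restart
	if d <= 0 || d > max {
		// overflow or past the cap
		return max
	}
	return d
}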
func (s *S) TestPutResource(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "put-resource"})
	provider := s.createTestProvider(c, &ct.Provider{URL: "https://example.ca", Name: "put-resource"})

	resource := &ct.Resource{
		ExternalID: "/foo/bar",
		Env:        map[string]string{"FOO": "BAR"},
		Apps:       []string{app.ID},
	}
	id := random.UUID()
	path := fmt.Sprintf("/providers/%s/resources/%s", provider.ID, id)
	created := &ct.Resource{}
	_, err := s.Put(path, resource, created)
	c.Assert(err, IsNil)

	c.Assert(created.ID, Equals, id)
	c.Assert(created.ProviderID, Equals, provider.ID)
	c.Assert(created.Env, DeepEquals, resource.Env)
	c.Assert(created.Apps, DeepEquals, resource.Apps)
	c.Assert(created.CreatedAt, Not(IsNil))

	gotResource := &ct.Resource{}
	_, err = s.Get(path, gotResource)
	c.Assert(err, IsNil)
	c.Assert(gotResource, DeepEquals, created)
}
func convert(imageURL string) (string, error) {
	id := random.UUID()
	cmd := exec.Command("/bin/docker-artifact", imageURL)
	cmd.Env = append(os.Environ(), fmt.Sprintf("ARTIFACT_ID=%s", id))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stdout
	return id, cmd.Run()
}
func (r *AppRepo) Add(data interface{}) error {
	app := data.(*ct.App)
	tx, err := r.db.Begin()
	if err != nil {
		return err
	}
	if app.Name == "" {
		var nameID uint32
		if err := tx.QueryRow("SELECT nextval('name_ids')").Scan(&nameID); err != nil {
			tx.Rollback()
			return err
		}
		app.Name = name.Get(nameID)
	}
	if len(app.Name) > 100 || !utils.AppNamePattern.MatchString(app.Name) {
		// don't leak the open transaction on validation failure
		tx.Rollback()
		return ct.ValidationError{Field: "name", Message: "is invalid"}
	}
	if app.ID == "" {
		app.ID = random.UUID()
	}
	if app.Strategy == "" {
		app.Strategy = "all-at-once"
	}
	meta, err := json.Marshal(app.Meta)
	if err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.QueryRow("INSERT INTO apps (app_id, name, meta, strategy) VALUES ($1, $2, $3, $4) RETURNING created_at, updated_at",
		app.ID, app.Name, meta, app.Strategy).Scan(&app.CreatedAt, &app.UpdatedAt); err != nil {
		tx.Rollback()
		if postgres.IsUniquenessError(err, "apps_name_idx") {
			return httphelper.ObjectExistsErr(fmt.Sprintf("application %q already exists", app.Name))
		}
		return err
	}
	if err := createEvent(tx.Exec, &ct.Event{
		AppID:      app.ID,
		ObjectID:   app.ID,
		ObjectType: ct.EventTypeApp,
	}, app); err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}

	if !app.System() && r.defaultDomain != "" {
		route := (&router.HTTPRoute{
			Domain:  fmt.Sprintf("%s.%s", app.Name, r.defaultDomain),
			Service: app.Name + "-web",
		}).ToRoute()
		if err := createRoute(r.db, r.router, app.ID, route); err != nil {
			log.Printf("Error creating default route for %s: %s", app.Name, err)
		}
	}
	return nil
}
func (r *AppRepo) Add(data interface{}) error {
	app := data.(*ct.App)
	tx, err := r.db.Begin()
	if err != nil {
		return err
	}
	if app.Name == "" {
		var nameID int64
		if err := tx.QueryRow("app_next_name_id").Scan(&nameID); err != nil {
			tx.Rollback()
			return err
		}
		// Safe cast because name_ids is limited to 32 bit size in schema
		app.Name = name.Get(uint32(nameID))
	}
	if len(app.Name) > 100 || !utils.AppNamePattern.MatchString(app.Name) {
		// don't leak the open transaction on validation failure
		tx.Rollback()
		return ct.ValidationError{Field: "name", Message: "is invalid"}
	}
	if app.ID == "" {
		app.ID = random.UUID()
	}
	if app.Strategy == "" {
		app.Strategy = "all-at-once"
	}
	if app.DeployTimeout == 0 {
		app.DeployTimeout = ct.DefaultDeployTimeout
	}
	if err := tx.QueryRow("app_insert", app.ID, app.Name, app.Meta, app.Strategy, app.DeployTimeout).Scan(&app.CreatedAt, &app.UpdatedAt); err != nil {
		tx.Rollback()
		if postgres.IsUniquenessError(err, "apps_name_idx") {
			return httphelper.ObjectExistsErr(fmt.Sprintf("application %q already exists", app.Name))
		}
		return err
	}
	if err := createEvent(tx.Exec, &ct.Event{
		AppID:      app.ID,
		ObjectID:   app.ID,
		ObjectType: ct.EventTypeApp,
	}, app); err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}

	if !app.System() && r.defaultDomain != "" {
		route := (&router.HTTPRoute{
			Domain:  fmt.Sprintf("%s.%s", app.Name, r.defaultDomain),
			Service: app.Name + "-web",
		}).ToRoute()
		if err := createRoute(r.db, r.router, app.ID, route); err != nil {
			log.Printf("Error creating default route for %s: %s", app.Name, err)
		}
	}
	return nil
}
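// name.Get maps the sequence value onto a human-readable default app name.
// A toy stand-in showing the idea of a deterministic ID-to-name mapping
// (exampleNameGet and its word lists are illustrative, not Flynn's):
func exampleNameGet(id uint32) string {
	adjectives := []string{"quiet", "brave", "gentle", "clever"}
	nouns := []string{"otter", "falcon", "maple", "comet"}
	adj := adjectives[id%uint32(len(adjectives))]
	noun := nouns[(id/uint32(len(adjectives)))%uint32(len(nouns))]
	return fmt.Sprintf("%s-%s-%d", adj, noun, id)
}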
func (b *gcsBackend) Copy(tx *postgres.DBTx, dst, src FileInfo) error {
	dst.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", dst.ID, dst.ExternalID); err != nil {
		return err
	}
	_, err := b.bucket.Object(src.ExternalID).CopyTo(context.Background(), b.bucket.Object(dst.ExternalID), nil)
	return err
}