func runServer(_ *docopt.Args) error { addr := ":" + os.Getenv("PORT") db := postgres.Wait(nil, nil) if err := dbMigrations.Migrate(db); err != nil { return fmt.Errorf("error running DB migrations: %s", err) } mux := http.NewServeMux() repo, err := data.NewFileRepoFromEnv(db) if err != nil { return err } hb, err := discoverd.AddServiceAndRegister("blobstore", addr) if err != nil { return err } shutdown.BeforeExit(func() { hb.Close() }) log.Println("Blobstore serving files on " + addr) mux.Handle("/", handler(repo)) mux.Handle(status.Path, status.Handler(func() status.Status { if err := db.Exec("SELECT 1"); err != nil { return status.Unhealthy } return status.Healthy })) h := httphelper.ContextInjector("blobstore", httphelper.NewRequestLogger(mux)) return http.ListenAndServe(addr, h) }
func main() { defer shutdown.Exit() db := postgres.Wait(&postgres.Conf{ Service: serviceName, User: "******", Password: os.Getenv("PGPASSWORD"), Database: "postgres", }, nil) api := &pgAPI{db} router := httprouter.New() router.POST("/databases", httphelper.WrapHandler(api.createDatabase)) router.DELETE("/databases", httphelper.WrapHandler(api.dropDatabase)) router.GET("/ping", httphelper.WrapHandler(api.ping)) port := os.Getenv("PORT") if port == "" { port = "3000" } addr := ":" + port hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router)) shutdown.Fatal(http.ListenAndServe(addr, handler)) }
func main() { log := logger.New("fn", "main") log.Info("creating controller client") client, err := controller.NewClient("", os.Getenv("AUTH_KEY")) if err != nil { log.Error("error creating controller client", "err", err) shutdown.Fatal(err) } log.Info("connecting to postgres") db := postgres.Wait(nil, schema.PrepareStatements) shutdown.BeforeExit(func() { db.Close() }) go func() { status.AddHandler(func() status.Status { _, err := db.ConnPool.Exec("ping") if err != nil { return status.Unhealthy } return status.Healthy }) addr := ":" + os.Getenv("PORT") hb, err := discoverd.AddServiceAndRegister("controller-worker", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) shutdown.Fatal(http.ListenAndServe(addr, nil)) }() workers := que.NewWorkerPool( que.NewClient(db.ConnPool), que.WorkMap{ "deployment": deployment.JobHandler(db, client, logger), "app_deletion": app_deletion.JobHandler(db, client, logger), "domain_migration": domain_migration.JobHandler(db, client, logger), "release_cleanup": release_cleanup.JobHandler(db, client, logger), "app_garbage_collection": app_garbage_collection.JobHandler(db, client, logger), }, workerCount, ) workers.Interval = 5 * time.Second log.Info("starting workers", "count", workerCount, "interval", workers.Interval) workers.Start() shutdown.BeforeExit(func() { workers.Shutdown() }) select {} // block and keep running }
func runMigrate(args *docopt.Args) error { deleteFiles := args.Bool["--delete"] concurrency, err := strconv.Atoi(args.String["--concurrency"]) if err != nil { return err } if concurrency < 1 { concurrency = 4 } prefix := args.String["--prefix"] db := postgres.Wait(nil, nil) repo, err := data.NewFileRepoFromEnv(db) if err != nil { return nil } files, err := repo.ListFilesExcludingDefaultBackend(prefix) if err != nil { return nil } var wg sync.WaitGroup tokens := make(chan struct{}, concurrency) var errorCount int64 dest := repo.DefaultBackend().Name() for i, f := range files { log.Printf("[%d/%d] Moving %s (%s, %d bytes) from %s to %s", i+1, len(files), f.FileInfo.Name, f.ID, f.Size, f.Backend.Name(), dest) tokens <- struct{}{} wg.Add(1) go func(f data.BackendFile) { if err := moveFile(db, repo, f, deleteFiles); err != nil { log.Printf("Error moving %s (%s): %s", f.FileInfo.Name, f.ID, err) atomic.AddInt64(&errorCount, 1) } <-tokens wg.Done() }(f) } wg.Wait() db.Close() if errorCount > 0 { return fmt.Errorf("Finished with %d errors", errorCount) } log.Printf("Finished with no errors.") return nil }
func main() { log := logger.New("fn", "main") log.Info("creating controller client") client, err := controller.NewClient("", os.Getenv("AUTH_KEY")) if err != nil { log.Error("error creating controller client", "err", err) shutdown.Fatal() } log.Info("connecting to postgres") db := postgres.Wait("", "") log.Info("creating postgres connection pool") pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{ ConnConfig: pgx.ConnConfig{ Host: os.Getenv("PGHOST"), User: os.Getenv("PGUSER"), Password: os.Getenv("PGPASSWORD"), Database: os.Getenv("PGDATABASE"), }, AfterConnect: que.PrepareStatements, MaxConnections: workerCount, }) if err != nil { log.Error("error creating postgres connection pool", "err", err) shutdown.Fatal() } shutdown.BeforeExit(func() { pgxpool.Close() }) workers := que.NewWorkerPool( que.NewClient(pgxpool), que.WorkMap{ "deployment": deployment.JobHandler(db, client, logger), "app_deletion": app_deletion.JobHandler(db, client, logger), }, workerCount, ) workers.Interval = 5 * time.Second log.Info("starting workers", "count", workerCount, "interval", workers.Interval) workers.Start() shutdown.BeforeExit(func() { workers.Shutdown() }) select {} // block and keep running }
func migrate() error { db := postgres.Wait(nil, nil) slugbuilder, err := getSlugbuilderArtifact(db) if err != nil { log.Printf("error getting slugbuilder artifact: %s", err) return err } artifacts, err := getActiveSlugArtifacts(db) if err != nil { log.Printf("error getting active slug artifacts: %s", err) return err } log.Printf("converting %d active slugs to Flynn images", len(artifacts)) for i, artifact := range artifacts { log.Printf("converting slug %s (%d/%d)", artifact.ID, i+1, len(artifacts)) newID, err := convert(slugbuilder, artifact.URI) if err != nil { if err == ErrNotFound { log.Printf("skipping slug %s (%d/%d): slug no longer exists", artifact.ID, i+1, len(artifacts)) continue } log.Printf("error converting slug %s (%d/%d): %s", artifact.ID, i+1, len(artifacts), err) return err } tx, err := db.Begin() if err != nil { return err } if err := tx.Exec(`UPDATE release_artifacts SET artifact_id = $1 WHERE artifact_id = $2`, newID, artifact.ID); err != nil { tx.Rollback() return err } if err := tx.Exec(`UPDATE artifacts SET deleted_at = now() WHERE artifact_id = $1`, artifact.ID); err != nil { tx.Rollback() return err } if err := tx.Commit(); err != nil { return err } } return nil }
func main() { deleteFiles := flag.Bool("delete", false, "enable deletion of files from source backend") concurrency := flag.Int("concurrency", 4, "number of parallel file moves to run at a time") prefix := flag.String("prefix", "", "only migrate files with a name that starts with this prefix") flag.Parse() db := postgres.Wait(nil, nil) repo, err := data.NewFileRepoFromEnv(db) if err != nil { log.Fatal(err) } files, err := repo.ListFilesExcludingDefaultBackend(*prefix) if err != nil { log.Fatal(err) } var wg sync.WaitGroup tokens := make(chan struct{}, *concurrency) var errorCount int64 dest := repo.DefaultBackend().Name() for i, f := range files { log.Printf("[%d/%d] Moving %s (%s, %d bytes) from %s to %s", i+1, len(files), f.FileInfo.Name, f.ID, f.Size, f.Backend.Name(), dest) tokens <- struct{}{} wg.Add(1) go func(f data.BackendFile) { if err := moveFile(db, repo, f, *deleteFiles); err != nil { log.Printf("Error moving %s (%s): %s", f.FileInfo.Name, f.ID, err) atomic.AddInt64(&errorCount, 1) } <-tokens wg.Done() }(f) } wg.Wait() db.Close() if errorCount > 0 { log.Printf("Finished with %d errors", errorCount) os.Exit(1) } else { log.Printf("Finished with no errors.") } }
func runCleanup(args *docopt.Args) error { concurrency, err := strconv.Atoi(args.String["--concurrency"]) if err != nil { return err } if concurrency < 1 { concurrency = 4 } db := postgres.Wait(nil, nil) repo, err := data.NewFileRepoFromEnv(db) if err != nil { return err } files, err := repo.ListDeletedFilesForCleanup() if err != nil { return err } var wg sync.WaitGroup tokens := make(chan struct{}, concurrency) for i, f := range files { if f.Backend == nil { log.Printf("[%d/%d] Skipping %s (%s) because backend is not configured", i+1, len(files), f.FileInfo.Name, f.ID) continue } tokens <- struct{}{} wg.Add(1) go func(f data.BackendFile) { if err := f.Backend.Delete(nil, f.FileInfo); err == nil { log.Printf("[%d/%d] Successfully deleted %s (%s) from backend %s", i+1, len(files), f.FileInfo.Name, f.ID, f.Backend.Name()) } <-tokens wg.Done() }(f) } wg.Wait() db.Close() log.Printf("Done.") return nil }
func main() { defer shutdown.Exit() flag.Parse() addr := os.Getenv("PORT") if addr == "" { addr = *listenPort } addr = ":" + addr var fs Filesystem var storageDesc string if *storageDir != "" { fs = NewOSFilesystem(*storageDir) storageDesc = *storageDir } else { var err error db := postgres.Wait(nil, nil) fs, err = NewPostgresFilesystem(db) if err != nil { shutdown.Fatal(err) } storageDesc = "Postgres" } if *serviceDiscovery { hb, err := discoverd.AddServiceAndRegister("blobstore", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) } log.Println("Blobstore serving files on " + addr + " from " + storageDesc) mux := http.NewServeMux() mux.Handle("/", handler(fs)) mux.Handle(status.Path, status.Handler(fs.Status)) h := httphelper.ContextInjector("blobstore", httphelper.NewRequestLogger(mux)) shutdown.Fatal(http.ListenAndServe(addr, h)) }
func main() { concurrency := flag.Int("concurrency", 4, "number of parallel file deletions to run at a time") flag.Parse() db := postgres.Wait(nil, nil) repo, err := data.NewFileRepoFromEnv(db) if err != nil { log.Fatal(err) } files, err := repo.ListDeletedFilesForCleanup() if err != nil { log.Fatal(err) } var wg sync.WaitGroup tokens := make(chan struct{}, *concurrency) for i, f := range files { if f.Backend == nil { log.Printf("[%d/%d] Skipping %s (%s) because backend is not configured", i+1, len(files), f.FileInfo.Name, f.ID) continue } tokens <- struct{}{} wg.Add(1) go func(f data.BackendFile) { if err := f.Backend.Delete(nil, f.FileInfo); err == nil { log.Printf("[%d/%d] Successfully deleted %s (%s) from backend %s", i+1, len(files), f.FileInfo.Name, f.ID, f.Backend.Name()) } <-tokens wg.Done() }(f) } wg.Wait() db.Close() log.Printf("Done.") }
func migrate() error { db := postgres.Wait(nil, nil) artifacts, err := getActiveImageArtifacts(db) if err != nil { log.Printf("error getting active Docker image artifacts: %s", err) return err } log.Printf("converting %d active Docker images to Flynn images", len(artifacts)) for i, artifact := range artifacts { log.Printf("converting Docker image %s (%d/%d)", artifact.ID, i+1, len(artifacts)) newID, err := convert(artifact.URI) if err != nil { log.Printf("error converting Docker image %s (%d/%d): %s", artifact.ID, i+1, len(artifacts), err) return err } tx, err := db.Begin() if err != nil { return err } if err := tx.Exec(`UPDATE release_artifacts SET artifact_id = $1 WHERE artifact_id = $2`, newID, artifact.ID); err != nil { tx.Rollback() return err } if err := tx.Exec(`UPDATE artifacts SET deleted_at = now() WHERE artifact_id = $1`, artifact.ID); err != nil { tx.Rollback() return err } if err := tx.Commit(); err != nil { return err } } return nil }
// main starts the controller API server: it connects to postgres, runs
// migrations, builds a pgx pool for the job queue, registers with
// discoverd, and serves the controller HTTP API.
func main() {
	defer shutdown.Exit()

	// PORT from the environment wins; otherwise fall back to 3000.
	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	addr := ":" + port

	// Optionally seed the name generator so generated names are
	// deterministic across restarts.
	if seed := os.Getenv("NAME_SEED"); seed != "" {
		s, err := hex.DecodeString(seed)
		if err != nil {
			log.Fatalln("error decoding NAME_SEED:", err)
		}
		name.SetSeed(s)
	}

	// Wait for postgres and bring the schema up to date before serving.
	db := postgres.Wait("", "")
	if err := migrateDB(db.DB); err != nil {
		shutdown.Fatal(err)
	}

	// NOTE(review): the URI deliberately uses an "http://" scheme here;
	// pgx.ParseURI appears to only consume the user/password/host/path
	// parts — confirm the scheme really is ignored by this pgx version.
	pgxcfg, err := pgx.ParseURI(fmt.Sprintf("http://%s:%s@%s/%s", os.Getenv("PGUSER"), os.Getenv("PGPASSWORD"), db.Addr(), os.Getenv("PGDATABASE")))
	if err != nil {
		log.Fatal(err)
	}
	// Retry dialing so transient connection failures don't kill startup.
	pgxcfg.Dial = dialer.Retry.Dial
	pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig:   pgxcfg,
		AfterConnect: que.PrepareStatements,
	})
	if err != nil {
		log.Fatal(err)
	}
	shutdown.BeforeExit(func() { pgxpool.Close() })

	// Log aggregator and router clients used by the API handlers.
	lc, err := logaggc.New("")
	if err != nil {
		shutdown.Fatal(err)
	}
	rc := routerc.New()

	// Register the controller instance, exposing AUTH_KEY in the service
	// metadata, and deregister on shutdown.
	hb, err := discoverd.DefaultClient.AddServiceAndRegisterInstance("flynn-controller", &discoverd.Instance{
		Addr:  addr,
		Proto: "http",
		Meta: map[string]string{
			"AUTH_KEY": os.Getenv("AUTH_KEY"),
		},
	})
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	// AUTH_KEY may hold several comma-separated keys; all are accepted.
	handler := appHandler(handlerConfig{
		db:      db,
		cc:      clusterClientWrapper{cluster.NewClient()},
		lc:      lc,
		rc:      rc,
		pgxpool: pgxpool,
		keys:    strings.Split(os.Getenv("AUTH_KEY"), ","),
	})
	shutdown.Fatal(http.ListenAndServe(addr, handler))
}
func main() { log := logger.New("fn", "main") log.Info("creating controller client") client, err := controller.NewClient("", os.Getenv("AUTH_KEY")) if err != nil { log.Error("error creating controller client", "err", err) shutdown.Fatal() } log.Info("connecting to postgres") db := postgres.Wait("", "") log.Info("creating postgres connection pool") pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{ ConnConfig: pgx.ConnConfig{ Host: os.Getenv("PGHOST"), User: os.Getenv("PGUSER"), Password: os.Getenv("PGPASSWORD"), Database: os.Getenv("PGDATABASE"), }, AfterConnect: que.PrepareStatements, MaxConnections: workerCount, }) if err != nil { log.Error("error creating postgres connection pool", "err", err) shutdown.Fatal() } shutdown.BeforeExit(func() { pgxpool.Close() }) go func() { status.AddHandler(func() status.Status { _, err := pgxpool.Exec("SELECT 1") if err != nil { return status.Unhealthy } return status.Healthy }) addr := ":" + os.Getenv("PORT") hb, err := discoverd.AddServiceAndRegister("flynn-controller-worker", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) shutdown.Fatal(http.ListenAndServe(addr, nil)) }() workers := que.NewWorkerPool( que.NewClient(pgxpool), que.WorkMap{ "deployment": deployment.JobHandler(db, client, logger), "app_deletion": app_deletion.JobHandler(db, client, logger), }, workerCount, ) workers.Interval = 5 * time.Second log.Info("starting workers", "count", workerCount, "interval", workers.Interval) workers.Start() shutdown.BeforeExit(func() { workers.Shutdown() }) select {} // block and keep running }
query := r.flynn("pg", "psql", "--", "-c", "SELECT * FROM foos") t.Assert(query, SuccessfulOutputContains, "foobar") t.Assert(r.flynn("resource", "remove", "postgres", id), Succeeds) } var sireniaPostgres = sireniaDatabase{ appName: "postgres", serviceKey: "FLYNN_POSTGRES", hostKey: "PGHOST", initDb: func(t *c.C, r *ct.Release, d *sireniaDeploy) { db := postgres.Wait(&postgres.Conf{ Discoverd: discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", routerIP)), Service: d.name, User: "******", Password: r.Env["PGPASSWORD"], Database: "postgres", }, nil) dbname := "deploy-test" t.Assert(db.Exec(fmt.Sprintf(`CREATE DATABASE "%s" WITH OWNER = "flynn"`, dbname)), c.IsNil) db.Close() db = postgres.Wait(&postgres.Conf{ Discoverd: discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", routerIP)), Service: d.name, User: "******", Password: r.Env["PGPASSWORD"], Database: dbname, }, nil) defer db.Close() t.Assert(db.Exec(`CREATE TABLE deploy_test ( data text)`), c.IsNil)
// main starts the router: it validates the cookie key, parses listener
// configuration from flags/env, loads the TLS keypair, connects to
// postgres (twice: once to migrate, once with prepared statements),
// starts the TCP and HTTP listeners, registers with discoverd, and
// serves the router API.
func main() {
	defer shutdown.Exit()

	// COOKIE_KEY must decode to exactly 32 bytes; it is required.
	var cookieKey *[32]byte
	if key := os.Getenv("COOKIE_KEY"); key != "" {
		res, err := base64.StdEncoding.DecodeString(key)
		if err != nil {
			shutdown.Fatalf("error decoding COOKIE_KEY: %s", err)
		}
		if len(res) != 32 {
			shutdown.Fatalf("decoded %d bytes from COOKIE_KEY, expected 32", len(res))
		}
		var k [32]byte
		copy(k[:], res)
		cookieKey = &k
	}
	if cookieKey == nil {
		shutdown.Fatal("Missing random 32 byte base64-encoded COOKIE_KEY")
	}

	httpPort := flag.String("http-port", "8080", "http listen port")
	httpsPort := flag.String("https-port", "4433", "https listen port")
	tcpIP := flag.String("tcp-ip", os.Getenv("LISTEN_IP"), "tcp router listen ip")
	tcpRangeStart := flag.Int("tcp-range-start", 3000, "tcp port range start")
	tcpRangeEnd := flag.Int("tcp-range-end", 3500, "tcp port range end")
	certFile := flag.String("tls-cert", "", "TLS (SSL) cert file in pem format")
	keyFile := flag.String("tls-key", "", "TLS (SSL) key file in pem format")
	apiPort := flag.String("api-port", "", "api listen port")
	flag.Parse()

	// API port resolution order: -api-port flag, then PORT env, then 5000.
	if *apiPort == "" {
		*apiPort = os.Getenv("PORT")
		if *apiPort == "" {
			*apiPort = "5000"
		}
	}

	// Load the TLS keypair from files if given, otherwise from the
	// TLSCERT/TLSKEY environment variables.
	keypair := tls.Certificate{}
	var err error
	if *certFile != "" {
		if keypair, err = tls.LoadX509KeyPair(*certFile, *keyFile); err != nil {
			shutdown.Fatal(err)
		}
	} else if tlsCert := os.Getenv("TLSCERT"); tlsCert != "" {
		if tlsKey := os.Getenv("TLSKEY"); tlsKey != "" {
			// Overwrite TLSKEY with a fingerprint-style placeholder,
			// presumably so the private key does not linger in the
			// process environment — confirm downstream nothing re-reads it.
			os.Setenv("TLSKEY", fmt.Sprintf("md5^(%s)", md5sum(tlsKey)))
			if keypair, err = tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)); err != nil {
				shutdown.Fatal(err)
			}
		}
	}

	log := logger.New("fn", "main")

	log.Info("connecting to postgres")
	db := postgres.Wait(nil, nil)

	log.Info("running DB migrations")
	if err := migrateDB(db); err != nil {
		shutdown.Fatal(err)
	}

	// Close and reconnect so prepared statements are registered against
	// the migrated schema.
	db.Close()
	log.Info("reconnecting to postgres with prepared queries")
	db = postgres.Wait(nil, schema.PrepareStatements)
	shutdown.BeforeExit(func() { db.Close() })

	httpAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *httpPort)
	httpsAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *httpsPort)

	// Both listeners share the postgres-backed route store.
	r := Router{
		TCP: &TCPListener{
			IP:        *tcpIP,
			startPort: *tcpRangeStart,
			endPort:   *tcpRangeEnd,
			ds:        NewPostgresDataStore("tcp", db.ConnPool),
			discoverd: discoverd.DefaultClient,
		},
		HTTP: &HTTPListener{
			Addr:      httpAddr,
			TLSAddr:   httpsAddr,
			cookieKey: cookieKey,
			keypair:   keypair,
			ds:        NewPostgresDataStore("http", db.ConnPool),
			discoverd: discoverd.DefaultClient,
		},
	}

	if err := r.Start(); err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(r.Close)

	apiAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *apiPort)
	log.Info("starting API listener")
	listener, err := listenFunc("tcp4", apiAddr)
	if err != nil {
		log.Error("error starting API listener", "err", err)
		shutdown.Fatal(listenErr{apiAddr, err})
	}

	// Register the API and HTTP endpoints with service discovery and
	// deregister each heartbeat on shutdown.
	services := map[string]string{
		"router-api":  apiAddr,
		"router-http": httpAddr,
	}
	for service, addr := range services {
		log.Info("registering service", "name", service, "addr", addr)
		hb, err := discoverd.AddServiceAndRegister(service, addr)
		if err != nil {
			log.Error("error registering service", "name", service, "addr", addr, "err", err)
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
	}

	log.Info("serving API requests")
	shutdown.Fatal(http.Serve(listener, apiHandler(&r)))
}
// testDeploy creates a dedicated postgres app from the default postgres
// release, waits for the cluster (primary/sync/asyncs) and web processes
// to come up, verifies the database is writeable, then performs a deploy
// to a new release while asserting that the observed sirenia cluster
// states match the expected in-order sequence, and finally checks the
// database is still writeable.
func (s *PostgresSuite) testDeploy(t *c.C, d *pgDeploy) {
	// create postgres app
	client := s.controllerClient(t)
	app := &ct.App{Name: d.name, Strategy: "postgres"}
	t.Assert(client.CreateApp(app), c.IsNil)

	// copy release from default postgres app
	release, err := client.GetAppRelease("postgres")
	t.Assert(err, c.IsNil)
	release.ID = ""
	proc := release.Processes["postgres"]
	delete(proc.Env, "SINGLETON")
	proc.Env["FLYNN_POSTGRES"] = d.name
	proc.Service = d.name
	release.Processes["postgres"] = proc
	t.Assert(client.CreateRelease(release), c.IsNil)
	t.Assert(client.SetAppRelease(app.ID, release.ID), c.IsNil)
	oldRelease := release.ID

	// create formation
	discEvents := make(chan *discoverd.Event)
	discStream, err := s.discoverdClient(t).Service(d.name).Watch(discEvents)
	t.Assert(err, c.IsNil)
	defer discStream.Close()
	jobEvents := make(chan *ct.Job)
	jobStream, err := client.StreamJobEvents(d.name, jobEvents)
	t.Assert(err, c.IsNil)
	defer jobStream.Close()
	t.Assert(client.PutFormation(&ct.Formation{
		AppID:     app.ID,
		ReleaseID: release.ID,
		Processes: map[string]int{"postgres": d.pgJobs, "web": d.webJobs},
	}), c.IsNil)

	// watch cluster state changes
	type stateChange struct {
		state *state.State
		err   error
	}
	stateCh := make(chan stateChange)
	// Decode each service-meta event into a cluster state and forward it
	// on stateCh; a decode failure is forwarded once and ends the watcher.
	go func() {
		for event := range discEvents {
			if event.Kind != discoverd.EventKindServiceMeta {
				continue
			}
			var state state.State
			if err := json.Unmarshal(event.ServiceMeta.Data, &state); err != nil {
				stateCh <- stateChange{err: err}
				return
			}
			primary := ""
			if state.Primary != nil {
				primary = state.Primary.Addr
			}
			sync := ""
			if state.Sync != nil {
				sync = state.Sync.Addr
			}
			var async []string
			for _, a := range state.Async {
				async = append(async, a.Addr)
			}
			debugf(t, "got pg cluster state: index=%d primary=%s sync=%s async=%s", event.ServiceMeta.Index, primary, sync, strings.Join(async, ","))
			stateCh <- stateChange{state: &state}
		}
	}()

	// wait for correct cluster state and number of web processes
	var pgState state.State
	var webJobs int
	ready := func() bool {
		if webJobs != d.webJobs {
			return false
		}
		if pgState.Primary == nil {
			return false
		}
		if d.pgJobs > 1 && pgState.Sync == nil {
			return false
		}
		if d.pgJobs > 2 && len(pgState.Async) != d.pgJobs-2 {
			return false
		}
		return true
	}
	for {
		if ready() {
			break
		}
		select {
		case s := <-stateCh:
			t.Assert(s.err, c.IsNil)
			pgState = *s.state
		case e, ok := <-jobEvents:
			if !ok {
				t.Fatalf("job event stream closed: %s", jobStream.Err())
			}
			debugf(t, "got job event: %s %s %s", e.Type, e.ID, e.State)
			if e.Type == "web" && e.State == "up" {
				webJobs++
			}
		case <-time.After(30 * time.Second):
			t.Fatal("timed out waiting for postgres formation")
		}
	}

	// connect to the db so we can test writes
	db := postgres.Wait(d.name, fmt.Sprintf("dbname=postgres user=flynn password=%s", release.Env["PGPASSWORD"]))
	dbname := "deploy-test"
	t.Assert(db.Exec(fmt.Sprintf(`CREATE DATABASE "%s" WITH OWNER = "flynn"`, dbname)), c.IsNil)
	db.Close()
	db, err = postgres.Open(d.name, fmt.Sprintf("dbname=%s user=flynn password=%s", dbname, release.Env["PGPASSWORD"]))
	t.Assert(err, c.IsNil)
	defer db.Close()
	t.Assert(db.Exec(`CREATE TABLE deploy_test ( data text)`), c.IsNil)
	assertWriteable := func() {
		debug(t, "writing to postgres database")
		t.Assert(db.Exec(`INSERT INTO deploy_test (data) VALUES ('data')`), c.IsNil)
	}

	// check currently writeable
	assertWriteable()

	// check a deploy completes with expected cluster state changes
	release.ID = ""
	t.Assert(client.CreateRelease(release), c.IsNil)
	newRelease := release.ID
	deployment, err := client.CreateDeployment(app.ID, newRelease)
	t.Assert(err, c.IsNil)
	deployEvents := make(chan *ct.DeploymentEvent)
	deployStream, err := client.StreamDeployment(deployment, deployEvents)
	t.Assert(err, c.IsNil)
	defer deployStream.Close()

	// assertNextState checks that the next state received is in the remaining states
	// that were expected, so handles the fact that some states don't happen, but the
	// states that do happen are expected and in-order.
	// It returns the index into `remaining` that matched, so the caller
	// can skip over expected states that never occurred.
	assertNextState := func(remaining []expectedPgState) int {
		var state state.State
	loop:
		for {
			select {
			case s := <-stateCh:
				t.Assert(s.err, c.IsNil)
				if len(s.state.Async) < d.expectedAsyncs() {
					// we shouldn't usually receive states with less asyncs than
					// expected, but they can occur as an intermediate state between
					// two expected states (e.g. when a sync does a takeover at the
					// same time as a new async is started) so just ignore them.
					debug(t, "ignoring state with too few asyncs")
					continue
				}
				state = *s.state
				break loop
			case <-time.After(60 * time.Second):
				t.Fatal("timed out waiting for postgres cluster state")
			}
		}
		if state.Primary == nil {
			t.Fatal("no primary configured")
		}
		log := func(format string, v ...interface{}) {
			debugf(t, "skipping expected state: %s", fmt.Sprintf(format, v...))
		}
	outer:
		// Match the observed state against each remaining expectation,
		// comparing the release IDs of the primary, sync and async nodes.
		for i, expected := range remaining {
			if state.Primary.Meta["FLYNN_RELEASE_ID"] != expected.Primary {
				log("primary has incorrect release")
				continue
			}
			if state.Sync == nil {
				if expected.Sync == "" {
					return i
				}
				log("state has no sync node")
				continue
			}
			if state.Sync.Meta["FLYNN_RELEASE_ID"] != expected.Sync {
				log("sync has incorrect release")
				continue
			}
			if state.Async == nil {
				if expected.Async == nil {
					return i
				}
				log("state has no async nodes")
				continue
			}
			if len(state.Async) != len(expected.Async) {
				log("expected %d asyncs, got %d", len(expected.Async), len(state.Async))
				continue
			}
			for i, release := range expected.Async {
				if state.Async[i].Meta["FLYNN_RELEASE_ID"] != release {
					log("async[%d] has incorrect release", i)
					continue outer
				}
			}
			return i
		}
		t.Fatal("unexpected pg state")
		return -1
	}
	expected := d.expected(oldRelease, newRelease)
	var expectedIndex, newWebJobs int
loop:
	// Consume deployment events until the deploy completes, asserting the
	// postgres cluster passes through the expected states in order.
	for {
		select {
		case e, ok := <-deployEvents:
			if !ok {
				t.Fatal("unexpected close of deployment event stream")
			}
			switch e.Status {
			case "complete":
				break loop
			case "failed":
				t.Fatalf("deployment failed: %s", e.Error)
			}
			debugf(t, "got deployment event: %s %s", e.JobType, e.JobState)
			if e.JobState != "up" && e.JobState != "down" {
				continue
			}
			switch e.JobType {
			case "postgres":
				// move on if we have seen all the expected events
				if expectedIndex >= len(expected) {
					continue
				}
				skipped := assertNextState(expected[expectedIndex:])
				expectedIndex += 1 + skipped
			case "web":
				if e.JobState == "up" && e.ReleaseID == newRelease {
					newWebJobs++
				}
			}
		case <-time.After(2 * time.Minute):
			t.Fatal("timed out waiting for deployment")
		}
	}

	// check we have the correct number of new web jobs
	t.Assert(newWebJobs, c.Equals, d.webJobs)

	// check writeable now deploy is complete
	assertWriteable()
}
r.flynn("pg", "restore", "-f", file) query := r.flynn("pg", "psql", "--", "-c", "SELECT * FROM foos") t.Assert(query, SuccessfulOutputContains, "foobar") t.Assert(r.flynn("resource", "remove", "postgres", id), Succeeds) } var sireniaPostgres = sireniaDatabase{ appName: "postgres", serviceKey: "FLYNN_POSTGRES", hostKey: "PGHOST", initDb: func(t *c.C, r *ct.Release, d *sireniaDeploy) { db := postgres.Wait(&postgres.Conf{ Service: d.name, User: "******", Password: r.Env["PGPASSWORD"], Database: "postgres", }, nil) dbname := "deploy-test" t.Assert(db.Exec(fmt.Sprintf(`CREATE DATABASE "%s" WITH OWNER = "flynn"`, dbname)), c.IsNil) db.Close() db = postgres.Wait(&postgres.Conf{ Service: d.name, User: "******", Password: r.Env["PGPASSWORD"], Database: dbname, }, nil) defer db.Close() t.Assert(db.Exec(`CREATE TABLE deploy_test ( data text)`), c.IsNil) }, assertWriteable: func(t *c.C, r *ct.Release, d *sireniaDeploy) {