func runClusterMigrateDomain(args *docopt.Args) error { client, err := getClusterClient() if err != nil { shutdown.Fatal(err) } dm := &ct.DomainMigration{ Domain: args.String["<domain>"], } release, err := client.GetAppRelease("controller") if err != nil { return err } dm.OldDomain = release.Env["DEFAULT_ROUTE_DOMAIN"] if !promptYesNo(fmt.Sprintf("Migrate cluster domain from %q to %q?", dm.OldDomain, dm.Domain)) { fmt.Println("Aborted") return nil } maxDuration := 2 * time.Minute fmt.Printf("Migrating cluster domain (this can take up to %s)...\n", maxDuration) events := make(chan *ct.Event) stream, err := client.StreamEvents(controller.StreamEventsOptions{ ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration}, }, events) if err != nil { return nil } defer stream.Close() if err := client.PutDomain(dm); err != nil { return err } timeout := time.After(maxDuration) for { select { case event, ok := <-events: if !ok { return stream.Err() } var e *ct.DomainMigrationEvent if err := json.Unmarshal(event.Data, &e); err != nil { return err } if e.Error != "" { fmt.Println(e.Error) } if e.DomainMigration.FinishedAt != nil { fmt.Printf("Changed cluster domain from %q to %q\n", dm.OldDomain, dm.Domain) return nil } case <-timeout: return errors.New("timed out waiting for domain migration to complete") } } }
// FixController checks the formations of the critical system apps
// (controller, router, discoverd, flannel, postgres) via the first
// controller instance and submits corrected formations for any process
// type scaled below its expected minimum. If startScheduler is true it
// also starts the scheduler using the controller's formation.
func (f *ClusterFixer) FixController(instances []*discoverd.Instance, startScheduler bool) error {
	f.l.Info("found controller instance, checking critical formations")
	inst := instances[0]
	client, err := controller.NewClient("http://"+inst.Addr, inst.Meta["AUTH_KEY"])
	if err != nil {
		return fmt.Errorf("unexpected error creating controller client: %s", err)
	}
	// check that formations for critical components are expected
	apps := []string{"controller", "router", "discoverd", "flannel", "postgres"}
	changes := make(map[string]*ct.Formation, len(apps))
	var controllerFormation *ct.Formation
	for _, app := range apps {
		release, err := client.GetAppRelease(app)
		if err != nil {
			return fmt.Errorf("error getting %s release: %s", app, err)
		}
		formation, err := client.GetFormation(app, release.ID)
		if err != nil {
			// TODO: handle ErrNotFound
			return fmt.Errorf("error getting %s formation: %s", app, err)
		}
		if app == "controller" {
			// keep the controller formation for the optional scheduler start below
			controllerFormation = formation
		}
		for typ := range release.Processes {
			// want stays 0 when the current count is already sufficient
			var want int
			// postgres needs 3 instances in a multi-host cluster; every
			// other process type needs at least one
			if app == "postgres" && typ == "postgres" && len(f.hosts) > 1 && formation.Processes[typ] < 3 {
				want = 3
			} else if formation.Processes[typ] < 1 {
				want = 1
			}
			if want > 0 {
				f.l.Info("found broken formation", "app", app, "process", typ)
				if _, ok := changes[app]; !ok {
					// changes aliases the formation so further process
					// fixes for the same app accumulate in one object
					if formation.Processes == nil {
						formation.Processes = make(map[string]int)
					}
					changes[app] = formation
				}
				changes[app].Processes[typ] = want
			}
		}
	}
	for app, formation := range changes {
		f.l.Info("fixing broken formation", "app", app)
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error putting %s formation: %s", app, err)
		}
	}
	if startScheduler {
		if err := f.StartScheduler(client, controllerFormation); err != nil {
			return err
		}
	}
	return nil
}
// TestDockerPush verifies that `flynn docker push` releases a Docker image
// with the image's ENV and CMD mapped onto the release, that pushing again
// does not clobber env vars set via `flynn env set`, and that the released
// process can be scaled up and serves HTTP via discoverd.
func (s *CLISuite) TestDockerPush(t *c.C) {
	// build image with ENV and CMD
	repo := "cli-test-push"
	s.buildDockerImage(t, repo,
		`ENV FOO=BAR`,
		`CMD ["/bin/pingserv"]`,
	)

	// create app
	client := s.controllerClient(t)
	app := &ct.App{Name: "cli-test-docker-push"}
	t.Assert(client.CreateApp(app), c.IsNil)

	// flynn docker push image
	t.Assert(flynn(t, "/", "-a", app.Name, "docker", "push", repo), Succeeds)

	// check app was released with correct env, meta and process type
	release, err := client.GetAppRelease(app.ID)
	t.Assert(err, c.IsNil)
	t.Assert(release.Env["FOO"], c.Equals, "BAR")
	t.Assert(release.Meta["docker-receive"], c.Equals, "true")
	t.Assert(release.Processes, c.HasLen, 1)
	proc, ok := release.Processes["app"]
	if !ok {
		t.Fatal(`release missing "app" process type`)
	}
	t.Assert(proc.Args, c.DeepEquals, []string{"/bin/pingserv"})

	// check updated env vars are not overwritten
	//
	// need to remove the tag before pushing as we are using Docker 1.9
	// which does not overwrite tags.
	// TODO: remove this when upgrading Docker > 1.9
	u, err := url.Parse(s.clusterConf(t).DockerPushURL)
	t.Assert(err, c.IsNil)
	tag := fmt.Sprintf("%s/%s:latest", u.Host, app.Name)
	t.Assert(run(t, exec.Command("docker", "rmi", tag)), Succeeds)
	t.Assert(flynn(t, "/", "-a", app.Name, "env", "set", "FOO=BAZ"), Succeeds)
	t.Assert(flynn(t, "/", "-a", app.Name, "docker", "push", repo), Succeeds)
	t.Assert(flynn(t, "/", "-a", app.Name, "env", "get", "FOO"), Outputs, "BAZ\n")

	// check the release can be scaled up
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "app=1"), Succeeds)

	// check the job is reachable with the app's name in discoverd
	instances, err := s.discoverdClient(t).Instances(app.Name+"-web", 10*time.Second)
	t.Assert(err, c.IsNil)
	res, err := hh.RetryClient.Get("http://" + instances[0].Addr)
	t.Assert(err, c.IsNil)
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	t.Assert(err, c.IsNil)
	t.Assert(string(body), c.Equals, "OK")
}
// CheckScale examines sirenia cluster formation to check if cluster // has been scaled up yet. // Returns true if scaled, false if not. func CheckScale(app, controllerKey, procName string, logger log15.Logger) (bool, error) { logger = logger.New("fn", "CheckScale") // Connect to controller. logger.Info("connecting to controller") client, err := controller.NewClient("", controllerKey) if err != nil { logger.Error("controller client error", "err", err) return false, err } // Retrieve app release. logger.Info("retrieving app release", "app", app) release, err := client.GetAppRelease(app) if err == controller.ErrNotFound { logger.Error("release not found", "app", app) return false, err } else if err != nil { logger.Error("get release error", "app", app, "err", err) return false, err } // Retrieve current formation. logger.Info("retrieving formation", "app", app, "release_id", release.ID) formation, err := client.GetFormation(app, release.ID) if err == controller.ErrNotFound { logger.Error("formation not found", "app", app, "release_id", release.ID) return false, err } else if err != nil { logger.Error("formation error", "app", app, "release_id", release.ID, "err", err) return false, err } // Database hasn't been scaled up yet if formation.Processes[procName] == 0 { return false, nil } return true, nil }
func (s *CLISuite) TestDockerExportImport(t *c.C) { // release via docker-receive client := s.controllerClient(t) app := &ct.App{Name: "cli-test-docker-export"} t.Assert(client.CreateApp(app), c.IsNil) repo := "cli-test-export" s.buildDockerImage(t, repo, `CMD ["/bin/pingserv"]`) t.Assert(flynn(t, "/", "-a", app.Name, "docker", "push", repo), Succeeds) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "app=1"), Succeeds) defer flynn(t, "/", "-a", app.Name, "scale", "app=0") // export the app file := filepath.Join(t.MkDir(), "export.tar") t.Assert(flynn(t, "/", "-a", app.Name, "export", "-f", file), Succeeds) // delete the image from the registry release, err := client.GetAppRelease(app.Name) t.Assert(err, c.IsNil) artifact, err := client.GetArtifact(release.ImageArtifactID()) t.Assert(err, c.IsNil) u, err := url.Parse(s.clusterConf(t).DockerPushURL) t.Assert(err, c.IsNil) uri := fmt.Sprintf("http://%s/v2/%s/manifests/%s", u.Host, app.Name, artifact.Meta["docker-receive.digest"]) req, err := http.NewRequest("DELETE", uri, nil) req.SetBasicAuth("", s.clusterConf(t).Key) t.Assert(err, c.IsNil) res, err := http.DefaultClient.Do(req) t.Assert(err, c.IsNil) res.Body.Close() // import to another app importApp := "cli-test-docker-import" t.Assert(flynn(t, "/", "import", "--name", importApp, "--file", file), Succeeds) defer flynn(t, "/", "-a", importApp, "scale", "app=0") // wait for it to start _, err = s.discoverdClient(t).Instances(importApp+"-web", 10*time.Second) t.Assert(err, c.IsNil) }
func runClusterMigrateDomain(args *docopt.Args) error { cluster, err := getCluster() if err != nil { shutdown.Fatal(err) } client, err := cluster.Client() if err != nil { shutdown.Fatal(err) } dm := &ct.DomainMigration{ Domain: args.String["<domain>"], } release, err := client.GetAppRelease("controller") if err != nil { return err } dm.OldDomain = release.Env["DEFAULT_ROUTE_DOMAIN"] if !promptYesNo(fmt.Sprintf("Migrate cluster domain from %q to %q?", dm.OldDomain, dm.Domain)) { fmt.Println("Aborted") return nil } maxDuration := 2 * time.Minute fmt.Printf("Migrating cluster domain (this can take up to %s)...\n", maxDuration) events := make(chan *ct.Event) stream, err := client.StreamEvents(ct.StreamEventsOptions{ ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration}, }, events) if err != nil { return nil } defer stream.Close() if err := client.PutDomain(dm); err != nil { return err } timeout := time.After(maxDuration) for { select { case event, ok := <-events: if !ok { return stream.Err() } var e *ct.DomainMigrationEvent if err := json.Unmarshal(event.Data, &e); err != nil { return err } if e.Error != "" { fmt.Println(e.Error) } if e.DomainMigration.FinishedAt != nil { dm = e.DomainMigration fmt.Printf("Changed cluster domain from %q to %q\n", dm.OldDomain, dm.Domain) // update flynnrc cluster.TLSPin = dm.TLSCert.Pin cluster.ControllerURL = fmt.Sprintf("https://controller.%s", dm.Domain) cluster.GitURL = fmt.Sprintf("https://git.%s", dm.Domain) cluster.DockerPushURL = fmt.Sprintf("https://docker.%s", dm.Domain) if err := config.SaveTo(configPath()); err != nil { return fmt.Errorf("Error saving config: %s", err) } // update git config caFile, err := cfg.CACertFile(cluster.Name) if err != nil { return err } defer caFile.Close() if _, err := caFile.Write([]byte(dm.TLSCert.CACert)); err != nil { return err } if err := cfg.WriteGlobalGitConfig(cluster.GitURL, caFile.Name()); err != nil { return err } cfg.RemoveGlobalGitConfig(fmt.Sprintf("https://git.%s", 
dm.OldDomain)) // try to run "docker login" for the new domain, but just print a warning // if it fails so the user can fix it later if host, err := cluster.DockerPushHost(); err == nil { if err := dockerLogin(host, cluster.Key); err == ErrDockerTLSError { printDockerTLSWarning(host, caFile.Name()) } } dockerLogout(dm.OldDomain) fmt.Println("Updated local CLI configuration") return nil } case <-timeout: return errors.New("timed out waiting for domain migration to complete") } } }
// ScaleUp scales up a dormant Sirenia cluster.
// It first checks whether the database at serviceAddr is already
// read-write (in which case there is nothing to do), then bumps the
// database process count in the app's formation (1 when singleton,
// otherwise 3) and waits up to five minutes for the cluster to become
// read-write.
func ScaleUp(app, controllerKey, serviceAddr, procName, singleton string, logger log15.Logger) error {
	logger = logger.New("fn", "ScaleUp")
	sc := sirenia.NewClient(serviceAddr)

	logger.Info("checking status", "host", serviceAddr)
	if status, err := sc.Status(); err == nil && status.Database != nil && status.Database.ReadWrite {
		logger.Info("database is up, skipping scale")
		// Skip the rest, the database is already available
		return nil
	} else if err != nil {
		// status errors are non-fatal: we fall through and scale anyway
		logger.Info("error checking status", "err", err)
	} else {
		logger.Info("got status, but database is not read-write")
	}

	// Connect to controller.
	logger.Info("connecting to controller")
	client, err := controller.NewClient("", controllerKey)
	if err != nil {
		logger.Error("controller client error", "err", err)
		return err
	}

	// Retrieve the app release.
	logger.Info("retrieving app release", "app", app)
	release, err := client.GetAppRelease(app)
	if err == controller.ErrNotFound {
		logger.Error("release not found", "app", app)
		return errors.New("release not found")
	} else if err != nil {
		logger.Error("get release error", "app", app, "err", err)
		return err
	}

	// Retrieve current formation.
	logger.Info("retrieving formation", "app", app, "release_id", release.ID)
	formation, err := client.GetFormation(app, release.ID)
	if err == controller.ErrNotFound {
		logger.Error("formation not found", "app", app, "release_id", release.ID)
		return errors.New("formation not found")
	} else if err != nil {
		logger.Error("formation error", "app", app, "release_id", release.ID, "err", err)
		return err
	}

	// If database is running then exit.
	if formation.Processes[procName] > 0 {
		logger.Info("database is running, scaling not necessary")
		return nil
	}

	// Copy processes and increase database processes.
	processes := make(map[string]int, len(formation.Processes))
	for k, v := range formation.Processes {
		processes[k] = v
	}

	// singleton is passed as a string flag ("true"/other), not a bool
	if singleton == "true" {
		processes[procName] = 1
	} else {
		processes[procName] = 3
	}

	// Update formation.
	logger.Info("updating formation", "app", app, "release_id", release.ID)
	formation.Processes = processes
	if err := client.PutFormation(formation); err != nil {
		logger.Error("put formation error", "app", app, "release_id", release.ID, "err", err)
		return err
	}

	if err := sc.WaitForReadWrite(5 * time.Minute); err != nil {
		logger.Error("wait for read write", "err", err)
		return errors.New("timed out while starting sirenia cluster")
	}

	logger.Info("scaling complete")
	return nil
}
// runClusterBackup writes a cluster backup tarball to --file (or stdout):
// a flynn.json manifest describing the key system apps (postgres,
// discoverd, flannel, controller) followed by a gzipped pg_dumpall of the
// postgres database. A progress bar is shown on stderr when it is a TTY.
func runClusterBackup(args *docopt.Args) error {
	client, err := getClusterClient()
	if err != nil {
		return err
	}

	// only draw a progress bar when stderr is an interactive terminal
	var bar *pb.ProgressBar
	if term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
	}

	// default to stdout so the backup can be piped
	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		dest = f
	}
	fmt.Fprintln(os.Stderr, "Creating cluster backup...")

	tw := NewTarWriter("flynn-backup-"+time.Now().UTC().Format("2006-01-02_150405"), dest)
	defer tw.Close()

	// get app and release details for key apps
	data := make(map[string]*ct.ExpandedFormation, 4)
	for _, name := range []string{"postgres", "discoverd", "flannel", "controller"} {
		app, err := client.GetApp(name)
		if err != nil {
			return fmt.Errorf("error getting %s app details: %s", name, err)
		}
		release, err := client.GetAppRelease(app.ID)
		if err != nil {
			return fmt.Errorf("error getting %s app release: %s", name, err)
		}
		formation, err := client.GetFormation(app.ID, release.ID)
		if err != nil {
			return fmt.Errorf("error getting %s app formation: %s", name, err)
		}
		artifact, err := client.GetArtifact(release.ArtifactID)
		if err != nil {
			return fmt.Errorf("error getting %s app artifact: %s", name, err)
		}
		data[name] = &ct.ExpandedFormation{
			App:       app,
			Release:   release,
			Artifact:  artifact,
			Processes: formation.Processes,
		}
	}
	if err := tw.WriteJSON("flynn.json", data); err != nil {
		return err
	}

	// dump the postgres database into the tarball via a one-off job
	config := &runConfig{
		App:        "postgres",
		Release:    data["postgres"].Release.ID,
		Entrypoint: []string{"sh"},
		Args:       []string{"-c", "pg_dumpall --clean --if-exists | gzip -9"},
		Env: map[string]string{
			"PGHOST":     "leader.postgres.discoverd",
			"PGUSER":     "******",
			"PGPASSWORD": data["postgres"].Release.Env["PGPASSWORD"],
		},
		DisableLog: true,
	}
	if err := tw.WriteCommandOutput(client, "postgres.sql.gz", config, bar); err != nil {
		return fmt.Errorf("error dumping database: %s", err)
	}

	if bar != nil {
		bar.Finish()
	}
	fmt.Fprintln(os.Stderr, "Backup complete.")
	return nil
}
// migrateDomain submits a domain migration and verifies the full event
// sequence and its effects: an initial event (no TLS cert yet), a final
// event (new cert, FinishedAt set), updated controller/router/dashboard
// release env vars, and that the controller and dashboard respond on the
// new domain.
func (s *DomainMigrationSuite) migrateDomain(t *c.C, dm *ct.DomainMigration) {
	debugf(t, "migrating domain from %s to %s", dm.OldDomain, dm.Domain)
	client := s.controllerClient(t)

	// subscribe before submitting so no event is missed
	events := make(chan *ct.Event)
	stream, err := client.StreamEvents(controller.StreamEventsOptions{
		ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration},
	}, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// snapshot the router release to later prove TLSKEY was rotated
	prevRouterRelease, err := client.GetAppRelease("router")
	t.Assert(err, c.IsNil)

	err = client.PutDomain(dm)
	t.Assert(err, c.IsNil)

	// waitEvent blocks for the next domain migration event and decodes it
	waitEvent := func(typ string, timeout time.Duration) (event ct.DomainMigrationEvent) {
		debugf(t, "waiting for %s domain migration event", typ)
		var e *ct.Event
		var ok bool
		select {
		case e, ok = <-events:
			if !ok {
				t.Fatal("event stream closed unexpectedly")
			}
			debugf(t, "got %s domain migration event", typ)
		case <-time.After(timeout):
			t.Fatalf("timed out waiting for %s domain migration event", typ)
		}
		t.Assert(e.Data, c.NotNil)
		t.Assert(json.Unmarshal(e.Data, &event), c.IsNil)
		return
	}

	// created
	event := waitEvent("initial", 2*time.Minute)
	t.Assert(event.Error, c.Equals, "")
	t.Assert(event.DomainMigration, c.NotNil)
	t.Assert(event.DomainMigration.ID, c.Equals, dm.ID)
	t.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)
	t.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)
	t.Assert(event.DomainMigration.TLSCert, c.IsNil)
	t.Assert(event.DomainMigration.OldTLSCert, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)
	t.Assert(event.DomainMigration.FinishedAt, c.IsNil)

	// complete
	event = waitEvent("final", 3*time.Minute)
	t.Assert(event.Error, c.Equals, "")
	t.Assert(event.DomainMigration, c.NotNil)
	t.Assert(event.DomainMigration.ID, c.Equals, dm.ID)
	t.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)
	t.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)
	t.Assert(event.DomainMigration.TLSCert, c.NotNil)
	t.Assert(event.DomainMigration.OldTLSCert, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)
	t.Assert(event.DomainMigration.FinishedAt, c.NotNil)

	cert := event.DomainMigration.TLSCert

	// controller must point at the new domain and CA cert
	controllerRelease, err := client.GetAppRelease("controller")
	t.Assert(err, c.IsNil)
	t.Assert(controllerRelease.Env["DEFAULT_ROUTE_DOMAIN"], c.Equals, dm.Domain)
	t.Assert(controllerRelease.Env["CA_CERT"], c.Equals, cert.CACert)

	// router must carry the new cert and a rotated key
	routerRelease, err := client.GetAppRelease("router")
	t.Assert(err, c.IsNil)
	t.Assert(routerRelease.Env["TLSCERT"], c.Equals, cert.Cert)
	t.Assert(routerRelease.Env["TLSKEY"], c.Not(c.Equals), "")
	t.Assert(routerRelease.Env["TLSKEY"], c.Not(c.Equals), prevRouterRelease.Env["TLSKEY"])

	// dashboard env must be rewritten for the new domain
	dashboardRelease, err := client.GetAppRelease("dashboard")
	t.Assert(err, c.IsNil)
	t.Assert(dashboardRelease.Env["DEFAULT_ROUTE_DOMAIN"], c.Equals, dm.Domain)
	t.Assert(dashboardRelease.Env["CONTROLLER_DOMAIN"], c.Equals, fmt.Sprintf("controller.%s", dm.Domain))
	t.Assert(dashboardRelease.Env["URL"], c.Equals, fmt.Sprintf("dashboard.%s", dm.Domain))
	t.Assert(dashboardRelease.Env["CA_CERT"], c.Equals, cert.CACert)

	// doPing retries a few times to allow routes to propagate before
	// asserting the component answers on the new domain
	var doPing func(string, int)
	doPing = func(component string, retriesRemaining int) {
		url := fmt.Sprintf("http://%s.%s/ping", component, dm.Domain)
		res, err := (&http.Client{}).Get(url)
		if (err != nil || res.StatusCode != 200) && retriesRemaining > 0 {
			time.Sleep(100 * time.Millisecond)
			doPing(component, retriesRemaining-1)
			return
		}
		t.Assert(err, c.IsNil)
		t.Assert(res.StatusCode, c.Equals, 200, c.Commentf("failed to ping %s", component))
	}
	doPing("controller", 3)
	doPing("dashboard", 3)
}
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, cfg bootstrap.Config) error { defer close(ch) f, err := os.Open(backupFile) if err != nil { return fmt.Errorf("error opening backup file: %s", err) } defer f.Close() tr := tar.NewReader(f) getFile := func(name string) (io.Reader, error) { rewound := false var res io.Reader for { header, err := tr.Next() if err == io.EOF && !rewound { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, fmt.Errorf("error seeking in backup file: %s", err) } rewound = true tr = tar.NewReader(f) continue } else if err != nil { return nil, fmt.Errorf("error finding %s in backup file: %s", name, err) } if path.Base(header.Name) != name { continue } if strings.HasSuffix(name, ".gz") { res, err = gzip.NewReader(tr) if err != nil { return nil, fmt.Errorf("error opening %s from backup file: %s", name, err) } } else { res = tr } break } return res, nil } var data struct { Discoverd, Flannel, Postgres, MariaDB, Controller *ct.ExpandedFormation } jsonData, err := getFile("flynn.json") if err != nil { return err } if jsonData == nil { return fmt.Errorf("did not file flynn.json in backup file") } if err := json.NewDecoder(jsonData).Decode(&data); err != nil { return fmt.Errorf("error decoding backup data: %s", err) } db, err := getFile("postgres.sql.gz") if err != nil { return err } if db == nil { return fmt.Errorf("did not find postgres.sql.gz in backup file") } // add buffer to the end of the SQL import containing commands that rewrite data in the controller db sqlBuf := &bytes.Buffer{} db = io.MultiReader(db, sqlBuf) sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"])) sqlBuf.WriteString(` CREATE FUNCTION pg_temp.json_object_update_key( "json" jsonb, "key_to_set" TEXT, "value_to_set" TEXT ) RETURNS jsonb LANGUAGE sql IMMUTABLE STRICT AS $function$ SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb FROM (SELECT * FROM 
json_each("json"::json) WHERE "key" <> "key_to_set" UNION ALL SELECT "key_to_set", to_json("value_to_set")) AS "fields" $function$; `) var manifestSteps []struct { ID string Artifact struct { URI string } Release struct { Env map[string]string } } if err := json.Unmarshal(manifest, &manifestSteps); err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } artifactURIs := make(map[string]string) for _, step := range manifestSteps { if step.Artifact.URI != "" { artifactURIs[step.ID] = step.Artifact.URI if step.ID == "gitreceive" { artifactURIs["slugbuilder"] = step.Release.Env["SLUGBUILDER_IMAGE_URI"] artifactURIs["slugrunner"] = step.Release.Env["SLUGRUNNER_IMAGE_URI"] } // update current artifact in database for service sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s' WHERE artifact_id = (SELECT artifact_id FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = '%s'));`, step.Artifact.URI, step.ID)) } } data.Discoverd.Artifact.URI = artifactURIs["discoverd"] data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}" data.Postgres.Artifact.URI = artifactURIs["postgres"] data.Flannel.Artifact.URI = artifactURIs["flannel"] data.Controller.Artifact.URI = artifactURIs["controller"] if data.MariaDB != nil { data.MariaDB.Artifact.URI = artifactURIs["mariadb"] if data.MariaDB.Processes["mariadb"] == 0 { // skip mariadb if it wasn't scaled up in the backup data.MariaDB = nil } } sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s' WHERE uri = (SELECT env->>'SLUGRUNNER_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'));`, artifactURIs["slugrunner"])) for _, app := range []string{"gitreceive", "taffy"} { for _, env := range []string{"slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, '%s_IMAGE_URI', '%s') WHERE release_id = (SELECT release_id 
from apps WHERE name = '%s');`, strings.ToUpper(env), artifactURIs[env], app)) } } step := func(id, name string, action bootstrap.Action) bootstrap.Step { if ra, ok := action.(*bootstrap.RunAppAction); ok { ra.ID = id } return bootstrap.Step{ StepMeta: bootstrap.StepMeta{ID: id, Action: name}, Action: action, } } // start discoverd/flannel/postgres/mariadb cfg.Singleton = data.Postgres.Release.Env["SINGLETON"] == "true" systemSteps := bootstrap.Manifest{ step("discoverd", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Discoverd, }), step("flannel", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Flannel, }), step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}), step("postgres", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Postgres, }), step("postgres-wait", "wait", &bootstrap.WaitAction{ URL: "http://postgres-api.discoverd/ping", }), } // Only run up MariaDB if it's in the backup if data.MariaDB != nil { systemSteps = append(systemSteps, step("mariadb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MariaDB, })) systemSteps = append(systemSteps, step("mariadb-wait", "wait", &bootstrap.WaitAction{ URL: "http://mariadb-api.discoverd/ping", })) } state, err := systemSteps.Run(ch, cfg) if err != nil { return err } // set DISCOVERD_PEERS in release sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd') `, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"])) // load data into postgres cmd := exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.Postgres.Artifact.Type, URI: data.Postgres.Artifact.URI}, nil) cmd.Entrypoint = []string{"psql"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGDATABASE": "postgres", "PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"], } cmd.Stdin = db meta := 
bootstrap.StepMeta{ID: "restore", Action: "restore-postgres"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err := cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running psql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} var mysqldb io.Reader if data.MariaDB != nil { mysqldb, err = getFile("mysql.sql.gz") if err != nil { return err } } // load data into mariadb if it was present in the backup. if mysqldb != nil && data.MariaDB != nil { cmd = exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.MariaDB.Artifact.Type, URI: data.MariaDB.Artifact.URI}, nil) cmd.Entrypoint = []string{"mysql"} cmd.Cmd = []string{"-u", "flynn", "-h", "leader.mariadb.discoverd"} cmd.Env = map[string]string{ "MYSQL_PWD": data.MariaDB.Release.Env["MYSQL_PWD"], } cmd.Stdin = mysqldb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mariadb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mysql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // start controller API data.Controller.Processes = map[string]int{"web": 1} _, err = bootstrap.Manifest{ step("controller", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), }.RunWithState(ch, state) // wait for controller to come up meta = bootstrap.StepMeta{ID: "wait-controller", Action: "wait-controller"} ch <- &bootstrap.StepInfo{StepMeta: meta, 
State: "start", Timestamp: time.Now().UTC()} controllerInstances, err := discoverd.GetInstances("controller", 30*time.Second) if err != nil { return fmt.Errorf("error getting controller instance: %s", err) } // get blobstore config client, err := controller.NewClient("http://"+controllerInstances[0].Addr, data.Controller.Release.Env["AUTH_KEY"]) if err != nil { return err } blobstoreRelease, err := client.GetAppRelease("blobstore") if err != nil { return fmt.Errorf("error getting blobstore release: %s", err) } blobstoreFormation, err := client.GetExpandedFormation("blobstore", blobstoreRelease.ID) if err != nil { return fmt.Errorf("error getting blobstore expanded formation: %s", err) } state.SetControllerKey(data.Controller.Release.Env["AUTH_KEY"]) ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start blobstore, scheduler, and enable cluster monitor data.Controller.Processes = map[string]int{"scheduler": 1} // only start one scheduler instance schedulerProcess := data.Controller.Release.Processes["scheduler"] schedulerProcess.Omni = false data.Controller.Release.Processes["scheduler"] = schedulerProcess _, err = bootstrap.Manifest{ step("blobstore", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: blobstoreFormation, }), step("blobstore-wait", "wait", &bootstrap.WaitAction{ URL: "http://blobstore.discoverd", Status: 404, }), step("controller-scheduler", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), step("status", "status-check", &bootstrap.StatusCheckAction{ URL: "http://status-web.discoverd", }), step("cluster-monitor", "cluster-monitor", &bootstrap.ClusterMonitorAction{ Enabled: true, }), }.RunWithState(ch, state) if err != nil { return err } return nil }
func run() error { client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY")) if err != nil { return fmt.Errorf("Unable to connect to controller: %s", err) } usage := ` Usage: flynn-receiver <app> <rev> [-e <var>=<val>]... [-m <key>=<val>]... Options: -e,--env <var>=<val> -m,--meta <key>=<val> `[1:] args, _ := docopt.Parse(usage, nil, true, version.String(), false) appName := args.String["<app>"] env, err := parsePairs(args, "--env") if err != nil { return err } meta, err := parsePairs(args, "--meta") if err != nil { return err } slugBuilder, err := client.GetArtifact(os.Getenv("SLUGBUILDER_IMAGE_ID")) if err != nil { return fmt.Errorf("Error getting slugbuilder image: %s", err) } slugRunnerID := os.Getenv("SLUGRUNNER_IMAGE_ID") if _, err := client.GetArtifact(slugRunnerID); err != nil { return fmt.Errorf("Error getting slugrunner image: %s", err) } app, err := client.GetApp(appName) if err == controller.ErrNotFound { return fmt.Errorf("Unknown app %q", appName) } else if err != nil { return fmt.Errorf("Error retrieving app: %s", err) } prevRelease, err := client.GetAppRelease(app.Name) if err == controller.ErrNotFound { prevRelease = &ct.Release{} } else if err != nil { return fmt.Errorf("Error getting current app release: %s", err) } fmt.Printf("-----> Building %s...\n", app.Name) slugImageID := random.UUID() jobEnv := map[string]string{ "BUILD_CACHE_URL": fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID), "CONTROLLER_KEY": os.Getenv("CONTROLLER_KEY"), "SLUG_IMAGE_ID": slugImageID, } if buildpackURL, ok := env["BUILDPACK_URL"]; ok { jobEnv["BUILDPACK_URL"] = buildpackURL } else if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok { jobEnv["BUILDPACK_URL"] = buildpackURL } for _, k := range []string{"SSH_CLIENT_KEY", "SSH_CLIENT_HOSTS"} { if v := os.Getenv(k); v != "" { jobEnv[k] = v } } job := &host.Job{ Config: host.ContainerConfig{ Args: []string{"/builder/build.sh"}, Env: jobEnv, Stdin: true, DisableLog: true, }, Partition: "background", 
Metadata: map[string]string{ "flynn-controller.app": app.ID, "flynn-controller.app_name": app.Name, "flynn-controller.release": prevRelease.ID, "flynn-controller.type": "slugbuilder", }, Resources: resource.Defaults(), } if sb, ok := prevRelease.Processes["slugbuilder"]; ok { job.Resources = sb.Resources } else if rawLimit := os.Getenv("SLUGBUILDER_DEFAULT_MEMORY_LIMIT"); rawLimit != "" { if limit, err := resource.ParseLimit(resource.TypeMemory, rawLimit); err == nil { job.Resources[resource.TypeMemory] = resource.Spec{Limit: &limit, Request: &limit} } } cmd := exec.Job(slugBuilder, job) cmd.Volumes = []*ct.VolumeReq{{Path: "/tmp", DeleteOnStop: true}} var output bytes.Buffer cmd.Stdout = io.MultiWriter(os.Stdout, &output) cmd.Stderr = os.Stderr releaseEnv := make(map[string]string, len(env)) if prevRelease.Env != nil { for k, v := range prevRelease.Env { releaseEnv[k] = v } } for k, v := range env { releaseEnv[k] = v } if len(releaseEnv) > 0 { stdin, err := cmd.StdinPipe() if err != nil { return err } go func() { if err := appendEnvDir(os.Stdin, stdin, releaseEnv); err != nil { log.Fatalln("ERROR:", err) } }() } else { cmd.Stdin = os.Stdin } shutdown.BeforeExit(func() { cmd.Kill() }) if err := cmd.Run(); err != nil { return fmt.Errorf("Build failed: %s", err) } var types []string if match := typesPattern.FindSubmatch(output.Bytes()); match != nil { types = strings.Split(string(match[1]), ", ") } fmt.Printf("-----> Creating release...\n") release := &ct.Release{ ArtifactIDs: []string{slugRunnerID, slugImageID}, Env: releaseEnv, Meta: prevRelease.Meta, } if release.Meta == nil { release.Meta = make(map[string]string, len(meta)) } for k, v := range meta { release.Meta[k] = v } procs := make(map[string]ct.ProcessType) for _, t := range types { proc := prevRelease.Processes[t] proc.Args = []string{"/runner/init", "start", t} if (t == "web" || strings.HasSuffix(t, "-web")) && proc.Service == "" { proc.Service = app.Name + "-" + t proc.Ports = []ct.Port{{ Port: 8080, 
Proto: "tcp", Service: &host.Service{ Name: proc.Service, Create: true, Check: &host.HealthCheck{Type: "tcp"}, }, }} } procs[t] = proc } if sb, ok := prevRelease.Processes["slugbuilder"]; ok { procs["slugbuilder"] = sb } release.Processes = procs if err := client.CreateRelease(release); err != nil { return fmt.Errorf("Error creating release: %s", err) } if err := client.DeployAppRelease(app.Name, release.ID, nil); err != nil { return fmt.Errorf("Error deploying app release: %s", err) } // if the app has a web job and has not been scaled before, create a // web=1 formation and wait for the "APPNAME-web" service to start // (whilst also watching job events so the deploy fails if the job // crashes) if needsDefaultScale(app.ID, prevRelease.ID, procs, client) { fmt.Println("=====> Scaling initial release to web=1") formation := &ct.Formation{ AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}, } jobEvents := make(chan *ct.Job) jobStream, err := client.StreamJobEvents(app.ID, jobEvents) if err != nil { return fmt.Errorf("Error streaming job events: %s", err) } defer jobStream.Close() serviceEvents := make(chan *discoverd.Event) serviceStream, err := discoverd.NewService(app.Name + "-web").Watch(serviceEvents) if err != nil { return fmt.Errorf("Error streaming service events: %s", err) } defer serviceStream.Close() if err := client.PutFormation(formation); err != nil { return fmt.Errorf("Error putting formation: %s", err) } fmt.Println("-----> Waiting for initial web job to start...") err = func() error { for { select { case e, ok := <-serviceEvents: if !ok { return fmt.Errorf("Service stream closed unexpectedly: %s", serviceStream.Err()) } if e.Kind == discoverd.EventKindUp && e.Instance.Meta["FLYNN_RELEASE_ID"] == release.ID { fmt.Println("=====> Initial web job started") return nil } case e, ok := <-jobEvents: if !ok { return fmt.Errorf("Job stream closed unexpectedly: %s", jobStream.Err()) } if e.State == ct.JobStateDown { return 
errors.New("Initial web job failed to start") } case <-time.After(time.Duration(app.DeployTimeout) * time.Second): return errors.New("Timed out waiting for initial web job to start") } } }() if err != nil { fmt.Println("-----> WARN: scaling initial release down to web=0 due to error") formation.Processes["web"] = 0 if err := client.PutFormation(formation); err != nil { // just print this error and return the original error fmt.Println("-----> WARN: could not scale the initial release down (it may continue to run):", err) } return err } } fmt.Println("=====> Application deployed") return nil }
func (s *CLISuite) TestExportImport(t *c.C) { srcApp := "app-export" + random.String(8) dstApp := "app-import" + random.String(8) // create app r := s.newGitRepo(t, "http") t.Assert(r.flynn("create", srcApp), Succeeds) // exporting the app without a release should work file := filepath.Join(t.MkDir(), "export.tar") t.Assert(r.flynn("export", "-f", file), Succeeds) assertExportContains(t, file, "app.json", "routes.json") // exporting the app with an artifact-less release should work t.Assert(r.flynn("env", "set", "FOO=BAR"), Succeeds) t.Assert(r.flynn("export", "-f", file), Succeeds) assertExportContains(t, file, "app.json", "routes.json", "release.json") // release the app and provision some dbs t.Assert(r.git("push", "flynn", "master"), Succeeds) t.Assert(r.flynn("resource", "add", "postgres"), Succeeds) t.Assert(r.flynn("pg", "psql", "--", "-c", "CREATE table foos (data text); INSERT INTO foos (data) VALUES ('foobar')"), Succeeds) t.Assert(r.flynn("resource", "add", "mysql"), Succeeds) t.Assert(r.flynn("mysql", "console", "--", "-e", "CREATE TABLE foos (data TEXT); INSERT INTO foos (data) VALUES ('foobar')"), Succeeds) // grab the slug details client := s.controllerClient(t) release, err := client.GetAppRelease(srcApp) t.Assert(err, c.IsNil) artifact, err := client.GetArtifact(release.ArtifactIDs[1]) t.Assert(err, c.IsNil) slugLayer := artifact.Manifest().Rootfs[0].Layers[0] // export app t.Assert(r.flynn("export", "-f", file), Succeeds) assertExportContains(t, file, "app.json", "routes.json", "release.json", "artifacts.json", slugLayer.ID+".layer", "formation.json", "postgres.dump", "mysql.dump", ) // remove db tables from source app t.Assert(r.flynn("pg", "psql", "--", "-c", "DROP TABLE foos"), Succeeds) t.Assert(r.flynn("mysql", "console", "--", "-e", "DROP TABLE foos"), Succeeds) // remove the git remote t.Assert(r.git("remote", "remove", "flynn"), Succeeds) // import app t.Assert(r.flynn("import", "--name", dstApp, "--file", file), Succeeds) // test dbs were 
imported query := r.flynn("-a", dstApp, "pg", "psql", "--", "-c", "SELECT * FROM foos") t.Assert(query, SuccessfulOutputContains, "foobar") query = r.flynn("-a", dstApp, "mysql", "console", "--", "-e", "SELECT * FROM foos") t.Assert(query, SuccessfulOutputContains, "foobar") // wait for it to start _, err = s.discoverdClient(t).Instances(dstApp+"-web", 10*time.Second) t.Assert(err, c.IsNil) }
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, cfg bootstrap.Config) error { defer close(ch) f, err := os.Open(backupFile) if err != nil { return fmt.Errorf("error opening backup file: %s", err) } defer f.Close() tr := tar.NewReader(f) getFile := func(name string) (io.Reader, error) { rewound := false var res io.Reader for { header, err := tr.Next() if err == io.EOF && !rewound { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, fmt.Errorf("error seeking in backup file: %s", err) } rewound = true tr = tar.NewReader(f) continue } else if err != nil { return nil, fmt.Errorf("error finding %s in backup file: %s", name, err) } if path.Base(header.Name) != name { continue } if strings.HasSuffix(name, ".gz") { res, err = gzip.NewReader(tr) if err != nil { return nil, fmt.Errorf("error opening %s from backup file: %s", name, err) } } else { res = tr } break } return res, nil } var data struct { Discoverd, Flannel, Postgres, MariaDB, MongoDB, Controller *ct.ExpandedFormation } jsonData, err := getFile("flynn.json") if err != nil { return err } if jsonData == nil { return fmt.Errorf("did not file flynn.json in backup file") } if err := json.NewDecoder(jsonData).Decode(&data); err != nil { return fmt.Errorf("error decoding backup data: %s", err) } db, err := getFile("postgres.sql.gz") if err != nil { return err } if db == nil { return fmt.Errorf("did not find postgres.sql.gz in backup file") } // add buffer to the end of the SQL import containing commands that rewrite data in the controller db sqlBuf := &bytes.Buffer{} db = io.MultiReader(db, sqlBuf) sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"])) sqlBuf.WriteString(` CREATE FUNCTION pg_temp.json_object_update_key( "json" jsonb, "key_to_set" TEXT, "value_to_set" TEXT ) RETURNS jsonb LANGUAGE sql IMMUTABLE STRICT AS $function$ SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb FROM (SELECT * FROM 
json_each("json"::json) WHERE "key" <> "key_to_set" UNION ALL SELECT "key_to_set", to_json("value_to_set")) AS "fields" $function$; `) type manifestStep struct { ID string Artifacts []*ct.Artifact Artifact *ct.Artifact Release struct { Env map[string]string Processes map[string]ct.ProcessType } } var manifestSteps []*manifestStep if err := json.Unmarshal(manifest, &manifestSteps); err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } manifestStepMap := make(map[string]bootstrap.Step, len(manifestSteps)) steps, err := bootstrap.UnmarshalManifest(manifest, nil) if err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } for _, step := range steps { manifestStepMap[step.StepMeta.ID] = step } artifacts := make(map[string]*ct.Artifact) updateProcArgs := func(f *ct.ExpandedFormation, step *manifestStep) { for typ, proc := range step.Release.Processes { p := f.Release.Processes[typ] p.Args = proc.Args f.Release.Processes[typ] = p } } updateVolumes := func(f *ct.ExpandedFormation, step *manifestStep) { for typ, proc := range step.Release.Processes { p := f.Release.Processes[typ] p.Volumes = proc.Volumes f.Release.Processes[typ] = p } } for _, step := range manifestSteps { switch step.ID { case "discoverd": updateVolumes(data.Discoverd, step) case "postgres": updateProcArgs(data.Postgres, step) updateVolumes(data.Postgres, step) case "controller": updateProcArgs(data.Controller, step) case "mariadb": if data.MariaDB != nil { updateProcArgs(data.MariaDB, step) updateVolumes(data.MariaDB, step) } case "mongodb": if data.MongoDB != nil { updateProcArgs(data.MongoDB, step) updateVolumes(data.MongoDB, step) } } if step.Artifact != nil { artifacts[step.ID] = step.Artifact } else if len(step.Artifacts) > 0 { artifacts[step.ID] = step.Artifacts[0] } } data.Discoverd.Artifacts = []*ct.Artifact{artifacts["discoverd"]} data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}" data.Postgres.Artifacts 
= []*ct.Artifact{artifacts["postgres"]} data.Flannel.Artifacts = []*ct.Artifact{artifacts["flannel"]} data.Controller.Artifacts = []*ct.Artifact{artifacts["controller"]} if data.MariaDB != nil { data.MariaDB.Artifacts = []*ct.Artifact{artifacts["mariadb"]} } if data.MongoDB != nil { data.MongoDB.Artifacts = []*ct.Artifact{artifacts["mongodb"]} } // set TELEMETRY_CLUSTER_ID telemetryClusterID := random.UUID() sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{TELEMETRY_CLUSTER_ID}', '%q') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'controller'); `, telemetryClusterID)) data.Controller.Release.Env["TELEMETRY_CLUSTER_ID"] = telemetryClusterID // set TELEMETRY_BOOTSTRAP_ID if unset if data.Controller.Release.Env["TELEMETRY_BOOTSTRAP_ID"] == "" { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{TELEMETRY_BOOTSTRAP_ID}', '%q') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'controller'); `, telemetryClusterID)) data.Controller.Release.Env["TELEMETRY_BOOTSTRAP_ID"] = telemetryClusterID } step := func(id, name string, action bootstrap.Action) bootstrap.Step { if ra, ok := action.(*bootstrap.RunAppAction); ok { ra.ID = id } return bootstrap.Step{ StepMeta: bootstrap.StepMeta{ID: id, Action: name}, Action: action, } } // ensure flannel has NETWORK set if required if network := os.Getenv("FLANNEL_NETWORK"); network != "" { data.Flannel.Release.Env["NETWORK"] = network sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'NETWORK', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'flannel'); `, network)) } // ensure controller / gitreceive have tmp volumes sqlBuf.WriteString(` UPDATE releases SET processes = jsonb_set(processes, '{web,volumes}', '[{"path": "/tmp", "delete_on_stop": true}]') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'controller'); UPDATE releases SET processes = jsonb_set(processes, '{app,volumes}', 
'[{"path": "/tmp", "delete_on_stop": true}]') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'gitreceive'); `) // update the SINGLETON environment variable for database appliances // (which includes updating legacy appliances which had SINGLETON set // on the database type rather than the release) singleton := strconv.FormatBool(cfg.Singleton) data.Postgres.Release.Env["SINGLETON"] = singleton sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{SINGLETON}', '%q') WHERE release_id IN (SELECT release_id FROM apps WHERE name IN ('postgres', 'mariadb', 'mongodb')); `, singleton)) if data.MariaDB != nil { data.MariaDB.Release.Env["SINGLETON"] = singleton delete(data.MariaDB.Release.Processes["mariadb"].Env, "SINGLETON") sqlBuf.WriteString(` UPDATE releases SET processes = jsonb_set(processes, '{mariadb,env}', (processes #> '{mariadb,env}')::jsonb - 'SINGLETON') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'mariadb'); `) } if data.MongoDB != nil { data.MongoDB.Release.Env["SINGLETON"] = singleton delete(data.MongoDB.Release.Processes["mongodb"].Env, "SINGLETON") sqlBuf.WriteString(` UPDATE releases SET processes = jsonb_set(processes, '{mongodb,env}', (processes #> '{mongodb,env}')::jsonb - 'SINGLETON') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'mongodb'); `) } // modify app scale based on whether we are booting // a singleton or HA cluster var scale map[string]map[string]int if cfg.Singleton { scale = map[string]map[string]int{ "postgres": {"postgres": 1, "web": 1}, "mariadb": {"web": 1}, "mongodb": {"web": 1}, "controller": {"web": 1, "worker": 1}, "redis": {"web": 1}, "blobstore": {"web": 1}, "gitreceive": {"app": 1}, "docker-receive": {"app": 1}, "logaggregator": {"app": 1}, "dashboard": {"web": 1}, "status": {"web": 1}, } data.Postgres.Processes["postgres"] = 1 data.Postgres.Processes["web"] = 1 if data.MariaDB != nil { data.MariaDB.Processes["mariadb"] = 1 data.MariaDB.Processes["web"] = 1 
} if data.MongoDB != nil { data.MongoDB.Processes["mongodb"] = 1 data.MongoDB.Processes["web"] = 1 } } else { scale = map[string]map[string]int{ "postgres": {"postgres": 3, "web": 2}, "mariadb": {"web": 2}, "mongodb": {"web": 2}, "controller": {"web": 2, "worker": 2}, "redis": {"web": 2}, "blobstore": {"web": 2}, "gitreceive": {"app": 2}, "docker-receive": {"app": 2}, "logaggregator": {"app": 2}, "dashboard": {"web": 2}, "status": {"web": 2}, } data.Postgres.Processes["postgres"] = 3 data.Postgres.Processes["web"] = 2 if data.MariaDB != nil { data.MariaDB.Processes["mariadb"] = 3 data.MariaDB.Processes["web"] = 2 } if data.MongoDB != nil { data.MongoDB.Processes["mongodb"] = 3 data.MongoDB.Processes["web"] = 2 } } for app, procs := range scale { for typ, count := range procs { sqlBuf.WriteString(fmt.Sprintf(` UPDATE formations SET processes = jsonb_set(processes, '{%s}', '%d') WHERE release_id = (SELECT release_id FROM apps WHERE name = '%s'); `, typ, count, app)) } } // start discoverd/flannel/postgres systemSteps := bootstrap.Manifest{ step("discoverd", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Discoverd, }), step("flannel", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Flannel, }), step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}), step("postgres", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Postgres, }), step("postgres-wait", "sirenia-wait", &bootstrap.SireniaWaitAction{ Service: "postgres", }), } state, err := systemSteps.Run(ch, cfg) if err != nil { return err } // set DISCOVERD_PEERS in release sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd'); `, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"])) // make sure STATUS_KEY has the correct value in the dashboard release sqlBuf.WriteString(` UPDATE releases SET env = 
jsonb_set(env, '{STATUS_KEY}', ( SELECT env->'AUTH_KEY' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'status') )) WHERE release_id = (SELECT release_id FROM apps WHERE name = 'dashboard'); `) // load data into postgres cmd := exec.JobUsingHost(state.Hosts[0], artifacts["postgres"], nil) cmd.Args = []string{"psql"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGDATABASE": "postgres", "PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"], } cmd.Stdin = db meta := bootstrap.StepMeta{ID: "restore", Action: "restore-postgres"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err := cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running psql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start controller API data.Controller.Processes = map[string]int{"web": 1} _, err = bootstrap.Manifest{ step("controller", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), }.RunWithState(ch, state) // wait for controller to come up meta = bootstrap.StepMeta{ID: "wait-controller", Action: "wait-controller"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} controllerInstances, err := discoverd.GetInstances("controller", 30*time.Second) if err != nil { return fmt.Errorf("error getting controller instance: %s", err) } controllerKey := data.Controller.Release.Env["AUTH_KEY"] client, err := controller.NewClient("http://"+controllerInstances[0].Addr, controllerKey) if err != nil { return err } // start mariadb and load data if it was present in the backup. 
mysqldb, err := getFile("mysql.sql.gz") if err == nil && data.MariaDB != nil { _, err = bootstrap.Manifest{ step("mariadb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MariaDB, }), step("mariadb-wait", "sirenia-wait", &bootstrap.SireniaWaitAction{ Service: "mariadb", }), }.RunWithState(ch, state) if err != nil { return err } // ensure the formation is correct in the database if err := client.PutFormation(data.MariaDB.Formation()); err != nil { return fmt.Errorf("error updating mariadb formation: %s", err) } cmd = exec.JobUsingHost(state.Hosts[0], artifacts["mariadb"], nil) cmd.Args = []string{"mysql", "-u", "flynn", "-h", "leader.mariadb.discoverd"} cmd.Env = map[string]string{ "MYSQL_PWD": data.MariaDB.Release.Env["MYSQL_PWD"], } cmd.Stdin = mysqldb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mariadb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mysql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // start mongodb and load data if it was present in the backup. 
mongodb, err := getFile("mongodb.archive.gz") if err == nil && data.MongoDB != nil { _, err = bootstrap.Manifest{ step("mongodb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MongoDB, }), step("mongodb-wait", "sirenia-wait", &bootstrap.SireniaWaitAction{ Service: "mongodb", }), }.RunWithState(ch, state) if err != nil { return err } // ensure the formation is correct in the database if err := client.PutFormation(data.MongoDB.Formation()); err != nil { return fmt.Errorf("error updating mongodb formation: %s", err) } cmd = exec.JobUsingHost(state.Hosts[0], artifacts["mongodb"], nil) cmd.Args = []string{"mongorestore", "-h", "leader.mongodb.discoverd", "-u", "flynn", "-p", data.MongoDB.Release.Env["MONGO_PWD"], "--archive"} cmd.Stdin = mongodb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mongodb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mongodb restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // get blobstore config blobstoreRelease, err := client.GetAppRelease("blobstore") if err != nil { return fmt.Errorf("error getting blobstore release: %s", err) } blobstoreFormation, err := client.GetExpandedFormation("blobstore", blobstoreRelease.ID) if err != nil { return fmt.Errorf("error getting blobstore expanded formation: %s", err) } state.SetControllerKey(controllerKey) ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start the blobstore blobstoreFormation.Artifacts = []*ct.Artifact{artifacts["blobstore"]} _, err = bootstrap.Manifest{ step("blobstore", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: blobstoreFormation, }), 
step("blobstore-wait", "wait", &bootstrap.WaitAction{ URL: "http://blobstore.discoverd", Status: 200, }), }.RunWithState(ch, state) if err != nil { return err } // now that the controller and blobstore are up and controller // migrations have run (so we know artifacts have a manifest column), // migrate all artifacts to Flynn images jsonb := func(v interface{}) []byte { data, _ := json.Marshal(v) return data } sqlBuf.Reset() for _, step := range manifestSteps { artifact, ok := artifacts[step.ID] if !ok { continue } // update current artifact in database for service sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s', type = 'flynn', manifest = '%s', hashes = '%s', size = %d, layer_url_template = '%s', meta = '%s' WHERE artifact_id = ( SELECT artifact_id FROM release_artifacts WHERE release_id = ( SELECT release_id FROM apps WHERE name = '%s' ) );`, artifact.URI, jsonb(&artifact.RawManifest), jsonb(artifact.Hashes), artifact.Size, artifact.LayerURLTemplate, jsonb(artifact.Meta), step.ID)) } // create the slugbuilder artifact if gitreceive still references it by // URI (in which case there is no slugbuilder artifact in the database) slugBuilder := artifacts["slugbuilder-image"] sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT env->>'SLUGBUILDER_IMAGE_ID' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) IS NULL THEN INSERT INTO artifacts (artifact_id, type, uri, manifest, hashes, size, layer_url_template, meta) VALUES ('%s', 'flynn', '%s', '%s', '%s', %d, '%s', '%s'); END IF; END; $$;`, random.UUID(), slugBuilder.URI, jsonb(&slugBuilder.RawManifest), jsonb(slugBuilder.Hashes), slugBuilder.Size, slugBuilder.LayerURLTemplate, jsonb(slugBuilder.Meta))) // create the slugrunner artifact if it doesn't exist (which can be the // case if no apps were deployed with git push in older clusters where // it was created lazily) slugRunner := artifacts["slugrunner-image"] sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF 
(SELECT env->>'SLUGRUNNER_IMAGE_ID' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) IS NULL THEN IF NOT EXISTS (SELECT 1 FROM artifacts WHERE uri = (SELECT env->>'SLUGRUNNER_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'))) THEN INSERT INTO artifacts (artifact_id, type, uri, manifest, hashes, size, layer_url_template, meta) VALUES ('%s', 'flynn', '%s', '%s', '%s', %d, '%s', '%s'); END IF; END IF; END; $$;`, random.UUID(), slugRunner.URI, jsonb(&slugRunner.RawManifest), jsonb(slugRunner.Hashes), slugRunner.Size, slugRunner.LayerURLTemplate, jsonb(slugRunner.Meta))) // update slug artifacts currently being referenced by gitreceive // (which will also update all current user releases to use the // latest slugrunner) for _, name := range []string{"slugbuilder", "slugrunner"} { artifact := artifacts[name+"-image"] sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%[1]s', type = 'flynn', manifest = '%[2]s', hashes = '%[3]s', size = %[4]d, layer_url_template = '%[5]s', meta = '%[6]s' WHERE artifact_id = (SELECT (env->>'%[7]s_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) OR uri = (SELECT env->>'%[7]s_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'));`, artifact.URI, jsonb(&artifact.RawManifest), jsonb(artifact.Hashes), artifact.Size, artifact.LayerURLTemplate, jsonb(artifact.Meta), strings.ToUpper(name))) } // update the URI of redis artifacts currently being referenced by // the redis app (which will also update all current redis resources // to use the latest redis image) redisImage := artifacts["redis-image"] sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s', type = 'flynn', manifest = '%s', hashes = '%s', size = %d, layer_url_template = '%s', meta = '%s' WHERE artifact_id = (SELECT (env->>'REDIS_IMAGE_ID')::uuid FROM releases WHERE 
release_id = (SELECT release_id FROM apps WHERE name = 'redis')) OR uri = (SELECT env->>'REDIS_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'redis'));`, redisImage.URI, jsonb(&redisImage.RawManifest), jsonb(redisImage.Hashes), redisImage.Size, redisImage.LayerURLTemplate, jsonb(redisImage.Meta))) // ensure the image ID environment variables are set for legacy apps // which use image URI variables for _, name := range []string{"redis", "slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{%[1]s_IMAGE_ID}', ('"' || (SELECT artifact_id::text FROM artifacts WHERE uri = '%[2]s') || '"')::jsonb, true) WHERE env->>'%[1]s_IMAGE_URI' IS NOT NULL;`, strings.ToUpper(name), artifacts[name+"-image"].URI)) } // run the above artifact migration SQL against the controller database cmd = exec.JobUsingHost(state.Hosts[0], artifacts["postgres"], nil) cmd.Args = []string{"psql", "--echo-queries"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": data.Controller.Release.Env["PGUSER"], "PGDATABASE": data.Controller.Release.Env["PGDATABASE"], "PGPASSWORD": data.Controller.Release.Env["PGPASSWORD"], } cmd.Stdin = sqlBuf meta = bootstrap.StepMeta{ID: "migrate-artifacts", Action: "migrate-artifacts"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error migrating artifacts: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } // determine if there are any slugs or docker images which need to be // converted to Flynn images migrateSlugs := false migrateDocker := false artifactList, err := client.ArtifactList() if err != nil { return fmt.Errorf("error listing artifacts: %s", err) } for _, artifact := range artifactList { if 
artifact.Type == ct.DeprecatedArtifactTypeFile { migrateSlugs = true } if artifact.Type == ct.DeprecatedArtifactTypeDocker && artifact.Meta["docker-receive.repository"] != "" { migrateDocker = true } if migrateSlugs && migrateDocker { break } } runMigrator := func(cmd *exec.Cmd) error { out, err := cmd.StdoutPipe() if err != nil { return err } done := make(chan struct{}) go func() { defer close(done) s := bufio.NewScanner(out) for s.Scan() { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "info", StepData: s.Text(), Timestamp: time.Now().UTC(), } } }() err = cmd.Run() select { case <-done: case <-time.After(time.Second): } return err } if migrateSlugs { cmd = exec.JobUsingHost(state.Hosts[0], artifacts["slugbuilder-image"], nil) cmd.Args = []string{"/bin/slug-migrator"} cmd.Env = map[string]string{ "CONTROLLER_KEY": data.Controller.Release.Env["AUTH_KEY"], "FLYNN_POSTGRES": data.Controller.Release.Env["FLYNN_POSTGRES"], "PGHOST": "leader.postgres.discoverd", "PGUSER": data.Controller.Release.Env["PGUSER"], "PGDATABASE": data.Controller.Release.Env["PGDATABASE"], "PGPASSWORD": data.Controller.Release.Env["PGPASSWORD"], } if err := runMigrator(cmd); err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error migrating slugs: %s", err), Err: err, Timestamp: time.Now().UTC(), } return err } } if migrateDocker { // start docker-receive dockerRelease, err := client.GetAppRelease("docker-receive") if err != nil { return fmt.Errorf("error getting docker-receive release: %s", err) } dockerFormation, err := client.GetExpandedFormation("docker-receive", dockerRelease.ID) if err != nil { return fmt.Errorf("error getting docker-receive expanded formation: %s", err) } dockerFormation.Artifacts = []*ct.Artifact{artifacts["docker-receive"]} _, err = bootstrap.Manifest{ step("docker-receive", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: dockerFormation, }), step("docker-receive-wait", "wait", &bootstrap.WaitAction{ URL: 
"http://docker-receive.discoverd/v2/", Status: 401, }), }.RunWithState(ch, state) if err != nil { return err } // run the docker image migrator cmd = exec.JobUsingHost(state.Hosts[0], artifacts["docker-receive"], nil) cmd.Args = []string{"/bin/docker-migrator"} cmd.Env = map[string]string{ "CONTROLLER_KEY": data.Controller.Release.Env["AUTH_KEY"], "FLYNN_POSTGRES": data.Controller.Release.Env["FLYNN_POSTGRES"], "PGHOST": "leader.postgres.discoverd", "PGUSER": data.Controller.Release.Env["PGUSER"], "PGDATABASE": data.Controller.Release.Env["PGDATABASE"], "PGPASSWORD": data.Controller.Release.Env["PGPASSWORD"], } cmd.Volumes = []*ct.VolumeReq{{Path: "/tmp", DeleteOnStop: true}} if err := runMigrator(cmd); err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error migrating Docker images: %s", err), Err: err, Timestamp: time.Now().UTC(), } return err } } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start scheduler and enable cluster monitor data.Controller.Processes = map[string]int{"scheduler": 1} // only start one scheduler instance schedulerProcess := data.Controller.Release.Processes["scheduler"] schedulerProcess.Omni = false data.Controller.Release.Processes["scheduler"] = schedulerProcess _, err = bootstrap.Manifest{ step("controller-scheduler", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), step("status", "status-check", &bootstrap.StatusCheckAction{ URL: "http://status-web.discoverd", Timeout: 600, }), step("cluster-monitor", "cluster-monitor", &bootstrap.ClusterMonitorAction{ Enabled: true, }), }.RunWithState(ch, state) if err != nil { return err } // mariadb and mongodb steps require the controller key state.StepData["controller-key"] = &bootstrap.RandomData{controllerKey} // deploy mariadb if it wasn't restored from the backup if data.MariaDB == nil { steps := bootstrap.Manifest{ manifestStepMap["mariadb-password"], manifestStepMap["mariadb"], 
manifestStepMap["add-mysql-provider"], manifestStepMap["mariadb-wait"], } if _, err := steps.RunWithState(ch, state); err != nil { return fmt.Errorf("error deploying mariadb: %s", err) } } // deploy mongodb if it wasn't restored from the backup if data.MongoDB == nil { steps := bootstrap.Manifest{ manifestStepMap["mongodb-password"], manifestStepMap["mongodb"], manifestStepMap["add-mongodb-provider"], manifestStepMap["mongodb-wait"], } if _, err := steps.RunWithState(ch, state); err != nil { return fmt.Errorf("error deploying mongodb: %s", err) } } // deploy docker-receive if it wasn't in the backup if _, err := client.GetApp("docker-receive"); err == controller.ErrNotFound { routes, err := client.RouteList("controller") if len(routes) == 0 { err = errors.New("no routes found") } if err != nil { return fmt.Errorf("error listing controller routes: %s", err) } for _, r := range routes { if r.Domain == fmt.Sprintf("controller.%s", data.Controller.Release.Env["DEFAULT_ROUTE_DOMAIN"]) { state.StepData["controller-cert"] = &tlscert.Cert{ Cert: r.Certificate.Cert, PrivateKey: r.Certificate.Key, } break } } steps := bootstrap.Manifest{ manifestStepMap["docker-receive-secret"], manifestStepMap["docker-receive"], manifestStepMap["docker-receive-route"], manifestStepMap["docker-receive-wait"], } if _, err := steps.RunWithState(ch, state); err != nil { return fmt.Errorf("error deploying docker-receive: %s", err) } } return nil }
// testClusterBackup boots an isolated single-node Flynn cluster, restores it
// from the backup tarball at path, and then verifies the restored cluster
// end-to-end: app HTTP traffic through the router, provisioned resources
// (Redis / MySQL / MongoDB) and the dashboard/status key pairing.
// index is used to derive non-conflicting IPs, device aliases and bridge
// names so multiple backups can be tested on the same machine.
func (s *ZZBackupSuite) testClusterBackup(t *c.C, index int, path string) {
	debugf(t, "restoring cluster backup %s", filepath.Base(path))

	// boot the cluster using an RFC 5737 TEST-NET IP, avoiding conflicts
	// with those used by script/bootstrap-flynn so the test can be run in
	// development
	ip := fmt.Sprintf("192.0.2.%d", index+100)
	device := fmt.Sprintf("eth0:%d", index+10)
	t.Assert(run(t, exec.Command("sudo", "ifconfig", device, ip)), Succeeds)

	dir := t.MkDir()
	debugf(t, "using tempdir %s", dir)

	// run a dedicated flynn-host daemon for this cluster, logging to the
	// tempdir so the logs can be collected on failure
	debug(t, "starting flynn-host")
	cmd := exec.Command(
		"sudo",
		"../host/bin/flynn-host",
		"daemon",
		"--id", fmt.Sprintf("backup%d", index),
		"--external-ip", ip,
		"--listen-ip", ip,
		"--bridge-name", fmt.Sprintf("backupbr%d", index),
		"--state", filepath.Join(dir, "host-state.bolt"),
		"--volpath", filepath.Join(dir, "volumes"),
		"--log-dir", filepath.Join(dir, "logs"),
		"--flynn-init", "../host/bin/flynn-init",
	)
	out, err := os.Create(filepath.Join(dir, "flynn-host.log"))
	t.Assert(err, c.IsNil)
	defer out.Close()
	cmd.Stdout = out
	cmd.Stderr = out
	t.Assert(cmd.Start(), c.IsNil)
	// reap the daemon when it exits so it doesn't become a zombie
	go cmd.Process.Wait()
	defer func() {
		// collect-debug-info if the tests failed then kill flynn-host
		if t.Failed() {
			cmd := exec.Command(
				"sudo", "-E",
				"../host/bin/flynn-host",
				"collect-debug-info",
				"--log-dir", filepath.Join(dir, "logs"),
			)
			cmd.Env = []string{fmt.Sprintf("DISCOVERD=%s:1111", ip)}
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			cmd.Run()
		}
		exec.Command("sudo", "kill", strconv.Itoa(cmd.Process.Pid)).Run()
	}()

	// bootstrap the cluster from the backup, streaming bootstrap output
	// through a pipe into the test debug log
	debugf(t, "bootstrapping flynn from backup")
	cmd = exec.Command(
		"../host/bin/flynn-host",
		"bootstrap",
		"--peer-ips", ip,
		"--from-backup", path,
		"../bootstrap/bin/manifest.json",
	)
	cmd.Env = []string{
		"CLUSTER_DOMAIN=1.localflynn.com",
		fmt.Sprintf("DISCOVERD=%s:1111", ip),
		fmt.Sprintf("FLANNEL_NETWORK=100.%d.0.0/16", index+101),
	}
	logR, logW := io.Pipe()
	defer logW.Close()
	go func() {
		buf := bufio.NewReader(logR)
		for {
			line, err := buf.ReadString('\n')
			if err != nil {
				return
			}
			// strip the trailing newline before logging
			debug(t, line[0:len(line)-1])
		}
	}()
	cmd.Stdout = logW
	cmd.Stderr = logW
	t.Assert(cmd.Run(), c.IsNil)

	debug(t, "waiting for nodejs-web service")
	disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
	_, err = disc.Instances("nodejs-web", 30*time.Second)
	t.Assert(err, c.IsNil)

	// issue a request through the router using the app's route domain
	debug(t, "checking HTTP requests")
	req, err := http.NewRequest("GET", "http://"+ip, nil)
	t.Assert(err, c.IsNil)
	req.Host = "nodejs.1.localflynn.com"
	var res *http.Response
	// try multiple times in case we get a 503 from the router as it has
	// not seen the service yet
	err = attempt.Strategy{Total: 10 * time.Second, Delay: 100 * time.Millisecond}.Run(func() (err error) {
		res, err = http.DefaultClient.Do(req)
		if err != nil {
			return err
		} else if res.StatusCode == http.StatusServiceUnavailable {
			return errors.New("router returned 503")
		}
		return nil
	})
	t.Assert(err, c.IsNil)
	t.Assert(res.StatusCode, c.Equals, http.StatusOK)

	debug(t, "getting app release")
	controllerInstances, err := disc.Instances("controller", 30*time.Second)
	t.Assert(err, c.IsNil)
	controllerURL := "http://" + controllerInstances[0].Addr
	controllerKey := controllerInstances[0].Meta["AUTH_KEY"]
	client, err := controller.NewClient(controllerURL, controllerKey)
	t.Assert(err, c.IsNil)
	release, err := client.GetAppRelease("nodejs")
	t.Assert(err, c.IsNil)

	// write a .flynnrc pointing at the restored cluster so the CLI can be
	// driven against it; the helper closure runs the CLI with FLYNN_APP set
	debug(t, "configuring flynn CLI")
	flynnrc := filepath.Join(dir, ".flynnrc")
	conf := &config.Config{}
	t.Assert(conf.Add(&config.Cluster{
		Name:          "default",
		ControllerURL: controllerURL,
		Key:           controllerKey,
	}, true), c.IsNil)
	t.Assert(conf.SaveTo(flynnrc), c.IsNil)
	flynn := func(cmdArgs ...string) *CmdResult {
		cmd := exec.Command(args.CLI, cmdArgs...)
		cmd.Env = flynnEnv(flynnrc)
		cmd.Env = append(cmd.Env, "FLYNN_APP=nodejs")
		return run(t, cmd)
	}

	if _, ok := release.Env["FLYNN_REDIS"]; ok {
		debug(t, "checking redis resource")
		// try multiple times as the Redis resource is not guaranteed to be up yet
		var redisResult *CmdResult
		err = attempt.Strategy{Total: 10 * time.Second, Delay: 100 * time.Millisecond}.Run(func() error {
			redisResult = flynn("redis", "redis-cli", "--", "PING")
			return redisResult.Err
		})
		t.Assert(err, c.IsNil)
		t.Assert(redisResult, SuccessfulOutputContains, "PONG")
	}

	// if the backup contained a MySQL resource, verify the restored data;
	// otherwise check that a new resource can be provisioned
	debug(t, "checking mysql resource")
	if _, ok := release.Env["FLYNN_MYSQL"]; ok {
		t.Assert(flynn("mysql", "console", "--", "-e", "SELECT * FROM foos"), SuccessfulOutputContains, "foobar")
	} else {
		t.Assert(flynn("resource", "add", "mysql"), Succeeds)
	}

	// same check for MongoDB
	debug(t, "checking mongodb resource")
	if _, ok := release.Env["FLYNN_MONGO"]; ok {
		t.Assert(flynn("mongodb", "mongo", "--", "--eval", "db.foos.find()"), SuccessfulOutputContains, "foobar")
	} else {
		t.Assert(flynn("resource", "add", "mongodb"), Succeeds)
	}

	debug(t, "checking dashboard STATUS_KEY matches status AUTH_KEY")
	dashboardStatusKeyResult := flynn("-a", "dashboard", "env", "get", "STATUS_KEY")
	t.Assert(dashboardStatusKeyResult, Succeeds)
	statusAuthKeyResult := flynn("-a", "status", "env", "get", "AUTH_KEY")
	t.Assert(statusAuthKeyResult, Succeeds)
	t.Assert(dashboardStatusKeyResult.Output, c.Equals, statusAuthKeyResult.Output)
}
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, cfg bootstrap.Config) error { defer close(ch) f, err := os.Open(backupFile) if err != nil { return fmt.Errorf("error opening backup file: %s", err) } defer f.Close() tr := tar.NewReader(f) getFile := func(name string) (io.Reader, error) { rewound := false var res io.Reader for { header, err := tr.Next() if err == io.EOF && !rewound { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, fmt.Errorf("error seeking in backup file: %s", err) } rewound = true tr = tar.NewReader(f) continue } else if err != nil { return nil, fmt.Errorf("error finding %s in backup file: %s", name, err) } if path.Base(header.Name) != name { continue } if strings.HasSuffix(name, ".gz") { res, err = gzip.NewReader(tr) if err != nil { return nil, fmt.Errorf("error opening %s from backup file: %s", name, err) } } else { res = tr } break } return res, nil } var data struct { Discoverd, Flannel, Postgres, MariaDB, MongoDB, Controller *ct.ExpandedFormation } jsonData, err := getFile("flynn.json") if err != nil { return err } if jsonData == nil { return fmt.Errorf("did not file flynn.json in backup file") } if err := json.NewDecoder(jsonData).Decode(&data); err != nil { return fmt.Errorf("error decoding backup data: %s", err) } db, err := getFile("postgres.sql.gz") if err != nil { return err } if db == nil { return fmt.Errorf("did not find postgres.sql.gz in backup file") } // add buffer to the end of the SQL import containing commands that rewrite data in the controller db sqlBuf := &bytes.Buffer{} db = io.MultiReader(db, sqlBuf) sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"])) sqlBuf.WriteString(` CREATE FUNCTION pg_temp.json_object_update_key( "json" jsonb, "key_to_set" TEXT, "value_to_set" TEXT ) RETURNS jsonb LANGUAGE sql IMMUTABLE STRICT AS $function$ SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb FROM (SELECT * FROM 
json_each("json"::json) WHERE "key" <> "key_to_set" UNION ALL SELECT "key_to_set", to_json("value_to_set")) AS "fields" $function$; `) type manifestStep struct { ID string Artifact struct { URI string } Release struct { Env map[string]string Processes map[string]ct.ProcessType } } var manifestSteps []*manifestStep if err := json.Unmarshal(manifest, &manifestSteps); err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } artifactURIs := make(map[string]string) updateProcArgs := func(f *ct.ExpandedFormation, step *manifestStep) { for typ, proc := range step.Release.Processes { p := f.Release.Processes[typ] p.Args = proc.Args f.Release.Processes[typ] = p } } for _, step := range manifestSteps { switch step.ID { case "postgres": updateProcArgs(data.Postgres, step) case "controller": updateProcArgs(data.Controller, step) case "mariadb": if data.MariaDB != nil { updateProcArgs(data.MariaDB, step) } case "mongodb": if data.MongoDB != nil { updateProcArgs(data.MongoDB, step) } } if step.Artifact.URI != "" { artifactURIs[step.ID] = step.Artifact.URI // update current artifact in database for service, taking care to // check the database version as migration 15 changed the way // artifacts are related to releases in the database sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT MAX(id) FROM schema_migrations) < 15 THEN UPDATE artifacts SET uri = '%[1]s' WHERE artifact_id = ( SELECT artifact_id FROM releases WHERE release_id = ( SELECT release_id FROM apps WHERE name = '%[2]s' ) ); ELSE UPDATE artifacts SET uri = '%[1]s' WHERE type = 'docker' AND artifact_id = ( SELECT artifact_id FROM release_artifacts WHERE release_id = ( SELECT release_id FROM apps WHERE name = '%[2]s' ) ); END IF; END; $$;`, step.Artifact.URI, step.ID)) } } data.Discoverd.ImageArtifact.URI = artifactURIs["discoverd"] data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}" data.Postgres.ImageArtifact.URI = artifactURIs["postgres"] 
data.Flannel.ImageArtifact.URI = artifactURIs["flannel"] data.Controller.ImageArtifact.URI = artifactURIs["controller"] if data.MariaDB != nil { data.MariaDB.ImageArtifact.URI = artifactURIs["mariadb"] if data.MariaDB.Processes["mariadb"] == 0 { // skip mariadb if it wasn't scaled up in the backup data.MariaDB = nil } } if data.MongoDB != nil { data.MongoDB.ImageArtifact.URI = artifactURIs["mongodb"] if data.MongoDB.Processes["mongodb"] == 0 { // skip mongodb if it wasn't scaled up in the backup data.MongoDB = nil } } // create the slugbuilder artifact if gitreceive still references // SLUGBUILDER_IMAGE_URI (in which case there is no slugbuilder // artifact in the database) sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT env->>'SLUGBUILDER_IMAGE_ID' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) IS NULL THEN INSERT INTO artifacts (artifact_id, type, uri) VALUES ('%s', 'docker', '%s'); END IF; END; $$;`, random.UUID(), artifactURIs["slugbuilder-image"])) // update the URI of slug artifacts currently being referenced by // gitreceive (which will also update all current user releases // to use the latest slugrunner) for _, name := range []string{"slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%[1]s' WHERE artifact_id = (SELECT (env->>'%[2]s_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) OR uri = (SELECT env->>'%[2]s_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'));`, artifactURIs[name+"-image"], strings.ToUpper(name))) } // update the URI of redis artifacts currently being referenced by // the redis app (which will also update all current redis // resources to use the latest redis image) sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s' WHERE artifact_id = (SELECT (env->>'REDIS_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM 
apps WHERE name = 'redis')) OR uri = (SELECT env->>'REDIS_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'redis'));`, artifactURIs["redis-image"])) // ensure the image ID environment variables are set for legacy apps // which use image URI variables for _, name := range []string{"redis", "slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, '%[1]s_IMAGE_ID', (SELECT artifact_id::text FROM artifacts WHERE uri = '%[2]s')) WHERE env->>'%[1]s_IMAGE_URI' IS NOT NULL;`, strings.ToUpper(name), artifactURIs[name+"-image"])) } step := func(id, name string, action bootstrap.Action) bootstrap.Step { if ra, ok := action.(*bootstrap.RunAppAction); ok { ra.ID = id } return bootstrap.Step{ StepMeta: bootstrap.StepMeta{ID: id, Action: name}, Action: action, } } // start discoverd/flannel/postgres/mariadb cfg.Singleton = data.Postgres.Release.Env["SINGLETON"] == "true" systemSteps := bootstrap.Manifest{ step("discoverd", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Discoverd, }), step("flannel", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Flannel, }), step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}), step("postgres", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Postgres, }), step("postgres-wait", "wait", &bootstrap.WaitAction{ URL: "http://postgres-api.discoverd/ping", }), } // Only run up MariaDB if it's in the backup if data.MariaDB != nil { systemSteps = append(systemSteps, step("mariadb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MariaDB, })) systemSteps = append(systemSteps, step("mariadb-wait", "wait", &bootstrap.WaitAction{ URL: "http://mariadb-api.discoverd/ping", })) } // Only run up MongoDB if it's in the backup if data.MongoDB != nil { systemSteps = append(systemSteps, step("mongodb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MongoDB, })) systemSteps = 
append(systemSteps, step("mongodb-wait", "wait", &bootstrap.WaitAction{ URL: "http://mongodb-api.discoverd/ping", })) } state, err := systemSteps.Run(ch, cfg) if err != nil { return err } // set DISCOVERD_PEERS in release sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd') `, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"])) // load data into postgres cmd := exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.Postgres.ImageArtifact.Type, URI: data.Postgres.ImageArtifact.URI}, nil) cmd.Args = []string{"psql"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGDATABASE": "postgres", "PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"], } cmd.Stdin = db meta := bootstrap.StepMeta{ID: "restore", Action: "restore-postgres"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err := cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running psql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} var mysqldb io.Reader if data.MariaDB != nil { mysqldb, err = getFile("mysql.sql.gz") if err != nil { return err } } // load data into mariadb if it was present in the backup. 
if mysqldb != nil && data.MariaDB != nil { cmd = exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.MariaDB.ImageArtifact.Type, URI: data.MariaDB.ImageArtifact.URI}, nil) cmd.Args = []string{"mysql", "-u", "flynn", "-h", "leader.mariadb.discoverd"} cmd.Env = map[string]string{ "MYSQL_PWD": data.MariaDB.Release.Env["MYSQL_PWD"], } cmd.Stdin = mysqldb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mariadb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mysql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } var mongodb io.Reader if data.MongoDB != nil { mongodb, err = getFile("mongodb.archive.gz") if err != nil { return err } } // load data into mongodb if it was present in the backup. 
if mongodb != nil && data.MongoDB != nil { cmd = exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.MongoDB.ImageArtifact.Type, URI: data.MongoDB.ImageArtifact.URI}, nil) cmd.Args = []string{"mongorestore", "-h", "leader.mongodb.discoverd", "-u", "flynn", "-p", data.MongoDB.Release.Env["MONGO_PWD"], "--archive"} cmd.Stdin = mongodb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mongodb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mongodb restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // start controller API data.Controller.Processes = map[string]int{"web": 1} _, err = bootstrap.Manifest{ step("controller", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), }.RunWithState(ch, state) // wait for controller to come up meta = bootstrap.StepMeta{ID: "wait-controller", Action: "wait-controller"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} controllerInstances, err := discoverd.GetInstances("controller", 30*time.Second) if err != nil { return fmt.Errorf("error getting controller instance: %s", err) } // get blobstore config client, err := controller.NewClient("http://"+controllerInstances[0].Addr, data.Controller.Release.Env["AUTH_KEY"]) if err != nil { return err } blobstoreRelease, err := client.GetAppRelease("blobstore") if err != nil { return fmt.Errorf("error getting blobstore release: %s", err) } blobstoreFormation, err := client.GetExpandedFormation("blobstore", blobstoreRelease.ID) if err != nil { return fmt.Errorf("error getting blobstore expanded formation: %s", err) } 
state.SetControllerKey(data.Controller.Release.Env["AUTH_KEY"]) ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start blobstore, scheduler, and enable cluster monitor data.Controller.Processes = map[string]int{"scheduler": 1} // only start one scheduler instance schedulerProcess := data.Controller.Release.Processes["scheduler"] schedulerProcess.Omni = false data.Controller.Release.Processes["scheduler"] = schedulerProcess _, err = bootstrap.Manifest{ step("blobstore", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: blobstoreFormation, }), step("blobstore-wait", "wait", &bootstrap.WaitAction{ URL: "http://blobstore.discoverd", Status: 200, }), step("controller-scheduler", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), step("status", "status-check", &bootstrap.StatusCheckAction{ URL: "http://status-web.discoverd", Timeout: 600, }), step("cluster-monitor", "cluster-monitor", &bootstrap.ClusterMonitorAction{ Enabled: true, }), }.RunWithState(ch, state) if err != nil { return err } return nil }
// TestReleaseImages exercises the full release pipeline: it builds and
// releases components to a blobstore on one host, installs Flynn from that
// blobstore on a vanilla host, updates a running cluster from it, and then
// verifies the installed version, downloaded images, deployed system apps
// and a slug-based user app.
func (s *ReleaseSuite) TestReleaseImages(t *c.C) {
	if testCluster == nil {
		t.Skip("cannot boot release cluster")
	}

	// stream script output to t.Log
	logReader, logWriter := io.Pipe()
	defer logWriter.Close()
	go func() {
		buf := bufio.NewReader(logReader)
		for {
			line, err := buf.ReadString('\n')
			if err != nil {
				return
			}
			debug(t, line[0:len(line)-1])
		}
	}()

	// boot the release cluster, release components to a blobstore and output the new version.json
	releaseCluster := s.addReleaseHosts(t)
	buildHost := releaseCluster.Instances[0]
	var versionJSON bytes.Buffer
	t.Assert(buildHost.Run("bash -ex", &tc.Streams{Stdin: releaseScript, Stdout: &versionJSON, Stderr: logWriter}), c.IsNil)
	var versions map[string]string
	t.Assert(json.Unmarshal(versionJSON.Bytes(), &versions), c.IsNil)

	// install Flynn from the blobstore on the vanilla host
	blobstore := struct{ Blobstore string }{buildHost.IP + ":8080"}
	installHost := releaseCluster.Instances[3]
	var script bytes.Buffer
	installScript.Execute(&script, blobstore)
	var installOutput bytes.Buffer
	out := io.MultiWriter(logWriter, &installOutput)
	t.Assert(installHost.Run("sudo bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out}), c.IsNil)

	// check the flynn-host version is correct
	var hostVersion bytes.Buffer
	t.Assert(installHost.Run("flynn-host version", &tc.Streams{Stdout: &hostVersion}), c.IsNil)
	t.Assert(strings.TrimSpace(hostVersion.String()), c.Equals, "v20150131.0-test")

	// check rebuilt images were downloaded
	for name, id := range versions {
		expected := fmt.Sprintf("%s image %s downloaded", name, id)
		if !strings.Contains(installOutput.String(), expected) {
			t.Fatalf(`expected install to download %s %s`, name, id)
		}
	}

	// installing on an instance with Flynn running should not fail
	script.Reset()
	installScript.Execute(&script, blobstore)
	t.Assert(buildHost.Run("sudo bash -ex", &tc.Streams{Stdin: &script, Stdout: logWriter, Stderr: logWriter}), c.IsNil)

	// create a controller client for the release cluster
	pin, err := base64.StdEncoding.DecodeString(releaseCluster.ControllerPin)
	t.Assert(err, c.IsNil)
	client, err := controller.NewClientWithConfig(
		"https://"+buildHost.IP,
		releaseCluster.ControllerKey,
		controller.Config{Pin: pin, Domain: releaseCluster.ControllerDomain},
	)
	t.Assert(err, c.IsNil)

	// deploy a slug based app
	slugApp := &ct.App{}
	t.Assert(client.CreateApp(slugApp), c.IsNil)
	gitreceive, err := client.GetAppRelease("gitreceive")
	t.Assert(err, c.IsNil)
	imageArtifact := &ct.Artifact{Type: host.ArtifactTypeDocker, URI: gitreceive.Env["SLUGRUNNER_IMAGE_URI"]}
	t.Assert(client.CreateArtifact(imageArtifact), c.IsNil)
	slugArtifact := &ct.Artifact{Type: host.ArtifactTypeFile, URI: fmt.Sprintf("http://%s:8080/slug.tgz", buildHost.IP)}
	t.Assert(client.CreateArtifact(slugArtifact), c.IsNil)
	release := &ct.Release{
		ArtifactIDs: []string{imageArtifact.ID, slugArtifact.ID},
		Processes:   map[string]ct.ProcessType{"web": {Cmd: []string{"bin/http"}}},
	}
	t.Assert(client.CreateRelease(release), c.IsNil)
	t.Assert(client.SetAppRelease(slugApp.ID, release.ID), c.IsNil)
	watcher, err := client.WatchJobEvents(slugApp.ID, release.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	t.Assert(client.PutFormation(&ct.Formation{
		AppID:     slugApp.ID,
		ReleaseID: release.ID,
		Processes: map[string]int{"web": 1},
	}), c.IsNil)
	err = watcher.WaitFor(ct.JobEvents{"web": {ct.JobStateUp: 1}}, scaleTimeout, nil)
	t.Assert(err, c.IsNil)

	// run a cluster update from the blobstore
	updateHost := releaseCluster.Instances[1]
	script.Reset()
	updateScript.Execute(&script, blobstore)
	var updateOutput bytes.Buffer
	out = io.MultiWriter(logWriter, &updateOutput)
	t.Assert(updateHost.Run("bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out}), c.IsNil)

	// check rebuilt images were downloaded
	// NOTE: the loop variable shadows the imported host package within this block
	for name := range versions {
		for _, host := range releaseCluster.Instances[0:2] {
			expected := fmt.Sprintf(`"pulled image" host=%s name=%s`, host.ID, name)
			if !strings.Contains(updateOutput.String(), expected) {
				t.Fatalf(`expected update to download %s on host %s`, name, host.ID)
			}
		}
	}

	// assertImage checks the artifact URI's "id" query param matches the
	// version released for the given image name
	assertImage := func(uri, image string) {
		u, err := url.Parse(uri)
		t.Assert(err, c.IsNil)
		t.Assert(u.Query().Get("id"), c.Equals, versions[image])
	}

	// check system apps were deployed correctly
	for _, app := range updater.SystemApps {
		if app.ImageOnly {
			continue // we don't deploy ImageOnly updates
		}
		if app.Image == "" {
			app.Image = "flynn/" + app.Name
		}
		debugf(t, "checking new %s release is using image %s", app.Name, versions[app.Image])
		expected := fmt.Sprintf(`"finished deploy of system app" name=%s`, app.Name)
		if !strings.Contains(updateOutput.String(), expected) {
			t.Fatalf(`expected update to deploy %s`, app.Name)
		}
		release, err := client.GetAppRelease(app.Name)
		t.Assert(err, c.IsNil)
		debugf(t, "new %s release ID: %s", app.Name, release.ID)
		artifact, err := client.GetArtifact(release.ImageArtifactID())
		t.Assert(err, c.IsNil)
		debugf(t, "new %s artifact: %+v", app.Name, artifact)
		assertImage(artifact.URI, app.Image)
	}

	// check gitreceive has the correct slug env vars
	gitreceive, err = client.GetAppRelease("gitreceive")
	t.Assert(err, c.IsNil)
	assertImage(gitreceive.Env["SLUGBUILDER_IMAGE_URI"], "flynn/slugbuilder")
	assertImage(gitreceive.Env["SLUGRUNNER_IMAGE_URI"], "flynn/slugrunner")

	// check slug based app was deployed correctly
	release, err = client.GetAppRelease(slugApp.Name)
	t.Assert(err, c.IsNil)
	imageArtifact, err = client.GetArtifact(release.ImageArtifactID())
	t.Assert(err, c.IsNil)
	assertImage(imageArtifact.URI, "flynn/slugrunner")
}
func main() { client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY")) if err != nil { log.Fatalln("Unable to connect to controller:", err) } usage := ` Usage: flynn-receiver <app> <rev> [-e <var>=<val>]... [-m <key>=<val>]... Options: -e,--env <var>=<val> -m,--meta <key>=<val> `[1:] args, _ := docopt.Parse(usage, nil, true, version.String(), false) appName := args.String["<app>"] env, err := parsePairs(args, "--env") if err != nil { log.Fatal(err) } meta, err := parsePairs(args, "--meta") if err != nil { log.Fatal(err) } app, err := client.GetApp(appName) if err == controller.ErrNotFound { log.Fatalf("Unknown app %q", appName) } else if err != nil { log.Fatalln("Error retrieving app:", err) } prevRelease, err := client.GetAppRelease(app.Name) if err == controller.ErrNotFound { prevRelease = &ct.Release{} } else if err != nil { log.Fatalln("Error getting current app release:", err) } fmt.Printf("-----> Building %s...\n", app.Name) jobEnv := make(map[string]string) jobEnv["BUILD_CACHE_URL"] = fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID) if buildpackURL, ok := env["BUILDPACK_URL"]; ok { jobEnv["BUILDPACK_URL"] = buildpackURL } else if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok { jobEnv["BUILDPACK_URL"] = buildpackURL } for _, k := range []string{"SSH_CLIENT_KEY", "SSH_CLIENT_HOSTS"} { if v := os.Getenv(k); v != "" { jobEnv[k] = v } } slugURL := fmt.Sprintf("%s/%s.tgz", blobstoreURL, random.UUID()) cmd := exec.Job(exec.DockerImage(os.Getenv("SLUGBUILDER_IMAGE_URI")), &host.Job{ Config: host.ContainerConfig{ Cmd: []string{slugURL}, Env: jobEnv, Stdin: true, DisableLog: true, }, Partition: "background", Metadata: map[string]string{ "flynn-controller.app": app.ID, "flynn-controller.app_name": app.Name, "flynn-controller.release": prevRelease.ID, "flynn-controller.type": "slugbuilder", }, }) var output bytes.Buffer cmd.Stdout = io.MultiWriter(os.Stdout, &output) cmd.Stderr = os.Stderr if len(prevRelease.Env) > 0 { stdin, err := 
cmd.StdinPipe() if err != nil { log.Fatalln(err) } go appendEnvDir(os.Stdin, stdin, prevRelease.Env) } else { cmd.Stdin = os.Stdin } if err := cmd.Run(); err != nil { log.Fatalln("Build failed:", err) } var types []string if match := typesPattern.FindSubmatch(output.Bytes()); match != nil { types = strings.Split(string(match[1]), ", ") } fmt.Printf("-----> Creating release...\n") artifact := &ct.Artifact{Type: "docker", URI: os.Getenv("SLUGRUNNER_IMAGE_URI")} if err := client.CreateArtifact(artifact); err != nil { log.Fatalln("Error creating artifact:", err) } release := &ct.Release{ ArtifactID: artifact.ID, Env: prevRelease.Env, Meta: prevRelease.Meta, } if release.Meta == nil { release.Meta = make(map[string]string, len(meta)) } if release.Env == nil { release.Env = make(map[string]string, len(env)) } for k, v := range env { release.Env[k] = v } for k, v := range meta { release.Meta[k] = v } procs := make(map[string]ct.ProcessType) for _, t := range types { proc := prevRelease.Processes[t] proc.Cmd = []string{"start", t} if t == "web" || strings.HasSuffix(t, "-web") { proc.Service = app.Name + "-" + t proc.Ports = []ct.Port{{ Port: 8080, Proto: "tcp", Service: &host.Service{ Name: proc.Service, Create: true, Check: &host.HealthCheck{Type: "tcp"}, }, }} } procs[t] = proc } release.Processes = procs if release.Env == nil { release.Env = make(map[string]string) } release.Env["SLUG_URL"] = slugURL if err := client.CreateRelease(release); err != nil { log.Fatalln("Error creating release:", err) } if err := client.DeployAppRelease(app.Name, release.ID); err != nil { log.Fatalln("Error deploying app release:", err) } fmt.Println("=====> Application deployed") if needsDefaultScale(app.ID, prevRelease.ID, procs, client) { formation := &ct.Formation{ AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}, } watcher, err := client.WatchJobEvents(app.ID, release.ID) if err != nil { log.Fatalln("Error streaming job events", err) return } defer 
watcher.Close() if err := client.PutFormation(formation); err != nil { log.Fatalln("Error putting formation:", err) } fmt.Println("=====> Waiting for web job to start...") err = watcher.WaitFor(ct.JobEvents{"web": ct.JobUpEvents(1)}, scaleTimeout, func(e *ct.Job) error { switch e.State { case ct.JobStateUp: fmt.Println("=====> Default web formation scaled to 1") case ct.JobStateDown: return fmt.Errorf("Failed to scale web process type") } return nil }) if err != nil { log.Fatalln(err.Error()) } } }
func main() { client, err := controller.NewClient("", os.Getenv("CONTROLLER_AUTH_KEY")) if err != nil { log.Fatalln("Unable to connect to controller:", err) } // TODO: use discoverd http dialer here? services, err := discoverd.Services("blobstore", discoverd.DefaultTimeout) if err != nil || len(services) < 1 { log.Fatalf("Unable to discover blobstore %q", err) } blobstoreHost := services[0].Addr appName := os.Args[1] app, err := client.GetApp(appName) if err == controller.ErrNotFound { log.Fatalf("Unknown app %q", appName) } else if err != nil { log.Fatalln("Error retrieving app:", err) } prevRelease, err := client.GetAppRelease(app.Name) if err == controller.ErrNotFound { prevRelease = &ct.Release{} } else if err != nil { log.Fatalln("Error creating getting current app release:", err) } fmt.Printf("-----> Building %s...\n", app.Name) var output bytes.Buffer slugURL := fmt.Sprintf("http://%s/%s.tgz", blobstoreHost, random.UUID()) cmd := exec.Command(exec.DockerImage("flynn/slugbuilder", os.Getenv("SLUGBUILDER_IMAGE_ID")), slugURL) cmd.Stdout = io.MultiWriter(os.Stdout, &output) cmd.Stderr = os.Stderr stdin, err := cmd.StdinPipe() if err != nil { log.Fatalln(err) } go appendEnvDir(os.Stdin, stdin, prevRelease.Env) if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok { cmd.Env = map[string]string{"BUILDPACK_URL": buildpackURL} } if err := cmd.Run(); err != nil { log.Fatalln("Build failed:", err) } var types []string if match := typesPattern.FindSubmatch(output.Bytes()); match != nil { types = strings.Split(string(match[1]), ", ") } fmt.Printf("-----> Creating release...\n") artifact := &ct.Artifact{Type: "docker", URI: "https://registry.hub.docker.com/flynn/slugrunner?id=" + os.Getenv("SLUGRUNNER_IMAGE_ID")} if err := client.CreateArtifact(artifact); err != nil { log.Fatalln("Error creating artifact:", err) } release := &ct.Release{ ArtifactID: artifact.ID, Env: prevRelease.Env, } procs := make(map[string]ct.ProcessType) for _, t := range types { proc := 
prevRelease.Processes[t] proc.Cmd = []string{"start", t} if t == "web" { proc.Ports = []ct.Port{{Proto: "tcp"}} if proc.Env == nil { proc.Env = make(map[string]string) } proc.Env["SD_NAME"] = app.Name + "-web" } procs[t] = proc } release.Processes = procs if release.Env == nil { release.Env = make(map[string]string) } release.Env["SLUG_URL"] = slugURL if err := client.CreateRelease(release); err != nil { log.Fatalln("Error creating release:", err) } if err := client.SetAppRelease(app.Name, release.ID); err != nil { log.Fatalln("Error setting app release:", err) } fmt.Println("=====> Application deployed") // If the app is new and the web process type exists, // it should scale to one process after the release is created. if _, ok := procs["web"]; ok && prevRelease.ID == "" { formation := &ct.Formation{ AppID: app.ID, ReleaseID: release.ID, Processes: map[string]int{"web": 1}, } if err := client.PutFormation(formation); err != nil { log.Fatalln("Error putting formation:", err) } fmt.Println("=====> Added default web=1 formation") } }
// ping is the health-check handler for the database appliance identified by
// FLYNN_APP_ID. If the sirenia status endpoint reports the database as
// read-write it goes straight to a connectivity check; otherwise it consults
// the controller formation and reports healthy without connecting when the
// mariadb process is scaled to zero (i.e. the database is intentionally not
// running). Errors are written via httphelper.Error; success is a bare 200.
func (a *API) ping(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	app := os.Getenv("FLYNN_APP_ID")
	logger := a.logger().New("fn", "ping")

	logger.Info("checking status", "host", serviceHost)
	if status, err := sirenia.NewClient(serviceHost + ":3306").Status(); err == nil && status.Database != nil && status.Database.ReadWrite {
		logger.Info("database is up, skipping scale check")
	} else {
		// Database is not confirmed read-write: check whether it is meant
		// to be running at all by inspecting the controller formation.
		// Connect to controller.
		logger.Info("connecting to controller")
		client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY"))
		if err != nil {
			logger.Error("controller client error", "err", err)
			httphelper.Error(w, err)
			return
		}

		// Retrieve the app's current release.
		logger.Info("retrieving app release", "app", app)
		release, err := client.GetAppRelease(app)
		if err == controller.ErrNotFound {
			logger.Error("release not found", "app", app)
			httphelper.Error(w, err)
			return
		} else if err != nil {
			logger.Error("get release error", "app", app, "err", err)
			httphelper.Error(w, err)
			return
		}

		// Retrieve current formation.
		logger.Info("retrieving formation", "app", app, "release_id", release.ID)
		formation, err := client.GetFormation(app, release.ID)
		if err == controller.ErrNotFound {
			logger.Error("formation not found", "app", app, "release_id", release.ID)
			httphelper.Error(w, err)
			return
		} else if err != nil {
			logger.Error("formation error", "app", app, "release_id", release.ID, "err", err)
			httphelper.Error(w, err)
			return
		}

		// MariaDB isn't running, just return healthy
		if formation.Processes["mariadb"] == 0 {
			w.WriteHeader(200)
			return
		}
	}

	// Verify end-to-end connectivity with a trivial query.
	db, err := a.connect()
	if err != nil {
		httphelper.Error(w, err)
		return
	}
	defer db.Close()
	if _, err := db.Exec("SELECT 1"); err != nil {
		httphelper.Error(w, err)
		return
	}
	w.WriteHeader(200)
}
// TestReleaseImages exercises the full release pipeline end-to-end:
// it builds and releases images to a blobstore on one host, installs Flynn
// from that blobstore on a vanilla host, verifies the installed version and
// pulled layers, deploys a slug app + Redis resource, then runs a cluster
// update and checks that every system app, the slug app and the Redis app
// ended up on the newly released images.
func (s *ReleaseSuite) TestReleaseImages(t *c.C) {
	if testCluster == nil {
		t.Skip("cannot boot release cluster")
	}

	// stream script output to t.Log
	logReader, logWriter := io.Pipe()
	defer logWriter.Close()
	go func() {
		buf := bufio.NewReader(logReader)
		for {
			line, err := buf.ReadString('\n')
			if err != nil {
				return
			}
			// strip the trailing newline before logging
			debug(t, line[0:len(line)-1])
		}
	}()

	// boot the release cluster, release components to a blobstore and output the new images.json
	releaseCluster := s.addReleaseHosts(t)
	buildHost := releaseCluster.Instances[0]
	var imagesJSON bytes.Buffer
	var script bytes.Buffer
	slugImageID := random.UUID()
	releaseScript.Execute(&script, struct{ ControllerKey, SlugImageID string }{releaseCluster.ControllerKey, slugImageID})
	t.Assert(buildHost.Run("bash -ex", &tc.Streams{Stdin: &script, Stdout: &imagesJSON, Stderr: logWriter}), c.IsNil)
	var images map[string]*ct.Artifact
	t.Assert(json.Unmarshal(imagesJSON.Bytes(), &images), c.IsNil)

	// install Flynn from the blobstore on the vanilla host
	blobstoreAddr := buildHost.IP + ":8080"
	installHost := releaseCluster.Instances[3]
	script.Reset()
	installScript.Execute(&script, map[string]string{"Blobstore": blobstoreAddr})
	var installOutput bytes.Buffer
	out := io.MultiWriter(logWriter, &installOutput)
	t.Assert(installHost.Run("sudo bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out}), c.IsNil)

	// check the flynn-host version is correct
	var hostVersion bytes.Buffer
	t.Assert(installHost.Run("flynn-host version", &tc.Streams{Stdout: &hostVersion}), c.IsNil)
	t.Assert(strings.TrimSpace(hostVersion.String()), c.Equals, "v20161108.0-test")

	// check rebuilt images were downloaded
	assertInstallOutput := func(format string, v ...interface{}) {
		expected := fmt.Sprintf(format, v...)
		if !strings.Contains(installOutput.String(), expected) {
			t.Fatalf(`expected install to output %q`, expected)
		}
	}
	for name, image := range images {
		assertInstallOutput("pulling %s image", name)
		for _, layer := range image.Manifest().Rootfs[0].Layers {
			assertInstallOutput("pulling %s layer %s", name, layer.ID)
		}
	}

	// installing on an instance with Flynn running should fail
	script.Reset()
	installScript.Execute(&script, map[string]string{"Blobstore": blobstoreAddr})
	installOutput.Reset()
	err := buildHost.Run("sudo bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out})
	if err == nil || !strings.Contains(installOutput.String(), "ERROR: Flynn is already installed.") {
		t.Fatal("expected Flynn install to fail but it didn't")
	}

	// create a controller client for the release cluster
	pin, err := base64.StdEncoding.DecodeString(releaseCluster.ControllerPin)
	t.Assert(err, c.IsNil)
	client, err := controller.NewClientWithConfig(
		"https://"+buildHost.IP,
		releaseCluster.ControllerKey,
		controller.Config{Pin: pin, Domain: releaseCluster.ControllerDomain},
	)
	t.Assert(err, c.IsNil)

	// deploy a slug based app + Redis resource
	slugApp := &ct.App{}
	t.Assert(client.CreateApp(slugApp), c.IsNil)
	gitreceive, err := client.GetAppRelease("gitreceive")
	t.Assert(err, c.IsNil)
	imageArtifact, err := client.GetArtifact(gitreceive.Env["SLUGRUNNER_IMAGE_ID"])
	t.Assert(err, c.IsNil)
	slugArtifact, err := client.GetArtifact(slugImageID)
	t.Assert(err, c.IsNil)
	resource, err := client.ProvisionResource(&ct.ResourceReq{ProviderID: "redis", Apps: []string{slugApp.ID}})
	t.Assert(err, c.IsNil)
	release := &ct.Release{
		ArtifactIDs: []string{imageArtifact.ID, slugArtifact.ID},
		Processes:   map[string]ct.ProcessType{"web": {Args: []string{"/runner/init", "bin/http"}}},
		Meta:        map[string]string{"git": "true"},
		Env:         resource.Env,
	}
	t.Assert(client.CreateRelease(release), c.IsNil)
	t.Assert(client.SetAppRelease(slugApp.ID, release.ID), c.IsNil)
	watcher, err := client.WatchJobEvents(slugApp.ID, release.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	t.Assert(client.PutFormation(&ct.Formation{
		AppID:     slugApp.ID,
		ReleaseID: release.ID,
		Processes: map[string]int{"web": 1},
	}), c.IsNil)
	err = watcher.WaitFor(ct.JobEvents{"web": {ct.JobStateUp: 1}}, scaleTimeout, nil)
	t.Assert(err, c.IsNil)

	// run a cluster update from the blobstore
	updateHost := releaseCluster.Instances[1]
	script.Reset()
	updateScript.Execute(&script, map[string]string{"Blobstore": blobstoreAddr, "Discoverd": updateHost.IP + ":1111"})
	var updateOutput bytes.Buffer
	out = io.MultiWriter(logWriter, &updateOutput)
	t.Assert(updateHost.Run("bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out}), c.IsNil)

	// check rebuilt images were downloaded
	for name := range images {
		for _, host := range releaseCluster.Instances[0:2] {
			expected := fmt.Sprintf(`"pulling %s image" host=%s`, name, host.ID)
			if !strings.Contains(updateOutput.String(), expected) {
				t.Fatalf(`expected update to download %s on host %s`, name, host.ID)
			}
		}
	}

	// assertImage checks that an artifact URI matches the freshly released image.
	assertImage := func(uri, image string) {
		t.Assert(uri, c.Equals, images[image].URI)
	}

	// check system apps were deployed correctly
	for _, app := range updater.SystemApps {
		if app.ImageOnly {
			continue // we don't deploy ImageOnly updates
		}
		debugf(t, "checking new %s release is using image %s", app.Name, images[app.Name].URI)
		expected := fmt.Sprintf(`"finished deploy of system app" name=%s`, app.Name)
		if !strings.Contains(updateOutput.String(), expected) {
			t.Fatalf(`expected update to deploy %s`, app.Name)
		}
		release, err := client.GetAppRelease(app.Name)
		t.Assert(err, c.IsNil)
		debugf(t, "new %s release ID: %s", app.Name, release.ID)
		artifact, err := client.GetArtifact(release.ArtifactIDs[0])
		t.Assert(err, c.IsNil)
		debugf(t, "new %s artifact: %+v", app.Name, artifact)
		assertImage(artifact.URI, app.Name)
	}

	// check gitreceive has the correct slug env vars
	gitreceive, err = client.GetAppRelease("gitreceive")
	t.Assert(err, c.IsNil)
	for _, name := range []string{"slugbuilder", "slugrunner"} {
		artifact, err := client.GetArtifact(gitreceive.Env[strings.ToUpper(name)+"_IMAGE_ID"])
		t.Assert(err, c.IsNil)
		assertImage(artifact.URI, name)
	}

	// check slug based app was deployed correctly
	release, err = client.GetAppRelease(slugApp.Name)
	t.Assert(err, c.IsNil)
	imageArtifact, err = client.GetArtifact(release.ArtifactIDs[0])
	t.Assert(err, c.IsNil)
	assertImage(imageArtifact.URI, "slugrunner")

	// check Redis app was deployed correctly
	release, err = client.GetAppRelease(resource.Env["FLYNN_REDIS"])
	t.Assert(err, c.IsNil)
	imageArtifact, err = client.GetArtifact(release.ArtifactIDs[0])
	t.Assert(err, c.IsNil)
	assertImage(imageArtifact.URI, "redis")
}
func (a *API) scaleUp() error { a.mtx.Lock() defer a.mtx.Unlock() // Ignore if already scaled up. if a.scaledUp { return nil } app := os.Getenv("FLYNN_APP_ID") logger := a.logger().New("fn", "scaleUp") sc := sirenia.NewClient(serviceHost + ":3306") logger.Info("checking status", "host", serviceHost) if status, err := sc.Status(); err == nil && status.Database != nil && status.Database.ReadWrite { logger.Info("database is up, skipping scale") // Skip the rest, the database is already available a.scaledUp = true return nil } else if err != nil { logger.Info("error checking status", "err", err) } else { logger.Info("got status, but database is not read-write") } // Connect to controller. logger.Info("connecting to controller") client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY")) if err != nil { logger.Error("controller client error", "err", err) return err } // Retrieve mariadb release. logger.Info("retrieving app release", "app", app) release, err := client.GetAppRelease(app) if err == controller.ErrNotFound { logger.Error("release not found", "app", app) return errors.New("mariadb release not found") } else if err != nil { logger.Error("get release error", "app", app, "err", err) return err } // Retrieve current formation. logger.Info("retrieving formation", "app", app, "release_id", release.ID) formation, err := client.GetFormation(app, release.ID) if err == controller.ErrNotFound { logger.Error("formation not found", "app", app, "release_id", release.ID) return errors.New("mariadb formation not found") } else if err != nil { logger.Error("formation error", "app", app, "release_id", release.ID, "err", err) return err } // If mariadb is running then exit. if formation.Processes["mariadb"] > 0 { logger.Info("database is running, scaling not necessary") return nil } // Copy processes and increase database processes. 
processes := make(map[string]int, len(formation.Processes)) for k, v := range formation.Processes { processes[k] = v } if os.Getenv("SINGLETON") == "true" { processes["mariadb"] = 1 } else { processes["mariadb"] = 3 } // Update formation. logger.Info("updating formation", "app", app, "release_id", release.ID) formation.Processes = processes if err := client.PutFormation(formation); err != nil { logger.Error("put formation error", "app", app, "release_id", release.ID, "err", err) return err } if err := sc.WaitForReadWrite(5 * time.Minute); err != nil { logger.Error("wait for read write", "err", err) return errors.New("timed out while starting mariadb cluster") } logger.Info("scaling complete") // Mark as successfully scaled up. a.scaledUp = true return nil }
func (s *CLISuite) TestDockerExportImport(t *c.C) { // release via docker-receive client := s.controllerClient(t) app := &ct.App{Name: "cli-test-docker-export"} t.Assert(client.CreateApp(app), c.IsNil) repo := "cli-test-export" s.buildDockerImage(t, repo, `CMD ["/bin/pingserv"]`) t.Assert(flynn(t, "/", "-a", app.Name, "docker", "push", repo), Succeeds) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "app=1"), Succeeds) defer flynn(t, "/", "-a", app.Name, "scale", "app=0") // grab the Flynn image layers release, err := client.GetAppRelease(app.ID) t.Assert(err, c.IsNil) artifact, err := client.GetArtifact(release.ArtifactIDs[0]) t.Assert(err, c.IsNil) layers := artifact.Manifest().Rootfs[0].Layers layerNames := make([]string, len(layers)) for i, layer := range layers { layerNames[i] = layer.ID + ".layer" } // check exporting to stdout works file := filepath.Join(t.MkDir(), "export.tar") cmd := exec.Command("sh", "-c", fmt.Sprintf("%s -a %s export > %s", args.CLI, app.Name, file)) cmd.Env = flynnEnv(flynnrc) var stderr bytes.Buffer cmd.Stderr = &stderr if args.Stream { cmd.Stderr = io.MultiWriter(os.Stderr, &stderr) } if err := cmd.Run(); err != nil { t.Fatalf("error exporting docker app to stdout: %s: %s", err, stderr.String()) } exportFiles := append([]string{ "app.json", "routes.json", "release.json", "artifacts.json", }, append(layerNames, "formation.json")...) assertExportContains(t, file, exportFiles...) // export the app directly to the file t.Assert(flynn(t, "/", "-a", app.Name, "export", "-f", file), Succeeds) assertExportContains(t, file, exportFiles...) 
// delete the image from the registry u, err := url.Parse(s.clusterConf(t).DockerPushURL) t.Assert(err, c.IsNil) uri := fmt.Sprintf("http://%s/v2/%s/manifests/%s", u.Host, app.Name, artifact.Meta["docker-receive.digest"]) req, err := http.NewRequest("DELETE", uri, nil) req.SetBasicAuth("", s.clusterConf(t).Key) t.Assert(err, c.IsNil) res, err := http.DefaultClient.Do(req) t.Assert(err, c.IsNil) res.Body.Close() // import to another app importApp := "cli-test-docker-import" t.Assert(flynn(t, "/", "import", "--name", importApp, "--file", file), Succeeds) defer flynn(t, "/", "-a", importApp, "scale", "app=0") // wait for it to start _, err = s.discoverdClient(t).Instances(importApp+"-web", 10*time.Second) t.Assert(err, c.IsNil) }
// main drives a git-push deploy: it runs the slugbuilder image against the
// pushed code (uploading the slug to the blobstore), creates a release based
// on the previous one with the detected process types, deploys it, and — if
// needsDefaultScale says so — scales web=1 and waits for the job to come up.
// Invoked with the app name as os.Args[1]; all failures exit via log.Fatal*.
func main() {
	client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY"))
	if err != nil {
		log.Fatalln("Unable to connect to controller:", err)
	}
	appName := os.Args[1]
	app, err := client.GetApp(appName)
	if err == controller.ErrNotFound {
		log.Fatalf("Unknown app %q", appName)
	} else if err != nil {
		log.Fatalln("Error retrieving app:", err)
	}
	// A missing release just means this is the app's first deploy; start from
	// an empty release so Env/Processes lookups below are safe.
	prevRelease, err := client.GetAppRelease(app.Name)
	if err == controller.ErrNotFound {
		prevRelease = &ct.Release{}
	} else if err != nil {
		log.Fatalln("Error getting current app release:", err)
	}

	fmt.Printf("-----> Building %s...\n", app.Name)

	// Run the slugbuilder job, capturing its stdout so the declared process
	// types can be parsed out of the build output afterwards.
	var output bytes.Buffer
	slugURL := fmt.Sprintf("%s/%s.tgz", blobstoreURL, random.UUID())
	cmd := exec.Command(exec.DockerImage(os.Getenv("SLUGBUILDER_IMAGE_URI")), slugURL)
	cmd.Stdout = io.MultiWriter(os.Stdout, &output)
	cmd.Stderr = os.Stderr
	// Tag the job so the controller can associate it with this app/release.
	cmd.Meta = map[string]string{
		"flynn-controller.app":      app.ID,
		"flynn-controller.app_name": app.Name,
		"flynn-controller.release":  prevRelease.ID,
		"flynn-controller.type":     "slugbuilder",
	}
	if len(prevRelease.Env) > 0 {
		// Append the previous release env on to the source stream so the
		// buildpack sees it during the build.
		stdin, err := cmd.StdinPipe()
		if err != nil {
			log.Fatalln(err)
		}
		go appendEnvDir(os.Stdin, stdin, prevRelease.Env)
	} else {
		cmd.Stdin = os.Stdin
	}
	cmd.Env = make(map[string]string)
	cmd.Env["BUILD_CACHE_URL"] = fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID)
	if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok {
		cmd.Env["BUILDPACK_URL"] = buildpackURL
	}
	// Forward SSH credentials into the build environment when present.
	for _, k := range []string{"SSH_CLIENT_KEY", "SSH_CLIENT_HOSTS"} {
		if v := os.Getenv(k); v != "" {
			cmd.Env[k] = v
		}
	}
	if err := cmd.Run(); err != nil {
		log.Fatalln("Build failed:", err)
	}

	// Extract the process types the build declared (e.g. from the Procfile).
	var types []string
	if match := typesPattern.FindSubmatch(output.Bytes()); match != nil {
		types = strings.Split(string(match[1]), ", ")
	}

	fmt.Printf("-----> Creating release...\n")

	artifact := &ct.Artifact{Type: "docker", URI: os.Getenv("SLUGRUNNER_IMAGE_URI")}
	if err := client.CreateArtifact(artifact); err != nil {
		log.Fatalln("Error creating artifact:", err)
	}

	release := &ct.Release{
		ArtifactID: artifact.ID,
		Env:        prevRelease.Env,
	}
	// Carry each process type forward from the previous release, overriding
	// the command; the web type additionally gets a health-checked TCP port.
	procs := make(map[string]ct.ProcessType)
	for _, t := range types {
		proc := prevRelease.Processes[t]
		proc.Cmd = []string{"start", t}
		if t == "web" {
			proc.Ports = []ct.Port{{
				Port:  8080,
				Proto: "tcp",
				Service: &host.Service{
					Name:   app.Name + "-web",
					Create: true,
					Check:  &host.HealthCheck{Type: "tcp"},
				},
			}}
		}
		procs[t] = proc
	}
	release.Processes = procs
	if release.Env == nil {
		release.Env = make(map[string]string)
	}
	release.Env["SLUG_URL"] = slugURL

	if err := client.CreateRelease(release); err != nil {
		log.Fatalln("Error creating release:", err)
	}
	if err := client.DeployAppRelease(app.Name, release.ID); err != nil {
		log.Fatalln("Error deploying app release:", err)
	}
	fmt.Println("=====> Application deployed")

	if needsDefaultScale(app.ID, prevRelease.ID, procs, client) {
		formation := &ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: map[string]int{"web": 1},
		}

		// Start watching job events before scaling so no event is missed.
		watcher, err := client.WatchJobEvents(app.ID, release.ID)
		if err != nil {
			log.Fatalln("Error streaming job events", err)
			return
		}
		defer watcher.Close()

		if err := client.PutFormation(formation); err != nil {
			log.Fatalln("Error putting formation:", err)
		}
		fmt.Println("=====> Waiting for web job to start...")

		// Block until one web job is up, reporting progress; treat down or
		// crashed as a failed scale.
		err = watcher.WaitFor(ct.JobEvents{"web": {"up": 1}}, scaleTimeout, func(e *ct.Job) error {
			switch e.State {
			case "up":
				fmt.Println("=====> Default web formation scaled to 1")
			case "down", "crashed":
				return fmt.Errorf("Failed to scale web process type")
			}
			return nil
		})
		if err != nil {
			log.Fatalln(err.Error())
		}
	}
}
// main builds a pushed app with the slugbuilder image, creates and deploys a
// new release, and — only on the app's very first release — adds a default
// web=1 formation. Takes the app name as os.Args[1]; errors exit the process
// via log.Fatal*.
func main() {
	client, err := controller.NewClient("", os.Getenv("CONTROLLER_AUTH_KEY"))
	if err != nil {
		log.Fatalln("Unable to connect to controller:", err)
	}
	appName := os.Args[1]
	app, err := client.GetApp(appName)
	if err == controller.ErrNotFound {
		log.Fatalf("Unknown app %q", appName)
	} else if err != nil {
		log.Fatalln("Error retrieving app:", err)
	}
	// First deploy: fall back to an empty release so Env/Processes are usable.
	prevRelease, err := client.GetAppRelease(app.Name)
	if err == controller.ErrNotFound {
		prevRelease = &ct.Release{}
	} else if err != nil {
		log.Fatalln("Error getting current app release:", err)
	}

	fmt.Printf("-----> Building %s...\n", app.Name)

	// Run the slugbuilder, teeing stdout so process types can be parsed from
	// the build output below.
	var output bytes.Buffer
	slugURL := fmt.Sprintf("%s/%s.tgz", blobstoreURL, random.UUID())
	cmd := exec.Command(exec.DockerImage(os.Getenv("SLUGBUILDER_IMAGE_URI")), slugURL)
	cmd.Stdout = io.MultiWriter(os.Stdout, &output)
	cmd.Stderr = os.Stderr
	if len(prevRelease.Env) > 0 {
		// Stream the previous release env into the build alongside the source.
		stdin, err := cmd.StdinPipe()
		if err != nil {
			log.Fatalln(err)
		}
		go appendEnvDir(os.Stdin, stdin, prevRelease.Env)
	} else {
		cmd.Stdin = os.Stdin
	}
	cmd.Env = make(map[string]string)
	cmd.Env["BUILD_CACHE_URL"] = fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID)
	if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok {
		cmd.Env["BUILDPACK_URL"] = buildpackURL
	}
	if err := cmd.Run(); err != nil {
		log.Fatalln("Build failed:", err)
	}

	// Parse the declared process types out of the build output.
	var types []string
	if match := typesPattern.FindSubmatch(output.Bytes()); match != nil {
		types = strings.Split(string(match[1]), ", ")
	}

	fmt.Printf("-----> Creating release...\n")

	artifact := &ct.Artifact{Type: "docker", URI: os.Getenv("SLUGRUNNER_IMAGE_URI")}
	if err := client.CreateArtifact(artifact); err != nil {
		log.Fatalln("Error creating artifact:", err)
	}

	release := &ct.Release{
		ArtifactID: artifact.ID,
		Env:        prevRelease.Env,
	}
	// Carry process types forward, overriding the command; web also gets a
	// health-checked TCP service port.
	procs := make(map[string]ct.ProcessType)
	for _, t := range types {
		proc := prevRelease.Processes[t]
		proc.Cmd = []string{"start", t}
		if t == "web" {
			proc.Ports = []ct.Port{{
				Port:  8080,
				Proto: "tcp",
				Service: &host.Service{
					Name:   app.Name + "-web",
					Create: true,
					Check:  &host.HealthCheck{Type: "tcp"},
				},
			}}
		}
		procs[t] = proc
	}
	release.Processes = procs
	if release.Env == nil {
		release.Env = make(map[string]string)
	}
	release.Env["SLUG_URL"] = slugURL

	if err := client.CreateRelease(release); err != nil {
		log.Fatalln("Error creating release:", err)
	}
	if err := client.DeployAppRelease(app.Name, release.ID); err != nil {
		log.Fatalln("Error deploying app release:", err)
	}
	fmt.Println("=====> Application deployed")

	// If the app is new and the web process type exists,
	// it should scale to one process after the release is created.
	if _, ok := procs["web"]; ok && prevRelease.ID == "" {
		formation := &ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: map[string]int{"web": 1},
		}
		if err := client.PutFormation(formation); err != nil {
			log.Fatalln("Error putting formation:", err)
		}
		fmt.Println("=====> Added default web=1 formation")
	}
}
// TestReleaseImages (older pipeline variant) releases components to a
// blobstore on one host, installs Flynn from it on a vanilla host, checks the
// installed version and downloaded images, runs a cluster update from the
// blobstore, and verifies every system app was redeployed onto the newly
// released image IDs from version.json.
func (s *ReleaseSuite) TestReleaseImages(t *c.C) {
	if testCluster == nil {
		t.Skip("cannot boot release cluster")
	}

	// stream script output to t.Log
	logReader, logWriter := io.Pipe()
	defer logWriter.Close()
	go func() {
		buf := bufio.NewReader(logReader)
		for {
			line, err := buf.ReadString('\n')
			if err != nil {
				return
			}
			// drop the trailing newline before logging
			debug(t, line[0:len(line)-1])
		}
	}()

	// boot the release cluster, release components to a blobstore and output the new version.json
	releaseCluster := s.addReleaseHosts(t)
	buildHost := releaseCluster.Instances[0]
	var versionJSON bytes.Buffer
	t.Assert(buildHost.Run("bash -ex", &tc.Streams{Stdin: releaseScript, Stdout: &versionJSON, Stderr: logWriter}), c.IsNil)
	var versions map[string]string
	t.Assert(json.Unmarshal(versionJSON.Bytes(), &versions), c.IsNil)

	// install Flynn from the blobstore on the vanilla host
	blobstore := struct{ Blobstore string }{buildHost.IP + ":8080"}
	installHost := releaseCluster.Instances[3]
	var script bytes.Buffer
	installScript.Execute(&script, blobstore)
	var installOutput bytes.Buffer
	out := io.MultiWriter(logWriter, &installOutput)
	t.Assert(installHost.Run("sudo bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out}), c.IsNil)

	// check the flynn-host version is correct
	var hostVersion bytes.Buffer
	t.Assert(installHost.Run("flynn-host version", &tc.Streams{Stdout: &hostVersion}), c.IsNil)
	t.Assert(strings.TrimSpace(hostVersion.String()), c.Equals, "v20150131.0-test")

	// check rebuilt images were downloaded
	for name, id := range versions {
		expected := fmt.Sprintf("%s image %s downloaded", name, id)
		if !strings.Contains(installOutput.String(), expected) {
			t.Fatalf(`expected install to download %s %s`, name, id)
		}
	}

	// run a cluster update from the blobstore
	updateHost := releaseCluster.Instances[1]
	script = bytes.Buffer{}
	updateScript.Execute(&script, blobstore)
	var updateOutput bytes.Buffer
	out = io.MultiWriter(logWriter, &updateOutput)
	t.Assert(updateHost.Run("bash -ex", &tc.Streams{Stdin: &script, Stdout: out, Stderr: out}), c.IsNil)

	// check rebuilt images were downloaded
	for name := range versions {
		for _, host := range releaseCluster.Instances[0:2] {
			expected := fmt.Sprintf(`"pulled image" host=%s name=%s`, host.ID, name)
			if !strings.Contains(updateOutput.String(), expected) {
				t.Fatalf(`expected update to download %s on host %s`, name, host.ID)
			}
		}
	}

	// create a controller client for the new cluster
	pin, err := base64.StdEncoding.DecodeString(releaseCluster.ControllerPin)
	t.Assert(err, c.IsNil)
	client, err := controller.NewClientWithConfig(
		"https://"+buildHost.IP,
		releaseCluster.ControllerKey,
		controller.Config{Pin: pin, Domain: releaseCluster.ControllerDomain},
	)
	t.Assert(err, c.IsNil)

	// check system apps were deployed correctly
	for _, app := range updater.SystemApps {
		// gitreceive is built from the flynn/receiver image, not flynn/gitreceive
		image := "flynn/" + app
		if app == "gitreceive" {
			image = "flynn/receiver"
		}
		debugf(t, "checking new %s release is using image %s", app, versions[image])
		expected := fmt.Sprintf(`"finished deploy of system app" name=%s`, app)
		if !strings.Contains(updateOutput.String(), expected) {
			t.Fatalf(`expected update to deploy %s`, app)
		}
		release, err := client.GetAppRelease(app)
		t.Assert(err, c.IsNil)
		debugf(t, "new %s release ID: %s", app, release.ID)
		artifact, err := client.GetArtifact(release.ArtifactID)
		t.Assert(err, c.IsNil)
		debugf(t, "new %s artifact: %+v", app, artifact)
		// the image version is carried in the artifact URI's "id" query param
		uri, err := url.Parse(artifact.URI)
		t.Assert(err, c.IsNil)
		t.Assert(uri.Query().Get("id"), c.Equals, versions[image])
	}
}
func main() { client, err := controller.NewClient("", os.Getenv("CONTROLLER_AUTH_KEY")) if err != nil { log.Fatalln("Unable to connect to controller:", err) } // TODO: use discoverd http dialer here? services, err := discoverd.Services("shelf", discoverd.DefaultTimeout) if err != nil || len(services) < 1 { log.Fatalf("Unable to discover shelf %q", err) } shelfHost := services[0].Addr app := os.Args[1] commit := os.Args[2] _, err = client.GetApp(app) if err == controller.ErrNotFound { log.Fatalf("Unknown app %q", app) } else if err != nil { log.Fatalln("Error retrieving app:", err) } prevRelease, err := client.GetAppRelease(app) if err == controller.ErrNotFound { prevRelease = &ct.Release{} } else if err != nil { log.Fatalln("Error creating getting current app release:", err) } fmt.Printf("-----> Building %s...\n", app) var output bytes.Buffer slugURL := fmt.Sprintf("http://%s/%s.tgz", shelfHost, commit) cmd := exec.Command("flynn/slugbuilder", slugURL) cmd.Stdout = io.MultiWriter(os.Stdout, &output) cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok { cmd.Env = map[string]string{"BUILDPACK_URL": buildpackURL} } if err := cmd.Run(); err != nil { log.Fatalln("Build failed:", err) } var types []string if match := typesPattern.FindSubmatch(output.Bytes()); match != nil { types = strings.Split(string(match[1]), ", ") } fmt.Printf("-----> Creating release...\n") artifact := &ct.Artifact{URI: "docker://flynn/slugrunner"} if err := client.CreateArtifact(artifact); err != nil { log.Fatalln("Error creating artifact:", err) } release := &ct.Release{ ArtifactID: artifact.ID, Env: prevRelease.Env, } procs := make(map[string]ct.ProcessType) for _, t := range types { proc := prevRelease.Processes[t] proc.Cmd = []string{"start", t} if t == "web" { proc.Ports.TCP = 1 if proc.Env == nil { proc.Env = make(map[string]string) } proc.Env["SD_NAME"] = app + "-web" } procs[t] = proc } release.Processes = procs if release.Env == nil { 
release.Env = make(map[string]string) } release.Env["SLUG_URL"] = slugURL if err := client.CreateRelease(release); err != nil { log.Fatalln("Error creating release:", err) } if err := client.SetAppRelease(app, release.ID); err != nil { log.Fatalln("Error setting app release:", err) } fmt.Println("=====> Application deployed") }
// TestDeployController deploys a copy of the current controller release and
// verifies the deploy completes and that afterwards exactly the expected
// controller jobs (web=2, deployer=2, scheduler=cluster size) are running on
// the new release across all hosts. The event stream is created via a closure
// because the controller restarting mid-deploy can close the stream, which
// then needs to be re-established.
func (s *SchedulerSuite) TestDeployController(t *c.C) {
	if testCluster == nil {
		t.Skip("cannot determine test cluster size")
	}

	// get the current controller release
	client := s.controllerClient(t)
	app, err := client.GetApp("controller")
	t.Assert(err, c.IsNil)
	release, err := client.GetAppRelease(app.ID)
	t.Assert(err, c.IsNil)

	// create a controller deployment
	// clearing the ID makes CreateRelease mint a fresh release with identical contents
	release.ID = ""
	t.Assert(client.CreateRelease(release), c.IsNil)
	deployment, err := client.CreateDeployment(app.ID, release.ID)
	t.Assert(err, c.IsNil)

	// use a function to create the event stream as a new stream will be needed
	// after deploying the controller
	var events chan *ct.DeploymentEvent
	var eventStream stream.Stream
	connectStream := func() {
		events = make(chan *ct.DeploymentEvent)
		// retry for up to 10s since the controller may be restarting
		err := attempt.Strategy{
			Total: 10 * time.Second,
			Delay: 500 * time.Millisecond,
		}.Run(func() (err error) {
			eventStream, err = client.StreamDeployment(deployment.ID, events)
			return
		})
		t.Assert(err, c.IsNil)
	}
	connectStream()
	defer eventStream.Close()

	// wait for the deploy to complete (this doesn't wait for specific events
	// due to the fact that when the deployer deploys itself, some events will
	// not get sent)
loop:
	for {
		select {
		case e, ok := <-events:
			if !ok {
				// reconnect the stream as it may have been closed
				// due to the controller being deployed
				debug(t, "reconnecting deployment event stream")
				connectStream()
				continue
			}
			debugf(t, "got deployment event: %s %s", e.JobType, e.JobState)
			switch e.Status {
			case "complete":
				break loop
			case "failed":
				t.Fatal("the deployment failed")
			}
		case <-time.After(60 * time.Second):
			t.Fatal("timed out waiting for the deploy to complete")
		}
	}

	// check the correct controller jobs are running
	hosts, err := s.clusterClient(t).ListHosts()
	t.Assert(err, c.IsNil)
	// tally running controller jobs as release ID -> process type -> count
	actual := make(map[string]map[string]int)
	for _, host := range hosts {
		for _, job := range host.Jobs {
			appID := job.Metadata["flynn-controller.app"]
			if appID != app.ID {
				continue
			}
			releaseID := job.Metadata["flynn-controller.release"]
			if _, ok := actual[releaseID]; !ok {
				actual[releaseID] = make(map[string]int)
			}
			typ := job.Metadata["flynn-controller.type"]
			actual[releaseID][typ]++
		}
	}
	// only the new release should have jobs, with these exact counts
	expected := map[string]map[string]int{release.ID: {
		"web":       2,
		"deployer":  2,
		"scheduler": testCluster.Size(),
	}}
	t.Assert(actual, c.DeepEquals, expected)
}